Merge branch 'master' of https://github.com/grpc/grpc into channel-tracing

reviewable/pr13883/r1
ncteisen 7 years ago
commit b04efac6a5
  1. CMakeLists.txt (2)
  2. Makefile (4)
  3. Rakefile (4)
  4. binding.gyp (994)
  5. build.yaml (1)
  6. doc/PROTOCOL-WEB.md (12)
  7. examples/python/interceptors/headers/header_manipulator_client_interceptor.py (12)
  8. examples/python/multiplex/multiplex_client.py (10)
  9. examples/python/multiplex/multiplex_server.py (4)
  10. examples/python/multiplex/run_codegen.py (18)
  11. examples/python/route_guide/route_guide_client.py (9)
  12. examples/python/route_guide/route_guide_server.py (4)
  13. examples/python/route_guide/run_codegen.py (9)
  14. gRPC-Core.podspec (1)
  15. grpc.gyp (5)
  16. include/grpc++/impl/codegen/client_unary_call.h (1)
  17. include/grpc/impl/codegen/port_platform.h (34)
  18. package.json (103)
  19. src/core/ext/filters/client_channel/backup_poller.cc (6)
  20. src/core/ext/filters/client_channel/channel_connectivity.cc (6)
  21. src/core/ext/filters/client_channel/client_channel.cc (71)
  22. src/core/ext/filters/client_channel/lb_policy.cc (95)
  23. src/core/ext/filters/client_channel/lb_policy.h (90)
  24. src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc (6)
  25. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (712)
  26. src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc (89)
  27. src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc (122)
  28. src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc (4)
  29. src/core/ext/filters/client_channel/subchannel.cc (6)
  30. src/core/ext/filters/http/client/http_client_filter.cc (10)
  31. src/core/ext/filters/http/message_compress/message_compress_filter.cc (14)
  32. src/core/ext/filters/http/server/http_server_filter.cc (10)
  33. src/core/ext/filters/load_reporting/server_load_reporting_filter.cc (10)
  34. src/core/ext/filters/max_age/max_age_filter.cc (6)
  35. src/core/ext/filters/message_size/message_size_filter.cc (10)
  36. src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc (6)
  37. src/core/ext/transport/chttp2/transport/writing.cc (5)
  38. src/core/lib/iomgr/ev_epoll1_linux.cc (2)
  39. src/core/lib/iomgr/ev_epollex_linux.cc (2)
  40. src/core/lib/iomgr/ev_epollsig_linux.cc (2)
  41. src/core/lib/iomgr/ev_poll_posix.cc (30)
  42. src/core/lib/iomgr/tcp_posix.cc (10)
  43. src/core/lib/iomgr/tcp_uv.cc (30)
  44. src/core/lib/iomgr/wakeup_fd_cv.cc (8)
  45. src/core/lib/iomgr/wakeup_fd_cv.h (24)
  46. src/core/lib/security/transport/client_auth_filter.cc (10)
  47. src/core/lib/security/transport/server_auth_filter.cc (14)
  48. src/core/lib/surface/call.cc (5)
  49. src/core/lib/surface/server.cc (24)
  50. src/core/tsi/ssl_transport_security.cc (3)
  51. src/csharp/Grpc.Auth/Grpc.Auth.csproj (4)
  52. src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj (4)
  53. src/csharp/Grpc.Core/Grpc.Core.csproj (4)
  54. src/csharp/Grpc.Core/SourceLink.csproj.include (19)
  55. src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj (4)
  56. src/csharp/Grpc.Reflection/Grpc.Reflection.csproj (4)
  57. src/node/health_check/package.json (29)
  58. src/node/tools/package.json (41)
  59. src/objective-c/GRPCClient/GRPCCall+ChannelArg.h (10)
  60. src/objective-c/GRPCClient/GRPCCall+ChannelArg.m (21)
  61. src/objective-c/GRPCClient/private/GRPCHost.h (3)
  62. src/objective-c/GRPCClient/private/GRPCHost.m (7)
  63. src/objective-c/tests/InteropTests.m (34)
  64. src/objective-c/tests/RemoteTestClient/messages.proto (68)
  65. src/python/grpcio/commands.py (24)
  66. src/python/grpcio/grpc/__init__.py (74)
  67. src/python/grpcio/grpc/_auth.py (4)
  68. src/python/grpcio/grpc/_channel.py (148)
  69. src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi (15)
  70. src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi (6)
  71. src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi (42)
  72. src/python/grpcio/grpc/_cython/_cygrpc/event.pxd.pxi (45)
  73. src/python/grpcio/grpc/_cython/_cygrpc/event.pyx.pxi (55)
  74. src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi (24)
  75. src/python/grpcio/grpc/_cython/_cygrpc/operation.pxd.pxi (109)
  76. src/python/grpcio/grpc/_cython/_cygrpc/operation.pyx.pxi (238)
  77. src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi (58)
  78. src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi (284)
  79. src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi (29)
  80. src/python/grpcio/grpc/_cython/_cygrpc/tag.pxd.pxi (58)
  81. src/python/grpcio/grpc/_cython/_cygrpc/tag.pyx.pxi (87)
  82. src/python/grpcio/grpc/_cython/cygrpc.pxd (3)
  83. src/python/grpcio/grpc/_cython/cygrpc.pyx (3)
  84. src/python/grpcio/grpc/_interceptor.py (7)
  85. src/python/grpcio/grpc/_plugin_wrapping.py (9)
  86. src/python/grpcio/grpc/_server.py (126)
  87. src/python/grpcio/grpc/_utilities.py (12)
  88. src/python/grpcio/grpc/beta/_client_adaptations.py (117)
  89. src/python/grpcio/grpc/beta/_metadata.py (5)
  90. src/python/grpcio/grpc/beta/_server_adaptations.py (75)
  91. src/python/grpcio/grpc/beta/implementations.py (4)
  92. src/python/grpcio/grpc/framework/foundation/callable_util.py (4)
  93. src/python/grpcio/grpc/framework/interfaces/base/utilities.py (15)
  94. src/python/grpcio/grpc/framework/interfaces/face/face.py (15)
  95. src/python/grpcio_health_checking/health_commands.py (6)
  96. src/python/grpcio_health_checking/setup.py (6)
  97. src/python/grpcio_reflection/grpc_reflection/v1alpha/reflection.py (15)
  98. src/python/grpcio_reflection/setup.py (6)
  99. src/python/grpcio_testing/grpc_testing/_channel/_multi_callable.py (32)
  100. src/python/grpcio_testing/grpc_testing/_channel/_rpc_state.py (4)
  101. Some files were not shown because too many files have changed in this diff.

@@ -4628,6 +4628,7 @@ add_library(end2end_tests
test/core/end2end/tests/filter_call_init_fails.cc
test/core/end2end/tests/filter_causes_close.cc
test/core/end2end/tests/filter_latency.cc
test/core/end2end/tests/filter_status_code.cc
test/core/end2end/tests/graceful_server_shutdown.cc
test/core/end2end/tests/high_initial_seqno.cc
test/core/end2end/tests/hpack_size.cc
@@ -4729,6 +4730,7 @@ add_library(end2end_nosec_tests
test/core/end2end/tests/filter_call_init_fails.cc
test/core/end2end/tests/filter_causes_close.cc
test/core/end2end/tests/filter_latency.cc
test/core/end2end/tests/filter_status_code.cc
test/core/end2end/tests/graceful_server_shutdown.cc
test/core/end2end/tests/high_initial_seqno.cc
test/core/end2end/tests/hpack_size.cc

@@ -327,7 +327,7 @@ CXXFLAGS += -std=c++11
ifeq ($(SYSTEM),Darwin)
CXXFLAGS += -stdlib=libc++
endif
CPPFLAGS += -g -Wall -Wextra -Werror -Wno-long-long -Wno-unused-parameter -DOSATOMIC_USE_INLINED=1
CPPFLAGS += -g -Wall -Wextra -Werror -Wno-long-long -Wno-unused-parameter -DOSATOMIC_USE_INLINED=1 -Wno-deprecated-declarations
COREFLAGS += -fno-rtti -fno-exceptions
LDFLAGS += -g
@@ -8570,6 +8570,7 @@ LIBEND2END_TESTS_SRC = \
test/core/end2end/tests/filter_call_init_fails.cc \
test/core/end2end/tests/filter_causes_close.cc \
test/core/end2end/tests/filter_latency.cc \
test/core/end2end/tests/filter_status_code.cc \
test/core/end2end/tests/graceful_server_shutdown.cc \
test/core/end2end/tests/high_initial_seqno.cc \
test/core/end2end/tests/hpack_size.cc \
@@ -8668,6 +8669,7 @@ LIBEND2END_NOSEC_TESTS_SRC = \
test/core/end2end/tests/filter_call_init_fails.cc \
test/core/end2end/tests/filter_causes_close.cc \
test/core/end2end/tests/filter_latency.cc \
test/core/end2end/tests/filter_status_code.cc \
test/core/end2end/tests/graceful_server_shutdown.cc \
test/core/end2end/tests/high_initial_seqno.cc \
test/core/end2end/tests/hpack_size.cc \

@@ -113,10 +113,10 @@ task 'gem:native' do
if RUBY_PLATFORM =~ /darwin/
FileUtils.touch 'grpc_c.32.ruby'
FileUtils.touch 'grpc_c.64.ruby'
system "rake cross native gem RUBY_CC_VERSION=2.4.0:2.3.0:2.2.2:2.1.5:2.0.0 V=#{verbose} GRPC_CONFIG=#{grpc_config}"
system "rake cross native gem RUBY_CC_VERSION=2.5.0:2.4.0:2.3.0:2.2.2:2.1.6:2.0.0 V=#{verbose} GRPC_CONFIG=#{grpc_config}"
else
Rake::Task['dlls'].execute
docker_for_windows "gem update --system && bundle && rake cross native gem RUBY_CC_VERSION=2.4.0:2.3.0:2.2.2:2.1.5:2.0.0 V=#{verbose} GRPC_CONFIG=#{grpc_config}"
docker_for_windows "gem update --system && bundle && rake cross native gem RUBY_CC_VERSION=2.5.0:2.4.0:2.3.0:2.2.2:2.1.6:2.0.0 V=#{verbose} GRPC_CONFIG=#{grpc_config}"
end
end

@@ -1,994 +0,0 @@
# GRPC Node gyp file
# This currently builds the Node extension and dependencies
# This file has been automatically generated from a template file.
# Please look at the templates directory instead.
# This file can be regenerated from the template by running
# tools/buildgen/generate_projects.sh
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Some of this file is built with the help of
# https://n8.io/converting-a-c-library-to-gyp/
{
'variables': {
'runtime%': 'node',
# Some Node installations use the system installation of OpenSSL, and on
# some systems, the system OpenSSL still does not have ALPN support. This
# will let users recompile gRPC to work without ALPN.
'grpc_alpn%': 'true',
# Indicates that the library should be built with gcov.
'grpc_gcov%': 'false',
# Indicates that the library should be built with compatibility for musl
# libc, so that it can run on Alpine Linux. This is only necessary if not
# building on Alpine Linux
'grpc_alpine%': 'false'
},
'target_defaults': {
'configurations': {
'Release': {
'cflags': [
'-O2',
],
'defines': [
'NDEBUG',
],
},
'Debug': {
'cflags': [
'-O0',
],
'defines': [
'_DEBUG',
'DEBUG',
],
},
},
'cflags': [
'-g',
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-unused-parameter',
'-DOSATOMIC_USE_INLINED=1',
],
'ldflags': [
'-g',
],
'cflags_c': [
'-Werror',
'-std=c99'
],
'cflags_cc': [
'-Werror',
'-std=c++11'
],
'include_dirs': [
'.',
'include'
],
'defines': [
'GPR_BACKWARDS_COMPATIBILITY_MODE',
'GRPC_ARES=0',
'GRPC_UV'
],
'conditions': [
['grpc_gcov=="true"', {
'cflags': [
'-O0',
'-fprofile-arcs',
'-ftest-coverage',
'-Wno-return-type',
],
'defines': [
'_DEBUG',
'DEBUG',
'GPR_GCOV',
],
'ldflags': [
'-fprofile-arcs',
'-ftest-coverage',
'-rdynamic',
],
}],
['grpc_alpine=="true"', {
'defines': [
'GPR_MUSL_LIBC_COMPAT'
]
}],
['OS!="win" and runtime=="electron"', {
"defines": [
'OPENSSL_NO_THREADS'
]
}],
# This is the condition for using boringssl
['OS=="win" or runtime=="electron"', {
"include_dirs": [
"third_party/boringssl/include"
],
"defines": [
'OPENSSL_NO_ASM'
]
}, {
'conditions': [
["target_arch=='ia32'", {
"include_dirs": [ "<(node_root_dir)/deps/openssl/config/piii" ]
}],
["target_arch=='x64'", {
"include_dirs": [ "<(node_root_dir)/deps/openssl/config/k8" ]
}],
["target_arch=='arm'", {
"include_dirs": [ "<(node_root_dir)/deps/openssl/config/arm" ]
}],
['grpc_alpn=="true"', {
'defines': [
'TSI_OPENSSL_ALPN_SUPPORT=1'
],
}, {
'defines': [
'TSI_OPENSSL_ALPN_SUPPORT=0'
],
}]
],
'include_dirs': [
'<(node_root_dir)/deps/openssl/openssl/include',
]
}],
['OS == "win"', {
"include_dirs": [
"third_party/zlib",
"third_party/cares/cares"
],
"defines": [
'_WIN32_WINNT=0x0600',
'WIN32_LEAN_AND_MEAN',
'_HAS_EXCEPTIONS=0',
'UNICODE',
'_UNICODE',
'NOMINMAX',
],
"msvs_settings": {
'VCCLCompilerTool': {
'RuntimeLibrary': 1, # static debug
}
},
"libraries": [
"ws2_32"
]
}, { # OS != "win"
'include_dirs': [
'<(node_root_dir)/deps/zlib',
'<(node_root_dir)/deps/cares/include'
]
}],
['OS == "mac"', {
'xcode_settings': {
'OTHER_CFLAGS': [
'-g',
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-unused-parameter',
'-DOSATOMIC_USE_INLINED=1',
],
'OTHER_CPLUSPLUSFLAGS': [
'-g',
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-unused-parameter',
'-DOSATOMIC_USE_INLINED=1',
'-stdlib=libc++',
'-std=c++11',
'-Wno-error=deprecated-declarations'
],
},
}]
]
},
'conditions': [
['OS=="win" or runtime=="electron"', {
'targets': [
{
'target_name': 'boringssl',
'product_prefix': 'lib',
'type': 'static_library',
'dependencies': [
],
'sources': [
'src/boringssl/err_data.c',
'third_party/boringssl/crypto/aes/aes.c',
'third_party/boringssl/crypto/aes/key_wrap.c',
'third_party/boringssl/crypto/aes/mode_wrappers.c',
'third_party/boringssl/crypto/asn1/a_bitstr.c',
'third_party/boringssl/crypto/asn1/a_bool.c',
'third_party/boringssl/crypto/asn1/a_d2i_fp.c',
'third_party/boringssl/crypto/asn1/a_dup.c',
'third_party/boringssl/crypto/asn1/a_enum.c',
'third_party/boringssl/crypto/asn1/a_gentm.c',
'third_party/boringssl/crypto/asn1/a_i2d_fp.c',
'third_party/boringssl/crypto/asn1/a_int.c',
'third_party/boringssl/crypto/asn1/a_mbstr.c',
'third_party/boringssl/crypto/asn1/a_object.c',
'third_party/boringssl/crypto/asn1/a_octet.c',
'third_party/boringssl/crypto/asn1/a_print.c',
'third_party/boringssl/crypto/asn1/a_strnid.c',
'third_party/boringssl/crypto/asn1/a_time.c',
'third_party/boringssl/crypto/asn1/a_type.c',
'third_party/boringssl/crypto/asn1/a_utctm.c',
'third_party/boringssl/crypto/asn1/a_utf8.c',
'third_party/boringssl/crypto/asn1/asn1_lib.c',
'third_party/boringssl/crypto/asn1/asn1_par.c',
'third_party/boringssl/crypto/asn1/asn_pack.c',
'third_party/boringssl/crypto/asn1/f_enum.c',
'third_party/boringssl/crypto/asn1/f_int.c',
'third_party/boringssl/crypto/asn1/f_string.c',
'third_party/boringssl/crypto/asn1/t_bitst.c',
'third_party/boringssl/crypto/asn1/tasn_dec.c',
'third_party/boringssl/crypto/asn1/tasn_enc.c',
'third_party/boringssl/crypto/asn1/tasn_fre.c',
'third_party/boringssl/crypto/asn1/tasn_new.c',
'third_party/boringssl/crypto/asn1/tasn_typ.c',
'third_party/boringssl/crypto/asn1/tasn_utl.c',
'third_party/boringssl/crypto/asn1/time_support.c',
'third_party/boringssl/crypto/asn1/x_bignum.c',
'third_party/boringssl/crypto/asn1/x_long.c',
'third_party/boringssl/crypto/base64/base64.c',
'third_party/boringssl/crypto/bio/bio.c',
'third_party/boringssl/crypto/bio/bio_mem.c',
'third_party/boringssl/crypto/bio/connect.c',
'third_party/boringssl/crypto/bio/fd.c',
'third_party/boringssl/crypto/bio/file.c',
'third_party/boringssl/crypto/bio/hexdump.c',
'third_party/boringssl/crypto/bio/pair.c',
'third_party/boringssl/crypto/bio/printf.c',
'third_party/boringssl/crypto/bio/socket.c',
'third_party/boringssl/crypto/bio/socket_helper.c',
'third_party/boringssl/crypto/bn/add.c',
'third_party/boringssl/crypto/bn/asm/x86_64-gcc.c',
'third_party/boringssl/crypto/bn/bn.c',
'third_party/boringssl/crypto/bn/bn_asn1.c',
'third_party/boringssl/crypto/bn/cmp.c',
'third_party/boringssl/crypto/bn/convert.c',
'third_party/boringssl/crypto/bn/ctx.c',
'third_party/boringssl/crypto/bn/div.c',
'third_party/boringssl/crypto/bn/exponentiation.c',
'third_party/boringssl/crypto/bn/gcd.c',
'third_party/boringssl/crypto/bn/generic.c',
'third_party/boringssl/crypto/bn/kronecker.c',
'third_party/boringssl/crypto/bn/montgomery.c',
'third_party/boringssl/crypto/bn/montgomery_inv.c',
'third_party/boringssl/crypto/bn/mul.c',
'third_party/boringssl/crypto/bn/prime.c',
'third_party/boringssl/crypto/bn/random.c',
'third_party/boringssl/crypto/bn/rsaz_exp.c',
'third_party/boringssl/crypto/bn/shift.c',
'third_party/boringssl/crypto/bn/sqrt.c',
'third_party/boringssl/crypto/buf/buf.c',
'third_party/boringssl/crypto/bytestring/asn1_compat.c',
'third_party/boringssl/crypto/bytestring/ber.c',
'third_party/boringssl/crypto/bytestring/cbb.c',
'third_party/boringssl/crypto/bytestring/cbs.c',
'third_party/boringssl/crypto/chacha/chacha.c',
'third_party/boringssl/crypto/cipher/aead.c',
'third_party/boringssl/crypto/cipher/cipher.c',
'third_party/boringssl/crypto/cipher/derive_key.c',
'third_party/boringssl/crypto/cipher/e_aes.c',
'third_party/boringssl/crypto/cipher/e_chacha20poly1305.c',
'third_party/boringssl/crypto/cipher/e_des.c',
'third_party/boringssl/crypto/cipher/e_null.c',
'third_party/boringssl/crypto/cipher/e_rc2.c',
'third_party/boringssl/crypto/cipher/e_rc4.c',
'third_party/boringssl/crypto/cipher/e_ssl3.c',
'third_party/boringssl/crypto/cipher/e_tls.c',
'third_party/boringssl/crypto/cipher/tls_cbc.c',
'third_party/boringssl/crypto/cmac/cmac.c',
'third_party/boringssl/crypto/conf/conf.c',
'third_party/boringssl/crypto/cpu-aarch64-linux.c',
'third_party/boringssl/crypto/cpu-arm-linux.c',
'third_party/boringssl/crypto/cpu-arm.c',
'third_party/boringssl/crypto/cpu-intel.c',
'third_party/boringssl/crypto/cpu-ppc64le.c',
'third_party/boringssl/crypto/crypto.c',
'third_party/boringssl/crypto/curve25519/curve25519.c',
'third_party/boringssl/crypto/curve25519/spake25519.c',
'third_party/boringssl/crypto/curve25519/x25519-x86_64.c',
'third_party/boringssl/crypto/des/des.c',
'third_party/boringssl/crypto/dh/check.c',
'third_party/boringssl/crypto/dh/dh.c',
'third_party/boringssl/crypto/dh/dh_asn1.c',
'third_party/boringssl/crypto/dh/params.c',
'third_party/boringssl/crypto/digest/digest.c',
'third_party/boringssl/crypto/digest/digests.c',
'third_party/boringssl/crypto/dsa/dsa.c',
'third_party/boringssl/crypto/dsa/dsa_asn1.c',
'third_party/boringssl/crypto/ec/ec.c',
'third_party/boringssl/crypto/ec/ec_asn1.c',
'third_party/boringssl/crypto/ec/ec_key.c',
'third_party/boringssl/crypto/ec/ec_montgomery.c',
'third_party/boringssl/crypto/ec/oct.c',
'third_party/boringssl/crypto/ec/p224-64.c',
'third_party/boringssl/crypto/ec/p256-64.c',
'third_party/boringssl/crypto/ec/p256-x86_64.c',
'third_party/boringssl/crypto/ec/simple.c',
'third_party/boringssl/crypto/ec/util-64.c',
'third_party/boringssl/crypto/ec/wnaf.c',
'third_party/boringssl/crypto/ecdh/ecdh.c',
'third_party/boringssl/crypto/ecdsa/ecdsa.c',
'third_party/boringssl/crypto/ecdsa/ecdsa_asn1.c',
'third_party/boringssl/crypto/engine/engine.c',
'third_party/boringssl/crypto/err/err.c',
'third_party/boringssl/crypto/evp/digestsign.c',
'third_party/boringssl/crypto/evp/evp.c',
'third_party/boringssl/crypto/evp/evp_asn1.c',
'third_party/boringssl/crypto/evp/evp_ctx.c',
'third_party/boringssl/crypto/evp/p_dsa_asn1.c',
'third_party/boringssl/crypto/evp/p_ec.c',
'third_party/boringssl/crypto/evp/p_ec_asn1.c',
'third_party/boringssl/crypto/evp/p_rsa.c',
'third_party/boringssl/crypto/evp/p_rsa_asn1.c',
'third_party/boringssl/crypto/evp/pbkdf.c',
'third_party/boringssl/crypto/evp/print.c',
'third_party/boringssl/crypto/evp/sign.c',
'third_party/boringssl/crypto/ex_data.c',
'third_party/boringssl/crypto/hkdf/hkdf.c',
'third_party/boringssl/crypto/hmac/hmac.c',
'third_party/boringssl/crypto/lhash/lhash.c',
'third_party/boringssl/crypto/md4/md4.c',
'third_party/boringssl/crypto/md5/md5.c',
'third_party/boringssl/crypto/mem.c',
'third_party/boringssl/crypto/modes/cbc.c',
'third_party/boringssl/crypto/modes/cfb.c',
'third_party/boringssl/crypto/modes/ctr.c',
'third_party/boringssl/crypto/modes/gcm.c',
'third_party/boringssl/crypto/modes/ofb.c',
'third_party/boringssl/crypto/modes/polyval.c',
'third_party/boringssl/crypto/obj/obj.c',
'third_party/boringssl/crypto/obj/obj_xref.c',
'third_party/boringssl/crypto/pem/pem_all.c',
'third_party/boringssl/crypto/pem/pem_info.c',
'third_party/boringssl/crypto/pem/pem_lib.c',
'third_party/boringssl/crypto/pem/pem_oth.c',
'third_party/boringssl/crypto/pem/pem_pk8.c',
'third_party/boringssl/crypto/pem/pem_pkey.c',
'third_party/boringssl/crypto/pem/pem_x509.c',
'third_party/boringssl/crypto/pem/pem_xaux.c',
'third_party/boringssl/crypto/pkcs8/p5_pbev2.c',
'third_party/boringssl/crypto/pkcs8/p8_pkey.c',
'third_party/boringssl/crypto/pkcs8/pkcs8.c',
'third_party/boringssl/crypto/poly1305/poly1305.c',
'third_party/boringssl/crypto/poly1305/poly1305_arm.c',
'third_party/boringssl/crypto/poly1305/poly1305_vec.c',
'third_party/boringssl/crypto/pool/pool.c',
'third_party/boringssl/crypto/rand/deterministic.c',
'third_party/boringssl/crypto/rand/fuchsia.c',
'third_party/boringssl/crypto/rand/rand.c',
'third_party/boringssl/crypto/rand/urandom.c',
'third_party/boringssl/crypto/rand/windows.c',
'third_party/boringssl/crypto/rc4/rc4.c',
'third_party/boringssl/crypto/refcount_c11.c',
'third_party/boringssl/crypto/refcount_lock.c',
'third_party/boringssl/crypto/rsa/blinding.c',
'third_party/boringssl/crypto/rsa/padding.c',
'third_party/boringssl/crypto/rsa/rsa.c',
'third_party/boringssl/crypto/rsa/rsa_asn1.c',
'third_party/boringssl/crypto/rsa/rsa_impl.c',
'third_party/boringssl/crypto/sha/sha1-altivec.c',
'third_party/boringssl/crypto/sha/sha1.c',
'third_party/boringssl/crypto/sha/sha256.c',
'third_party/boringssl/crypto/sha/sha512.c',
'third_party/boringssl/crypto/stack/stack.c',
'third_party/boringssl/crypto/thread.c',
'third_party/boringssl/crypto/thread_none.c',
'third_party/boringssl/crypto/thread_pthread.c',
'third_party/boringssl/crypto/thread_win.c',
'third_party/boringssl/crypto/x509/a_digest.c',
'third_party/boringssl/crypto/x509/a_sign.c',
'third_party/boringssl/crypto/x509/a_strex.c',
'third_party/boringssl/crypto/x509/a_verify.c',
'third_party/boringssl/crypto/x509/algorithm.c',
'third_party/boringssl/crypto/x509/asn1_gen.c',
'third_party/boringssl/crypto/x509/by_dir.c',
'third_party/boringssl/crypto/x509/by_file.c',
'third_party/boringssl/crypto/x509/i2d_pr.c',
'third_party/boringssl/crypto/x509/pkcs7.c',
'third_party/boringssl/crypto/x509/rsa_pss.c',
'third_party/boringssl/crypto/x509/t_crl.c',
'third_party/boringssl/crypto/x509/t_req.c',
'third_party/boringssl/crypto/x509/t_x509.c',
'third_party/boringssl/crypto/x509/t_x509a.c',
'third_party/boringssl/crypto/x509/x509.c',
'third_party/boringssl/crypto/x509/x509_att.c',
'third_party/boringssl/crypto/x509/x509_cmp.c',
'third_party/boringssl/crypto/x509/x509_d2.c',
'third_party/boringssl/crypto/x509/x509_def.c',
'third_party/boringssl/crypto/x509/x509_ext.c',
'third_party/boringssl/crypto/x509/x509_lu.c',
'third_party/boringssl/crypto/x509/x509_obj.c',
'third_party/boringssl/crypto/x509/x509_r2x.c',
'third_party/boringssl/crypto/x509/x509_req.c',
'third_party/boringssl/crypto/x509/x509_set.c',
'third_party/boringssl/crypto/x509/x509_trs.c',
'third_party/boringssl/crypto/x509/x509_txt.c',
'third_party/boringssl/crypto/x509/x509_v3.c',
'third_party/boringssl/crypto/x509/x509_vfy.c',
'third_party/boringssl/crypto/x509/x509_vpm.c',
'third_party/boringssl/crypto/x509/x509cset.c',
'third_party/boringssl/crypto/x509/x509name.c',
'third_party/boringssl/crypto/x509/x509rset.c',
'third_party/boringssl/crypto/x509/x509spki.c',
'third_party/boringssl/crypto/x509/x509type.c',
'third_party/boringssl/crypto/x509/x_algor.c',
'third_party/boringssl/crypto/x509/x_all.c',
'third_party/boringssl/crypto/x509/x_attrib.c',
'third_party/boringssl/crypto/x509/x_crl.c',
'third_party/boringssl/crypto/x509/x_exten.c',
'third_party/boringssl/crypto/x509/x_info.c',
'third_party/boringssl/crypto/x509/x_name.c',
'third_party/boringssl/crypto/x509/x_pkey.c',
'third_party/boringssl/crypto/x509/x_pubkey.c',
'third_party/boringssl/crypto/x509/x_req.c',
'third_party/boringssl/crypto/x509/x_sig.c',
'third_party/boringssl/crypto/x509/x_spki.c',
'third_party/boringssl/crypto/x509/x_val.c',
'third_party/boringssl/crypto/x509/x_x509.c',
'third_party/boringssl/crypto/x509/x_x509a.c',
'third_party/boringssl/crypto/x509v3/pcy_cache.c',
'third_party/boringssl/crypto/x509v3/pcy_data.c',
'third_party/boringssl/crypto/x509v3/pcy_lib.c',
'third_party/boringssl/crypto/x509v3/pcy_map.c',
'third_party/boringssl/crypto/x509v3/pcy_node.c',
'third_party/boringssl/crypto/x509v3/pcy_tree.c',
'third_party/boringssl/crypto/x509v3/v3_akey.c',
'third_party/boringssl/crypto/x509v3/v3_akeya.c',
'third_party/boringssl/crypto/x509v3/v3_alt.c',
'third_party/boringssl/crypto/x509v3/v3_bcons.c',
'third_party/boringssl/crypto/x509v3/v3_bitst.c',
'third_party/boringssl/crypto/x509v3/v3_conf.c',
'third_party/boringssl/crypto/x509v3/v3_cpols.c',
'third_party/boringssl/crypto/x509v3/v3_crld.c',
'third_party/boringssl/crypto/x509v3/v3_enum.c',
'third_party/boringssl/crypto/x509v3/v3_extku.c',
'third_party/boringssl/crypto/x509v3/v3_genn.c',
'third_party/boringssl/crypto/x509v3/v3_ia5.c',
'third_party/boringssl/crypto/x509v3/v3_info.c',
'third_party/boringssl/crypto/x509v3/v3_int.c',
'third_party/boringssl/crypto/x509v3/v3_lib.c',
'third_party/boringssl/crypto/x509v3/v3_ncons.c',
'third_party/boringssl/crypto/x509v3/v3_pci.c',
'third_party/boringssl/crypto/x509v3/v3_pcia.c',
'third_party/boringssl/crypto/x509v3/v3_pcons.c',
'third_party/boringssl/crypto/x509v3/v3_pku.c',
'third_party/boringssl/crypto/x509v3/v3_pmaps.c',
'third_party/boringssl/crypto/x509v3/v3_prn.c',
'third_party/boringssl/crypto/x509v3/v3_purp.c',
'third_party/boringssl/crypto/x509v3/v3_skey.c',
'third_party/boringssl/crypto/x509v3/v3_sxnet.c',
'third_party/boringssl/crypto/x509v3/v3_utl.c',
'third_party/boringssl/ssl/bio_ssl.c',
'third_party/boringssl/ssl/custom_extensions.c',
'third_party/boringssl/ssl/d1_both.c',
'third_party/boringssl/ssl/d1_lib.c',
'third_party/boringssl/ssl/d1_pkt.c',
'third_party/boringssl/ssl/d1_srtp.c',
'third_party/boringssl/ssl/dtls_method.c',
'third_party/boringssl/ssl/dtls_record.c',
'third_party/boringssl/ssl/handshake_client.c',
'third_party/boringssl/ssl/handshake_server.c',
'third_party/boringssl/ssl/s3_both.c',
'third_party/boringssl/ssl/s3_lib.c',
'third_party/boringssl/ssl/s3_pkt.c',
'third_party/boringssl/ssl/ssl_aead_ctx.c',
'third_party/boringssl/ssl/ssl_asn1.c',
'third_party/boringssl/ssl/ssl_buffer.c',
'third_party/boringssl/ssl/ssl_cert.c',
'third_party/boringssl/ssl/ssl_cipher.c',
'third_party/boringssl/ssl/ssl_ecdh.c',
'third_party/boringssl/ssl/ssl_file.c',
'third_party/boringssl/ssl/ssl_lib.c',
'third_party/boringssl/ssl/ssl_privkey.c',
'third_party/boringssl/ssl/ssl_privkey_cc.cc',
'third_party/boringssl/ssl/ssl_session.c',
'third_party/boringssl/ssl/ssl_stat.c',
'third_party/boringssl/ssl/ssl_transcript.c',
'third_party/boringssl/ssl/ssl_x509.c',
'third_party/boringssl/ssl/t1_enc.c',
'third_party/boringssl/ssl/t1_lib.c',
'third_party/boringssl/ssl/tls13_both.c',
'third_party/boringssl/ssl/tls13_client.c',
'third_party/boringssl/ssl/tls13_enc.c',
'third_party/boringssl/ssl/tls13_server.c',
'third_party/boringssl/ssl/tls_method.c',
'third_party/boringssl/ssl/tls_record.c',
],
'conditions': [
['OS == "mac"', {
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9'
}
}]
]
},
],
}],
['OS == "win" and runtime!="electron"', {
'targets': [
{
# IMPORTANT WINDOWS BUILD INFORMATION
# This library does not build on Windows without modifying the Node
# development packages that node-gyp downloads in order to build.
# Due to https://github.com/nodejs/node/issues/4932, the headers for
# BoringSSL conflict with the OpenSSL headers included by default
# when including the Node headers. The remedy for this is to remove
# the OpenSSL headers, from the downloaded Node development package,
# which is typically located in `.node-gyp` in your home directory.
#
# This is not true of Electron, which does not have OpenSSL headers.
'target_name': 'WINDOWS_BUILD_WARNING',
'rules': [
{
'rule_name': 'WINDOWS_BUILD_WARNING',
'extension': 'S',
'inputs': [
'package.json'
],
'outputs': [
'ignore_this_part'
],
'action': ['echo', 'IMPORTANT: Due to https://github.com/nodejs/node/issues/4932, to build this library on Windows, you must first remove <(node_root_dir)/include/node/openssl/']
}
]
},
]
}],
['OS == "win"', {
'targets': [
# Only want to compile zlib under Windows
{
'target_name': 'z',
'product_prefix': 'lib',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/zlib/adler32.c',
'third_party/zlib/compress.c',
'third_party/zlib/crc32.c',
'third_party/zlib/deflate.c',
'third_party/zlib/gzclose.c',
'third_party/zlib/gzlib.c',
'third_party/zlib/gzread.c',
'third_party/zlib/gzwrite.c',
'third_party/zlib/infback.c',
'third_party/zlib/inffast.c',
'third_party/zlib/inflate.c',
'third_party/zlib/inftrees.c',
'third_party/zlib/trees.c',
'third_party/zlib/uncompr.c',
'third_party/zlib/zutil.c',
]
},
]
}]
],
'targets': [
{
'target_name': 'gpr',
'product_prefix': 'lib',
'type': 'static_library',
'dependencies': [
],
'sources': [
'src/core/lib/profiling/basic_timers.c',
'src/core/lib/profiling/stap_timers.c',
'src/core/lib/support/alloc.c',
'src/core/lib/support/arena.c',
'src/core/lib/support/atm.c',
'src/core/lib/support/avl.c',
'src/core/lib/support/backoff.c',
'src/core/lib/support/cmdline.c',
'src/core/lib/support/cpu_iphone.c',
'src/core/lib/support/cpu_linux.c',
'src/core/lib/support/cpu_posix.c',
'src/core/lib/support/cpu_windows.c',
'src/core/lib/support/env_linux.c',
'src/core/lib/support/env_posix.c',
'src/core/lib/support/env_windows.c',
'src/core/lib/support/fork.c',
'src/core/lib/support/histogram.c',
'src/core/lib/support/host_port.c',
'src/core/lib/support/log.c',
'src/core/lib/support/log_android.c',
'src/core/lib/support/log_linux.c',
'src/core/lib/support/log_posix.c',
'src/core/lib/support/log_windows.c',
'src/core/lib/support/mpscq.c',
'src/core/lib/support/murmur_hash.c',
'src/core/lib/support/stack_lockfree.c',
'src/core/lib/support/string.c',
'src/core/lib/support/string_posix.c',
'src/core/lib/support/string_util_windows.c',
'src/core/lib/support/string_windows.c',
'src/core/lib/support/subprocess_posix.c',
'src/core/lib/support/subprocess_windows.c',
'src/core/lib/support/sync.c',
'src/core/lib/support/sync_posix.c',
'src/core/lib/support/sync_windows.c',
'src/core/lib/support/thd.c',
'src/core/lib/support/thd_posix.c',
'src/core/lib/support/thd_windows.c',
'src/core/lib/support/time.c',
'src/core/lib/support/time_posix.c',
'src/core/lib/support/time_precise.c',
'src/core/lib/support/time_windows.c',
'src/core/lib/support/tls_pthread.c',
'src/core/lib/support/tmpfile_msys.c',
'src/core/lib/support/tmpfile_posix.c',
'src/core/lib/support/tmpfile_windows.c',
'src/core/lib/support/wrap_memcpy.c',
],
'conditions': [
['OS == "mac"', {
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9'
}
}]
]
},
{
'target_name': 'grpc',
'product_prefix': 'lib',
'type': 'static_library',
'dependencies': [
'gpr',
],
'sources': [
'src/core/lib/surface/init.c',
'src/core/lib/channel/channel_args.c',
'src/core/lib/channel/channel_stack.c',
'src/core/lib/channel/channel_stack_builder.c',
'src/core/lib/channel/connected_channel.c',
'src/core/lib/channel/handshaker.c',
'src/core/lib/channel/handshaker_factory.c',
'src/core/lib/channel/handshaker_registry.c',
'src/core/lib/compression/compression.c',
'src/core/lib/compression/message_compress.c',
'src/core/lib/compression/stream_compression.c',
'src/core/lib/compression/stream_compression_gzip.c',
'src/core/lib/compression/stream_compression_identity.c',
'src/core/lib/debug/stats.c',
'src/core/lib/debug/stats_data.c',
'src/core/lib/http/format_request.c',
'src/core/lib/http/httpcli.c',
'src/core/lib/http/parser.c',
'src/core/lib/iomgr/call_combiner.c',
'src/core/lib/iomgr/closure.c',
'src/core/lib/iomgr/combiner.c',
'src/core/lib/iomgr/endpoint.c',
'src/core/lib/iomgr/endpoint_pair_posix.c',
'src/core/lib/iomgr/endpoint_pair_uv.c',
'src/core/lib/iomgr/endpoint_pair_windows.c',
'src/core/lib/iomgr/error.c',
'src/core/lib/iomgr/ev_epoll1_linux.c',
'src/core/lib/iomgr/ev_epollex_linux.c',
'src/core/lib/iomgr/ev_epollsig_linux.c',
'src/core/lib/iomgr/ev_poll_posix.c',
'src/core/lib/iomgr/ev_posix.c',
'src/core/lib/iomgr/ev_windows.c',
'src/core/lib/iomgr/exec_ctx.c',
'src/core/lib/iomgr/executor.c',
'src/core/lib/iomgr/fork_posix.c',
'src/core/lib/iomgr/fork_windows.c',
'src/core/lib/iomgr/gethostname_fallback.c',
'src/core/lib/iomgr/gethostname_host_name_max.c',
'src/core/lib/iomgr/gethostname_sysconf.c',
'src/core/lib/iomgr/iocp_windows.c',
'src/core/lib/iomgr/iomgr.c',
'src/core/lib/iomgr/iomgr_posix.c',
'src/core/lib/iomgr/iomgr_uv.c',
'src/core/lib/iomgr/iomgr_windows.c',
'src/core/lib/iomgr/is_epollexclusive_available.c',
'src/core/lib/iomgr/load_file.c',
'src/core/lib/iomgr/lockfree_event.c',
'src/core/lib/iomgr/network_status_tracker.c',
'src/core/lib/iomgr/polling_entity.c',
'src/core/lib/iomgr/pollset_set_uv.c',
'src/core/lib/iomgr/pollset_set_windows.c',
'src/core/lib/iomgr/pollset_uv.c',
'src/core/lib/iomgr/pollset_windows.c',
'src/core/lib/iomgr/resolve_address_posix.c',
'src/core/lib/iomgr/resolve_address_uv.c',
'src/core/lib/iomgr/resolve_address_windows.c',
'src/core/lib/iomgr/resource_quota.c',
'src/core/lib/iomgr/sockaddr_utils.c',
'src/core/lib/iomgr/socket_factory_posix.c',
'src/core/lib/iomgr/socket_mutator.c',
'src/core/lib/iomgr/socket_utils_common_posix.c',
'src/core/lib/iomgr/socket_utils_linux.c',
'src/core/lib/iomgr/socket_utils_posix.c',
'src/core/lib/iomgr/socket_utils_uv.c',
'src/core/lib/iomgr/socket_utils_windows.c',
'src/core/lib/iomgr/socket_windows.c',
'src/core/lib/iomgr/tcp_client_posix.c',
'src/core/lib/iomgr/tcp_client_uv.c',
'src/core/lib/iomgr/tcp_client_windows.c',
'src/core/lib/iomgr/tcp_posix.c',
'src/core/lib/iomgr/tcp_server_posix.c',
'src/core/lib/iomgr/tcp_server_utils_posix_common.c',
'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.c',
'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.c',
'src/core/lib/iomgr/tcp_server_uv.c',
'src/core/lib/iomgr/tcp_server_windows.c',
'src/core/lib/iomgr/tcp_uv.c',
'src/core/lib/iomgr/tcp_windows.c',
'src/core/lib/iomgr/time_averaged_stats.c',
'src/core/lib/iomgr/timer_generic.c',
'src/core/lib/iomgr/timer_heap.c',
'src/core/lib/iomgr/timer_manager.c',
'src/core/lib/iomgr/timer_uv.c',
'src/core/lib/iomgr/udp_server.c',
'src/core/lib/iomgr/unix_sockets_posix.c',
'src/core/lib/iomgr/unix_sockets_posix_noop.c',
'src/core/lib/iomgr/wakeup_fd_cv.c',
'src/core/lib/iomgr/wakeup_fd_eventfd.c',
'src/core/lib/iomgr/wakeup_fd_nospecial.c',
'src/core/lib/iomgr/wakeup_fd_pipe.c',
'src/core/lib/iomgr/wakeup_fd_posix.c',
'src/core/lib/json/json.c',
'src/core/lib/json/json_reader.c',
'src/core/lib/json/json_string.c',
'src/core/lib/json/json_writer.c',
'src/core/lib/slice/b64.c',
'src/core/lib/slice/percent_encoding.c',
'src/core/lib/slice/slice.c',
'src/core/lib/slice/slice_buffer.c',
'src/core/lib/slice/slice_hash_table.c',
'src/core/lib/slice/slice_intern.c',
'src/core/lib/slice/slice_string_helpers.c',
'src/core/lib/surface/alarm.c',
'src/core/lib/surface/api_trace.c',
'src/core/lib/surface/byte_buffer.c',
'src/core/lib/surface/byte_buffer_reader.c',
'src/core/lib/surface/call.c',
'src/core/lib/surface/call_details.c',
'src/core/lib/surface/call_log_batch.c',
'src/core/lib/surface/channel.c',
'src/core/lib/surface/channel_init.c',
'src/core/lib/surface/channel_ping.c',
'src/core/lib/surface/channel_stack_type.c',
'src/core/lib/surface/completion_queue.c',
'src/core/lib/surface/completion_queue_factory.c',
'src/core/lib/surface/event_string.c',
'src/core/lib/surface/lame_client.cc',
'src/core/lib/surface/metadata_array.c',
'src/core/lib/surface/server.c',
'src/core/lib/surface/validate_metadata.c',
'src/core/lib/surface/version.c',
'src/core/lib/transport/bdp_estimator.c',
'src/core/lib/transport/byte_stream.c',
'src/core/lib/transport/connectivity_state.c',
'src/core/lib/transport/error_utils.c',
'src/core/lib/transport/metadata.c',
'src/core/lib/transport/metadata_batch.c',
'src/core/lib/transport/pid_controller.c',
'src/core/lib/transport/service_config.c',
'src/core/lib/transport/static_metadata.c',
'src/core/lib/transport/status_conversion.c',
'src/core/lib/transport/timeout_encoding.c',
'src/core/lib/transport/transport.c',
'src/core/lib/transport/transport_op_string.c',
'src/core/lib/debug/trace.c',
'src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c',
'src/core/ext/transport/chttp2/transport/bin_decoder.c',
'src/core/ext/transport/chttp2/transport/bin_encoder.c',
'src/core/ext/transport/chttp2/transport/chttp2_plugin.c',
'src/core/ext/transport/chttp2/transport/chttp2_transport.c',
'src/core/ext/transport/chttp2/transport/flow_control.c',
'src/core/ext/transport/chttp2/transport/frame_data.c',
'src/core/ext/transport/chttp2/transport/frame_goaway.c',
'src/core/ext/transport/chttp2/transport/frame_ping.c',
'src/core/ext/transport/chttp2/transport/frame_rst_stream.c',
'src/core/ext/transport/chttp2/transport/frame_settings.c',
'src/core/ext/transport/chttp2/transport/frame_window_update.c',
'src/core/ext/transport/chttp2/transport/hpack_encoder.c',
'src/core/ext/transport/chttp2/transport/hpack_parser.c',
'src/core/ext/transport/chttp2/transport/hpack_table.c',
'src/core/ext/transport/chttp2/transport/http2_settings.c',
'src/core/ext/transport/chttp2/transport/huffsyms.c',
'src/core/ext/transport/chttp2/transport/incoming_metadata.c',
'src/core/ext/transport/chttp2/transport/parsing.c',
'src/core/ext/transport/chttp2/transport/stream_lists.c',
'src/core/ext/transport/chttp2/transport/stream_map.c',
'src/core/ext/transport/chttp2/transport/varint.c',
'src/core/ext/transport/chttp2/transport/writing.c',
'src/core/ext/transport/chttp2/alpn/alpn.c',
'src/core/ext/filters/http/client/http_client_filter.c',
'src/core/ext/filters/http/http_filters_plugin.c',
'src/core/ext/filters/http/message_compress/message_compress_filter.c',
'src/core/ext/filters/http/server/http_server_filter.c',
'src/core/lib/http/httpcli_security_connector.c',
'src/core/lib/security/context/security_context.c',
'src/core/lib/security/credentials/composite/composite_credentials.c',
'src/core/lib/security/credentials/credentials.c',
'src/core/lib/security/credentials/credentials_metadata.c',
'src/core/lib/security/credentials/fake/fake_credentials.c',
'src/core/lib/security/credentials/google_default/credentials_generic.c',
'src/core/lib/security/credentials/google_default/google_default_credentials.c',
'src/core/lib/security/credentials/iam/iam_credentials.c',
'src/core/lib/security/credentials/jwt/json_token.c',
'src/core/lib/security/credentials/jwt/jwt_credentials.c',
'src/core/lib/security/credentials/jwt/jwt_verifier.c',
'src/core/lib/security/credentials/oauth2/oauth2_credentials.c',
'src/core/lib/security/credentials/plugin/plugin_credentials.c',
'src/core/lib/security/credentials/ssl/ssl_credentials.c',
'src/core/lib/security/transport/client_auth_filter.c',
'src/core/lib/security/transport/lb_targets_info.c',
'src/core/lib/security/transport/secure_endpoint.c',
'src/core/lib/security/transport/security_connector.c',
'src/core/lib/security/transport/security_handshaker.c',
'src/core/lib/security/transport/server_auth_filter.c',
'src/core/lib/security/transport/tsi_error.c',
'src/core/lib/security/util/json_util.c',
'src/core/lib/surface/init_secure.c',
'src/core/tsi/fake_transport_security.c',
'src/core/tsi/gts_transport_security.c',
'src/core/tsi/ssl_transport_security.c',
'src/core/tsi/transport_security_grpc.c',
'src/core/tsi/transport_security.c',
'src/core/tsi/transport_security_adapter.c',
'src/core/ext/transport/chttp2/server/chttp2_server.c',
'src/core/ext/transport/chttp2/client/secure/secure_channel_create.c',
'src/core/ext/filters/client_channel/channel_connectivity.c',
'src/core/ext/filters/client_channel/client_channel.c',
'src/core/ext/filters/client_channel/client_channel_factory.c',
'src/core/ext/filters/client_channel/client_channel_plugin.c',
'src/core/ext/filters/client_channel/connector.c',
'src/core/ext/filters/client_channel/http_connect_handshaker.c',
'src/core/ext/filters/client_channel/http_proxy.c',
'src/core/ext/filters/client_channel/lb_policy.c',
'src/core/ext/filters/client_channel/lb_policy_factory.c',
'src/core/ext/filters/client_channel/lb_policy_registry.c',
'src/core/ext/filters/client_channel/parse_address.c',
'src/core/ext/filters/client_channel/proxy_mapper.c',
'src/core/ext/filters/client_channel/proxy_mapper_registry.c',
'src/core/ext/filters/client_channel/resolver.c',
'src/core/ext/filters/client_channel/resolver_factory.c',
'src/core/ext/filters/client_channel/resolver_registry.c',
'src/core/ext/filters/client_channel/retry_throttle.c',
'src/core/ext/filters/client_channel/subchannel.c',
'src/core/ext/filters/client_channel/subchannel_index.c',
'src/core/ext/filters/client_channel/uri_parser.c',
'src/core/ext/filters/deadline/deadline_filter.c',
'src/core/ext/transport/chttp2/client/chttp2_connector.c',
'src/core/ext/transport/chttp2/server/insecure/server_chttp2.c',
'src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c',
'src/core/ext/transport/chttp2/client/insecure/channel_create.c',
'src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c',
'src/core/ext/transport/inproc/inproc_plugin.c',
'src/core/ext/transport/inproc/inproc_transport.c',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.c',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c',
'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c',
'third_party/nanopb/pb_common.c',
'third_party/nanopb/pb_decode.c',
'third_party/nanopb/pb_encode.c',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.c',
'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c',
'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c',
'src/core/ext/filters/load_reporting/server_load_reporting_filter.c',
'src/core/ext/filters/load_reporting/server_load_reporting_plugin.c',
'src/core/ext/census/base_resources.c',
'src/core/ext/census/context.c',
'src/core/ext/census/gen/census.pb.c',
'src/core/ext/census/gen/trace_context.pb.c',
'src/core/ext/census/grpc_context.c',
'src/core/ext/census/grpc_filter.c',
'src/core/ext/census/grpc_plugin.c',
'src/core/ext/census/initialize.c',
'src/core/ext/census/intrusive_hash_map.c',
'src/core/ext/census/mlog.c',
'src/core/ext/census/operation.c',
'src/core/ext/census/placeholders.c',
'src/core/ext/census/resource.c',
'src/core/ext/census/trace_context.c',
'src/core/ext/census/tracing.c',
'src/core/ext/filters/max_age/max_age_filter.c',
'src/core/ext/filters/message_size/message_size_filter.c',
'src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c',
'src/core/ext/filters/workarounds/workaround_utils.c',
'src/core/plugin_registry/grpc_plugin_registry.c',
],
'conditions': [
['OS == "mac"', {
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9'
}
}]
]
},
{
'include_dirs': [
"<!(node -e \"require('nan')\")"
],
'cflags': [
'-pthread',
'-zdefs',
'-Wno-error=deprecated-declarations'
],
"conditions": [
['OS=="win" or runtime=="electron"', {
'dependencies': [
"boringssl",
]
}],
['OS=="win"', {
'dependencies': [
"z",
]
}],
['OS=="linux"', {
'ldflags': [
'-Wl,-wrap,memcpy'
]
}],
['OS == "mac"', {
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9'
}
}]
],
"target_name": "grpc_node",
"sources": [
"src/node/ext/byte_buffer.cc",
"src/node/ext/call.cc",
"src/node/ext/call_credentials.cc",
"src/node/ext/channel.cc",
"src/node/ext/channel_credentials.cc",
"src/node/ext/completion_queue.cc",
"src/node/ext/node_grpc.cc",
"src/node/ext/server.cc",
"src/node/ext/server_credentials.cc",
"src/node/ext/slice.cc",
"src/node/ext/timeval.cc",
],
"dependencies": [
"grpc",
"gpr",
]
},
{
"target_name": "action_after_build",
"type": "none",
"dependencies": [ "<(module_name)" ],
"copies": [
{
"files": [ "<(PRODUCT_DIR)/<(module_name).node"],
"destination": "<(module_path)"
}
]
}
]
}

@@ -5021,6 +5021,7 @@ defaults:
global:
COREFLAGS: -fno-rtti -fno-exceptions
CPPFLAGS: -g -Wall -Wextra -Werror -Wno-long-long -Wno-unused-parameter -DOSATOMIC_USE_INLINED=1
-Wno-deprecated-declarations
LDFLAGS: -g
zlib:
CFLAGS: -Wno-sign-conversion -Wno-conversion -Wno-unused-value -Wno-implicit-function-declaration

@@ -3,14 +3,14 @@
gRPC-Web provides a JS client library that supports the same API
as gRPC-Node to access a gRPC service. Due to browser limitation,
the Web client library implements a different protocol than the
[native gRPC protocol](https://grpc.io/docs/guides/wire.html).
[native gRPC protocol](PROTOCOL-HTTP2.md).
This protocol is designed to make it easy for a proxy to translate
between the protocols as this is the most likely deployment model.
This document lists the differences between the two protocols.
To help tracking future revisions, this document describes a delta
with the protocol details specified in the
[native gRPC protocol](https://grpc.io/docs/guides/wire.html).
[native gRPC protocol](PROTOCOL-HTTP2.md).
# Design goals
@@ -31,7 +31,7 @@ web-specific features such as CORS, XSRF
* become optional (in 1-2 years) when browsers are able to speak the native
gRPC protocol via the new [whatwg fetch/streams API](https://github.com/whatwg/fetch)
# Protocol differences vs [gRPC over HTTP2](https://grpc.io/docs/guides/wire.html)
# Protocol differences vs [gRPC over HTTP2](PROTOCOL-HTTP2.md)
Content-Type
@@ -53,14 +53,14 @@ HTTP wire protocols
---
HTTP/2 related behavior (specified in [gRPC over HTTP2](https://grpc.io/docs/guides/wire.html))
HTTP/2 related behavior (specified in [gRPC over HTTP2](PROTOCOL-HTTP2.md))
1. stream-id is not supported or used
2. go-away is not supported or used
---
Message framing (vs. [http2-transport-mapping](https://grpc.io/docs/guides/wire.html#http2-transport-mapping))
Message framing (vs. [http2-transport-mapping](PROTOCOL-HTTP2.md#http2-transport-mapping))
1. Response status encoded as part of the response body
* Key-value pairs encoded as a HTTP/1 headers block (without the terminating newline), per https://tools.ietf.org/html/rfc7230#section-3.2
@@ -86,7 +86,7 @@ in the body.
User Agent
* Do NOT use User-Agent header (which is to be set by browsers, by default)
* Use X-User-Agent: grpc-web-javascript/0.1 (follow the same format as specified in [gRPC over HTTP2](https://grpc.io/docs/guides/wire.html))
* Use X-User-Agent: grpc-web-javascript/0.1 (follow the same format as specified in [gRPC over HTTP2](PROTOCOL-HTTP2.md))
---
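
A concrete illustration of the framing described above may help. The sketch below is not code from this commit; it assumes the usual gRPC-Web trailer frame layout (a flag byte with the most-significant bit set, followed by a 4-byte big-endian length), which this excerpt does not spell out, and the function name is hypothetical.

    /* Hedged sketch: encode response status trailers as the HTTP/1-style
     * headers block that gRPC-Web carries at the end of the response body. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static size_t encode_web_trailers(int grpc_status, const char* grpc_message,
                                      uint8_t* out, size_t out_cap) {
      char block[256];
      /* Key-value pairs as an HTTP/1 headers block, without a terminating blank
       * line; a real implementation would also percent-encode grpc-message. */
      int n = snprintf(block, sizeof(block),
                       "grpc-status: %d\r\ngrpc-message: %s\r\n", grpc_status,
                       grpc_message);
      if (n < 0 || n >= (int)sizeof(block) || (size_t)n + 5 > out_cap) return 0;
      out[0] = 0x80; /* assumed trailer-frame flag (MSB set) */
      out[1] = (uint8_t)((n >> 24) & 0xff);
      out[2] = (uint8_t)((n >> 16) & 0xff);
      out[3] = (uint8_t)((n >> 8) & 0xff);
      out[4] = (uint8_t)(n & 0xff);
      memcpy(out + 5, block, (size_t)n);
      return (size_t)n + 5;
    }

    int main(void) {
      uint8_t buf[300];
      printf("trailer frame is %zu bytes\n",
             encode_web_trailers(0, "OK", buf, sizeof(buf)));
      return 0;
    }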

@@ -20,9 +20,10 @@ import generic_client_interceptor
class _ClientCallDetails(
collections.namedtuple('_ClientCallDetails',
('method', 'timeout', 'metadata',
'credentials')), grpc.ClientCallDetails):
collections.namedtuple(
'_ClientCallDetails',
('method', 'timeout', 'metadata', 'credentials')),
grpc.ClientCallDetails):
pass
@@ -33,7 +34,10 @@ def header_adder_interceptor(header, value):
metadata = []
if client_call_details.metadata is not None:
metadata = list(client_call_details.metadata)
metadata.append((header, value,))
metadata.append((
header,
value,
))
client_call_details = _ClientCallDetails(
client_call_details.method, client_call_details.timeout, metadata,
client_call_details.credentials)

@@ -46,9 +46,9 @@ def guide_get_one_feature(route_guide_stub, point):
def guide_get_feature(route_guide_stub):
guide_get_one_feature(
route_guide_stub,
route_guide_pb2.Point(latitude=409146138, longitude=-746188906))
guide_get_one_feature(route_guide_stub,
route_guide_pb2.Point(
latitude=409146138, longitude=-746188906))
guide_get_one_feature(route_guide_stub,
route_guide_pb2.Point(latitude=0, longitude=0))
@@ -101,8 +101,8 @@ def generate_messages():
def guide_route_chat(route_guide_stub):
responses = route_guide_stub.RouteChat(generate_messages())
for response in responses:
print("Received message %s at %s" %
(response.message, response.location))
print("Received message %s at %s" % (response.message,
response.location))
def run():

@@ -124,8 +124,8 @@ def serve():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
helloworld_pb2_grpc.add_GreeterServicer_to_server(_GreeterServicer(),
server)
route_guide_pb2_grpc.add_RouteGuideServicer_to_server(_RouteGuideServicer(),
server)
route_guide_pb2_grpc.add_RouteGuideServicer_to_server(
_RouteGuideServicer(), server)
server.add_insecure_port('[::]:50051')
server.start()
try:

@@ -15,7 +15,17 @@
from grpc_tools import protoc
protoc.main(('', '-I../../protos', '--python_out=.', '--grpc_python_out=.',
'../../protos/helloworld.proto',))
protoc.main(('', '-I../../protos', '--python_out=.', '--grpc_python_out=.',
'../../protos/route_guide.proto',))
protoc.main((
'',
'-I../../protos',
'--python_out=.',
'--grpc_python_out=.',
'../../protos/helloworld.proto',
))
protoc.main((
'',
'-I../../protos',
'--python_out=.',
'--grpc_python_out=.',
'../../protos/route_guide.proto',
))

@@ -43,8 +43,9 @@ def guide_get_one_feature(stub, point):
def guide_get_feature(stub):
guide_get_one_feature(
stub, route_guide_pb2.Point(latitude=409146138, longitude=-746188906))
guide_get_one_feature(stub,
route_guide_pb2.Point(
latitude=409146138, longitude=-746188906))
guide_get_one_feature(stub, route_guide_pb2.Point(latitude=0, longitude=0))
@@ -94,8 +95,8 @@ def generate_messages():
def guide_route_chat(stub):
responses = stub.RouteChat(generate_messages())
for response in responses:
print("Received message %s at %s" %
(response.message, response.location))
print("Received message %s at %s" % (response.message,
response.location))
def run():

@@ -113,8 +113,8 @@ class RouteGuideServicer(route_guide_pb2_grpc.RouteGuideServicer):
def serve():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
route_guide_pb2_grpc.add_RouteGuideServicer_to_server(RouteGuideServicer(),
server)
route_guide_pb2_grpc.add_RouteGuideServicer_to_server(
RouteGuideServicer(), server)
server.add_insecure_port('[::]:50051')
server.start()
try:

@@ -15,5 +15,10 @@
from grpc_tools import protoc
protoc.main(('', '-I../../protos', '--python_out=.', '--grpc_python_out=.',
'../../protos/route_guide.proto',))
protoc.main((
'',
'-I../../protos',
'--python_out=.',
'--grpc_python_out=.',
'../../protos/route_guide.proto',
))

@@ -1046,6 +1046,7 @@ Pod::Spec.new do |s|
'test/core/end2end/tests/filter_call_init_fails.cc',
'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_latency.cc',
'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc',
'test/core/end2end/tests/high_initial_seqno.cc',
'test/core/end2end/tests/hpack_size.cc',

@@ -57,6 +57,7 @@
'-Wno-long-long',
'-Wno-unused-parameter',
'-DOSATOMIC_USE_INLINED=1',
'-Wno-deprecated-declarations',
],
'ldflags': [
'-g',
@@ -134,6 +135,7 @@
'-Wno-long-long',
'-Wno-unused-parameter',
'-DOSATOMIC_USE_INLINED=1',
'-Wno-deprecated-declarations',
],
'OTHER_CPLUSPLUSFLAGS': [
'-g',
@@ -143,6 +145,7 @@
'-Wno-long-long',
'-Wno-unused-parameter',
'-DOSATOMIC_USE_INLINED=1',
'-Wno-deprecated-declarations',
'-stdlib=libc++',
'-std=c++11',
'-Wno-error=deprecated-declarations'
@@ -2385,6 +2388,7 @@
'test/core/end2end/tests/filter_call_init_fails.cc',
'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_latency.cc',
'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc',
'test/core/end2end/tests/high_initial_seqno.cc',
'test/core/end2end/tests/hpack_size.cc',
@@ -2457,6 +2461,7 @@
'test/core/end2end/tests/filter_call_init_fails.cc',
'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_latency.cc',
'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc',
'test/core/end2end/tests/high_initial_seqno.cc',
'test/core/end2end/tests/hpack_size.cc',

@@ -65,6 +65,7 @@ class BlockingUnaryCallImpl {
context->initial_metadata_flags());
ops.RecvInitialMetadata(context);
ops.RecvMessage(result);
ops.AllowNoMessage();
ops.ClientSendClose();
ops.ClientRecvStatus(context, &status_);
call.PerformOps(&ops);

@@ -195,12 +195,25 @@
#define GPR_PTHREAD_TLS 1
#else /* __MAC_OS_X_VERSION_MIN_REQUIRED < __MAC_10_7 */
#define GPR_CPU_POSIX 1
/* TODO(vjpai): there is a reported issue in bazel build for Mac where __thread
in a header is currently not working (bazelbuild/bazel#4341). Remove
the following conditional and use GPR_GCC_TLS when that is fixed */
#ifndef GRPC_BAZEL_BUILD
#define GPR_GCC_TLS 1
#else /* GRPC_BAZEL_BUILD */
#define GPR_PTHREAD_TLS 1
#endif /* GRPC_BAZEL_BUILD */
#define GPR_APPLE_PTHREAD_NAME 1
#endif
#else /* __MAC_OS_X_VERSION_MIN_REQUIRED */
#define GPR_CPU_POSIX 1
/* TODO(vjpai): Remove the following conditional and use only GPR_GCC_TLS
when bazelbuild/bazel#4341 is fixed */
#ifndef GRPC_BAZEL_BUILD
#define GPR_GCC_TLS 1
#else /* GRPC_BAZEL_BUILD */
#define GPR_PTHREAD_TLS 1
#endif /* GRPC_BAZEL_BUILD */
#endif
#define GPR_POSIX_CRASH_HANDLER 1
#endif
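
The TODO comments above boil down to a configuration choice between two thread-local-storage strategies. As a rough sketch (variable and function names below are hypothetical, not from this commit), GPR_GCC_TLS means the compiler keyword is used directly, while GPR_PTHREAD_TLS falls back to pthread keys:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    #if defined(GPR_GCC_TLS)
    /* Compiler-provided TLS: usable when __thread works (e.g. non-bazel Mac). */
    static __thread intptr_t g_counter;
    static intptr_t next_value(void) { return ++g_counter; }
    #else
    /* pthread-key fallback, the behavior GPR_PTHREAD_TLS selects. */
    static pthread_key_t g_key;
    static pthread_once_t g_key_once = PTHREAD_ONCE_INIT;
    static void make_key(void) { pthread_key_create(&g_key, NULL); }
    static intptr_t next_value(void) {
      pthread_once(&g_key_once, make_key);
      intptr_t v = (intptr_t)pthread_getspecific(g_key) + 1;
      pthread_setspecific(g_key, (void*)v);
      return v;
    }
    #endif

    int main(void) {
      printf("%ld\n", (long)next_value());
      return 0;
    }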
@@ -305,20 +318,23 @@
* This is primarily because of linker problems and toolchain misconfiguration:
* TLS isn't supported until NDK r12b per
* https://developer.android.com/ndk/downloads/revision_history.html
* TLS also does not work with Android NDK if GCC is being used as the compiler
* instead of Clang.
* Since NDK r16, `__NDK_MAJOR__` and `__NDK_MINOR__` are defined in
* <android/ndk-version.h>. For NDK < r16, users should define these macros,
* e.g. `-D__NDK_MAJOR__=11 -D__NKD_MINOR__=0` for NDK r11. */
#if defined(__ANDROID__) && defined(__clang__) && defined(GPR_GCC_TLS)
#if defined(__ANDROID__) && defined(GPR_GCC_TLS)
#if __has_include(<android/ndk-version.h>)
#include <android/ndk-version.h>
#endif /* __has_include(<android/ndk-version.h>) */
#if defined(__ANDROID__) && defined(__clang__) && defined(__NDK_MAJOR__) && \
defined(__NDK_MINOR__) && \
((__NDK_MAJOR__ < 12) || ((__NDK_MAJOR__ == 12) && (__NDK_MINOR__ < 1)))
#if (defined(__clang__) && defined(__NDK_MAJOR__) && defined(__NDK_MINOR__) && \
((__NDK_MAJOR__ < 12) || \
((__NDK_MAJOR__ == 12) && (__NDK_MINOR__ < 1)))) || \
(defined(__GNUC__) && !defined(__clang__))
#undef GPR_GCC_TLS
#define GPR_PTHREAD_TLS 1
#endif
#endif /*defined(__ANDROID__) && defined(__clang__) && defined(GPR_GCC_TLS) */
#endif /*defined(__ANDROID__) && defined(GPR_GCC_TLS) */
#if defined(__has_include)
#if __has_include(<atomic>)
@@ -418,6 +434,14 @@ typedef unsigned __int64 uint64_t;
#endif
#endif
#ifndef GRPC_UNUSED
#if defined(__GNUC__) && !defined(__MINGW32__)
#define GRPC_UNUSED __attribute__((unused))
#else
#define GRPC_UNUSED
#endif
#endif
#ifndef GPR_PRINT_FORMAT_CHECK
#ifdef __GNUC__
#define GPR_PRINT_FORMAT_CHECK(FORMAT_STR, ARGS) \
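
The GRPC_UNUSED macro added above marks deliberately unreferenced symbols so they do not trip unused-symbol warnings on GCC/Clang builds. A minimal, hedged usage sketch (the helper function below is hypothetical, not code from this commit):

    #include <stdio.h>

    #ifndef GRPC_UNUSED
    #if defined(__GNUC__) && !defined(__MINGW32__)
    #define GRPC_UNUSED __attribute__((unused)) /* as defined in the hunk above */
    #else
    #define GRPC_UNUSED
    #endif
    #endif

    /* Never called, yet compiles cleanly under -Wall -Wextra -Werror because
     * the attribute marks it as intentionally unused. */
    GRPC_UNUSED static void dump_debug_state(void) { printf("debug\n"); }

    int main(void) { return 0; }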

@@ -1,103 +0,0 @@
{
"name": "grpc",
"version": "1.7.2",
"author": "Google Inc.",
"description": "gRPC Library for Node",
"homepage": "https://grpc.io/",
"repository": {
"type": "git",
"url": "https://github.com/grpc/grpc.git"
},
"bugs": "https://github.com/grpc/grpc/issues",
"contributors": [
{
"name": "Michael Lumish",
"email": "mlumish@google.com"
}
],
"directories": {
"lib": "src/node/src"
},
"scripts": {
"lint": "node ./node_modules/jshint/bin/jshint src/node/src src/node/test src/node/interop src/node/index.js --exclude-path=src/node/.jshintignore",
"test": "./node_modules/.bin/mocha src/node/test && npm run-script lint",
"electron-build": "./node_modules/.bin/node-pre-gyp configure build --runtime=electron --disturl=https://atom.io/download/atom-shell",
"gen_docs": "./node_modules/.bin/jsdoc -c src/node/jsdoc_conf.json",
"coverage": "./node_modules/.bin/istanbul cover ./node_modules/.bin/_mocha src/node/test",
"install": "./node_modules/.bin/node-pre-gyp install --fallback-to-build --library=static_library"
},
"bundledDependencies": [
"node-pre-gyp"
],
"dependencies": {
"arguejs": "^0.2.3",
"lodash": "^4.15.0",
"nan": "^2.0.0",
"node-pre-gyp": "^0.6.35",
"protobufjs": "^5.0.0"
},
"devDependencies": {
"async": "^2.0.1",
"body-parser": "^1.15.2",
"electron-mocha": "^3.1.1",
"express": "^4.14.0",
"google-auth-library": "^0.9.2",
"google-protobuf": "^3.0.0",
"istanbul": "^0.4.4",
"jsdoc": "^3.3.2",
"jshint": "^2.5.0",
"minimist": "^1.1.0",
"mocha": "^3.0.2",
"mocha-jenkins-reporter": "^0.2.3",
"poisson-process": "^0.2.1"
},
"engines": {
"node": ">=4"
},
"binary": {
"module_name": "grpc_node",
"module_path": "src/node/extension_binary/{node_abi}-{platform}-{arch}",
"host": "https://storage.googleapis.com/",
"remote_path": "grpc-precompiled-binaries/node/{name}/v{version}",
"package_name": "{node_abi}-{platform}-{arch}.tar.gz"
},
"files": [
"LICENSE",
"src/node/README.md",
"src/proto",
"etc",
"src/node/index.js",
"src/node/src",
"src/node/ext",
"include/grpc",
"src/core",
"src/boringssl",
"src/zlib",
"third_party/nanopb",
"third_party/zlib",
"third_party/boringssl",
"binding.gyp"
],
"main": "src/node/index.js",
"license": "Apache-2.0",
"jshintConfig": {
"bitwise": true,
"curly": true,
"eqeqeq": true,
"esnext": true,
"freeze": true,
"immed": true,
"indent": 2,
"latedef": "nofunc",
"maxlen": 80,
"mocha": true,
"newcap": true,
"node": true,
"noarg": true,
"quotmark": "single",
"strict": true,
"trailing": true,
"undef": true,
"unused": "vars"
}
}

@ -33,7 +33,8 @@
#define DEFAULT_POLL_INTERVAL_MS 5000
typedef struct backup_poller {
namespace {
struct backup_poller {
grpc_timer polling_timer;
grpc_closure run_poller_closure;
grpc_closure shutdown_closure;
@ -42,7 +43,8 @@ typedef struct backup_poller {
bool shutting_down; // guarded by pollset_mu
gpr_refcount refs;
gpr_refcount shutdown_refs;
} backup_poller;
};
} // namespace
static gpr_once g_once = GPR_ONCE_INIT;
static gpr_mu g_poller_mu;
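The same mechanical refactor appears in most of the .cc hunks below: file-local C-style `typedef struct ... {} name;` declarations are rewrapped as plain structs inside an unnamed namespace, the idiomatic C++ way to give these types internal linkage now that the files build as C++. A minimal before/after sketch (the struct name and field are illustrative):
// Before: C-style typedef, left over from when the file was plain C.
typedef struct widget_state {
  int refs;
} widget_state;
// After: an unnamed namespace scopes the type to this translation unit,
// and C++ needs no typedef to use the bare name.
namespace {
struct widget_state {
  int refs;
};
}  // namespace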

@ -58,7 +58,8 @@ typedef enum {
CALLING_BACK_AND_FINISHED,
} callback_phase;
typedef struct {
namespace {
struct state_watcher {
gpr_mu mu;
callback_phase phase;
grpc_closure on_complete;
@ -71,7 +72,8 @@ typedef struct {
grpc_channel* channel;
grpc_error* error;
void* tag;
} state_watcher;
};
} // namespace
static void delete_state_watcher(state_watcher* w) {
grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(

@ -553,6 +553,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
}
grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties,
chand->interested_parties);
grpc_lb_policy_shutdown_locked(chand->lb_policy, new_lb_policy);
GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
}
chand->lb_policy = new_lb_policy;
@ -658,6 +659,7 @@ static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
if (chand->lb_policy != nullptr) {
grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties,
chand->interested_parties);
grpc_lb_policy_shutdown_locked(chand->lb_policy, nullptr);
GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
chand->lb_policy = nullptr;
}
@ -792,6 +794,7 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
if (chand->lb_policy != nullptr) {
grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties,
chand->interested_parties);
grpc_lb_policy_shutdown_locked(chand->lb_policy, nullptr);
GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
}
gpr_free(chand->info_lb_policy_name);
@ -852,12 +855,10 @@ typedef struct client_channel_call_data {
grpc_subchannel_call* subchannel_call;
grpc_error* error;
grpc_lb_policy* lb_policy; // Holds ref while LB pick is pending.
grpc_lb_policy_pick_state pick;
grpc_closure lb_pick_closure;
grpc_closure lb_pick_cancel_closure;
grpc_connected_subchannel* connected_subchannel;
grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
grpc_polling_entity* pollent;
grpc_transport_stream_op_batch* waiting_for_pick_batches[MAX_WAITING_BATCHES];
@ -866,8 +867,6 @@ typedef struct client_channel_call_data {
grpc_transport_stream_op_batch* initial_metadata_batch;
grpc_linked_mdelem lb_token_mdelem;
grpc_closure on_complete;
grpc_closure* original_on_complete;
} call_data;
@ -1005,16 +1004,16 @@ static void create_subchannel_call_locked(grpc_call_element* elem,
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
const grpc_connected_subchannel_call_args call_args = {
calld->pollent, // pollent
calld->path, // path
calld->call_start_time, // start_time
calld->deadline, // deadline
calld->arena, // arena
calld->subchannel_call_context, // context
calld->call_combiner // call_combiner
calld->pollent, // pollent
calld->path, // path
calld->call_start_time, // start_time
calld->deadline, // deadline
calld->arena, // arena
calld->pick.subchannel_call_context, // context
calld->call_combiner // call_combiner
};
grpc_error* new_error = grpc_connected_subchannel_create_call(
calld->connected_subchannel, &call_args, &calld->subchannel_call);
calld->pick.connected_subchannel, &call_args, &calld->subchannel_call);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
chand, calld, calld->subchannel_call, grpc_error_string(new_error));
@ -1032,7 +1031,7 @@ static void create_subchannel_call_locked(grpc_call_element* elem,
static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
call_data* calld = (call_data*)elem->call_data;
channel_data* chand = (channel_data*)elem->channel_data;
if (calld->connected_subchannel == nullptr) {
if (calld->pick.connected_subchannel == nullptr) {
// Failed to create subchannel.
GRPC_ERROR_UNREF(calld->error);
calld->error = error == GRPC_ERROR_NONE
@ -1071,13 +1070,16 @@ static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
grpc_call_element* elem = (grpc_call_element*)arg;
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
if (calld->lb_policy != nullptr) {
// Note: chand->lb_policy may have changed since we started our pick,
// in which case we will be cancelling the pick on a policy other than
// the one we started it on. However, this will just be a no-op.
if (error != GRPC_ERROR_NONE && chand->lb_policy != nullptr) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
chand, calld, calld->lb_policy);
chand, calld, chand->lb_policy);
}
grpc_lb_policy_cancel_pick_locked(
calld->lb_policy, &calld->connected_subchannel, GRPC_ERROR_REF(error));
grpc_lb_policy_cancel_pick_locked(chand->lb_policy, &calld->pick,
GRPC_ERROR_REF(error));
}
GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback_cancel");
}
@ -1092,9 +1094,6 @@ static void pick_callback_done_locked(void* arg, grpc_error* error) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
chand, calld);
}
GPR_ASSERT(calld->lb_policy != nullptr);
GRPC_LB_POLICY_UNREF(calld->lb_policy, "pick_subchannel");
calld->lb_policy = nullptr;
async_pick_done_locked(elem, GRPC_ERROR_REF(error));
}
@ -1128,26 +1127,21 @@ static bool pick_callback_start_locked(grpc_call_element* elem) {
initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
}
}
const grpc_lb_policy_pick_args inputs = {
calld->pick.initial_metadata =
calld->initial_metadata_batch->payload->send_initial_metadata
.send_initial_metadata,
initial_metadata_flags, &calld->lb_token_mdelem};
// Keep a ref to the LB policy in calld while the pick is pending.
GRPC_LB_POLICY_REF(chand->lb_policy, "pick_subchannel");
calld->lb_policy = chand->lb_policy;
.send_initial_metadata;
calld->pick.initial_metadata_flags = initial_metadata_flags;
GRPC_CLOSURE_INIT(&calld->lb_pick_closure, pick_callback_done_locked, elem,
grpc_combiner_scheduler(chand->combiner));
const bool pick_done = grpc_lb_policy_pick_locked(
chand->lb_policy, &inputs, &calld->connected_subchannel,
calld->subchannel_call_context, nullptr, &calld->lb_pick_closure);
calld->pick.on_complete = &calld->lb_pick_closure;
const bool pick_done =
grpc_lb_policy_pick_locked(chand->lb_policy, &calld->pick);
if (pick_done) {
/* synchronous grpc_lb_policy_pick call. Unref the LB policy. */
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed synchronously",
chand, calld);
}
GRPC_LB_POLICY_UNREF(calld->lb_policy, "pick_subchannel");
calld->lb_policy = nullptr;
} else {
GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
grpc_call_combiner_set_notify_on_cancel(
@ -1289,7 +1283,7 @@ static void start_pick_locked(void* arg, grpc_error* ignored) {
grpc_call_element* elem = (grpc_call_element*)arg;
call_data* calld = (call_data*)elem->call_data;
channel_data* chand = (channel_data*)elem->channel_data;
GPR_ASSERT(calld->connected_subchannel == nullptr);
GPR_ASSERT(calld->pick.connected_subchannel == nullptr);
if (chand->lb_policy != nullptr) {
// We already have an LB policy, so ask it for a pick.
if (pick_callback_start_locked(elem)) {
@ -1467,15 +1461,14 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
"client_channel_destroy_call");
}
GPR_ASSERT(calld->lb_policy == nullptr);
GPR_ASSERT(calld->waiting_for_pick_batches_count == 0);
if (calld->connected_subchannel != nullptr) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(calld->connected_subchannel, "picked");
if (calld->pick.connected_subchannel != nullptr) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(calld->pick.connected_subchannel, "picked");
}
for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
if (calld->subchannel_call_context[i].value != nullptr) {
calld->subchannel_call_context[i].destroy(
calld->subchannel_call_context[i].value);
if (calld->pick.subchannel_call_context[i].value != nullptr) {
calld->pick.subchannel_call_context[i].destroy(
calld->pick.subchannel_call_context[i].value);
}
}
GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE);

@ -19,8 +19,6 @@
#include "src/core/ext/filters/client_channel/lb_policy.h"
#include "src/core/lib/iomgr/combiner.h"
#define WEAK_REF_BITS 16
grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount(
false, "lb_policy_refcount");
@ -28,91 +26,60 @@ void grpc_lb_policy_init(grpc_lb_policy* policy,
const grpc_lb_policy_vtable* vtable,
grpc_combiner* combiner) {
policy->vtable = vtable;
gpr_atm_no_barrier_store(&policy->ref_pair, 1 << WEAK_REF_BITS);
gpr_ref_init(&policy->refs, 1);
policy->interested_parties = grpc_pollset_set_create();
policy->combiner = GRPC_COMBINER_REF(combiner, "lb_policy");
}
#ifndef NDEBUG
#define REF_FUNC_EXTRA_ARGS , const char *file, int line, const char *reason
#define REF_MUTATE_EXTRA_ARGS REF_FUNC_EXTRA_ARGS, const char* purpose
#define REF_FUNC_PASS_ARGS(new_reason) , file, line, new_reason
#define REF_MUTATE_PASS_ARGS(purpose) , file, line, reason, purpose
void grpc_lb_policy_ref(grpc_lb_policy* lb_policy, const char* file, int line,
const char* reason) {
if (grpc_trace_lb_policy_refcount.enabled()) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&lb_policy->refs.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"LB_POLICY:%p ref %" PRIdPTR " -> %" PRIdPTR " %s", lb_policy,
old_refs, old_refs + 1, reason);
}
#else
#define REF_FUNC_EXTRA_ARGS
#define REF_MUTATE_EXTRA_ARGS
#define REF_FUNC_PASS_ARGS(new_reason)
#define REF_MUTATE_PASS_ARGS(x)
void grpc_lb_policy_ref(grpc_lb_policy* lb_policy) {
#endif
gpr_ref(&lb_policy->refs);
}
static gpr_atm ref_mutate(grpc_lb_policy* c, gpr_atm delta,
int barrier REF_MUTATE_EXTRA_ARGS) {
gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
: gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
#ifndef NDEBUG
void grpc_lb_policy_unref(grpc_lb_policy* lb_policy, const char* file, int line,
const char* reason) {
if (grpc_trace_lb_policy_refcount.enabled()) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&lb_policy->refs.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"LB_POLICY: %p %12s 0x%" PRIxPTR " -> 0x%" PRIxPTR " [%s]", c,
purpose, old_val, old_val + delta, reason);
"LB_POLICY:%p unref %" PRIdPTR " -> %" PRIdPTR " %s", lb_policy,
old_refs, old_refs - 1, reason);
}
#else
void grpc_lb_policy_unref(grpc_lb_policy* lb_policy) {
#endif
return old_val;
}
void grpc_lb_policy_ref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
ref_mutate(policy, 1 << WEAK_REF_BITS, 0 REF_MUTATE_PASS_ARGS("STRONG_REF"));
}
static void shutdown_locked(void* arg, grpc_error* error) {
grpc_lb_policy* policy = (grpc_lb_policy*)arg;
policy->vtable->shutdown_locked(policy);
GRPC_LB_POLICY_WEAK_UNREF(policy, "strong-unref");
}
void grpc_lb_policy_unref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
gpr_atm old_val =
ref_mutate(policy, (gpr_atm)1 - (gpr_atm)(1 << WEAK_REF_BITS),
1 REF_MUTATE_PASS_ARGS("STRONG_UNREF"));
gpr_atm mask = ~(gpr_atm)((1 << WEAK_REF_BITS) - 1);
gpr_atm check = 1 << WEAK_REF_BITS;
if ((old_val & mask) == check) {
GRPC_CLOSURE_SCHED(
GRPC_CLOSURE_CREATE(shutdown_locked, policy,
grpc_combiner_scheduler(policy->combiner)),
GRPC_ERROR_NONE);
} else {
grpc_lb_policy_weak_unref(policy REF_FUNC_PASS_ARGS("strong-unref"));
if (gpr_unref(&lb_policy->refs)) {
grpc_pollset_set_destroy(lb_policy->interested_parties);
grpc_combiner* combiner = lb_policy->combiner;
lb_policy->vtable->destroy(lb_policy);
GRPC_COMBINER_UNREF(combiner, "lb_policy");
}
}
void grpc_lb_policy_weak_ref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
ref_mutate(policy, 1, 0 REF_MUTATE_PASS_ARGS("WEAK_REF"));
}
void grpc_lb_policy_weak_unref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
gpr_atm old_val =
ref_mutate(policy, -(gpr_atm)1, 1 REF_MUTATE_PASS_ARGS("WEAK_UNREF"));
if (old_val == 1) {
grpc_pollset_set_destroy(policy->interested_parties);
grpc_combiner* combiner = policy->combiner;
policy->vtable->destroy(policy);
GRPC_COMBINER_UNREF(combiner, "lb_policy");
}
void grpc_lb_policy_shutdown_locked(grpc_lb_policy* policy,
grpc_lb_policy* new_policy) {
policy->vtable->shutdown_locked(policy, new_policy);
}
int grpc_lb_policy_pick_locked(grpc_lb_policy* policy,
const grpc_lb_policy_pick_args* pick_args,
grpc_connected_subchannel** target,
grpc_call_context_element* context,
void** user_data, grpc_closure* on_complete) {
return policy->vtable->pick_locked(policy, pick_args, target, context,
user_data, on_complete);
grpc_lb_policy_pick_state* pick) {
return policy->vtable->pick_locked(policy, pick);
}
void grpc_lb_policy_cancel_pick_locked(grpc_lb_policy* policy,
grpc_connected_subchannel** target,
grpc_lb_policy_pick_state* pick,
grpc_error* error) {
policy->vtable->cancel_pick_locked(policy, target, error);
policy->vtable->cancel_pick_locked(policy, pick, error);
}
void grpc_lb_policy_cancel_picks_locked(grpc_lb_policy* policy,

@ -33,7 +33,7 @@ extern grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount;
struct grpc_lb_policy {
const grpc_lb_policy_vtable* vtable;
gpr_atm ref_pair;
gpr_refcount refs;
/* owned pointer to interested parties in load balancing decisions */
grpc_pollset_set* interested_parties;
/* combiner under which lb_policy actions take place */
@ -42,32 +42,42 @@ struct grpc_lb_policy {
grpc_closure* request_reresolution;
};
/** Extra arguments for an LB pick */
typedef struct grpc_lb_policy_pick_args {
/** Initial metadata associated with the picking call. */
/// State used for an LB pick.
typedef struct grpc_lb_policy_pick_state {
/// Initial metadata associated with the picking call.
grpc_metadata_batch* initial_metadata;
/** Bitmask used for selective cancelling. See \a
* grpc_lb_policy_cancel_picks() and \a GRPC_INITIAL_METADATA_* in
* grpc_types.h */
/// Bitmask used for selective cancelling. See \a
/// grpc_lb_policy_cancel_picks() and \a GRPC_INITIAL_METADATA_* in
/// grpc_types.h.
uint32_t initial_metadata_flags;
/** Storage for LB token in \a initial_metadata, or NULL if not used */
grpc_linked_mdelem* lb_token_mdelem_storage;
} grpc_lb_policy_pick_args;
/// Storage for LB token in \a initial_metadata, or NULL if not used.
grpc_linked_mdelem lb_token_mdelem_storage;
/// Closure to run when pick is complete, if not completed synchronously.
grpc_closure* on_complete;
/// Will be set to the selected subchannel, or NULL on failure or when
/// the LB policy decides to drop the call.
grpc_connected_subchannel* connected_subchannel;
/// Will be populated with context to pass to the subchannel call, if needed.
grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
/// Upon success, \a *user_data will be set to whatever opaque information
/// may need to be propagated from the LB policy, or NULL if not needed.
void** user_data;
/// Next pointer. For internal use by LB policy.
struct grpc_lb_policy_pick_state* next;
} grpc_lb_policy_pick_state;
struct grpc_lb_policy_vtable {
void (*destroy)(grpc_lb_policy* policy);
void (*shutdown_locked)(grpc_lb_policy* policy);
/// \see grpc_lb_policy_shutdown_locked().
void (*shutdown_locked)(grpc_lb_policy* policy, grpc_lb_policy* new_policy);
/** \see grpc_lb_policy_pick */
int (*pick_locked)(grpc_lb_policy* policy,
const grpc_lb_policy_pick_args* pick_args,
grpc_connected_subchannel** target,
grpc_call_context_element* context, void** user_data,
grpc_closure* on_complete);
int (*pick_locked)(grpc_lb_policy* policy, grpc_lb_policy_pick_state* pick);
/** \see grpc_lb_policy_cancel_pick */
void (*cancel_pick_locked)(grpc_lb_policy* policy,
grpc_connected_subchannel** target,
grpc_lb_policy_pick_state* pick,
grpc_error* error);
/** \see grpc_lb_policy_cancel_picks */
@ -103,37 +113,19 @@ struct grpc_lb_policy_vtable {
};
#ifndef NDEBUG
/* Strong references: the policy will shutdown when they reach zero */
#define GRPC_LB_POLICY_REF(p, r) \
grpc_lb_policy_ref((p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_UNREF(p, r) \
grpc_lb_policy_unref((p), __FILE__, __LINE__, (r))
/* Weak references: they don't prevent the shutdown of the LB policy. When no
* strong references are left but there are still weak ones, shutdown is called.
* Once the weak reference also reaches zero, the LB policy is destroyed. */
#define GRPC_LB_POLICY_WEAK_REF(p, r) \
grpc_lb_policy_weak_ref((p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_WEAK_UNREF(p, r) \
grpc_lb_policy_weak_unref((p), __FILE__, __LINE__, (r))
void grpc_lb_policy_ref(grpc_lb_policy* policy, const char* file, int line,
const char* reason);
void grpc_lb_policy_unref(grpc_lb_policy* policy, const char* file, int line,
const char* reason);
void grpc_lb_policy_weak_ref(grpc_lb_policy* policy, const char* file, int line,
const char* reason);
void grpc_lb_policy_weak_unref(grpc_lb_policy* policy, const char* file,
int line, const char* reason);
#else
#else // !NDEBUG
#define GRPC_LB_POLICY_REF(p, r) grpc_lb_policy_ref((p))
#define GRPC_LB_POLICY_UNREF(p, r) grpc_lb_policy_unref((p))
#define GRPC_LB_POLICY_WEAK_REF(p, r) grpc_lb_policy_weak_ref((p))
#define GRPC_LB_POLICY_WEAK_UNREF(p, r) grpc_lb_policy_weak_unref((p))
void grpc_lb_policy_ref(grpc_lb_policy* policy);
void grpc_lb_policy_unref(grpc_lb_policy* policy);
void grpc_lb_policy_weak_ref(grpc_lb_policy* policy);
void grpc_lb_policy_weak_unref(grpc_lb_policy* policy);
#endif
/** called by concrete implementations to initialize the base struct */
@ -141,28 +133,24 @@ void grpc_lb_policy_init(grpc_lb_policy* policy,
const grpc_lb_policy_vtable* vtable,
grpc_combiner* combiner);
/** Finds an appropriate subchannel for a call, based on \a pick_args.
\a target will be set to the selected subchannel, or NULL on failure
or when the LB policy decides to drop the call.
/// Shuts down \a policy.
/// If \a new_policy is non-null, any pending picks will be restarted
/// on that policy; otherwise, they will be failed.
void grpc_lb_policy_shutdown_locked(grpc_lb_policy* policy,
grpc_lb_policy* new_policy);
Upon success, \a user_data will be set to whatever opaque information
may need to be propagated from the LB policy, or NULL if not needed.
\a context will be populated with context to pass to the subchannel
call, if needed.
/** Finds an appropriate subchannel for a call, based on data in \a pick.
\a pick must remain alive until the pick is complete.
If the pick succeeds and a result is known immediately, a non-zero
value will be returned. Otherwise, \a on_complete will be invoked
value will be returned. Otherwise, \a pick->on_complete will be invoked
once the pick is complete with its error argument set to indicate
success or failure.
Any IO should be done under the \a interested_parties \a grpc_pollset_set
in the \a grpc_lb_policy struct. */
int grpc_lb_policy_pick_locked(grpc_lb_policy* policy,
const grpc_lb_policy_pick_args* pick_args,
grpc_connected_subchannel** target,
grpc_call_context_element* context,
void** user_data, grpc_closure* on_complete);
grpc_lb_policy_pick_state* pick);
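The caller side of this new signature can be seen in the client_channel.cc hunks earlier in this commit; the following is a condensed, hedged sketch of that pattern (the helper name and its parameters are assumptions for illustration, not part of this header):
#include "src/core/ext/filters/client_channel/lb_policy.h"
// Sketch only: the fields that used to be separate pick_locked() arguments now
// travel in a grpc_lb_policy_pick_state, which must outlive the pick
// (client_channel embeds it in its per-call data).
int start_pick_sketch(grpc_lb_policy* policy, grpc_lb_policy_pick_state* pick,
                      grpc_metadata_batch* initial_metadata,
                      uint32_t initial_metadata_flags,
                      grpc_closure* on_pick_done) {
  pick->initial_metadata = initial_metadata;
  pick->initial_metadata_flags = initial_metadata_flags;
  pick->on_complete = on_pick_done;  // run only if the pick completes async
  const int pick_done = grpc_lb_policy_pick_locked(policy, pick);
  if (pick_done) {
    // Synchronous result: pick->connected_subchannel is set, or NULL if the
    // policy decided to drop the call.
  } else {
    // Asynchronous: on_pick_done will be scheduled once the pick completes; a
    // still-pending pick can be aborted with
    // grpc_lb_policy_cancel_pick_locked(policy, pick, error).
  }
  return pick_done;
}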
/** Perform a connected subchannel ping (see \a grpc_connected_subchannel_ping)
against one of the connected subchannels managed by \a policy. */
@ -170,11 +158,11 @@ void grpc_lb_policy_ping_one_locked(grpc_lb_policy* policy,
grpc_closure* on_initiate,
grpc_closure* on_ack);
/** Cancel picks for \a target.
/** Cancel picks for \a pick.
The \a on_complete callback of the pending pick will be invoked with \a
pick->connected_subchannel set to NULL. */
void grpc_lb_policy_cancel_pick_locked(grpc_lb_policy* policy,
grpc_connected_subchannel** target,
grpc_lb_policy_pick_state* pick,
grpc_error* error);
/** Cancel all pending picks for which their \a initial_metadata_flags (as given

@ -32,7 +32,8 @@ static grpc_error* init_channel_elem(grpc_channel_element* elem,
static void destroy_channel_elem(grpc_channel_element* elem) {}
typedef struct {
namespace {
struct call_data {
// Stats object to update.
grpc_grpclb_client_stats* client_stats;
// State for intercepting send_initial_metadata.
@ -43,7 +44,8 @@ typedef struct {
grpc_closure recv_initial_metadata_ready;
grpc_closure* original_recv_initial_metadata_ready;
bool recv_initial_metadata_succeeded;
} call_data;
};
} // namespace
static void on_complete_for_send(void* arg, grpc_error* error) {
call_data* calld = (call_data*)arg;

@ -31,13 +31,6 @@
grpc_core::TraceFlag grpc_lb_pick_first_trace(false, "pick_first");
typedef struct pending_pick {
struct pending_pick* next;
uint32_t initial_metadata_flags;
grpc_connected_subchannel** target;
grpc_closure* on_complete;
} pending_pick;
typedef struct {
/** base policy: must be first */
grpc_lb_policy base;
@ -52,7 +45,7 @@ typedef struct {
/** are we shut down? */
bool shutdown;
/** list of picks that are waiting on connectivity */
pending_pick* pending_picks;
grpc_lb_policy_pick_state* pending_picks;
/** our connectivity state tracker */
grpc_connectivity_state_tracker state_tracker;
} pick_first_lb_policy;
@ -70,19 +63,27 @@ static void pf_destroy(grpc_lb_policy* pol) {
}
}
static void pf_shutdown_locked(grpc_lb_policy* pol) {
static void pf_shutdown_locked(grpc_lb_policy* pol,
grpc_lb_policy* new_policy) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG, "Pick First %p Shutting down", p);
}
p->shutdown = true;
pending_pick* pp;
while ((pp = p->pending_picks) != nullptr) {
p->pending_picks = pp->next;
*pp->target = nullptr;
GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_ERROR_REF(error));
gpr_free(pp);
grpc_lb_policy_pick_state* pick;
while ((pick = p->pending_picks) != nullptr) {
p->pending_picks = pick->next;
if (new_policy != nullptr) {
// Hand off to new LB policy.
if (grpc_lb_policy_pick_locked(new_policy, pick)) {
// Synchronous return, schedule closure.
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
}
} else {
pick->connected_subchannel = nullptr;
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_REF(error));
}
}
grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "shutdown");
@ -102,19 +103,18 @@ static void pf_shutdown_locked(grpc_lb_policy* pol) {
}
static void pf_cancel_pick_locked(grpc_lb_policy* pol,
grpc_connected_subchannel** target,
grpc_lb_policy_pick_state* pick,
grpc_error* error) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
pending_pick* pp = p->pending_picks;
grpc_lb_policy_pick_state* pp = p->pending_picks;
p->pending_picks = nullptr;
while (pp != nullptr) {
pending_pick* next = pp->next;
if (pp->target == target) {
*target = nullptr;
GRPC_CLOSURE_SCHED(pp->on_complete,
grpc_lb_policy_pick_state* next = pp->next;
if (pp == pick) {
pick->connected_subchannel = nullptr;
GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
gpr_free(pp);
} else {
pp->next = p->pending_picks;
p->pending_picks = pp;
@ -129,21 +129,20 @@ static void pf_cancel_picks_locked(grpc_lb_policy* pol,
uint32_t initial_metadata_flags_eq,
grpc_error* error) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
pending_pick* pp = p->pending_picks;
grpc_lb_policy_pick_state* pick = p->pending_picks;
p->pending_picks = nullptr;
while (pp != nullptr) {
pending_pick* next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
while (pick != nullptr) {
grpc_lb_policy_pick_state* next = pick->next;
if ((pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
GRPC_CLOSURE_SCHED(pp->on_complete,
GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
gpr_free(pp);
} else {
pp->next = p->pending_picks;
p->pending_picks = pp;
pick->next = p->pending_picks;
p->pending_picks = pick;
}
pp = next;
pick = next;
}
GRPC_ERROR_UNREF(error);
}
@ -173,27 +172,20 @@ static void pf_exit_idle_locked(grpc_lb_policy* pol) {
}
static int pf_pick_locked(grpc_lb_policy* pol,
const grpc_lb_policy_pick_args* pick_args,
grpc_connected_subchannel** target,
grpc_call_context_element* context, void** user_data,
grpc_closure* on_complete) {
grpc_lb_policy_pick_state* pick) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
// If we have a selected subchannel already, return synchronously.
if (p->selected != nullptr) {
*target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected->connected_subchannel,
"picked");
pick->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
p->selected->connected_subchannel, "picked");
return 1;
}
// No subchannel selected yet, so handle asynchronously.
if (!p->started_picking) {
start_picking_locked(p);
}
pending_pick* pp = (pending_pick*)gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->target = target;
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
pp->on_complete = on_complete;
p->pending_picks = pp;
pick->next = p->pending_picks;
p->pending_picks = pick;
return 0;
}
@ -479,18 +471,17 @@ static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
// Drop all other subchannels, since we are now connected.
destroy_unselected_subchannels_locked(p);
// Update any calls that were waiting for a pick.
pending_pick* pp;
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_lb_policy_pick_state* pick;
while ((pick = p->pending_picks)) {
p->pending_picks = pick->next;
pick->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
p->selected->connected_subchannel, "picked");
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO,
"Servicing pending pick with selected subchannel %p",
(void*)p->selected);
}
GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
}
// Renew notification.
grpc_lb_subchannel_data_start_connectivity_watch(sd);

@ -41,29 +41,6 @@
grpc_core::TraceFlag grpc_lb_round_robin_trace(false, "round_robin");
/** List of entities waiting for a pick.
*
* Once a pick is available, \a target is updated and \a on_complete called. */
typedef struct pending_pick {
struct pending_pick* next;
/* output argument where to store the pick()ed user_data. It'll be NULL if no
* such data is present or there's an error (the definite test for errors is
* \a target being NULL). */
void** user_data;
/* bitmask passed to pick() and used for selective cancelling. See
* grpc_lb_policy_cancel_picks() */
uint32_t initial_metadata_flags;
/* output argument where to store the pick()ed connected subchannel, or NULL
* upon error. */
grpc_connected_subchannel** target;
/* to be invoked once the pick() has completed (regardless of success) */
grpc_closure* on_complete;
} pending_pick;
typedef struct round_robin_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
@ -75,7 +52,7 @@ typedef struct round_robin_lb_policy {
/** are we shutting down? */
bool shutdown;
/** List of picks that are waiting on connectivity */
pending_pick* pending_picks;
grpc_lb_policy_pick_state* pending_picks;
/** our connectivity state tracker */
grpc_connectivity_state_tracker state_tracker;
@ -167,19 +144,27 @@ static void rr_destroy(grpc_lb_policy* pol) {
gpr_free(p);
}
static void rr_shutdown_locked(grpc_lb_policy* pol) {
static void rr_shutdown_locked(grpc_lb_policy* pol,
grpc_lb_policy* new_policy) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] Shutting down", p);
}
p->shutdown = true;
pending_pick* pp;
while ((pp = p->pending_picks) != nullptr) {
p->pending_picks = pp->next;
*pp->target = nullptr;
GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_ERROR_REF(error));
gpr_free(pp);
grpc_lb_policy_pick_state* pick;
while ((pick = p->pending_picks) != nullptr) {
p->pending_picks = pick->next;
if (new_policy != nullptr) {
// Hand off to new LB policy.
if (grpc_lb_policy_pick_locked(new_policy, pick)) {
// Synchronous return; schedule callback.
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
}
} else {
pick->connected_subchannel = nullptr;
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_REF(error));
}
}
grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "rr_shutdown");
@ -199,19 +184,18 @@ static void rr_shutdown_locked(grpc_lb_policy* pol) {
}
static void rr_cancel_pick_locked(grpc_lb_policy* pol,
grpc_connected_subchannel** target,
grpc_lb_policy_pick_state* pick,
grpc_error* error) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
pending_pick* pp = p->pending_picks;
grpc_lb_policy_pick_state* pp = p->pending_picks;
p->pending_picks = nullptr;
while (pp != nullptr) {
pending_pick* next = pp->next;
if (pp->target == target) {
*target = nullptr;
GRPC_CLOSURE_SCHED(pp->on_complete,
grpc_lb_policy_pick_state* next = pp->next;
if (pp == pick) {
pick->connected_subchannel = nullptr;
GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1));
gpr_free(pp);
} else {
pp->next = p->pending_picks;
p->pending_picks = pp;
@ -226,22 +210,21 @@ static void rr_cancel_picks_locked(grpc_lb_policy* pol,
uint32_t initial_metadata_flags_eq,
grpc_error* error) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
pending_pick* pp = p->pending_picks;
grpc_lb_policy_pick_state* pick = p->pending_picks;
p->pending_picks = nullptr;
while (pp != nullptr) {
pending_pick* next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
while (pick != nullptr) {
grpc_lb_policy_pick_state* next = pick->next;
if ((pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
*pp->target = nullptr;
GRPC_CLOSURE_SCHED(pp->on_complete,
pick->connected_subchannel = nullptr;
GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1));
gpr_free(pp);
} else {
pp->next = p->pending_picks;
p->pending_picks = pp;
pick->next = p->pending_picks;
p->pending_picks = pick;
}
pp = next;
pick = next;
}
GRPC_ERROR_UNREF(error);
}
@ -266,13 +249,10 @@ static void rr_exit_idle_locked(grpc_lb_policy* pol) {
}
static int rr_pick_locked(grpc_lb_policy* pol,
const grpc_lb_policy_pick_args* pick_args,
grpc_connected_subchannel** target,
grpc_call_context_element* context, void** user_data,
grpc_closure* on_complete) {
grpc_lb_policy_pick_state* pick) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO, "[RR %p] Trying to pick (shutdown: %d)", (void*)pol,
gpr_log(GPR_INFO, "[RR %p] Trying to pick (shutdown: %d)", pol,
p->shutdown);
}
GPR_ASSERT(!p->shutdown);
@ -282,18 +262,18 @@ static int rr_pick_locked(grpc_lb_policy* pol,
/* readily available, report right away */
grpc_lb_subchannel_data* sd =
&p->subchannel_list->subchannels[next_ready_index];
*target =
pick->connected_subchannel =
GRPC_CONNECTED_SUBCHANNEL_REF(sd->connected_subchannel, "rr_picked");
if (user_data != nullptr) {
*user_data = sd->user_data;
if (pick->user_data != nullptr) {
*pick->user_data = sd->user_data;
}
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(
GPR_DEBUG,
"[RR %p] Picked target <-- Subchannel %p (connected %p) (sl %p, "
"index %lu)",
(void*)p, (void*)sd->subchannel, (void*)*target,
(void*)sd->subchannel_list, (unsigned long)next_ready_index);
"index %" PRIuPTR ")",
p, sd->subchannel, pick->connected_subchannel, sd->subchannel_list,
next_ready_index);
}
/* only advance the last picked pointer if the selection was used */
update_last_ready_subchannel_index_locked(p, next_ready_index);
@ -304,13 +284,8 @@ static int rr_pick_locked(grpc_lb_policy* pol,
if (!p->started_picking) {
start_picking_locked(p);
}
pending_pick* pp = (pending_pick*)gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->target = target;
pp->on_complete = on_complete;
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
pp->user_data = user_data;
p->pending_picks = pp;
pick->next = p->pending_picks;
p->pending_picks = pick;
return 0;
}
@ -493,13 +468,13 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
// picks, update the last picked pointer
update_last_ready_subchannel_index_locked(p, next_ready_index);
}
pending_pick* pp;
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_lb_policy_pick_state* pick;
while ((pick = p->pending_picks)) {
p->pending_picks = pick->next;
pick->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
selected->connected_subchannel, "rr_picked");
if (pp->user_data != nullptr) {
*pp->user_data = selected->user_data;
if (pick->user_data != nullptr) {
*pick->user_data = selected->user_data;
}
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG,
@ -508,8 +483,7 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
(void*)p, (void*)selected->subchannel,
(void*)p->subchannel_list, (unsigned long)next_ready_index);
}
GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
}
}
// Renew notification.

@ -213,13 +213,13 @@ void grpc_lb_subchannel_list_unref(grpc_lb_subchannel_list* subchannel_list,
void grpc_lb_subchannel_list_ref_for_connectivity_watch(
grpc_lb_subchannel_list* subchannel_list, const char* reason) {
GRPC_LB_POLICY_WEAK_REF(subchannel_list->policy, reason);
GRPC_LB_POLICY_REF(subchannel_list->policy, reason);
grpc_lb_subchannel_list_ref(subchannel_list, reason);
}
void grpc_lb_subchannel_list_unref_for_connectivity_watch(
grpc_lb_subchannel_list* subchannel_list, const char* reason) {
GRPC_LB_POLICY_WEAK_UNREF(subchannel_list->policy, reason);
GRPC_LB_POLICY_UNREF(subchannel_list->policy, reason);
grpc_lb_subchannel_list_unref(subchannel_list, reason);
}

@ -61,11 +61,13 @@
((grpc_connected_subchannel*)(gpr_atm_##barrier##_load( \
&(subchannel)->connected_subchannel)))
typedef struct {
namespace {
struct state_watcher {
grpc_closure closure;
grpc_subchannel* subchannel;
grpc_connectivity_state connectivity_state;
} state_watcher;
};
} // namespace
typedef struct external_state_watcher {
grpc_subchannel* subchannel;

@ -35,7 +35,8 @@
/* default maximum size of payload eligible for GET request */
static const size_t kMaxPayloadSizeForGet = 2048;
typedef struct call_data {
namespace {
struct call_data {
grpc_call_combiner* call_combiner;
// State for handling send_initial_metadata ops.
grpc_linked_mdelem method;
@ -60,13 +61,14 @@ typedef struct call_data {
grpc_closure on_send_message_next_done;
grpc_closure* original_send_message_on_complete;
grpc_closure send_message_on_complete;
} call_data;
};
typedef struct channel_data {
struct channel_data {
grpc_mdelem static_scheme;
grpc_mdelem user_agent;
size_t max_payload_size_for_get;
} channel_data;
};
} // namespace
static grpc_error* client_filter_incoming_metadata(grpc_call_element* elem,
grpc_metadata_batch* b) {

@ -35,16 +35,17 @@
#include "src/core/lib/surface/call.h"
#include "src/core/lib/transport/static_metadata.h"
typedef enum {
namespace {
enum initial_metadata_state {
// Initial metadata not yet seen.
INITIAL_METADATA_UNSEEN = 0,
// Initial metadata seen; compression algorithm set.
HAS_COMPRESSION_ALGORITHM,
// Initial metadata seen; no compression algorithm set.
NO_COMPRESSION_ALGORITHM,
} initial_metadata_state;
};
typedef struct call_data {
struct call_data {
grpc_call_combiner* call_combiner;
grpc_linked_mdelem compression_algorithm_storage;
grpc_linked_mdelem stream_compression_algorithm_storage;
@ -62,9 +63,9 @@ typedef struct call_data {
grpc_closure* original_send_message_on_complete;
grpc_closure send_message_on_complete;
grpc_closure on_send_message_next_done;
} call_data;
};
typedef struct channel_data {
struct channel_data {
/** The default, channel-level, compression algorithm */
grpc_compression_algorithm default_compression_algorithm;
/** Bitset of enabled algorithms */
@ -78,7 +79,8 @@ typedef struct channel_data {
uint32_t enabled_stream_compression_algorithms_bitset;
/** Supported stream compression algorithms */
uint32_t supported_stream_compression_algorithms;
} channel_data;
};
} // namespace
static bool skip_compression(grpc_call_element* elem, uint32_t flags,
bool has_compression_algorithm) {

@ -31,7 +31,8 @@
#define EXPECTED_CONTENT_TYPE "application/grpc"
#define EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1
typedef struct call_data {
namespace {
struct call_data {
grpc_call_combiner* call_combiner;
grpc_linked_mdelem status;
@ -60,11 +61,12 @@ typedef struct call_data {
grpc_closure hs_on_recv;
grpc_closure hs_on_complete;
grpc_closure hs_recv_message_ready;
} call_data;
};
typedef struct channel_data {
struct channel_data {
uint8_t unused;
} channel_data;
};
} // namespace
static grpc_error* server_filter_outgoing_metadata(grpc_call_element* elem,
grpc_metadata_batch* b) {

@ -31,7 +31,8 @@
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/transport/static_metadata.h"
typedef struct call_data {
namespace {
struct call_data {
intptr_t id; /**< an id unique to the call */
bool have_trailing_md_string;
grpc_slice trailing_md_string;
@ -48,11 +49,12 @@ typedef struct call_data {
/* to get notified of the availability of the incoming initial metadata. */
grpc_closure on_initial_md_ready;
grpc_metadata_batch* recv_initial_metadata;
} call_data;
};
typedef struct channel_data {
struct channel_data {
intptr_t id; /**< an id unique to the channel */
} channel_data;
};
} // namespace
static void on_initial_md_ready(void* user_data, grpc_error* err) {
grpc_call_element* elem = (grpc_call_element*)user_data;

@ -37,7 +37,8 @@
#define MAX_CONNECTION_IDLE_INTEGER_OPTIONS \
{ DEFAULT_MAX_CONNECTION_IDLE_MS, 1, INT_MAX }
typedef struct channel_data {
namespace {
struct channel_data {
/* We take a reference to the channel stack for the timer callback */
grpc_channel_stack* channel_stack;
/* Guards access to max_age_timer, max_age_timer_pending, max_age_grace_timer
@ -84,7 +85,8 @@ typedef struct channel_data {
grpc_connectivity_state connectivity_state;
/* Number of active calls */
gpr_atm call_count;
} channel_data;
};
} // namespace
/* Increase the number of active calls. Before the increment, if there are no
calls, the max_idle_timer should be cancelled. */

@ -86,7 +86,8 @@ static void* refcounted_message_size_limits_create_from_json(
return value;
}
typedef struct call_data {
namespace {
struct call_data {
grpc_call_combiner* call_combiner;
message_size_limits limits;
// Receive closures are chained: we inject this closure as the
@ -97,13 +98,14 @@ typedef struct call_data {
grpc_byte_stream** recv_message;
// Original recv_message_ready callback, invoked after our own.
grpc_closure* next_recv_message_ready;
} call_data;
};
typedef struct channel_data {
struct channel_data {
message_size_limits limits;
// Maps path names to refcounted_message_size_limits structs.
grpc_slice_hash_table* method_limit_table;
} channel_data;
};
} // namespace
// Callback invoked when we receive a message. Here we check the max
// receive message size.

@ -25,7 +25,8 @@
#include "src/core/lib/surface/channel_init.h"
#include "src/core/lib/transport/metadata.h"
typedef struct call_data {
namespace {
struct call_data {
// Receive closures are chained: we inject this closure as the
// recv_initial_metadata_ready up-call on transport_stream_op, and remember to
// call our next_recv_initial_metadata_ready member after handling it.
@ -37,7 +38,8 @@ typedef struct call_data {
// Marks whether the workaround is active
bool workaround_active;
} call_data;
};
} // namespace
// Find the user agent metadata element in the batch
static bool get_user_agent_mdelem(const grpc_metadata_batch* batch,

@ -138,10 +138,11 @@ static void report_stall(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
const char* staller) {
gpr_log(
GPR_DEBUG,
"%s:%p stream %d stalled by %s [fc:pending=%" PRIdPTR ":flowed=%" PRId64
"%s:%p stream %d stalled by %s [fc:pending=%" PRIdPTR
":pending-compressed=%" PRIdPTR ":flowed=%" PRId64
":peer_initwin=%d:t_win=%" PRId64 ":s_win=%d:s_delta=%" PRId64 "]",
t->peer_string, t, s->id, staller, s->flow_controlled_buffer.length,
s->flow_controlled_bytes_flowed,
s->compressed_data_buffer.length, s->flow_controlled_bytes_flowed,
t->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
t->flow_control->remote_window(),

@ -1232,8 +1232,6 @@ const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
* NULL */
const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
gpr_log(GPR_ERROR,
"Skipping epoll1 because GRPC_LINUX_EPOLL is not defined.");
return nullptr;
}
#endif /* defined(GRPC_POSIX_SOCKET) */

@ -1449,8 +1449,6 @@ const grpc_event_engine_vtable* grpc_init_epollex_linux(
* NULL */
const grpc_event_engine_vtable* grpc_init_epollex_linux(
bool explicitly_requested) {
gpr_log(GPR_ERROR,
"Skipping epollex because GRPC_LINUX_EPOLL is not defined.");
return nullptr;
}
#endif /* defined(GRPC_POSIX_SOCKET) */

@ -1732,8 +1732,6 @@ const grpc_event_engine_vtable* grpc_init_epollsig_linux(
* NULL */
const grpc_event_engine_vtable* grpc_init_epollsig_linux(
bool explicit_request) {
gpr_log(GPR_ERROR,
"Skipping epollsig because GRPC_LINUX_EPOLL is not defined.");
return nullptr;
}
#endif /* defined(GRPC_POSIX_SOCKET) */

@ -71,6 +71,7 @@ struct grpc_fd {
int shutdown;
int closed;
int released;
gpr_atm pollhup;
grpc_error* shutdown_error;
/* The watcher list.
@ -242,7 +243,7 @@ struct grpc_pollset_set {
typedef struct poll_result {
gpr_refcount refcount;
cv_node* watchers;
grpc_cv_node* watchers;
int watchcount;
struct pollfd* fds;
nfds_t nfds;
@ -273,7 +274,7 @@ typedef struct poll_hash_table {
} poll_hash_table;
poll_hash_table poll_cache;
cv_fd_table g_cvfds;
grpc_cv_fd_table g_cvfds;
/*******************************************************************************
* fd_posix.c
@ -335,6 +336,7 @@ static grpc_fd* fd_create(int fd, const char* name) {
r->on_done_closure = nullptr;
r->closed = 0;
r->released = 0;
gpr_atm_no_barrier_store(&r->pollhup, 0);
r->read_notifier_pollset = nullptr;
char* name2;
@ -462,7 +464,7 @@ static grpc_error* fd_shutdown_error(grpc_fd* fd) {
static void notify_on_locked(grpc_fd* fd, grpc_closure** st,
grpc_closure* closure) {
if (fd->shutdown) {
if (fd->shutdown || gpr_atm_no_barrier_load(&fd->pollhup)) {
GRPC_CLOSURE_SCHED(closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("FD shutdown"));
} else if (*st == CLOSURE_NOT_READY) {
@ -950,7 +952,8 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
pfds[0].events = POLLIN;
pfds[0].revents = 0;
for (i = 0; i < pollset->fd_count; i++) {
if (fd_is_orphaned(pollset->fds[i])) {
if (fd_is_orphaned(pollset->fds[i]) ||
gpr_atm_no_barrier_load(&pollset->fds[i]->pollhup) == 1) {
GRPC_FD_UNREF(pollset->fds[i], "multipoller");
} else {
pollset->fds[fd_count++] = pollset->fds[i];
@ -1017,6 +1020,12 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
pfds[i].fd, (pfds[i].revents & POLLIN_CHECK) != 0,
(pfds[i].revents & POLLOUT_CHECK) != 0, pfds[i].revents);
}
/* This is a mitigation to prevent poll() from spinning on a
** POLLHUP https://github.com/grpc/grpc/pull/13665
*/
if (pfds[i].revents & POLLHUP) {
gpr_atm_no_barrier_store(&watchers[i].fd->pollhup, 1);
}
fd_end_poll(&watchers[i], pfds[i].revents & POLLIN_CHECK,
pfds[i].revents & POLLOUT_CHECK, pollset);
}
@ -1435,7 +1444,7 @@ static void decref_poll_result(poll_result* res) {
}
}
void remove_cvn(cv_node** head, cv_node* target) {
void remove_cvn(grpc_cv_node** head, grpc_cv_node* target) {
if (target->next) {
target->next->prev = target->prev;
}
@ -1460,7 +1469,7 @@ static void run_poll(void* args) {
result->completed = 1;
result->retval = retval;
result->err = errno;
cv_node* watcher = result->watchers;
grpc_cv_node* watcher = result->watchers;
while (watcher) {
gpr_cv_signal(watcher->cv);
watcher = watcher->next;
@ -1494,17 +1503,17 @@ static void run_poll(void* args) {
static int cvfd_poll(struct pollfd* fds, nfds_t nfds, int timeout) {
unsigned int i;
int res, idx;
cv_node* pollcv;
grpc_cv_node* pollcv;
int skip_poll = 0;
nfds_t nsockfds = 0;
poll_result* result = nullptr;
gpr_mu_lock(&g_cvfds.mu);
pollcv = (cv_node*)gpr_malloc(sizeof(cv_node));
pollcv = (grpc_cv_node*)gpr_malloc(sizeof(grpc_cv_node));
pollcv->next = nullptr;
gpr_cv pollcv_cv;
gpr_cv_init(&pollcv_cv);
pollcv->cv = &pollcv_cv;
cv_node* fd_cvs = (cv_node*)gpr_malloc(nfds * sizeof(cv_node));
grpc_cv_node* fd_cvs = (grpc_cv_node*)gpr_malloc(nfds * sizeof(grpc_cv_node));
for (i = 0; i < nfds; i++) {
fds[i].revents = 0;
@ -1600,7 +1609,8 @@ static void global_cv_fd_table_init() {
gpr_cv_init(&g_cvfds.shutdown_cv);
gpr_ref_init(&g_cvfds.pollcount, 1);
g_cvfds.size = CV_DEFAULT_TABLE_SIZE;
g_cvfds.cvfds = (fd_node*)gpr_malloc(sizeof(fd_node) * CV_DEFAULT_TABLE_SIZE);
g_cvfds.cvfds =
(grpc_fd_node*)gpr_malloc(sizeof(grpc_fd_node) * CV_DEFAULT_TABLE_SIZE);
g_cvfds.free_fds = nullptr;
thread_grace = gpr_time_from_millis(POLLCV_THREAD_GRACE_MS, GPR_TIMESPAN);
for (int i = 0; i < CV_DEFAULT_TABLE_SIZE; i++) {

@ -63,7 +63,8 @@ typedef size_t msg_iovlen_type;
grpc_core::TraceFlag grpc_tcp_trace(false, "tcp");
typedef struct {
namespace {
struct grpc_tcp {
grpc_endpoint base;
grpc_fd* em_fd;
int fd;
@ -96,12 +97,13 @@ typedef struct {
grpc_resource_user* resource_user;
grpc_resource_user_slice_allocator slice_allocator;
} grpc_tcp;
};
typedef struct backup_poller {
struct backup_poller {
gpr_mu* pollset_mu;
grpc_closure run_poller;
} backup_poller;
};
} // namespace
#define BACKUP_POLLER_POLLSET(b) ((grpc_pollset*)((b) + 1))

@ -65,6 +65,17 @@ typedef struct {
grpc_pollset* pollset;
} grpc_tcp;
static grpc_error* tcp_annotate_error(grpc_error* src_error, grpc_tcp* tcp) {
return grpc_error_set_str(
grpc_error_set_int(
src_error,
/* All TCP errors are marked with UNAVAILABLE so that the application may
* choose to retry. */
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
GRPC_ERROR_STR_TARGET_ADDRESS,
grpc_slice_from_copied_string(tcp->peer_string));
}
static void tcp_free(grpc_tcp* tcp) {
grpc_resource_user_unref(tcp->resource_user);
gpr_free(tcp->handle);
@ -162,7 +173,8 @@ static void read_callback(uv_stream_t* stream, ssize_t nread,
// TODO(murgatroid99): figure out what the return value here means
uv_read_stop(stream);
if (nread == UV_EOF) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF");
error =
tcp_annotate_error(GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"), tcp);
grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
} else if (nread > 0) {
// Successful read
@ -177,7 +189,8 @@ static void read_callback(uv_stream_t* stream, ssize_t nread,
}
} else {
// nread < 0: Error
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed");
error = tcp_annotate_error(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed"), tcp);
grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
}
call_read_cb(tcp, error);
@ -194,7 +207,9 @@ static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
status =
uv_read_start((uv_stream_t*)tcp->handle, alloc_uv_buf, read_callback);
if (status != 0) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed at start");
error = tcp_annotate_error(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed at start"),
tcp);
error = grpc_error_set_str(
error, GRPC_ERROR_STR_OS_ERROR,
grpc_slice_from_static_string(uv_strerror(status)));
@ -235,7 +250,8 @@ static void write_callback(uv_write_t* req, int status) {
if (status == 0) {
error = GRPC_ERROR_NONE;
} else {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Write failed");
error = tcp_annotate_error(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Write failed"), tcp);
}
if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error);
@ -268,8 +284,10 @@ static void uv_endpoint_write(grpc_endpoint* ep,
}
if (tcp->shutting_down) {
GRPC_CLOSURE_SCHED(cb, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"TCP socket is shutting down"));
GRPC_CLOSURE_SCHED(cb,
tcp_annotate_error(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"TCP socket is shutting down"),
tcp));
return;
}

@ -34,7 +34,7 @@
#define MAX_TABLE_RESIZE 256
extern cv_fd_table g_cvfds;
extern grpc_cv_fd_table g_cvfds;
static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
unsigned int i, newsize;
@ -42,8 +42,8 @@ static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
gpr_mu_lock(&g_cvfds.mu);
if (!g_cvfds.free_fds) {
newsize = GPR_MIN(g_cvfds.size * 2, g_cvfds.size + MAX_TABLE_RESIZE);
g_cvfds.cvfds =
(fd_node*)gpr_realloc(g_cvfds.cvfds, sizeof(fd_node) * newsize);
g_cvfds.cvfds = (grpc_fd_node*)gpr_realloc(g_cvfds.cvfds,
sizeof(grpc_fd_node) * newsize);
for (i = g_cvfds.size; i < newsize; i++) {
g_cvfds.cvfds[i].is_set = 0;
g_cvfds.cvfds[i].cvs = nullptr;
@ -64,7 +64,7 @@ static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
}
static grpc_error* cv_fd_wakeup(grpc_wakeup_fd* fd_info) {
cv_node* cvn;
grpc_cv_node* cvn;
gpr_mu_lock(&g_cvfds.mu);
g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].is_set = 1;
cvn = g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].cvs;

@ -40,27 +40,27 @@
#define GRPC_FD_TO_IDX(fd) (-(fd)-1)
#define GRPC_IDX_TO_FD(idx) (-(idx)-1)
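/* The wakeup "fds" handed out by the condition-variable table are negative, so
 * they can never collide with real file descriptors; the two macros above map
 * between that space and table indices. Worked values, derived directly from
 * the macro definitions:
 *   GRPC_IDX_TO_FD(0)  == -1   GRPC_IDX_TO_FD(1)  == -2
 *   GRPC_FD_TO_IDX(-1) ==  0   GRPC_FD_TO_IDX(-2) ==  1 */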
typedef struct cv_node {
typedef struct grpc_cv_node {
gpr_cv* cv;
struct cv_node* next;
struct cv_node* prev;
} cv_node;
struct grpc_cv_node* next;
struct grpc_cv_node* prev;
} grpc_cv_node;
typedef struct fd_node {
typedef struct grpc_fd_node {
int is_set;
cv_node* cvs;
struct fd_node* next_free;
} fd_node;
grpc_cv_node* cvs;
struct grpc_fd_node* next_free;
} grpc_fd_node;
typedef struct cv_fd_table {
typedef struct grpc_cv_fd_table {
gpr_mu mu;
gpr_refcount pollcount;
gpr_cv shutdown_cv;
fd_node* cvfds;
fd_node* free_fds;
grpc_fd_node* cvfds;
grpc_fd_node* free_fds;
unsigned int size;
grpc_poll_function_type poll;
} cv_fd_table;
} grpc_cv_fd_table;
extern const grpc_wakeup_fd_vtable grpc_cv_wakeup_fd_vtable;

@ -37,8 +37,9 @@
#define MAX_CREDENTIALS_METADATA_COUNT 4
namespace {
/* We can have per-call credentials. */
typedef struct {
struct call_data {
grpc_call_stack* owning_call;
grpc_call_combiner* call_combiner;
grpc_call_credentials* creds;
@ -57,13 +58,14 @@ typedef struct {
grpc_closure async_result_closure;
grpc_closure check_call_host_cancel_closure;
grpc_closure get_request_metadata_cancel_closure;
} call_data;
};
/* We can have per-channel credentials. */
typedef struct {
struct channel_data {
grpc_channel_security_connector* security_connector;
grpc_auth_context* auth_context;
} channel_data;
};
} // namespace
void grpc_auth_metadata_context_reset(
grpc_auth_metadata_context* auth_md_context) {

@ -26,13 +26,14 @@
#include "src/core/lib/security/transport/auth_filters.h"
#include "src/core/lib/slice/slice_internal.h"
typedef enum {
namespace {
enum async_state {
STATE_INIT = 0,
STATE_DONE,
STATE_CANCELLED,
} async_state;
};
typedef struct call_data {
struct call_data {
grpc_call_combiner* call_combiner;
grpc_call_stack* owning_call;
grpc_transport_stream_op_batch* recv_initial_metadata_batch;
@ -44,12 +45,13 @@ typedef struct call_data {
grpc_auth_context* auth_context;
grpc_closure cancel_closure;
gpr_atm state; // async_state
} call_data;
};
typedef struct channel_data {
struct channel_data {
grpc_auth_context* auth_context;
grpc_server_credentials* creds;
} channel_data;
};
} // namespace
static grpc_metadata_array metadata_batch_to_md_array(
const grpc_metadata_batch* batch) {

@ -1851,8 +1851,9 @@ static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
{
grpc_error* override_error = GRPC_ERROR_NONE;
if (op->data.send_status_from_server.status != GRPC_STATUS_OK) {
override_error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Error from server send status");
override_error =
error_from_status(op->data.send_status_from_server.status,
"Returned non-ok status");
}
if (op->data.send_status_from_server.status_details != nullptr) {
call->send_extra_metadata[1].md = grpc_mdelem_from_slices(

@ -44,24 +44,23 @@
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/static_metadata.h"
typedef struct listener {
grpc_core::TraceFlag grpc_server_channel_trace(false, "server_channel");
namespace {
struct listener {
void* arg;
void (*start)(grpc_server* server, void* arg, grpc_pollset** pollsets,
size_t pollset_count);
void (*destroy)(grpc_server* server, void* arg, grpc_closure* closure);
struct listener* next;
grpc_closure destroy_done;
} listener;
};
typedef struct call_data call_data;
typedef struct channel_data channel_data;
typedef struct registered_method registered_method;
enum requested_call_type { BATCH_CALL, REGISTERED_CALL };
typedef enum { BATCH_CALL, REGISTERED_CALL } requested_call_type;
struct registered_method;
grpc_core::TraceFlag grpc_server_channel_trace(false, "server_channel");
typedef struct requested_call {
struct requested_call {
gpr_mpscq_node request_link; /* must be first */
requested_call_type type;
size_t cq_idx;
@ -81,15 +80,15 @@ typedef struct requested_call {
grpc_byte_buffer** optional_payload;
} registered;
} data;
} requested_call;
};
typedef struct channel_registered_method {
struct channel_registered_method {
registered_method* server_registered_method;
uint32_t flags;
bool has_host;
grpc_slice method;
grpc_slice host;
} channel_registered_method;
};
struct channel_data {
grpc_server* server;
@ -176,6 +175,7 @@ typedef struct {
grpc_channel** channels;
size_t num_channels;
} channel_broadcaster;
} // namespace
struct grpc_server {
grpc_channel_args* channel_args;

@ -116,6 +116,9 @@ typedef struct {
static gpr_once init_openssl_once = GPR_ONCE_INIT;
static gpr_mu* openssl_mutexes = nullptr;
static void openssl_locking_cb(int mode, int type, const char* file,
int line) GRPC_UNUSED;
static unsigned long openssl_thread_id_cb(void) GRPC_UNUSED;
static void openssl_locking_cb(int mode, int type, const char* file, int line) {
if (mode & CRYPTO_LOCK) {

@ -15,12 +15,12 @@
<PackageTags>gRPC RPC Protocol HTTP/2 Auth OAuth2</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
<IncludeSymbols>true</IncludeSymbols>
<IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
<Import Project="..\Grpc.Core\SourceLink.csproj.include" />
<ItemGroup>
<Compile Include="..\Grpc.Core\Version.cs" />
</ItemGroup>

@ -15,12 +15,12 @@
<PackageTags>gRPC test testing</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
<IncludeSymbols>true</IncludeSymbols>
<IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
<Import Project="..\Grpc.Core\SourceLink.csproj.include" />
<ItemGroup>
<Compile Include="..\Grpc.Core\Version.cs" />
</ItemGroup>

@ -14,12 +14,12 @@
<PackageTags>gRPC RPC Protocol HTTP/2</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
<IncludeSymbols>true</IncludeSymbols>
<IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
<Import Project="SourceLink.csproj.include" />
<ItemGroup>
<EmbeddedResource Include="..\..\..\etc\roots.pem" />
<Content Include="..\nativelibs\csharp_ext_macos_x64\libgrpc_csharp_ext.dylib">

@ -0,0 +1,19 @@
<!-- Ensure that debugging of the resulting NuGet packages works (we're using SourceLink). -->
<Project>
<ItemGroup Label="dotnet pack instructions">
<Content Include="$(OutputPath)netstandard1.5\$(PackageId).pdb">
<Pack>true</Pack>
<PackagePath>lib/netstandard1.5</PackagePath>
</Content>
<Content Include="$(OutputPath)net45\$(PackageId).pdb">
<Pack>true</Pack>
<PackagePath>lib/net45</PackagePath>
</Content>
</ItemGroup>
<ItemGroup>
<PackageReference Include="SourceLink.Embed.AllSourceFiles" Version="2.7.3" PrivateAssets="all" />
</ItemGroup>
</Project>

@ -14,12 +14,12 @@
<PackageTags>gRPC health check</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
<IncludeSymbols>true</IncludeSymbols>
<IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
<Import Project="..\Grpc.Core\SourceLink.csproj.include" />
<ItemGroup>
<Compile Include="..\Grpc.Core\Version.cs" />
</ItemGroup>

@ -14,12 +14,12 @@
<PackageTags>gRPC reflection</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
<IncludeSymbols>true</IncludeSymbols>
<IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
<Import Project="..\Grpc.Core\SourceLink.csproj.include" />
<ItemGroup>
<Compile Include="..\Grpc.Core\Version.cs" />
</ItemGroup>

@ -1,29 +0,0 @@
{
"name": "grpc-health-check",
"version": "1.7.2",
"author": "Google Inc.",
"description": "Health check service for use with gRPC",
"repository": {
"type": "git",
"url": "https://github.com/grpc/grpc.git"
},
"bugs": "https://github.com/grpc/grpc/issues",
"contributors": [
{
"name": "Michael Lumish",
"email": "mlumish@google.com"
}
],
"dependencies": {
"grpc": "^1.7.2",
"lodash": "^3.9.3",
"google-protobuf": "^3.0.0"
},
"files": [
"LICENSE",
"health.js",
"v1"
],
"main": "src/node/index.js",
"license": "Apache-2.0"
}

@ -1,41 +0,0 @@
{
"name": "grpc-tools",
"version": "1.7.2",
"author": "Google Inc.",
"description": "Tools for developing with gRPC on Node.js",
"homepage": "https://grpc.io/",
"repository": {
"type": "git",
"url": "https://github.com/grpc/grpc.git"
},
"bugs": "https://github.com/grpc/grpc/issues",
"contributors": [
{
"name": "Michael Lumish",
"email": "mlumish@google.com"
}
],
"bin": {
"grpc_tools_node_protoc": "./bin/protoc.js",
"grpc_tools_node_protoc_plugin": "./bin/protoc_plugin.js"
},
"scripts": {
"install": "./node_modules/.bin/node-pre-gyp install"
},
"bundledDependencies": ["node-pre-gyp"],
"binary": {
"module_name": "grpc_tools",
"host": "https://storage.googleapis.com/",
"remote_path": "grpc-precompiled-binaries/node/{name}/v{version}",
"package_name": "{platform}-{arch}.tar.gz",
"module_path": "bin"
},
"files": [
"index.js",
"bin/protoc.js",
"bin/protoc_plugin.js",
"bin/google/protobuf",
"LICENSE"
],
"main": "index.js"
}

@ -19,6 +19,12 @@
#include <AvailabilityMacros.h>
typedef NS_ENUM(NSInteger, GRPCCompressAlgorithm) {
GRPCCompressNone,
GRPCCompressDeflate,
GRPCCompressGzip,
};
/**
* Methods to configure GRPC channel options.
*/
@ -36,4 +42,8 @@
+ (void)closeOpenConnections DEPRECATED_MSG_ATTRIBUTE("The API for this feature is experimental, "
"and might be removed or modified at any "
"time.");
+ (void)setDefaultCompressMethod:(GRPCCompressAlgorithm)algorithm
forhost:(nonnull NSString *)host;
@end

@ -20,6 +20,8 @@
#import "private/GRPCHost.h"
#import <grpc/impl/codegen/compression_types.h>
@implementation GRPCCall (ChannelArg)
+ (void)setUserAgentPrefix:(nonnull NSString *)userAgentPrefix forHost:(nonnull NSString *)host {
@ -36,4 +38,23 @@
[GRPCHost flushChannelCache];
}
+ (void)setDefaultCompressMethod:(GRPCCompressAlgorithm)algorithm
forhost:(nonnull NSString *)host {
GRPCHost *hostConfig = [GRPCHost hostWithAddress:host];
switch (algorithm) {
case GRPCCompressNone:
hostConfig.compressAlgorithm = GRPC_COMPRESS_NONE;
break;
case GRPCCompressDeflate:
hostConfig.compressAlgorithm = GRPC_COMPRESS_DEFLATE;
break;
case GRPCCompressGzip:
hostConfig.compressAlgorithm = GRPC_COMPRESS_GZIP;
break;
default:
NSLog(@"Invalid compression algorithm");
abort();
}
}
@end

@ -18,6 +18,8 @@
#import <Foundation/Foundation.h>
#import <grpc/impl/codegen/compression_types.h>
NS_ASSUME_NONNULL_BEGIN
@class GRPCCompletionQueue;
@ -32,6 +34,7 @@ struct grpc_channel_credentials;
@property(nonatomic, readonly) NSString *address;
@property(nonatomic, copy, nullable) NSString *userAgentPrefix;
@property(nonatomic, nullable) struct grpc_channel_credentials *channelCreds;
@property(nonatomic) grpc_compression_algorithm compressAlgorithm;
/** The following properties should only be modified for testing: */

@ -87,6 +87,7 @@ static GRPCConnectivityMonitor *connectivityMonitor = nil;
_address = address;
_secure = YES;
kHostCache[address] = self;
_compressAlgorithm = GRPC_COMPRESS_NONE;
}
// Keep a single monitor to flush the cache if the connectivity status changes
// Thread safety guarded by @synchronized(kHostCache)
@ -226,6 +227,12 @@ static GRPCConnectivityMonitor *connectivityMonitor = nil;
}
// Use 10000ms initial backoff time for correct behavior on bad/slow networks
args[@GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS] = @10000;
if (_compressAlgorithm != GRPC_COMPRESS_NONE) {
args[@GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM] =
[NSNumber numberWithInt:_compressAlgorithm];
}
return args;
}

@ -68,6 +68,10 @@
}
@end
BOOL isRemoteInteropTest(NSString *host) {
return [host isEqualToString:@"grpc-test.sandbox.googleapis.com"];
}
#pragma mark Tests
@implementation InteropTests {
@ -452,4 +456,34 @@
[self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil];
}
- (void)testCompressedUnaryRPC {
// This test needs to be disabled for the remote test because the interop server
// grpc-test does not support compression.
if (isRemoteInteropTest(self.class.host)) {
return;
}
XCTAssertNotNil(self.class.host);
__weak XCTestExpectation *expectation = [self expectationWithDescription:@"LargeUnary"];
RMTSimpleRequest *request = [RMTSimpleRequest message];
request.responseType = RMTPayloadType_Compressable;
request.responseSize = 314159;
request.payload.body = [NSMutableData dataWithLength:271828];
request.expectCompressed.value = YES;
[GRPCCall setDefaultCompressMethod:GRPCCompressGzip forhost:self.class.host];
[_service unaryCallWithRequest:request handler:^(RMTSimpleResponse *response, NSError *error) {
XCTAssertNil(error, @"Finished with unexpected error: %@", error);
RMTSimpleResponse *expectedResponse = [RMTSimpleResponse message];
expectedResponse.payload.type = RMTPayloadType_Compressable;
expectedResponse.payload.body = [NSMutableData dataWithLength:314159];
XCTAssertEqualObjects(response, expectedResponse);
[expectation fulfill];
}];
[self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil];
}
@end

@ -20,34 +20,45 @@ package grpc.testing;
option objc_class_prefix = "RMT";
// TODO(dgq): Go back to using well-known types once
// https://github.com/grpc/grpc/issues/6980 has been fixed.
// import "google/protobuf/wrappers.proto";
message BoolValue {
// The bool value.
bool value = 1;
}
// DEPRECATED, don't use. To be removed shortly.
// The type of payload that should be returned.
enum PayloadType {
// Compressable text format.
COMPRESSABLE = 0;
// Uncompressable binary format.
UNCOMPRESSABLE = 1;
// Randomly chosen from all other formats defined in this enum.
RANDOM = 2;
}
// A block of data, to simply increase gRPC message size.
message Payload {
// DEPRECATED, don't use. To be removed shortly.
// The type of data in body.
PayloadType type = 1;
// Primary contents of payload.
bytes body = 2;
}
// A protobuf representation for grpc status. This is used by test
// clients to specify a status that the server should attempt to return.
message EchoStatus {
int32 code = 1;
string message = 2;
}
// Unary request.
message SimpleRequest {
// DEPRECATED, don't use. To be removed shortly.
// Desired payload type in the response from the server.
// If response_type is RANDOM, server randomly chooses one from other formats.
PayloadType response_type = 1;
// Desired payload size in the response from the server.
// If response_type is COMPRESSABLE, this denotes the size before compression.
int32 response_size = 2;
// Optional input payload sent along with the request.
@ -58,6 +69,18 @@ message SimpleRequest {
// Whether SimpleResponse should include OAuth scope.
bool fill_oauth_scope = 5;
// Whether to request the server to compress the response. This field is
// "nullable" in order to interoperate seamlessly with clients not able to
// implement the full compression tests by introspecting the call to verify
// the response's compression status.
BoolValue response_compressed = 6;
// Whether server should return a given status
EchoStatus response_status = 7;
// Whether the server should expect this request to be compressed.
BoolValue expect_compressed = 8;
}
// Unary response, as configured by the request.
@ -76,6 +99,12 @@ message StreamingInputCallRequest {
// Optional input payload sent along with the request.
Payload payload = 1;
// Whether the server should expect this request to be compressed. This field
// is "nullable" in order to interoperate seamlessly with servers not able to
// implement the full compression tests by introspecting the call to verify
// the request's compression status.
BoolValue expect_compressed = 2;
// Not expecting any payload from the response.
}
@ -88,16 +117,22 @@ message StreamingInputCallResponse {
// Configuration for a particular response.
message ResponseParameters {
// Desired payload sizes in responses from the server.
// If response_type is COMPRESSABLE, this denotes the size before compression.
int32 size = 1;
// Desired interval between consecutive responses in the response stream in
// microseconds.
int32 interval_us = 2;
// Whether to request the server to compress the response. This field is
// "nullable" in order to interoperate seamlessly with clients not able to
// implement the full compression tests by introspecting the call to verify
// the response's compression status.
BoolValue compressed = 3;
}
// Server-streaming request.
message StreamingOutputCallRequest {
// DEPRECATED, don't use. To be removed shortly.
// Desired payload type in the response from the server.
// If response_type is RANDOM, the payload from each response in the stream
// might be of different types. This is to simulate a mixed type of payload
@ -109,6 +144,9 @@ message StreamingOutputCallRequest {
// Optional input payload sent along with the request.
Payload payload = 3;
// Whether server should return a given status
EchoStatus response_status = 7;
}
// Server-streaming response, as configured by the request and parameters.
@ -116,3 +154,17 @@ message StreamingOutputCallResponse {
// Payload to increase response size.
Payload payload = 1;
}
// For reconnect interop test only.
// Client tells server what reconnection parameters it used.
message ReconnectParams {
int32 max_reconnect_backoff_ms = 1;
}
// For reconnect interop test only.
// Server tells client whether its reconnects are following the spec and the
// reconnect backoffs it saw.
message ReconnectInfo {
bool passed = 1;
repeated int32 backoff_ms = 2;
}
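The compression fields added to messages.proto above (response_compressed, expect_compressed, and ResponseParameters.compressed) are what the interop test clients use to drive the compression cases. As a rough Python sketch only: assuming the proto has been compiled with protoc and is importable as messages_pb2 (that module name is an assumption, not part of this change), a compressed-response unary request could be built like this:

import messages_pb2  # assumed name of the protoc-generated module

# Ask for a compressible 314159-byte response and request that it be compressed;
# the BoolValue wrappers are "nullable", so leaving them unset expresses no preference.
request = messages_pb2.SimpleRequest(
    response_type=messages_pb2.COMPRESSABLE,
    response_size=314159,
    payload=messages_pb2.Payload(body=b'\x00' * 271828),
    response_compressed=messages_pb2.BoolValue(value=True),
    expect_compressed=messages_pb2.BoolValue(value=False),
)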

@ -104,8 +104,8 @@ def _get_grpc_custom_bdist(decorated_basename, target_bdist_basename):
with open(bdist_path, 'w') as bdist_file:
bdist_file.write(bdist_data)
except IOError as error:
raise CommandError('{}\n\nCould not write grpcio bdist: {}'
.format(traceback.format_exc(), error.message))
raise CommandError('{}\n\nCould not write grpcio bdist: {}'.format(
traceback.format_exc(), error.message))
return bdist_path
@ -141,7 +141,8 @@ class SphinxDocumentation(setuptools.Command):
with open(glossary_filepath, 'a') as glossary_filepath:
glossary_filepath.write(API_GLOSSARY)
sphinx.main(
['', os.path.join('doc', 'src'), os.path.join('doc', 'build')])
['', os.path.join('doc', 'src'),
os.path.join('doc', 'build')])
class BuildProjectMetadata(setuptools.Command):
@ -189,10 +190,11 @@ def check_and_update_cythonization(extensions):
for source in extension.sources:
base, file_ext = os.path.splitext(source)
if file_ext == '.pyx':
generated_pyx_source = next((base + gen_ext
for gen_ext in ('.c', '.cpp',)
if os.path.isfile(base + gen_ext)),
None)
generated_pyx_source = next(
(base + gen_ext for gen_ext in (
'.c',
'.cpp',
) if os.path.isfile(base + gen_ext)), None)
if generated_pyx_source:
generated_pyx_sources.append(generated_pyx_source)
else:
@ -299,10 +301,10 @@ class Gather(setuptools.Command):
"""Command to gather project dependencies."""
description = 'gather dependencies for grpcio'
user_options = [
('test', 't', 'flag indicating to gather test dependencies'),
('install', 'i', 'flag indicating to gather install dependencies')
]
user_options = [('test', 't',
'flag indicating to gather test dependencies'),
('install', 'i',
'flag indicating to gather install dependencies')]
def initialize_options(self):
self.test = False

@ -1376,8 +1376,8 @@ def metadata_call_credentials(metadata_plugin, name=None):
A CallCredentials.
"""
from grpc import _plugin_wrapping # pylint: disable=cyclic-import
return _plugin_wrapping.metadata_plugin_call_credentials(metadata_plugin,
name)
return _plugin_wrapping.metadata_plugin_call_credentials(
metadata_plugin, name)
def access_token_call_credentials(access_token):
@ -1631,25 +1631,57 @@ def server(thread_pool,
################################### __all__ #################################
__all__ = (
'FutureTimeoutError', 'FutureCancelledError', 'Future',
'ChannelConnectivity', 'StatusCode', 'RpcError', 'RpcContext', 'Call',
'ChannelCredentials', 'CallCredentials', 'AuthMetadataContext',
'AuthMetadataPluginCallback', 'AuthMetadataPlugin', 'ClientCallDetails',
'ServerCertificateConfiguration', 'ServerCredentials',
'UnaryUnaryMultiCallable', 'UnaryStreamMultiCallable',
'StreamUnaryMultiCallable', 'StreamStreamMultiCallable',
'UnaryUnaryClientInterceptor', 'UnaryStreamClientInterceptor',
'StreamUnaryClientInterceptor', 'StreamStreamClientInterceptor', 'Channel',
'ServicerContext', 'RpcMethodHandler', 'HandlerCallDetails',
'GenericRpcHandler', 'ServiceRpcHandler', 'Server', 'ServerInterceptor',
'unary_unary_rpc_method_handler', 'unary_stream_rpc_method_handler',
'stream_unary_rpc_method_handler', 'stream_stream_rpc_method_handler',
'method_handlers_generic_handler', 'ssl_channel_credentials',
'metadata_call_credentials', 'access_token_call_credentials',
'composite_call_credentials', 'composite_channel_credentials',
'ssl_server_credentials', 'ssl_server_certificate_configuration',
'dynamic_ssl_server_credentials', 'channel_ready_future',
'insecure_channel', 'secure_channel', 'intercept_channel', 'server',)
'FutureTimeoutError',
'FutureCancelledError',
'Future',
'ChannelConnectivity',
'StatusCode',
'RpcError',
'RpcContext',
'Call',
'ChannelCredentials',
'CallCredentials',
'AuthMetadataContext',
'AuthMetadataPluginCallback',
'AuthMetadataPlugin',
'ClientCallDetails',
'ServerCertificateConfiguration',
'ServerCredentials',
'UnaryUnaryMultiCallable',
'UnaryStreamMultiCallable',
'StreamUnaryMultiCallable',
'StreamStreamMultiCallable',
'UnaryUnaryClientInterceptor',
'UnaryStreamClientInterceptor',
'StreamUnaryClientInterceptor',
'StreamStreamClientInterceptor',
'Channel',
'ServicerContext',
'RpcMethodHandler',
'HandlerCallDetails',
'GenericRpcHandler',
'ServiceRpcHandler',
'Server',
'ServerInterceptor',
'unary_unary_rpc_method_handler',
'unary_stream_rpc_method_handler',
'stream_unary_rpc_method_handler',
'stream_stream_rpc_method_handler',
'method_handlers_generic_handler',
'ssl_channel_credentials',
'metadata_call_credentials',
'access_token_call_credentials',
'composite_call_credentials',
'composite_channel_credentials',
'ssl_server_credentials',
'ssl_server_certificate_configuration',
'dynamic_ssl_server_credentials',
'channel_ready_future',
'insecure_channel',
'secure_channel',
'intercept_channel',
'server',
)
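The metadata_call_credentials helper reformatted earlier in this file wraps a user-supplied AuthMetadataPlugin into a CallCredentials. A minimal usage sketch with the public grpc API; the header name, token, and target address are placeholders:

import grpc

class StaticTokenPlugin(grpc.AuthMetadataPlugin):
    """Adds a fixed bearer token to every call (illustrative only)."""

    def __call__(self, context, callback):
        # context.service_url and context.method_name are available if needed.
        callback((('authorization', 'Bearer my-token'),), None)

call_credentials = grpc.metadata_call_credentials(StaticTokenPlugin(), name='static-token')
channel_credentials = grpc.composite_channel_credentials(
    grpc.ssl_channel_credentials(), call_credentials)
channel = grpc.secure_channel('localhost:50051', channel_credentials)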
############################### Extension Shims ################################

@ -54,7 +54,9 @@ class GoogleCallCredentials(grpc.AuthMetadataPlugin):
if self._is_jwt:
future = self._pool.submit(
self._credentials.get_access_token,
additional_claims={'aud': context.service_url})
additional_claims={
'aud': context.service_url
})
else:
future = self._pool.submit(self._credentials.get_access_token)
future.add_done_callback(_create_get_token_callback(callback))
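GoogleCallCredentials above resolves the token on a thread pool and hands the result to the gRPC callback via add_done_callback. The same future-chaining pattern in plain Python, with a stand-in fetch function (the names here are illustrative):

from concurrent import futures

def fetch_token():
    # Stand-in for credentials.get_access_token(); in real code this blocks on I/O.
    return 'token-123'

def on_token(token_future):
    # Runs once the token (or an exception) is available.
    print('got token:', token_future.result())

pool = futures.ThreadPoolExecutor(max_workers=1)
pool.submit(fetch_token).add_done_callback(on_token)
pool.shutdown(wait=True)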

@ -29,24 +29,32 @@ _USER_AGENT = 'grpc-python/{}'.format(_grpcio_metadata.__version__)
_EMPTY_FLAGS = 0
_INFINITE_FUTURE = cygrpc.Timespec(float('+inf'))
_UNARY_UNARY_INITIAL_DUE = (cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.send_message,
cygrpc.OperationType.send_close_from_client,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_message,
cygrpc.OperationType.receive_status_on_client,)
_UNARY_STREAM_INITIAL_DUE = (cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.send_message,
cygrpc.OperationType.send_close_from_client,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_status_on_client,)
_STREAM_UNARY_INITIAL_DUE = (cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_message,
cygrpc.OperationType.receive_status_on_client,)
_STREAM_STREAM_INITIAL_DUE = (cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_status_on_client,)
_UNARY_UNARY_INITIAL_DUE = (
cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.send_message,
cygrpc.OperationType.send_close_from_client,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_message,
cygrpc.OperationType.receive_status_on_client,
)
_UNARY_STREAM_INITIAL_DUE = (
cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.send_message,
cygrpc.OperationType.send_close_from_client,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_status_on_client,
)
_STREAM_UNARY_INITIAL_DUE = (
cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_message,
cygrpc.OperationType.receive_status_on_client,
)
_STREAM_STREAM_INITIAL_DUE = (
cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_status_on_client,
)
_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
'Exception calling channel subscription callback!')
@ -129,12 +137,12 @@ def _abort(state, code, details):
def _handle_event(event, state, response_deserializer):
callbacks = []
for batch_operation in event.batch_operations:
operation_type = batch_operation.type
operation_type = batch_operation.type()
state.due.remove(operation_type)
if operation_type == cygrpc.OperationType.receive_initial_metadata:
state.initial_metadata = batch_operation.received_metadata
state.initial_metadata = batch_operation.initial_metadata()
elif operation_type == cygrpc.OperationType.receive_message:
serialized_response = batch_operation.received_message.bytes()
serialized_response = batch_operation.message()
if serialized_response is not None:
response = _common.deserialize(serialized_response,
response_deserializer)
@ -144,18 +152,17 @@ def _handle_event(event, state, response_deserializer):
else:
state.response = response
elif operation_type == cygrpc.OperationType.receive_status_on_client:
state.trailing_metadata = batch_operation.received_metadata
state.trailing_metadata = batch_operation.trailing_metadata()
if state.code is None:
code = _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE.get(
batch_operation.received_status_code)
batch_operation.code())
if code is None:
state.code = grpc.StatusCode.UNKNOWN
state.details = _unknown_code_details(
batch_operation.received_status_code,
batch_operation.received_status_details)
code, batch_operation.details())
else:
state.code = code
state.details = batch_operation.received_status_details
state.details = batch_operation.details()
callbacks.extend(state.callbacks)
state.callbacks = None
return callbacks
@ -200,7 +207,7 @@ def _consume_request_iterator(request_iterator, state, call,
_abort(state, grpc.StatusCode.INTERNAL, details)
return
else:
operations = (cygrpc.operation_send_message(
operations = (cygrpc.SendMessageOperation(
serialized_request, _EMPTY_FLAGS),)
call.start_client_batch(operations, event_handler)
state.due.add(cygrpc.OperationType.send_message)
@ -216,7 +223,7 @@ def _consume_request_iterator(request_iterator, state, call,
with state.condition:
if state.code is None:
operations = (
cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),)
cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),)
call.start_client_batch(operations, event_handler)
state.due.add(cygrpc.OperationType.send_close_from_client)
@ -319,7 +326,7 @@ class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call):
event_handler = _event_handler(self._state, self._call,
self._response_deserializer)
self._call.start_client_batch(
(cygrpc.operation_receive_message(_EMPTY_FLAGS),),
(cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
event_handler)
self._state.due.add(cygrpc.OperationType.receive_message)
elif self._state.code is grpc.StatusCode.OK:
@ -453,12 +460,13 @@ class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
else:
state = _RPCState(_UNARY_UNARY_INITIAL_DUE, None, None, None, None)
operations = (
cygrpc.operation_send_initial_metadata(metadata, _EMPTY_FLAGS),
cygrpc.operation_send_message(serialized_request, _EMPTY_FLAGS),
cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
cygrpc.operation_receive_message(_EMPTY_FLAGS),
cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
cygrpc.SendInitialMetadataOperation(metadata, _EMPTY_FLAGS),
cygrpc.SendMessageOperation(serialized_request, _EMPTY_FLAGS),
cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),
cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
)
return state, operations, deadline, deadline_timespec, None
def _blocking(self, request, timeout, metadata, credentials):
@ -536,14 +544,15 @@ class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
self._response_deserializer)
with state.condition:
call.start_client_batch(
(cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),),
(cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
event_handler)
operations = (
cygrpc.operation_send_initial_metadata(
metadata, _EMPTY_FLAGS), cygrpc.operation_send_message(
serialized_request, _EMPTY_FLAGS),
cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
cygrpc.SendInitialMetadataOperation(metadata, _EMPTY_FLAGS),
cygrpc.SendMessageOperation(serialized_request,
_EMPTY_FLAGS),
cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
)
call_error = call.start_client_batch(operations, event_handler)
if call_error != cygrpc.CallError.ok:
_call_error_set_RPCstate(state, call_error, metadata)
@ -573,12 +582,12 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
call.set_credentials(credentials._credentials)
with state.condition:
call.start_client_batch(
(cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),),
None)
(cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),), None)
operations = (
cygrpc.operation_send_initial_metadata(metadata, _EMPTY_FLAGS),
cygrpc.operation_receive_message(_EMPTY_FLAGS),
cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
cygrpc.SendInitialMetadataOperation(metadata, _EMPTY_FLAGS),
cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
)
call_error = call.start_client_batch(operations, None)
_check_call_error(call_error, metadata)
_consume_request_iterator(request_iterator, state, call,
@ -624,12 +633,13 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
event_handler = _event_handler(state, call, self._response_deserializer)
with state.condition:
call.start_client_batch(
(cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),),
(cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
event_handler)
operations = (
cygrpc.operation_send_initial_metadata(metadata, _EMPTY_FLAGS),
cygrpc.operation_receive_message(_EMPTY_FLAGS),
cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
cygrpc.SendInitialMetadataOperation(metadata, _EMPTY_FLAGS),
cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
)
call_error = call.start_client_batch(operations, event_handler)
if call_error != cygrpc.CallError.ok:
_call_error_set_RPCstate(state, call_error, metadata)
@ -664,11 +674,12 @@ class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
event_handler = _event_handler(state, call, self._response_deserializer)
with state.condition:
call.start_client_batch(
(cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),),
(cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
event_handler)
operations = (
cygrpc.operation_send_initial_metadata(metadata, _EMPTY_FLAGS),
cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
cygrpc.SendInitialMetadataOperation(metadata, _EMPTY_FLAGS),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
)
call_error = call.start_client_batch(operations, event_handler)
if call_error != cygrpc.CallError.ok:
_call_error_set_RPCstate(state, call_error, metadata)
@ -789,7 +800,11 @@ def _deliver(state, initial_connectivity, initial_callbacks):
def _spawn_delivery(state, callbacks):
delivering_thread = threading.Thread(
target=_deliver, args=(state, state.connectivity, callbacks,))
target=_deliver, args=(
state,
state.connectivity,
callbacks,
))
delivering_thread.start()
state.delivering = True
@ -864,17 +879,16 @@ def _subscribe(state, callback, try_to_connect):
def _unsubscribe(state, callback):
with state.lock:
for index, (subscribed_callback, unused_connectivity
) in enumerate(state.callbacks_and_connectivities):
for index, (subscribed_callback, unused_connectivity) in enumerate(
state.callbacks_and_connectivities):
if callback == subscribed_callback:
state.callbacks_and_connectivities.pop(index)
break
def _options(options):
return list(options) + [
(cygrpc.ChannelArgKey.primary_user_agent_string, _USER_AGENT)
]
return list(options) + [(cygrpc.ChannelArgKey.primary_user_agent_string,
_USER_AGENT)]
class Channel(grpc.Channel):
@ -889,8 +903,8 @@ class Channel(grpc.Channel):
credentials: A cygrpc.ChannelCredentials or None.
"""
self._channel = cygrpc.Channel(
_common.encode(target),
_common.channel_args(_options(options)), credentials)
_common.encode(target), _common.channel_args(_options(options)),
credentials)
self._call_state = _ChannelCallState(self._channel)
self._connectivity_state = _ChannelConnectivityState(self._channel)
@ -910,8 +924,7 @@ class Channel(grpc.Channel):
request_serializer=None,
response_deserializer=None):
return _UnaryUnaryMultiCallable(
self._channel,
_channel_managed_call_management(self._call_state),
self._channel, _channel_managed_call_management(self._call_state),
_common.encode(method), request_serializer, response_deserializer)
def unary_stream(self,
@ -919,8 +932,7 @@ class Channel(grpc.Channel):
request_serializer=None,
response_deserializer=None):
return _UnaryStreamMultiCallable(
self._channel,
_channel_managed_call_management(self._call_state),
self._channel, _channel_managed_call_management(self._call_state),
_common.encode(method), request_serializer, response_deserializer)
def stream_unary(self,
@ -928,8 +940,7 @@ class Channel(grpc.Channel):
request_serializer=None,
response_deserializer=None):
return _StreamUnaryMultiCallable(
self._channel,
_channel_managed_call_management(self._call_state),
self._channel, _channel_managed_call_management(self._call_state),
_common.encode(method), request_serializer, response_deserializer)
def stream_stream(self,
@ -937,8 +948,7 @@ class Channel(grpc.Channel):
request_serializer=None,
response_deserializer=None):
return _StreamStreamMultiCallable(
self._channel,
_channel_managed_call_management(self._call_state),
self._channel, _channel_managed_call_management(self._call_state),
_common.encode(method), request_serializer, response_deserializer)
def __del__(self):
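The _channel.py changes above swap the old cygrpc.operation_* factory functions for Operation classes (SendMessageOperation and friends) and turn the completed-batch attributes into accessor methods such as type(), message(), code() and details(). A self-contained toy version of that accessor style, using stand-in classes rather than the real cygrpc types:

class FakeReceiveMessageOperation(object):
    """Stand-in mimicking the method-based result accessors used above."""

    def __init__(self, payload):
        self._payload = payload

    def type(self):
        return 'receive_message'

    def message(self):
        return self._payload

def handle(batch_operations):
    # Mirrors the shape of _handle_event: dispatch on op.type(), read via accessors.
    for op in batch_operations:
        if op.type() == 'receive_message':
            print('received', op.message())

handle([FakeReceiveMessageOperation(b'\x08\x01')])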

@ -26,16 +26,13 @@ cdef class Call:
def _start_batch(self, operations, tag, retain_self):
if not self.is_valid:
raise ValueError("invalid call object cannot be used from Python")
cdef OperationTag operation_tag = OperationTag(tag, operations)
if retain_self:
operation_tag.operation_call = self
else:
operation_tag.operation_call = None
operation_tag.store_ops()
cpython.Py_INCREF(operation_tag)
cdef _BatchOperationTag batch_operation_tag = _BatchOperationTag(
tag, operations, self if retain_self else None)
batch_operation_tag.prepare()
cpython.Py_INCREF(batch_operation_tag)
return grpc_call_start_batch(
self.c_call, operation_tag.c_ops, operation_tag.c_nops,
<cpython.PyObject *>operation_tag, NULL)
self.c_call, batch_operation_tag.c_ops, batch_operation_tag.c_nops,
<cpython.PyObject *>batch_operation_tag, NULL)
def start_client_batch(self, operations, tag):
# We don't reference this call in the operations tag because

@ -76,12 +76,12 @@ cdef class Channel:
def watch_connectivity_state(
self, grpc_connectivity_state last_observed_state,
Timespec deadline not None, CompletionQueue queue not None, tag):
cdef OperationTag operation_tag = OperationTag(tag, None)
cpython.Py_INCREF(operation_tag)
cdef _ConnectivityTag connectivity_tag = _ConnectivityTag(tag)
cpython.Py_INCREF(connectivity_tag)
with nogil:
grpc_channel_watch_connectivity_state(
self.c_channel, last_observed_state, deadline.c_time,
queue.c_completion_queue, <cpython.PyObject *>operation_tag)
queue.c_completion_queue, <cpython.PyObject *>connectivity_tag)
def target(self):
cdef char *target = NULL

@ -37,42 +37,20 @@ cdef class CompletionQueue:
self.is_shutdown = False
cdef _interpret_event(self, grpc_event event):
cdef OperationTag tag = None
cdef object user_tag = None
cdef Call operation_call = None
cdef CallDetails request_call_details = None
cdef object request_metadata = None
cdef object batch_operations = None
cdef _Tag tag = None
if event.type == GRPC_QUEUE_TIMEOUT:
return Event(
event.type, False, None, None, None, None, False, None)
# NOTE(nathaniel): For now we coopt ConnectivityEvent here.
return ConnectivityEvent(GRPC_QUEUE_TIMEOUT, False, None)
elif event.type == GRPC_QUEUE_SHUTDOWN:
self.is_shutdown = True
return Event(
event.type, True, None, None, None, None, False, None)
# NOTE(nathaniel): For now we coopt ConnectivityEvent here.
return ConnectivityEvent(GRPC_QUEUE_TIMEOUT, True, None)
else:
if event.tag != NULL:
tag = <OperationTag>event.tag
# We receive event tags only after they've been inc-ref'd elsewhere in
# the code.
cpython.Py_DECREF(tag)
if tag.shutting_down_server is not None:
tag.shutting_down_server.notify_shutdown_complete()
user_tag = tag.user_tag
operation_call = tag.operation_call
request_call_details = tag.request_call_details
if tag.is_new_request:
request_metadata = _metadata(&tag._c_request_metadata)
grpc_metadata_array_destroy(&tag._c_request_metadata)
batch_operations = tag.release_ops()
if tag.is_new_request:
# Stuff in the tag not explicitly handled by us needs to live through
# the life of the call
operation_call.references.extend(tag.references)
return Event(
event.type, event.success, user_tag, operation_call,
request_call_details, request_metadata, tag.is_new_request,
batch_operations)
tag = <_Tag>event.tag
# We receive event tags only after they've been inc-ref'd elsewhere in
# the code.
cpython.Py_DECREF(tag)
return tag.event(event)
def poll(self, Timespec deadline=None):
# We name this 'poll' to avoid problems with CPython's expectations for

@ -0,0 +1,45 @@
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cdef class ConnectivityEvent:
cdef readonly grpc_completion_type completion_type
cdef readonly bint success
cdef readonly object tag
cdef class RequestCallEvent:
cdef readonly grpc_completion_type completion_type
cdef readonly bint success
cdef readonly object tag
cdef readonly Call call
cdef readonly CallDetails call_details
cdef readonly tuple invocation_metadata
cdef class BatchOperationEvent:
cdef readonly grpc_completion_type completion_type
cdef readonly bint success
cdef readonly object tag
cdef readonly object batch_operations
cdef class ServerShutdownEvent:
cdef readonly grpc_completion_type completion_type
cdef readonly bint success
cdef readonly object tag
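Taken together, the completion-queue rewrite and the new event/tag classes move event interpretation out of _interpret_event and onto the tag itself: each tag knows which Event subclass to build from the raw grpc_event. A plain-Python sketch of that dispatch pattern (the names below are illustrative, not the real cygrpc types):

import collections

RawEvent = collections.namedtuple('RawEvent', ('type', 'success', 'tag'))
ConnectivityEvent = collections.namedtuple(
    'ConnectivityEvent', ('completion_type', 'success', 'tag'))

class ConnectivityTag(object):
    def __init__(self, user_tag):
        self._user_tag = user_tag

    def event(self, raw_event):
        # The tag, not the completion queue, decides which event type to build.
        return ConnectivityEvent(raw_event.type, raw_event.success, self._user_tag)

def interpret_event(raw_event):
    return raw_event.tag.event(raw_event)

raw = RawEvent(type='op_complete', success=True, tag=ConnectivityTag('watch-1'))
print(interpret_event(raw))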

@ -0,0 +1,55 @@
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cdef class ConnectivityEvent:
def __cinit__(
self, grpc_completion_type completion_type, bint success, object tag):
self.completion_type = completion_type
self.success = success
self.tag = tag
cdef class RequestCallEvent:
def __cinit__(
self, grpc_completion_type completion_type, bint success, object tag,
Call call, CallDetails call_details, tuple invocation_metadata):
self.completion_type = completion_type
self.success = success
self.tag = tag
self.call = call
self.call_details = call_details
self.invocation_metadata = invocation_metadata
cdef class BatchOperationEvent:
def __cinit__(
self, grpc_completion_type completion_type, bint success, object tag,
object batch_operations):
self.completion_type = completion_type
self.success = success
self.tag = tag
self.batch_operations = batch_operations
cdef class ServerShutdownEvent:
def __cinit__(
self, grpc_completion_type completion_type, bint success, object tag):
self.completion_type = completion_type
self.success = success
self.tag = tag

@ -17,6 +17,7 @@ cimport libc.time
# Typedef types with approximately the same semantics to provide their names to
# Cython
ctypedef unsigned char uint8_t
ctypedef int int32_t
ctypedef unsigned uint32_t
ctypedef long int64_t
@ -25,6 +26,7 @@ ctypedef long int64_t
cdef extern from "grpc/support/alloc.h":
void *gpr_malloc(size_t size) nogil
void *gpr_zalloc(size_t size) nogil
void gpr_free(void *ptr) nogil
void *gpr_realloc(void *p, size_t size) nogil
@ -183,6 +185,18 @@ cdef extern from "grpc/grpc.h":
size_t arguments_length "num_args"
grpc_arg *arguments "args"
ctypedef enum grpc_compression_level:
GRPC_COMPRESS_LEVEL_NONE
GRPC_COMPRESS_LEVEL_LOW
GRPC_COMPRESS_LEVEL_MED
GRPC_COMPRESS_LEVEL_HIGH
ctypedef enum grpc_stream_compression_level:
GRPC_STREAM_COMPRESS_LEVEL_NONE
GRPC_STREAM_COMPRESS_LEVEL_LOW
GRPC_STREAM_COMPRESS_LEVEL_MED
GRPC_STREAM_COMPRESS_LEVEL_HIGH
ctypedef enum grpc_call_error:
GRPC_CALL_OK
GRPC_CALL_ERROR
@ -258,9 +272,19 @@ cdef extern from "grpc/grpc.h":
GRPC_OP_RECV_STATUS_ON_CLIENT
GRPC_OP_RECV_CLOSE_ON_SERVER
ctypedef struct grpc_op_send_initial_metadata_maybe_compression_level:
uint8_t is_set
grpc_compression_level level
ctypedef struct grpc_op_send_initial_metadata_maybe_stream_compression_level:
uint8_t is_set
grpc_stream_compression_level level
ctypedef struct grpc_op_data_send_initial_metadata:
size_t count
grpc_metadata *metadata
grpc_op_send_initial_metadata_maybe_compression_level maybe_compression_level
grpc_op_send_initial_metadata_maybe_stream_compression_level maybe_stream_compression_level
ctypedef struct grpc_op_data_send_status_from_server:
size_t trailing_metadata_count
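The grpc.pxi additions above expose the core compression and stream-compression level enums so that send_initial_metadata can carry a per-call compression level. At the channel level the default algorithm is requested through a channel argument instead; a hedged Python sketch, assuming the key string "grpc.default_compression_algorithm" and the value 2 for gzip from the core compression headers (neither is defined in this diff):

import grpc

# Assumed constants from grpc/impl/codegen/compression_types.h:
# GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM and GRPC_COMPRESS_GZIP.
DEFAULT_ALGORITHM_ARG = 'grpc.default_compression_algorithm'
GZIP = 2

channel = grpc.insecure_channel(
    'localhost:50051',
    options=((DEFAULT_ALGORITHM_ARG, GZIP),))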

@ -0,0 +1,109 @@
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cdef class Operation:
cdef void c(self)
cdef void un_c(self)
# TODO(https://github.com/grpc/grpc/issues/7950): Eliminate this!
cdef grpc_op c_op
cdef class SendInitialMetadataOperation(Operation):
cdef readonly object _initial_metadata;
cdef readonly int _flags
cdef grpc_metadata *_c_initial_metadata
cdef size_t _c_initial_metadata_count
cdef void c(self)
cdef void un_c(self)
cdef class SendMessageOperation(Operation):
cdef readonly bytes _message
cdef readonly int _flags
cdef grpc_byte_buffer *_c_message_byte_buffer
cdef void c(self)
cdef void un_c(self)
cdef class SendCloseFromClientOperation(Operation):
cdef readonly int _flags
cdef void c(self)
cdef void un_c(self)
cdef class SendStatusFromServerOperation(Operation):
cdef readonly object _trailing_metadata
cdef readonly object _code
cdef readonly object _details
cdef readonly int _flags
cdef grpc_metadata *_c_trailing_metadata
cdef size_t _c_trailing_metadata_count
cdef grpc_slice _c_details
cdef void c(self)
cdef void un_c(self)
cdef class ReceiveInitialMetadataOperation(Operation):
cdef readonly int _flags
cdef tuple _initial_metadata
cdef grpc_metadata_array _c_initial_metadata
cdef void c(self)
cdef void un_c(self)
cdef class ReceiveMessageOperation(Operation):
cdef readonly int _flags
cdef grpc_byte_buffer *_c_message_byte_buffer
cdef bytes _message
cdef void c(self)
cdef void un_c(self)
cdef class ReceiveStatusOnClientOperation(Operation):
cdef readonly int _flags
cdef grpc_metadata_array _c_trailing_metadata
cdef grpc_status_code _c_code
cdef grpc_slice _c_details
cdef tuple _trailing_metadata
cdef object _code
cdef str _details
cdef void c(self)
cdef void un_c(self)
cdef class ReceiveCloseOnServerOperation(Operation):
cdef readonly int _flags
cdef object _cancelled
cdef int _c_cancelled
cdef void c(self)
cdef void un_c(self)

@ -0,0 +1,238 @@
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cdef class Operation:
cdef void c(self):
raise NotImplementedError()
cdef void un_c(self):
raise NotImplementedError()
cdef class SendInitialMetadataOperation(Operation):
def __cinit__(self, initial_metadata, flags):
self._initial_metadata = initial_metadata
self._flags = flags
def type(self):
return GRPC_OP_SEND_INITIAL_METADATA
cdef void c(self):
self.c_op.type = GRPC_OP_SEND_INITIAL_METADATA
self.c_op.flags = self._flags
_store_c_metadata(
self._initial_metadata, &self._c_initial_metadata,
&self._c_initial_metadata_count)
self.c_op.data.send_initial_metadata.metadata = self._c_initial_metadata
self.c_op.data.send_initial_metadata.count = self._c_initial_metadata_count
self.c_op.data.send_initial_metadata.maybe_compression_level.is_set = 0
self.c_op.data.send_initial_metadata.maybe_stream_compression_level.is_set = 0
cdef void un_c(self):
_release_c_metadata(
self._c_initial_metadata, self._c_initial_metadata_count)
cdef class SendMessageOperation(Operation):
def __cinit__(self, bytes message, int flags):
self._message = message
self._flags = flags
def type(self):
return GRPC_OP_SEND_MESSAGE
cdef void c(self):
self.c_op.type = GRPC_OP_SEND_MESSAGE
self.c_op.flags = self._flags
cdef grpc_slice message_slice = grpc_slice_from_copied_buffer(
self._message, len(self._message))
self._c_message_byte_buffer = grpc_raw_byte_buffer_create(
&message_slice, 1)
grpc_slice_unref(message_slice)
self.c_op.data.send_message.send_message = self._c_message_byte_buffer
cdef void un_c(self):
grpc_byte_buffer_destroy(self._c_message_byte_buffer)
cdef class SendCloseFromClientOperation(Operation):
def __cinit__(self, int flags):
self._flags = flags
def type(self):
return GRPC_OP_SEND_CLOSE_FROM_CLIENT
cdef void c(self):
self.c_op.type = GRPC_OP_SEND_CLOSE_FROM_CLIENT
self.c_op.flags = self._flags
cdef void un_c(self):
pass
cdef class SendStatusFromServerOperation(Operation):
def __cinit__(self, trailing_metadata, code, object details, int flags):
self._trailing_metadata = trailing_metadata
self._code = code
self._details = details
self._flags = flags
def type(self):
return GRPC_OP_SEND_STATUS_FROM_SERVER
cdef void c(self):
self.c_op.type = GRPC_OP_SEND_STATUS_FROM_SERVER
self.c_op.flags = self._flags
_store_c_metadata(
self._trailing_metadata, &self._c_trailing_metadata,
&self._c_trailing_metadata_count)
self.c_op.data.send_status_from_server.trailing_metadata = (
self._c_trailing_metadata)
self.c_op.data.send_status_from_server.trailing_metadata_count = (
self._c_trailing_metadata_count)
self.c_op.data.send_status_from_server.status = self._code
self._c_details = _slice_from_bytes(_encode(self._details))
self.c_op.data.send_status_from_server.status_details = &self._c_details
cdef void un_c(self):
grpc_slice_unref(self._c_details)
_release_c_metadata(
self._c_trailing_metadata, self._c_trailing_metadata_count)
cdef class ReceiveInitialMetadataOperation(Operation):
def __cinit__(self, flags):
self._flags = flags
def type(self):
return GRPC_OP_RECV_INITIAL_METADATA
cdef void c(self):
self.c_op.type = GRPC_OP_RECV_INITIAL_METADATA
self.c_op.flags = self._flags
grpc_metadata_array_init(&self._c_initial_metadata)
self.c_op.data.receive_initial_metadata.receive_initial_metadata = (
&self._c_initial_metadata)
cdef void un_c(self):
self._initial_metadata = _metadata(&self._c_initial_metadata)
grpc_metadata_array_destroy(&self._c_initial_metadata)
def initial_metadata(self):
return self._initial_metadata
cdef class ReceiveMessageOperation(Operation):
def __cinit__(self, flags):
self._flags = flags
def type(self):
return GRPC_OP_RECV_MESSAGE
cdef void c(self):
self.c_op.type = GRPC_OP_RECV_MESSAGE
self.c_op.flags = self._flags
self.c_op.data.receive_message.receive_message = (
&self._c_message_byte_buffer)
cdef void un_c(self):
cdef grpc_byte_buffer_reader message_reader
cdef bint message_reader_status
cdef grpc_slice message_slice
cdef size_t message_slice_length
cdef void *message_slice_pointer
if self._c_message_byte_buffer != NULL:
message_reader_status = grpc_byte_buffer_reader_init(
&message_reader, self._c_message_byte_buffer)
if message_reader_status:
message = bytearray()
while grpc_byte_buffer_reader_next(&message_reader, &message_slice):
message_slice_pointer = grpc_slice_start_ptr(message_slice)
message_slice_length = grpc_slice_length(message_slice)
message += (<char *>message_slice_pointer)[:message_slice_length]
grpc_slice_unref(message_slice)
grpc_byte_buffer_reader_destroy(&message_reader)
self._message = bytes(message)
else:
self._message = None
grpc_byte_buffer_destroy(self._c_message_byte_buffer)
else:
self._message = None
def message(self):
return self._message
cdef class ReceiveStatusOnClientOperation(Operation):
def __cinit__(self, flags):
self._flags = flags
def type(self):
return GRPC_OP_RECV_STATUS_ON_CLIENT
cdef void c(self):
self.c_op.type = GRPC_OP_RECV_STATUS_ON_CLIENT
self.c_op.flags = self._flags
grpc_metadata_array_init(&self._c_trailing_metadata)
self.c_op.data.receive_status_on_client.trailing_metadata = (
&self._c_trailing_metadata)
self.c_op.data.receive_status_on_client.status = (
&self._c_code)
self.c_op.data.receive_status_on_client.status_details = (
&self._c_details)
cdef void un_c(self):
self._trailing_metadata = _metadata(&self._c_trailing_metadata)
grpc_metadata_array_destroy(&self._c_trailing_metadata)
self._code = self._c_code
self._details = _decode(_slice_bytes(self._c_details))
grpc_slice_unref(self._c_details)
def trailing_metadata(self):
return self._trailing_metadata
def code(self):
return self._code
def details(self):
return self._details
cdef class ReceiveCloseOnServerOperation(Operation):
def __cinit__(self, flags):
self._flags = flags
def type(self):
return GRPC_OP_RECV_CLOSE_ON_SERVER
cdef void c(self):
self.c_op.type = GRPC_OP_RECV_CLOSE_ON_SERVER
self.c_op.flags = self._flags
self.c_op.data.receive_close_on_server.cancelled = &self._c_cancelled
cdef void un_c(self):
self._cancelled = bool(self._c_cancelled)
def cancelled(self):
return self._cancelled
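Every Operation above follows the same two-phase protocol: c() fills in the underlying grpc_op before the batch starts, un_c() harvests the result (and releases C resources) once the batch completes, and plain accessors such as code() or details() then return whatever un_c() stored. A tiny pure-Python analogue of that lifecycle, with made-up names:

class FakeReceiveStatusOnClientOperation(object):
    """Mimics the c()/un_c() lifecycle of the Cython Operation classes above."""

    def __init__(self):
        self._c_code = None      # stands in for the grpc_status_code out-param
        self._c_details = None   # stands in for the grpc_slice out-param
        self._code = None
        self._details = None

    def c(self):
        # Before the batch: give the core storage it can write the status into.
        self._c_code = 0
        self._c_details = b''

    def un_c(self):
        # After the batch: convert the C-side results into Python values, then drop them.
        self._code = self._c_code
        self._details = self._c_details.decode('utf8')
        self._c_code = None
        self._c_details = None

    def code(self):
        return self._code

    def details(self):
        return self._details

op = FakeReceiveStatusOnClientOperation()
op.c()
op._c_code, op._c_details = 0, b'OK'   # the core would fill these during the batch
op.un_c()
print(op.code(), op.details())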

@ -28,48 +28,6 @@ cdef class CallDetails:
cdef grpc_call_details c_details
cdef class OperationTag:
cdef object user_tag
cdef list references
# This allows CompletionQueue to notify the Python Server object that the
# underlying GRPC core server has shutdown
cdef Server shutting_down_server
cdef Call operation_call
cdef CallDetails request_call_details
cdef grpc_metadata_array _c_request_metadata
cdef grpc_op *c_ops
cdef size_t c_nops
cdef readonly object _operations
cdef bint is_new_request
cdef void store_ops(self)
cdef object release_ops(self)
cdef class Event:
cdef readonly grpc_completion_type type
cdef readonly bint success
cdef readonly object tag
# For Server.request_call
cdef readonly bint is_new_request
cdef readonly CallDetails request_call_details
cdef readonly object request_metadata
# For server calls
cdef readonly Call operation_call
# For Call.start_batch
cdef readonly object batch_operations
cdef class ByteBuffer:
cdef grpc_byte_buffer *c_byte_buffer
cdef class SslPemKeyCertPair:
cdef grpc_ssl_pem_key_cert_pair c_pair
@ -89,22 +47,6 @@ cdef class ChannelArgs:
cdef list args
cdef class Operation:
cdef grpc_op c_op
cdef bint _c_metadata_needs_release
cdef size_t _c_metadata_count
cdef grpc_metadata *_c_metadata
cdef ByteBuffer _received_message
cdef bint _c_metadata_array_needs_destruction
cdef grpc_metadata_array _c_metadata_array
cdef grpc_status_code _received_status_code
cdef grpc_slice _status_details
cdef int _received_cancelled
cdef readonly bint is_valid
cdef object references
cdef class CompressionOptions:
cdef grpc_compression_options c_options

@ -218,111 +218,6 @@ cdef class CallDetails:
return timespec
cdef class OperationTag:
def __cinit__(self, user_tag, operations):
self.user_tag = user_tag
self.references = []
self._operations = operations
cdef void store_ops(self):
self.c_nops = 0 if self._operations is None else len(self._operations)
if 0 < self.c_nops:
self.c_ops = <grpc_op *>gpr_malloc(sizeof(grpc_op) * self.c_nops)
for index in range(self.c_nops):
self.c_ops[index] = (<Operation>(self._operations[index])).c_op
cdef object release_ops(self):
if 0 < self.c_nops:
for index, operation in enumerate(self._operations):
(<Operation>operation).c_op = self.c_ops[index]
gpr_free(self.c_ops)
return self._operations
else:
return ()
cdef class Event:
def __cinit__(self, grpc_completion_type type, bint success,
object tag, Call operation_call,
CallDetails request_call_details,
object request_metadata,
bint is_new_request,
object batch_operations):
self.type = type
self.success = success
self.tag = tag
self.operation_call = operation_call
self.request_call_details = request_call_details
self.request_metadata = request_metadata
self.batch_operations = batch_operations
self.is_new_request = is_new_request
cdef class ByteBuffer:
def __cinit__(self, bytes data):
grpc_init()
if data is None:
self.c_byte_buffer = NULL
return
cdef char *c_data = data
cdef grpc_slice data_slice
cdef size_t data_length = len(data)
with nogil:
data_slice = grpc_slice_from_copied_buffer(c_data, data_length)
with nogil:
self.c_byte_buffer = grpc_raw_byte_buffer_create(
&data_slice, 1)
with nogil:
grpc_slice_unref(data_slice)
def bytes(self):
cdef grpc_byte_buffer_reader reader
cdef grpc_slice data_slice
cdef size_t data_slice_length
cdef void *data_slice_pointer
cdef bint reader_status
if self.c_byte_buffer != NULL:
with nogil:
reader_status = grpc_byte_buffer_reader_init(
&reader, self.c_byte_buffer)
if not reader_status:
return None
result = bytearray()
with nogil:
while grpc_byte_buffer_reader_next(&reader, &data_slice):
data_slice_pointer = grpc_slice_start_ptr(data_slice)
data_slice_length = grpc_slice_length(data_slice)
with gil:
result += (<char *>data_slice_pointer)[:data_slice_length]
grpc_slice_unref(data_slice)
with nogil:
grpc_byte_buffer_reader_destroy(&reader)
return bytes(result)
else:
return None
def __len__(self):
cdef size_t result
if self.c_byte_buffer != NULL:
with nogil:
result = grpc_byte_buffer_length(self.c_byte_buffer)
return result
else:
return 0
def __str__(self):
return self.bytes()
def __dealloc__(self):
if self.c_byte_buffer != NULL:
grpc_byte_buffer_destroy(self.c_byte_buffer)
grpc_shutdown()
cdef class SslPemKeyCertPair:
def __cinit__(self, bytes private_key, bytes certificate_chain):
@ -407,185 +302,6 @@ cdef class ChannelArgs:
return self.args[i]
cdef class Operation:
def __cinit__(self):
grpc_init()
self.references = []
self._c_metadata_needs_release = False
self._c_metadata_array_needs_destruction = False
self._status_details = grpc_empty_slice()
self.is_valid = False
@property
def type(self):
return self.c_op.type
@property
def flags(self):
return self.c_op.flags
@property
def has_status(self):
return self.c_op.type == GRPC_OP_RECV_STATUS_ON_CLIENT
@property
def received_message(self):
if self.c_op.type != GRPC_OP_RECV_MESSAGE:
raise TypeError("self must be an operation receiving a message")
return self._received_message
@property
def received_message_or_none(self):
if self.c_op.type != GRPC_OP_RECV_MESSAGE:
return None
return self._received_message
@property
def received_metadata(self):
if (self.c_op.type != GRPC_OP_RECV_INITIAL_METADATA and
self.c_op.type != GRPC_OP_RECV_STATUS_ON_CLIENT):
raise TypeError("self must be an operation receiving metadata")
return _metadata(&self._c_metadata_array)
@property
def received_status_code(self):
if self.c_op.type != GRPC_OP_RECV_STATUS_ON_CLIENT:
raise TypeError("self must be an operation receiving a status code")
return self._received_status_code
@property
def received_status_code_or_none(self):
if self.c_op.type != GRPC_OP_RECV_STATUS_ON_CLIENT:
return None
return self._received_status_code
@property
def received_status_details(self):
if self.c_op.type != GRPC_OP_RECV_STATUS_ON_CLIENT:
raise TypeError("self must be an operation receiving status details")
return _slice_bytes(self._status_details)
@property
def received_status_details_or_none(self):
if self.c_op.type != GRPC_OP_RECV_STATUS_ON_CLIENT:
return None
return _slice_bytes(self._status_details)
@property
def received_cancelled(self):
if self.c_op.type != GRPC_OP_RECV_CLOSE_ON_SERVER:
raise TypeError("self must be an operation receiving cancellation "
"information")
return False if self._received_cancelled == 0 else True
@property
def received_cancelled_or_none(self):
if self.c_op.type != GRPC_OP_RECV_CLOSE_ON_SERVER:
return None
return False if self._received_cancelled == 0 else True
def __dealloc__(self):
if self._c_metadata_needs_release:
_release_c_metadata(self._c_metadata, self._c_metadata_count)
if self._c_metadata_array_needs_destruction:
grpc_metadata_array_destroy(&self._c_metadata_array)
grpc_slice_unref(self._status_details)
grpc_shutdown()
def operation_send_initial_metadata(metadata, int flags):
cdef Operation op = Operation()
op.c_op.type = GRPC_OP_SEND_INITIAL_METADATA
op.c_op.flags = flags
_store_c_metadata(metadata, &op._c_metadata, &op._c_metadata_count)
op._c_metadata_needs_release = True
op.c_op.data.send_initial_metadata.count = op._c_metadata_count
op.c_op.data.send_initial_metadata.metadata = op._c_metadata
op.is_valid = True
return op
def operation_send_message(data, int flags):
cdef Operation op = Operation()
op.c_op.type = GRPC_OP_SEND_MESSAGE
op.c_op.flags = flags
byte_buffer = ByteBuffer(data)
op.c_op.data.send_message.send_message = byte_buffer.c_byte_buffer
op.references.append(byte_buffer)
op.is_valid = True
return op
def operation_send_close_from_client(int flags):
cdef Operation op = Operation()
op.c_op.type = GRPC_OP_SEND_CLOSE_FROM_CLIENT
op.c_op.flags = flags
op.is_valid = True
return op
def operation_send_status_from_server(
metadata, grpc_status_code code, bytes details, int flags):
cdef Operation op = Operation()
op.c_op.type = GRPC_OP_SEND_STATUS_FROM_SERVER
op.c_op.flags = flags
_store_c_metadata(metadata, &op._c_metadata, &op._c_metadata_count)
op._c_metadata_needs_release = True
op.c_op.data.send_status_from_server.trailing_metadata_count = (
op._c_metadata_count)
op.c_op.data.send_status_from_server.trailing_metadata = op._c_metadata
op.c_op.data.send_status_from_server.status = code
grpc_slice_unref(op._status_details)
op._status_details = _slice_from_bytes(details)
op.c_op.data.send_status_from_server.status_details = &op._status_details
op.is_valid = True
return op
def operation_receive_initial_metadata(int flags):
cdef Operation op = Operation()
op.c_op.type = GRPC_OP_RECV_INITIAL_METADATA
op.c_op.flags = flags
grpc_metadata_array_init(&op._c_metadata_array)
op.c_op.data.receive_initial_metadata.receive_initial_metadata = (
&op._c_metadata_array)
op._c_metadata_array_needs_destruction = True
op.is_valid = True
return op
def operation_receive_message(int flags):
cdef Operation op = Operation()
op.c_op.type = GRPC_OP_RECV_MESSAGE
op.c_op.flags = flags
op._received_message = ByteBuffer(None)
# n.b. the c_op.data.receive_message field needs to be deleted by us,
# anyway, so we just let that be handled by the ByteBuffer() we allocated
# the line before.
op.c_op.data.receive_message.receive_message = (
&op._received_message.c_byte_buffer)
op.is_valid = True
return op
def operation_receive_status_on_client(int flags):
cdef Operation op = Operation()
op.c_op.type = GRPC_OP_RECV_STATUS_ON_CLIENT
op.c_op.flags = flags
grpc_metadata_array_init(&op._c_metadata_array)
op.c_op.data.receive_status_on_client.trailing_metadata = (
&op._c_metadata_array)
op._c_metadata_array_needs_destruction = True
op.c_op.data.receive_status_on_client.status = (
&op._received_status_code)
op.c_op.data.receive_status_on_client.status_details = (
&op._status_details)
op.is_valid = True
return op
def operation_receive_close_on_server(int flags):
cdef Operation op = Operation()
op.c_op.type = GRPC_OP_RECV_CLOSE_ON_SERVER
op.c_op.flags = flags
op.c_op.data.receive_close_on_server.cancelled = &op._received_cancelled
op.is_valid = True
return op
cdef class CompressionOptions:
def __cinit__(self):

@ -78,19 +78,15 @@ cdef class Server:
raise ValueError("server must be started and not shutting down")
if server_queue not in self.registered_completion_queues:
raise ValueError("server_queue must be a registered completion queue")
cdef OperationTag operation_tag = OperationTag(tag, None)
operation_tag.operation_call = Call()
operation_tag.request_call_details = CallDetails()
grpc_metadata_array_init(&operation_tag._c_request_metadata)
operation_tag.references.extend([self, call_queue, server_queue])
operation_tag.is_new_request = True
cpython.Py_INCREF(operation_tag)
cdef _RequestCallTag request_call_tag = _RequestCallTag(tag)
request_call_tag.prepare()
cpython.Py_INCREF(request_call_tag)
return grpc_server_request_call(
self.c_server, &operation_tag.operation_call.c_call,
&operation_tag.request_call_details.c_details,
&operation_tag._c_request_metadata,
self.c_server, &request_call_tag.call.c_call,
&request_call_tag.call_details.c_details,
&request_call_tag.c_invocation_metadata,
call_queue.c_completion_queue, server_queue.c_completion_queue,
<cpython.PyObject *>operation_tag)
<cpython.PyObject *>request_call_tag)
def register_completion_queue(
self, CompletionQueue queue not None):
@ -131,16 +127,14 @@ cdef class Server:
cdef _c_shutdown(self, CompletionQueue queue, tag):
self.is_shutting_down = True
operation_tag = OperationTag(tag, None)
operation_tag.shutting_down_server = self
cpython.Py_INCREF(operation_tag)
cdef _ServerShutdownTag server_shutdown_tag = _ServerShutdownTag(tag, self)
cpython.Py_INCREF(server_shutdown_tag)
with nogil:
grpc_server_shutdown_and_notify(
self.c_server, queue.c_completion_queue,
<cpython.PyObject *>operation_tag)
<cpython.PyObject *>server_shutdown_tag)
def shutdown(self, CompletionQueue queue not None, tag):
cdef OperationTag operation_tag
if queue.is_shutting_down:
raise ValueError("queue must be live")
elif not self.is_started:
@ -153,7 +147,8 @@ cdef class Server:
self._c_shutdown(queue, tag)
cdef notify_shutdown_complete(self):
# called only by a completion queue on receiving our shutdown operation tag
# called only after our server shutdown tag has emerged from a completion
# queue.
self.is_shutdown = True
def cancel_all_calls(self):

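The server hunks above replace the old catch-all OperationTag with purpose-built tags: request_call now prepares a _RequestCallTag and shutdown hands a _ServerShutdownTag to the core. A rough, self-contained Python sketch of the request-call tag's lifecycle (placeholder classes and tuples, not the real cygrpc objects):

# Illustrative stand-in for the request-call tag lifecycle; the real cygrpc
# tag wraps C structs (grpc_call, grpc_call_details, grpc_metadata_array).
class FakeRequestCallTag(object):
    def __init__(self, user_tag):
        self.user_tag = user_tag
        self.call = None
        self.call_details = None

    def prepare(self):
        # In cygrpc this allocates a Call and a CallDetails and initializes
        # the invocation-metadata array before grpc_server_request_call.
        self.call = object()
        self.call_details = {'method': '/pkg.Service/Method'}

    def event(self, success):
        # In cygrpc this converts the dequeued grpc_event into a
        # RequestCallEvent.
        return ('request_call', success, self.user_tag, self.call,
                self.call_details)

tag = FakeRequestCallTag('my-tag')
tag.prepare()            # done before handing the tag to the core
print(tag.event(True))   # done after the tag emerges from a completion queue
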
@ -0,0 +1,58 @@
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cdef class _Tag:
cdef object event(self, grpc_event c_event)
cdef class _ConnectivityTag(_Tag):
cdef readonly object _user_tag
cdef ConnectivityEvent event(self, grpc_event c_event)
cdef class _RequestCallTag(_Tag):
cdef readonly object _user_tag
cdef Call call
cdef CallDetails call_details
cdef grpc_metadata_array c_invocation_metadata
cdef void prepare(self)
cdef RequestCallEvent event(self, grpc_event c_event)
cdef class _BatchOperationTag(_Tag):
cdef object _user_tag
cdef readonly object _operations
cdef readonly object _retained_call
cdef grpc_op *c_ops
cdef size_t c_nops
cdef void prepare(self)
cdef BatchOperationEvent event(self, grpc_event c_event)
cdef class _ServerShutdownTag(_Tag):
cdef readonly object _user_tag
# This allows the CompletionQueue to notify the Python Server object that
# the underlying gRPC core server has shut down.
cdef readonly Server _shutting_down_server
cdef ServerShutdownEvent event(self, grpc_event c_event)

@ -0,0 +1,87 @@
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cdef class _Tag:
cdef object event(self, grpc_event c_event):
raise NotImplementedError()
cdef class _ConnectivityTag(_Tag):
def __cinit__(self, user_tag):
self._user_tag = user_tag
cdef ConnectivityEvent event(self, grpc_event c_event):
return ConnectivityEvent(c_event.type, c_event.success, self._user_tag)
cdef class _RequestCallTag(_Tag):
def __cinit__(self, user_tag):
self._user_tag = user_tag
self.call = None
self.call_details = None
cdef void prepare(self):
self.call = Call()
self.call_details = CallDetails()
grpc_metadata_array_init(&self.c_invocation_metadata)
cdef RequestCallEvent event(self, grpc_event c_event):
cdef tuple invocation_metadata = _metadata(&self.c_invocation_metadata)
grpc_metadata_array_destroy(&self.c_invocation_metadata)
return RequestCallEvent(
c_event.type, c_event.success, self._user_tag, self.call,
self.call_details, invocation_metadata)
cdef class _BatchOperationTag:
def __cinit__(self, user_tag, operations, call):
self._user_tag = user_tag
self._operations = operations
self._retained_call = call
cdef void prepare(self):
self.c_nops = 0 if self._operations is None else len(self._operations)
if 0 < self.c_nops:
self.c_ops = <grpc_op *>gpr_malloc(sizeof(grpc_op) * self.c_nops)
for index, operation in enumerate(self._operations):
(<Operation>operation).c()
self.c_ops[index] = (<Operation>operation).c_op
cdef BatchOperationEvent event(self, grpc_event c_event):
if 0 < self.c_nops:
for index, operation in enumerate(self._operations):
(<Operation>operation).c_op = self.c_ops[index]
(<Operation>operation).un_c()
gpr_free(self.c_ops)
return BatchOperationEvent(
c_event.type, c_event.success, self._user_tag, self._operations)
else:
return BatchOperationEvent(
c_event.type, c_event.success, self._user_tag, ())
cdef class _ServerShutdownTag(_Tag):
def __cinit__(self, user_tag, shutting_down_server):
self._user_tag = user_tag
self._shutting_down_server = shutting_down_server
cdef ServerShutdownEvent event(self, grpc_event c_event):
self._shutting_down_server.notify_shutdown_complete()
return ServerShutdownEvent(c_event.type, c_event.success, self._user_tag)
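The common thread across these tag classes is that a completion queue only ever calls event() on whatever tag it dequeues; each subclass builds its own event type and performs its own side effects (for example, the server-shutdown tag notifies the server). A minimal pure-Python analogue of that dispatch, with simple tuples standing in for the real event types:

# Sketch of event dispatch through the tag's own event() method; the real
# classes receive a grpc_event struct rather than a bare success flag.
class Tag(object):
    def event(self, success):
        raise NotImplementedError()

class ConnectivityTag(Tag):
    def __init__(self, user_tag):
        self._user_tag = user_tag

    def event(self, success):
        return ('connectivity', success, self._user_tag)

class ServerShutdownTag(Tag):
    def __init__(self, user_tag, server):
        self._user_tag = user_tag
        self._server = server

    def event(self, success):
        self._server.notify_shutdown_complete()
        return ('server_shutdown', success, self._user_tag)

class FakeServer(object):
    def __init__(self):
        self.is_shutdown = False

    def notify_shutdown_complete(self):
        self.is_shutdown = True

server = FakeServer()
for tag in (ConnectivityTag('conn'), ServerShutdownTag('down', server)):
    print(tag.event(True))  # the queue never inspects the concrete tag type
print(server.is_shutdown)   # True
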

@ -18,7 +18,10 @@ include "_cygrpc/call.pxd.pxi"
include "_cygrpc/channel.pxd.pxi"
include "_cygrpc/credentials.pxd.pxi"
include "_cygrpc/completion_queue.pxd.pxi"
include "_cygrpc/event.pxd.pxi"
include "_cygrpc/metadata.pxd.pxi"
include "_cygrpc/operation.pxd.pxi"
include "_cygrpc/records.pxd.pxi"
include "_cygrpc/security.pxd.pxi"
include "_cygrpc/server.pxd.pxi"
include "_cygrpc/tag.pxd.pxi"

@ -25,10 +25,13 @@ include "_cygrpc/call.pyx.pxi"
include "_cygrpc/channel.pyx.pxi"
include "_cygrpc/credentials.pyx.pxi"
include "_cygrpc/completion_queue.pyx.pxi"
include "_cygrpc/event.pyx.pxi"
include "_cygrpc/metadata.pyx.pxi"
include "_cygrpc/operation.pyx.pxi"
include "_cygrpc/records.pyx.pxi"
include "_cygrpc/security.pyx.pxi"
include "_cygrpc/server.pyx.pxi"
include "_cygrpc/tag.pyx.pxi"
#
# initialize gRPC

@ -44,9 +44,10 @@ def service_pipeline(interceptors):
class _ClientCallDetails(
collections.namedtuple('_ClientCallDetails',
('method', 'timeout', 'metadata',
'credentials')), grpc.ClientCallDetails):
collections.namedtuple(
'_ClientCallDetails',
('method', 'timeout', 'metadata', 'credentials')),
grpc.ClientCallDetails):
pass
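_ClientCallDetails follows the usual pattern in this codebase for immutable value types that must satisfy an abstract interface: inherit from both a namedtuple (for the fields) and the interface class. A small, self-contained illustration of the pattern, using a made-up interface in place of grpc.ClientCallDetails:

import abc
import collections

class CallDetailsInterface(abc.ABC):
    """Stand-in for an abstract interface such as grpc.ClientCallDetails."""

class _ClientCallDetails(
        collections.namedtuple(
            '_ClientCallDetails',
            ('method', 'timeout', 'metadata', 'credentials')),
        CallDetailsInterface):
    pass

details = _ClientCallDetails('/pkg.Service/Method', 5.0, None, None)
print(isinstance(details, CallDetailsInterface))  # True
print(details.method, details.timeout)            # /pkg.Service/Method 5.0
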

@ -23,7 +23,9 @@ from grpc._cython import cygrpc
class _AuthMetadataContext(
collections.namedtuple('AuthMetadataContext', (
'service_url', 'method_name',)), grpc.AuthMetadataContext):
'service_url',
'method_name',
)), grpc.AuthMetadataContext):
pass
@ -70,8 +72,9 @@ class _Plugin(object):
_common.decode(service_url), _common.decode(method_name))
callback_state = _CallbackState()
try:
self._metadata_plugin(
context, _AuthMetadataPluginCallback(callback_state, callback))
self._metadata_plugin(context,
_AuthMetadataPluginCallback(
callback_state, callback))
except Exception as exception: # pylint: disable=broad-except
logging.exception(
'AuthMetadataPluginCallback "%s" raised exception!',

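The _Plugin wrapper above is what ultimately invokes a user-supplied grpc.AuthMetadataPlugin. For reference, a minimal plugin written against the public API looks roughly like this (the header name and token value are purely illustrative):

import grpc

class StaticTokenAuthMetadataPlugin(grpc.AuthMetadataPlugin):
    """Attaches a fixed bearer token to every call (illustrative only)."""

    def __call__(self, context, callback):
        # context.service_url and context.method_name are available here,
        # mirroring the _AuthMetadataContext fields shown above.
        callback((('authorization', 'Bearer example-token'),), None)

call_credentials = grpc.metadata_call_credentials(
    StaticTokenAuthMetadataPlugin(), name='static-token-auth')
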
@ -50,7 +50,7 @@ _UNEXPECTED_EXIT_SERVER_GRACE = 1.0
def _serialized_request(request_event):
return request_event.batch_operations[0].received_message.bytes()
return request_event.batch_operations[0].message()
def _application_code(code):
@ -78,7 +78,9 @@ def _details(state):
class _HandlerCallDetails(
collections.namedtuple('_HandlerCallDetails', (
'method', 'invocation_metadata',)), grpc.HandlerCallDetails):
'method',
'invocation_metadata',
)), grpc.HandlerCallDetails):
pass
@ -130,13 +132,15 @@ def _abort(state, call, code, details):
effective_code = _abortion_code(state, code)
effective_details = details if state.details is None else state.details
if state.initial_metadata_allowed:
operations = (cygrpc.operation_send_initial_metadata(
(), _EMPTY_FLAGS), cygrpc.operation_send_status_from_server(
operations = (
cygrpc.SendInitialMetadataOperation(None, _EMPTY_FLAGS),
cygrpc.SendStatusFromServerOperation(
state.trailing_metadata, effective_code, effective_details,
_EMPTY_FLAGS),)
_EMPTY_FLAGS),
)
token = _SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN
else:
operations = (cygrpc.operation_send_status_from_server(
operations = (cygrpc.SendStatusFromServerOperation(
state.trailing_metadata, effective_code, effective_details,
_EMPTY_FLAGS),)
token = _SEND_STATUS_FROM_SERVER_TOKEN
@ -150,8 +154,7 @@ def _receive_close_on_server(state):
def receive_close_on_server(receive_close_on_server_event):
with state.condition:
if receive_close_on_server_event.batch_operations[
0].received_cancelled:
if receive_close_on_server_event.batch_operations[0].cancelled():
state.client = _CANCELLED
elif state.client is _OPEN:
state.client = _CLOSED
@ -218,11 +221,10 @@ class _Context(grpc.ServicerContext):
def time_remaining(self):
return max(
float(self._rpc_event.request_call_details.deadline) - time.time(),
0)
float(self._rpc_event.call_details.deadline) - time.time(), 0)
def cancel(self):
self._rpc_event.operation_call.cancel()
self._rpc_event.call.cancel()
def add_callback(self, callback):
with self._state.condition:
@ -237,23 +239,23 @@ class _Context(grpc.ServicerContext):
self._state.disable_next_compression = True
def invocation_metadata(self):
return self._rpc_event.request_metadata
return self._rpc_event.invocation_metadata
def peer(self):
return _common.decode(self._rpc_event.operation_call.peer())
return _common.decode(self._rpc_event.call.peer())
def peer_identities(self):
return cygrpc.peer_identities(self._rpc_event.operation_call)
return cygrpc.peer_identities(self._rpc_event.call)
def peer_identity_key(self):
id_key = cygrpc.peer_identity_key(self._rpc_event.operation_call)
id_key = cygrpc.peer_identity_key(self._rpc_event.call)
return id_key if id_key is None else _common.decode(id_key)
def auth_context(self):
return {
_common.decode(key): value
for key, value in six.iteritems(
cygrpc.auth_context(self._rpc_event.operation_call))
cygrpc.auth_context(self._rpc_event.call))
}
def send_initial_metadata(self, initial_metadata):
@ -262,9 +264,9 @@ class _Context(grpc.ServicerContext):
_raise_rpc_error(self._state)
else:
if self._state.initial_metadata_allowed:
operation = cygrpc.operation_send_initial_metadata(
operation = cygrpc.SendInitialMetadataOperation(
initial_metadata, _EMPTY_FLAGS)
self._rpc_event.operation_call.start_server_batch(
self._rpc_event.call.start_server_batch(
(operation,), _send_initial_metadata(self._state))
self._state.initial_metadata_allowed = False
self._state.due.add(_SEND_INITIAL_METADATA_TOKEN)
@ -305,7 +307,7 @@ class _RequestIterator(object):
raise StopIteration()
else:
self._call.start_server_batch(
(cygrpc.operation_receive_message(_EMPTY_FLAGS),),
(cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
_receive_message(self._state, self._call,
self._request_deserializer))
self._state.due.add(_RECEIVE_MESSAGE_TOKEN)
@ -347,9 +349,9 @@ def _unary_request(rpc_event, state, request_deserializer):
if state.client is _CANCELLED or state.statused:
return None
else:
rpc_event.operation_call.start_server_batch(
(cygrpc.operation_receive_message(_EMPTY_FLAGS),),
_receive_message(state, rpc_event.operation_call,
rpc_event.call.start_server_batch(
(cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
_receive_message(state, rpc_event.call,
request_deserializer))
state.due.add(_RECEIVE_MESSAGE_TOKEN)
while True:
@ -357,8 +359,8 @@ def _unary_request(rpc_event, state, request_deserializer):
if state.request is None:
if state.client is _CLOSED:
details = '"{}" requires exactly one request message.'.format(
rpc_event.request_call_details.method)
_abort(state, rpc_event.operation_call,
rpc_event.call_details.method)
_abort(state, rpc_event.call,
cygrpc.StatusCode.unimplemented,
_common.encode(details))
return None
@ -379,13 +381,13 @@ def _call_behavior(rpc_event, state, behavior, argument, request_deserializer):
except Exception as exception: # pylint: disable=broad-except
with state.condition:
if exception is state.abortion:
_abort(state, rpc_event.operation_call,
cygrpc.StatusCode.unknown, b'RPC Aborted')
_abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
b'RPC Aborted')
elif exception not in state.rpc_errors:
details = 'Exception calling application: {}'.format(exception)
logging.exception(details)
_abort(state, rpc_event.operation_call,
cygrpc.StatusCode.unknown, _common.encode(details))
_abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
_common.encode(details))
return None, False
@ -397,13 +399,13 @@ def _take_response_from_response_iterator(rpc_event, state, response_iterator):
except Exception as exception: # pylint: disable=broad-except
with state.condition:
if exception is state.abortion:
_abort(state, rpc_event.operation_call,
cygrpc.StatusCode.unknown, b'RPC Aborted')
_abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
b'RPC Aborted')
elif exception not in state.rpc_errors:
details = 'Exception iterating responses: {}'.format(exception)
logging.exception(details)
_abort(state, rpc_event.operation_call,
cygrpc.StatusCode.unknown, _common.encode(details))
_abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
_common.encode(details))
return None, False
@ -411,7 +413,7 @@ def _serialize_response(rpc_event, state, response, response_serializer):
serialized_response = _common.serialize(response, response_serializer)
if serialized_response is None:
with state.condition:
_abort(state, rpc_event.operation_call, cygrpc.StatusCode.internal,
_abort(state, rpc_event.call, cygrpc.StatusCode.internal,
b'Failed to serialize response!')
return None
else:
@ -424,17 +426,19 @@ def _send_response(rpc_event, state, serialized_response):
return False
else:
if state.initial_metadata_allowed:
operations = (cygrpc.operation_send_initial_metadata(
(), _EMPTY_FLAGS), cygrpc.operation_send_message(
serialized_response, _EMPTY_FLAGS),)
operations = (
cygrpc.SendInitialMetadataOperation(None, _EMPTY_FLAGS),
cygrpc.SendMessageOperation(serialized_response,
_EMPTY_FLAGS),
)
state.initial_metadata_allowed = False
token = _SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN
else:
operations = (cygrpc.operation_send_message(serialized_response,
_EMPTY_FLAGS),)
operations = (cygrpc.SendMessageOperation(
serialized_response, _EMPTY_FLAGS),)
token = _SEND_MESSAGE_TOKEN
rpc_event.operation_call.start_server_batch(
operations, _send_message(state, token))
rpc_event.call.start_server_batch(operations,
_send_message(state, token))
state.due.add(token)
while True:
state.condition.wait()
@ -448,17 +452,17 @@ def _status(rpc_event, state, serialized_response):
code = _completion_code(state)
details = _details(state)
operations = [
cygrpc.operation_send_status_from_server(
cygrpc.SendStatusFromServerOperation(
state.trailing_metadata, code, details, _EMPTY_FLAGS),
]
if state.initial_metadata_allowed:
operations.append(
cygrpc.operation_send_initial_metadata((), _EMPTY_FLAGS))
cygrpc.SendInitialMetadataOperation(None, _EMPTY_FLAGS))
if serialized_response is not None:
operations.append(
cygrpc.operation_send_message(serialized_response,
_EMPTY_FLAGS))
rpc_event.operation_call.start_server_batch(
cygrpc.SendMessageOperation(serialized_response,
_EMPTY_FLAGS))
rpc_event.call.start_server_batch(
operations,
_send_status_from_server(state, _SEND_STATUS_FROM_SERVER_TOKEN))
state.statused = True
@ -525,7 +529,7 @@ def _handle_unary_stream(rpc_event, state, method_handler, thread_pool):
def _handle_stream_unary(rpc_event, state, method_handler, thread_pool):
request_iterator = _RequestIterator(state, rpc_event.operation_call,
request_iterator = _RequestIterator(state, rpc_event.call,
method_handler.request_deserializer)
return thread_pool.submit(
_unary_response_in_pool, rpc_event, state, method_handler.stream_unary,
@ -534,7 +538,7 @@ def _handle_stream_unary(rpc_event, state, method_handler, thread_pool):
def _handle_stream_stream(rpc_event, state, method_handler, thread_pool):
request_iterator = _RequestIterator(state, rpc_event.operation_call,
request_iterator = _RequestIterator(state, rpc_event.call,
method_handler.request_deserializer)
return thread_pool.submit(
_stream_response_in_pool, rpc_event, state,
@ -552,8 +556,8 @@ def _find_method_handler(rpc_event, generic_handlers, interceptor_pipeline):
return None
handler_call_details = _HandlerCallDetails(
_common.decode(rpc_event.request_call_details.method),
rpc_event.request_metadata)
_common.decode(rpc_event.call_details.method),
rpc_event.invocation_metadata)
if interceptor_pipeline is not None:
return interceptor_pipeline.execute(query_handlers,
@ -563,21 +567,23 @@ def _find_method_handler(rpc_event, generic_handlers, interceptor_pipeline):
def _reject_rpc(rpc_event, status, details):
operations = (cygrpc.operation_send_initial_metadata((), _EMPTY_FLAGS),
cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
cygrpc.operation_send_status_from_server((), status, details,
_EMPTY_FLAGS),)
operations = (
cygrpc.SendInitialMetadataOperation(None, _EMPTY_FLAGS),
cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),
cygrpc.SendStatusFromServerOperation(None, status, details,
_EMPTY_FLAGS),
)
rpc_state = _RPCState()
rpc_event.operation_call.start_server_batch(
operations, lambda ignored_event: (rpc_state, (),))
rpc_event.call.start_server_batch(operations,
lambda ignored_event: (rpc_state, (),))
return rpc_state
def _handle_with_method_handler(rpc_event, method_handler, thread_pool):
state = _RPCState()
with state.condition:
rpc_event.operation_call.start_server_batch(
(cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),),
rpc_event.call.start_server_batch(
(cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),),
_receive_close_on_server(state))
state.due.add(_RECEIVE_CLOSE_ON_SERVER_TOKEN)
if method_handler.request_streaming:
@ -600,7 +606,7 @@ def _handle_call(rpc_event, generic_handlers, interceptor_pipeline, thread_pool,
concurrency_exceeded):
if not rpc_event.success:
return None, None
if rpc_event.request_call_details.method is not None:
if rpc_event.call_details.method is not None:
try:
method_handler = _find_method_handler(rpc_event, generic_handlers,
interceptor_pipeline)
@ -799,8 +805,8 @@ class Server(grpc.Server):
return _add_insecure_port(self._state, _common.encode(address))
def add_secure_port(self, address, server_credentials):
return _add_secure_port(self._state,
_common.encode(address), server_credentials)
return _add_secure_port(self._state, _common.encode(address),
server_credentials)
def start(self):
_start(self._state)
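Note how _server.py now builds batches as plain tuples of operation objects (SendInitialMetadataOperation, SendMessageOperation, and so on) instead of calling the old operation_* factories. As a rough, self-contained illustration of how the initial_metadata_allowed branch assembles a send batch (placeholder classes below, not the real cygrpc operations):

# Placeholder operation classes mirroring the shape of the cygrpc ones;
# the real classes also marshal their data into grpc_op structs.
class SendInitialMetadataOperation(object):
    def __init__(self, initial_metadata, flags):
        self.initial_metadata = initial_metadata
        self.flags = flags

class SendMessageOperation(object):
    def __init__(self, message, flags):
        self.message = message
        self.flags = flags

_EMPTY_FLAGS = 0

def build_send_batch(serialized_response, initial_metadata_allowed):
    # Mirrors _send_response: prepend initial metadata only if it has not
    # yet been sent on this RPC.
    if initial_metadata_allowed:
        return (SendInitialMetadataOperation(None, _EMPTY_FLAGS),
                SendMessageOperation(serialized_response, _EMPTY_FLAGS))
    return (SendMessageOperation(serialized_response, _EMPTY_FLAGS),)

print(len(build_send_batch(b'payload', True)))   # 2
print(len(build_send_batch(b'payload', False)))  # 1
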

@ -29,9 +29,15 @@ _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE = (
class RpcMethodHandler(
collections.namedtuple('_RpcMethodHandler', (
'request_streaming', 'response_streaming', 'request_deserializer',
'response_serializer', 'unary_unary', 'unary_stream',
'stream_unary', 'stream_stream',)), grpc.RpcMethodHandler):
'request_streaming',
'response_streaming',
'request_deserializer',
'response_serializer',
'unary_unary',
'unary_stream',
'stream_unary',
'stream_stream',
)), grpc.RpcMethodHandler):
pass

@ -51,8 +51,7 @@ def _abortion(rpc_error_call):
code = rpc_error_call.code()
pair = _STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS.get(code)
error_kind = face.Abortion.Kind.LOCAL_FAILURE if pair is None else pair[0]
return face.Abortion(error_kind,
rpc_error_call.initial_metadata(),
return face.Abortion(error_kind, rpc_error_call.initial_metadata(),
rpc_error_call.trailing_metadata(), code,
rpc_error_call.details())
@ -441,9 +440,14 @@ class _GenericStub(face.GenericStub):
metadata=None,
with_call=None,
protocol_options=None):
request_serializer = self._request_serializers.get((group, method,))
response_deserializer = self._response_deserializers.get((group,
method,))
request_serializer = self._request_serializers.get((
group,
method,
))
response_deserializer = self._response_deserializers.get((
group,
method,
))
return _blocking_unary_unary(self._channel, group, method, timeout,
with_call, protocol_options, metadata,
self._metadata_transformer, request,
@ -456,9 +460,14 @@ class _GenericStub(face.GenericStub):
timeout,
metadata=None,
protocol_options=None):
request_serializer = self._request_serializers.get((group, method,))
response_deserializer = self._response_deserializers.get((group,
method,))
request_serializer = self._request_serializers.get((
group,
method,
))
response_deserializer = self._response_deserializers.get((
group,
method,
))
return _future_unary_unary(self._channel, group, method, timeout,
protocol_options, metadata,
self._metadata_transformer, request,
@ -471,9 +480,14 @@ class _GenericStub(face.GenericStub):
timeout,
metadata=None,
protocol_options=None):
request_serializer = self._request_serializers.get((group, method,))
response_deserializer = self._response_deserializers.get((group,
method,))
request_serializer = self._request_serializers.get((
group,
method,
))
response_deserializer = self._response_deserializers.get((
group,
method,
))
return _unary_stream(self._channel, group, method, timeout,
protocol_options, metadata,
self._metadata_transformer, request,
@ -487,9 +501,14 @@ class _GenericStub(face.GenericStub):
metadata=None,
with_call=None,
protocol_options=None):
request_serializer = self._request_serializers.get((group, method,))
response_deserializer = self._response_deserializers.get((group,
method,))
request_serializer = self._request_serializers.get((
group,
method,
))
response_deserializer = self._response_deserializers.get((
group,
method,
))
return _blocking_stream_unary(
self._channel, group, method, timeout, with_call, protocol_options,
metadata, self._metadata_transformer, request_iterator,
@ -502,9 +521,14 @@ class _GenericStub(face.GenericStub):
timeout,
metadata=None,
protocol_options=None):
request_serializer = self._request_serializers.get((group, method,))
response_deserializer = self._response_deserializers.get((group,
method,))
request_serializer = self._request_serializers.get((
group,
method,
))
response_deserializer = self._response_deserializers.get((
group,
method,
))
return _future_stream_unary(
self._channel, group, method, timeout, protocol_options, metadata,
self._metadata_transformer, request_iterator, request_serializer,
@ -517,9 +541,14 @@ class _GenericStub(face.GenericStub):
timeout,
metadata=None,
protocol_options=None):
request_serializer = self._request_serializers.get((group, method,))
response_deserializer = self._response_deserializers.get((group,
method,))
request_serializer = self._request_serializers.get((
group,
method,
))
response_deserializer = self._response_deserializers.get((
group,
method,
))
return _stream_stream(self._channel, group, method, timeout,
protocol_options, metadata,
self._metadata_transformer, request_iterator,
@ -568,33 +597,53 @@ class _GenericStub(face.GenericStub):
raise NotImplementedError()
def unary_unary(self, group, method):
request_serializer = self._request_serializers.get((group, method,))
response_deserializer = self._response_deserializers.get((group,
method,))
request_serializer = self._request_serializers.get((
group,
method,
))
response_deserializer = self._response_deserializers.get((
group,
method,
))
return _UnaryUnaryMultiCallable(
self._channel, group, method, self._metadata_transformer,
request_serializer, response_deserializer)
def unary_stream(self, group, method):
request_serializer = self._request_serializers.get((group, method,))
response_deserializer = self._response_deserializers.get((group,
method,))
request_serializer = self._request_serializers.get((
group,
method,
))
response_deserializer = self._response_deserializers.get((
group,
method,
))
return _UnaryStreamMultiCallable(
self._channel, group, method, self._metadata_transformer,
request_serializer, response_deserializer)
def stream_unary(self, group, method):
request_serializer = self._request_serializers.get((group, method,))
response_deserializer = self._response_deserializers.get((group,
method,))
request_serializer = self._request_serializers.get((
group,
method,
))
response_deserializer = self._response_deserializers.get((
group,
method,
))
return _StreamUnaryMultiCallable(
self._channel, group, method, self._metadata_transformer,
request_serializer, response_deserializer)
def stream_stream(self, group, method):
request_serializer = self._request_serializers.get((group, method,))
response_deserializer = self._response_deserializers.get((group,
method,))
request_serializer = self._request_serializers.get((
group,
method,
))
response_deserializer = self._response_deserializers.get((
group,
method,
))
return _StreamStreamMultiCallable(
self._channel, group, method, self._metadata_transformer,
request_serializer, response_deserializer)
@ -624,8 +673,8 @@ class _DynamicStub(face.DynamicStub):
elif method_cardinality is cardinality.Cardinality.STREAM_STREAM:
return self._generic_stub.stream_stream(self._group, attr)
else:
raise AttributeError('_DynamicStub object has no attribute "%s"!' %
attr)
raise AttributeError(
'_DynamicStub object has no attribute "%s"!' % attr)
def __enter__(self):
return self

@ -15,7 +15,10 @@
import collections
_Metadatum = collections.namedtuple('_Metadatum', ('key', 'value',))
_Metadatum = collections.namedtuple('_Metadatum', (
'key',
'value',
))
def _beta_metadatum(key, value):

@ -245,9 +245,15 @@ def _adapt_stream_stream_event(stream_stream_event):
class _SimpleMethodHandler(
collections.namedtuple('_MethodHandler', (
'request_streaming', 'response_streaming', 'request_deserializer',
'response_serializer', 'unary_unary', 'unary_stream',
'stream_unary', 'stream_stream',)), grpc.RpcMethodHandler):
'request_streaming',
'response_streaming',
'request_deserializer',
'response_serializer',
'unary_unary',
'unary_stream',
'stream_unary',
'stream_stream',
)), grpc.RpcMethodHandler):
pass
@ -255,15 +261,17 @@ def _simple_method_handler(implementation, request_deserializer,
response_serializer):
if implementation.style is style.Service.INLINE:
if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
return _SimpleMethodHandler(
False, False, request_deserializer, response_serializer,
_adapt_unary_request_inline(implementation.unary_unary_inline),
None, None, None)
return _SimpleMethodHandler(False, False, request_deserializer,
response_serializer,
_adapt_unary_request_inline(
implementation.unary_unary_inline),
None, None, None)
elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
return _SimpleMethodHandler(
False, True, request_deserializer, response_serializer, None,
_adapt_unary_request_inline(implementation.unary_stream_inline),
None, None)
return _SimpleMethodHandler(False, True, request_deserializer,
response_serializer, None,
_adapt_unary_request_inline(
implementation.unary_stream_inline),
None, None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
return _SimpleMethodHandler(True, False, request_deserializer,
response_serializer, None, None,
@ -278,26 +286,28 @@ def _simple_method_handler(implementation, request_deserializer,
implementation.stream_stream_inline))
elif implementation.style is style.Service.EVENT:
if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
return _SimpleMethodHandler(
False, False, request_deserializer, response_serializer,
_adapt_unary_unary_event(implementation.unary_unary_event),
None, None, None)
return _SimpleMethodHandler(False, False, request_deserializer,
response_serializer,
_adapt_unary_unary_event(
implementation.unary_unary_event),
None, None, None)
elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
return _SimpleMethodHandler(
False, True, request_deserializer, response_serializer, None,
_adapt_unary_stream_event(implementation.unary_stream_event),
None, None)
return _SimpleMethodHandler(False, True, request_deserializer,
response_serializer, None,
_adapt_unary_stream_event(
implementation.unary_stream_event),
None, None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
return _SimpleMethodHandler(
True, False, request_deserializer, response_serializer, None,
None,
_adapt_stream_unary_event(implementation.stream_unary_event),
None)
return _SimpleMethodHandler(True, False, request_deserializer,
response_serializer, None, None,
_adapt_stream_unary_event(
implementation.stream_unary_event),
None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
return _SimpleMethodHandler(
True, True, request_deserializer, response_serializer, None,
None, None,
_adapt_stream_stream_event(implementation.stream_stream_event))
return _SimpleMethodHandler(True, True, request_deserializer,
response_serializer, None, None, None,
_adapt_stream_stream_event(
implementation.stream_stream_event))
def _flatten_method_pair_map(method_pair_map):
@ -325,10 +335,11 @@ class _GenericRpcHandler(grpc.GenericRpcHandler):
method_implementation = self._method_implementations.get(
handler_call_details.method)
if method_implementation is not None:
return _simple_method_handler(
method_implementation,
self._request_deserializers.get(handler_call_details.method),
self._response_serializers.get(handler_call_details.method))
return _simple_method_handler(method_implementation,
self._request_deserializers.get(
handler_call_details.method),
self._response_serializers.get(
handler_call_details.method))
elif self._multi_method_implementation is None:
return None
else:

@ -110,8 +110,8 @@ def insecure_channel(host, port):
Returns:
A Channel to the remote host through which RPCs may be conducted.
"""
channel = grpc.insecure_channel(host
if port is None else '%s:%d' % (host, port))
channel = grpc.insecure_channel(host if port is None else '%s:%d' % (host,
port))
return Channel(channel)
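For context, this helper appears to be the beta-API channel constructor; typical usage is just to build a channel from a host and port and hand it to a generated beta stub. A short usage sketch, assuming this is the grpc.beta implementations module (the host and port are illustrative):

from grpc.beta import implementations

# Builds a grpc.insecure_channel under the hood and wraps it in the
# beta-API Channel type, as shown in the diff above.
channel = implementations.insecure_channel('localhost', 50051)
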

@ -50,8 +50,8 @@ class _EasyOutcome(
def _call_logging_exceptions(behavior, message, *args, **kwargs):
try:
return _EasyOutcome(Outcome.Kind.RETURNED,
behavior(*args, **kwargs), None)
return _EasyOutcome(Outcome.Kind.RETURNED, behavior(*args, **kwargs),
None)
except Exception as e: # pylint: disable=broad-except
logging.exception(message)
return _EasyOutcome(Outcome.Kind.RAISED, None, e)

@ -19,15 +19,22 @@ from grpc.framework.interfaces.base import base
class _Completion(base.Completion,
collections.namedtuple('_Completion', ('terminal_metadata',
'code', 'message',))):
collections.namedtuple('_Completion', (
'terminal_metadata',
'code',
'message',
))):
"""A trivial implementation of base.Completion."""
class _Subscription(base.Subscription,
collections.namedtuple('_Subscription', (
'kind', 'termination_callback', 'allowance', 'operator',
'protocol_receiver',))):
'kind',
'termination_callback',
'allowance',
'operator',
'protocol_receiver',
))):
"""A trivial implementation of base.Subscription."""

@ -50,13 +50,20 @@ class NoSuchMethodError(Exception):
self.method = method
def __repr__(self):
return 'face.NoSuchMethodError(%s, %s)' % (self.group, self.method,)
return 'face.NoSuchMethodError(%s, %s)' % (
self.group,
self.method,
)
class Abortion(
collections.namedtuple('Abortion',
('kind', 'initial_metadata', 'terminal_metadata',
'code', 'details',))):
collections.namedtuple('Abortion', (
'kind',
'initial_metadata',
'terminal_metadata',
'code',
'details',
))):
"""A value describing RPC abortion.
Attributes:

@ -36,9 +36,9 @@ class CopyProtoModules(setuptools.Command):
def run(self):
if os.path.isfile(HEALTH_PROTO):
shutil.copyfile(
HEALTH_PROTO,
os.path.join(ROOT_DIR, 'grpc_health/v1/health.proto'))
shutil.copyfile(HEALTH_PROTO,
os.path.join(ROOT_DIR,
'grpc_health/v1/health.proto'))
class BuildPackageProtos(setuptools.Command):

@ -56,8 +56,10 @@ PACKAGE_DIRECTORIES = {
'': '.',
}
INSTALL_REQUIRES = ('protobuf>=3.5.0.post1',
'grpcio>={version}'.format(version=grpc_version.VERSION),)
INSTALL_REQUIRES = (
'protobuf>=3.5.0.post1',
'grpcio>={version}'.format(version=grpc_version.VERSION),
)
try:
import health_commands as _health_commands

@ -27,7 +27,8 @@ def _not_found_error():
return reflection_pb2.ServerReflectionResponse(
error_response=reflection_pb2.ErrorResponse(
error_code=grpc.StatusCode.NOT_FOUND.value[0],
error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),))
error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),
))
def _file_descriptor_response(descriptor):
@ -101,10 +102,11 @@ class ReflectionServicer(reflection_pb2_grpc.ServerReflectionServicer):
def _list_services(self):
return reflection_pb2.ServerReflectionResponse(
list_services_response=reflection_pb2.ListServiceResponse(service=[
reflection_pb2.ServiceResponse(name=service_name)
for service_name in self._service_names
]))
list_services_response=reflection_pb2.ListServiceResponse(
service=[
reflection_pb2.ServiceResponse(name=service_name)
for service_name in self._service_names
]))
def ServerReflectionInfo(self, request_iterator, context):
# pylint: disable=unused-argument
@ -128,7 +130,8 @@ class ReflectionServicer(reflection_pb2_grpc.ServerReflectionServicer):
error_response=reflection_pb2.ErrorResponse(
error_code=grpc.StatusCode.INVALID_ARGUMENT.value[0],
error_message=grpc.StatusCode.INVALID_ARGUMENT.value[1]
.encode(),))
.encode(),
))
def enable_server_reflection(service_names, server, pool=None):

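For context, enable_server_reflection is the public entry point of this module. A hedged usage sketch against the grpcio-reflection package (the application service name 'helloworld.Greeter' and the port are illustrative, and the reflection service name is assumed from the v1alpha proto package):

from concurrent import futures

import grpc
from grpc_reflection.v1alpha import reflection

server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
# Application handlers (e.g. for helloworld.Greeter) would be added here.
service_names = (
    'helloworld.Greeter',
    'grpc.reflection.v1alpha.ServerReflection',
)
reflection.enable_server_reflection(service_names, server)
server.add_insecure_port('[::]:50051')
server.start()
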
@ -57,8 +57,10 @@ PACKAGE_DIRECTORIES = {
'': '.',
}
INSTALL_REQUIRES = ('protobuf>=3.5.0.post1',
'grpcio>={version}'.format(version=grpc_version.VERSION),)
INSTALL_REQUIRES = (
'protobuf>=3.5.0.post1',
'grpcio>={version}'.format(version=grpc_version.VERSION),
)
try:
import reflection_commands as _reflection_commands

@ -27,20 +27,20 @@ class UnaryUnary(grpc.UnaryUnaryMultiCallable):
def __call__(self, request, timeout=None, metadata=None, credentials=None):
rpc_handler = self._channel_handler.invoke_rpc(
self._method_full_rpc_name,
_common.fuss_with_metadata(metadata), [request], True, timeout)
self._method_full_rpc_name, _common.fuss_with_metadata(metadata),
[request], True, timeout)
return _invocation.blocking_unary_response(rpc_handler)
def with_call(self, request, timeout=None, metadata=None, credentials=None):
rpc_handler = self._channel_handler.invoke_rpc(
self._method_full_rpc_name,
_common.fuss_with_metadata(metadata), [request], True, timeout)
self._method_full_rpc_name, _common.fuss_with_metadata(metadata),
[request], True, timeout)
return _invocation.blocking_unary_response_with_call(rpc_handler)
def future(self, request, timeout=None, metadata=None, credentials=None):
rpc_handler = self._channel_handler.invoke_rpc(
self._method_full_rpc_name,
_common.fuss_with_metadata(metadata), [request], True, timeout)
self._method_full_rpc_name, _common.fuss_with_metadata(metadata),
[request], True, timeout)
return _invocation.future_call(rpc_handler)
@ -52,8 +52,8 @@ class UnaryStream(grpc.StreamStreamMultiCallable):
def __call__(self, request, timeout=None, metadata=None, credentials=None):
rpc_handler = self._channel_handler.invoke_rpc(
self._method_full_rpc_name,
_common.fuss_with_metadata(metadata), [request], True, timeout)
self._method_full_rpc_name, _common.fuss_with_metadata(metadata),
[request], True, timeout)
return _invocation.ResponseIteratorCall(rpc_handler)
@ -69,8 +69,8 @@ class StreamUnary(grpc.StreamUnaryMultiCallable):
metadata=None,
credentials=None):
rpc_handler = self._channel_handler.invoke_rpc(
self._method_full_rpc_name,
_common.fuss_with_metadata(metadata), [], False, timeout)
self._method_full_rpc_name, _common.fuss_with_metadata(metadata),
[], False, timeout)
_invocation.consume_requests(request_iterator, rpc_handler)
return _invocation.blocking_unary_response(rpc_handler)
@ -80,8 +80,8 @@ class StreamUnary(grpc.StreamUnaryMultiCallable):
metadata=None,
credentials=None):
rpc_handler = self._channel_handler.invoke_rpc(
self._method_full_rpc_name,
_common.fuss_with_metadata(metadata), [], False, timeout)
self._method_full_rpc_name, _common.fuss_with_metadata(metadata),
[], False, timeout)
_invocation.consume_requests(request_iterator, rpc_handler)
return _invocation.blocking_unary_response_with_call(rpc_handler)
@ -91,8 +91,8 @@ class StreamUnary(grpc.StreamUnaryMultiCallable):
metadata=None,
credentials=None):
rpc_handler = self._channel_handler.invoke_rpc(
self._method_full_rpc_name,
_common.fuss_with_metadata(metadata), [], False, timeout)
self._method_full_rpc_name, _common.fuss_with_metadata(metadata),
[], False, timeout)
_invocation.consume_requests(request_iterator, rpc_handler)
return _invocation.future_call(rpc_handler)
@ -109,8 +109,8 @@ class StreamStream(grpc.StreamStreamMultiCallable):
metadata=None,
credentials=None):
rpc_handler = self._channel_handler.invoke_rpc(
self._method_full_rpc_name,
_common.fuss_with_metadata(metadata), [], False, timeout)
self._method_full_rpc_name, _common.fuss_with_metadata(metadata),
[], False, timeout)
_invocation.consume_requests(request_iterator, rpc_handler)
return _invocation.ResponseIteratorCall(rpc_handler)

@ -179,8 +179,8 @@ class State(_common.ChannelRpcHandler):
elif self._code is None:
self._condition.wait()
else:
raise ValueError(
'Status code unexpectedly {}!'.format(self._code))
raise ValueError('Status code unexpectedly {}!'.format(
self._code))
def is_active(self):
raise NotImplementedError()

Some files were not shown because too many files have changed in this diff.