Merge github.com:grpc/grpc into flowctl+millis

reviewable/pr12677/r1
Craig Tiller 7 years ago
commit 1e868f0f95
Changed files (number of changed lines in parentheses):
  1. BUILD (4)
  2. CMakeLists.txt (12)
  3. Makefile (12)
  4. binding.gyp (2)
  5. build.yaml (4)
  6. config.m4 (2)
  7. config.w32 (2)
  8. doc/environment_variables.md (1)
  9. gRPC-Core.podspec (6)
  10. grpc.gemspec (4)
  11. grpc.gyp (8)
  12. include/grpc++/generic/generic_stub.h (10)
  13. include/grpc++/support/channel_arguments.h (6)
  14. include/grpc/grpc.h (2)
  15. include/grpc/grpc_security.h (35)
  16. include/grpc/impl/codegen/grpc_types.h (6)
  17. include/grpc/impl/codegen/port_platform.h (5)
  18. package.xml (4)
  19. setup.py (4)
  20. src/core/ext/filters/client_channel/http_proxy.c (28)
  21. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c (417)
  22. src/core/ext/filters/client_channel/lb_policy_factory.c (2)
  23. src/core/ext/filters/client_channel/lb_policy_factory.h (2)
  24. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c (57)
  25. src/core/ext/transport/chttp2/transport/chttp2_transport.c (74)
  26. src/core/ext/transport/chttp2/transport/frame_data.c (4)
  27. src/core/ext/transport/chttp2/transport/hpack_parser.c (17)
  28. src/core/ext/transport/chttp2/transport/internal.h (20)
  29. src/core/ext/transport/chttp2/transport/writing.c (106)
  30. src/core/lib/compression/stream_compression.c (179)
  31. src/core/lib/compression/stream_compression.h (34)
  32. src/core/lib/compression/stream_compression_gzip.c (228)
  33. src/core/lib/compression/stream_compression_gzip.h (26)
  34. src/core/lib/compression/stream_compression_identity.c (94)
  35. src/core/lib/compression/stream_compression_identity.h (27)
  36. src/core/lib/debug/stats_data.c (62)
  37. src/core/lib/debug/stats_data.h (26)
  38. src/core/lib/debug/stats_data.yaml (8)
  39. src/core/lib/debug/stats_data_bq_schema.sql (2)
  40. src/core/lib/iomgr/executor.c (69)
  41. src/core/lib/iomgr/socket_utils_windows.c (4)
  42. src/core/lib/iomgr/timer_generic.c (134)
  43. src/core/lib/iomgr/timer_generic.h (3)
  44. src/core/lib/security/credentials/composite/composite_credentials.c (14)
  45. src/core/lib/security/credentials/plugin/plugin_credentials.c (182)
  46. src/core/lib/security/credentials/plugin/plugin_credentials.h (2)
  47. src/core/lib/surface/init_secure.c (6)
  48. src/cpp/client/generic_stub.cc (10)
  49. src/cpp/client/secure_credentials.cc (80)
  50. src/cpp/client/secure_credentials.h (17)
  51. src/cpp/common/channel_arguments.cc (4)
  52. src/csharp/Grpc.Core/Internal/NativeMetadataCredentialsPlugin.cs (9)
  53. src/csharp/Grpc.IntegrationTesting/MetadataCredentialsTest.cs (59)
  54. src/csharp/ext/grpc_csharp_ext.c (8)
  55. src/node/ext/call_credentials.cc (10)
  56. src/node/ext/call_credentials.h (8)
  57. src/php/ext/grpc/call_credentials.c (46)
  58. src/php/ext/grpc/call_credentials.h (9)
  59. src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi (10)
  60. src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi (45)
  61. src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi (11)
  62. src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi (14)
  63. src/python/grpcio/grpc_core_dependencies.py (2)
  64. src/python/grpcio/support.py (10)
  65. src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py (2)
  66. src/ruby/end2end/killed_client_thread_client.rb (2)
  67. src/ruby/end2end/killed_client_thread_driver.rb (8)
  68. src/ruby/ext/grpc/rb_call_credentials.c (8)
  69. src/ruby/lib/grpc.rb (1)
  70. src/ruby/lib/grpc/generic/active_call.rb (43)
  71. src/ruby/lib/grpc/generic/bidi_call.rb (29)
  72. src/ruby/lib/grpc/generic/client_stub.rb (133)
  73. src/ruby/lib/grpc/generic/interceptor_registry.rb (53)
  74. src/ruby/lib/grpc/generic/interceptors.rb (186)
  75. src/ruby/lib/grpc/generic/rpc_desc.rb (80)
  76. src/ruby/lib/grpc/generic/rpc_server.rb (18)
  77. src/ruby/lib/grpc/google_rpc_status_utils.rb (9)
  78. src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services_pb.rb (1)
  79. src/ruby/spec/channel_connection_spec.rb (35)
  80. src/ruby/spec/generic/active_call_spec.rb (19)
  81. src/ruby/spec/generic/client_interceptors_spec.rb (153)
  82. src/ruby/spec/generic/interceptor_registry_spec.rb (65)
  83. src/ruby/spec/generic/rpc_server_spec.rb (35)
  84. src/ruby/spec/generic/server_interceptors_spec.rb (218)
  85. src/ruby/spec/google_rpc_status_utils_spec.rb (77)
  86. src/ruby/spec/spec_helper.rb (4)
  87. src/ruby/spec/support/helpers.rb (73)
  88. src/ruby/spec/support/services.rb (147)
  89. templates/test/cpp/naming/create_private_dns_zone.sh.template (4)
  90. templates/test/cpp/naming/create_private_dns_zone_defs.include (32)
  91. templates/test/cpp/naming/private_dns_zone_init.sh.template (4)
  92. templates/test/cpp/naming/private_dns_zone_init_defs.include (40)
  93. templates/test/cpp/naming/resolver_gce_integration_tests_defs.include (64)
  94. templates/test/cpp/naming/resolver_gce_integration_tests_runner.sh.template (4)
  95. templates/tools/dockerfile/clang_format.include (6)
  96. templates/tools/dockerfile/grpc_clang_format/Dockerfile.template (3)
  97. templates/tools/dockerfile/python_deps.include (2)
  98. templates/tools/dockerfile/test/sanity/Dockerfile.template (27)
  99. test/core/bad_client/bad_client.c (11)
  100. test/core/bad_client/bad_client.h (1)
Some files were not shown because too many files have changed in this diff.

@@ -572,6 +572,8 @@ grpc_cc_library(
"src/core/lib/compression/compression.c",
"src/core/lib/compression/message_compress.c",
"src/core/lib/compression/stream_compression.c",
"src/core/lib/compression/stream_compression_gzip.c",
"src/core/lib/compression/stream_compression_identity.c",
"src/core/lib/debug/stats.c",
"src/core/lib/debug/stats_data.c",
"src/core/lib/http/format_request.c",
@@ -704,6 +706,8 @@ grpc_cc_library(
"src/core/lib/compression/algorithm_metadata.h",
"src/core/lib/compression/message_compress.h",
"src/core/lib/compression/stream_compression.h",
"src/core/lib/compression/stream_compression_gzip.h",
"src/core/lib/compression/stream_compression_identity.h",
"src/core/lib/debug/stats.h",
"src/core/lib/debug/stats_data.h",
"src/core/lib/http/format_request.h",

@@ -965,6 +965,8 @@ add_library(grpc
src/core/lib/compression/compression.c
src/core/lib/compression/message_compress.c
src/core/lib/compression/stream_compression.c
src/core/lib/compression/stream_compression_gzip.c
src/core/lib/compression/stream_compression_identity.c
src/core/lib/debug/stats.c
src/core/lib/debug/stats_data.c
src/core/lib/http/format_request.c
@@ -1315,6 +1317,8 @@ add_library(grpc_cronet
src/core/lib/compression/compression.c
src/core/lib/compression/message_compress.c
src/core/lib/compression/stream_compression.c
src/core/lib/compression/stream_compression_gzip.c
src/core/lib/compression/stream_compression_identity.c
src/core/lib/debug/stats.c
src/core/lib/debug/stats_data.c
src/core/lib/http/format_request.c
@@ -1633,6 +1637,8 @@ add_library(grpc_test_util
src/core/lib/compression/compression.c
src/core/lib/compression/message_compress.c
src/core/lib/compression/stream_compression.c
src/core/lib/compression/stream_compression_gzip.c
src/core/lib/compression/stream_compression_identity.c
src/core/lib/debug/stats.c
src/core/lib/debug/stats_data.c
src/core/lib/http/format_request.c
@@ -1895,6 +1901,8 @@ add_library(grpc_test_util_unsecure
src/core/lib/compression/compression.c
src/core/lib/compression/message_compress.c
src/core/lib/compression/stream_compression.c
src/core/lib/compression/stream_compression_gzip.c
src/core/lib/compression/stream_compression_identity.c
src/core/lib/debug/stats.c
src/core/lib/debug/stats_data.c
src/core/lib/http/format_request.c
@@ -2143,6 +2151,8 @@ add_library(grpc_unsecure
src/core/lib/compression/compression.c
src/core/lib/compression/message_compress.c
src/core/lib/compression/stream_compression.c
src/core/lib/compression/stream_compression_gzip.c
src/core/lib/compression/stream_compression_identity.c
src/core/lib/debug/stats.c
src/core/lib/debug/stats_data.c
src/core/lib/http/format_request.c
@@ -2899,6 +2909,8 @@ add_library(grpc++_cronet
src/core/lib/compression/compression.c
src/core/lib/compression/message_compress.c
src/core/lib/compression/stream_compression.c
src/core/lib/compression/stream_compression_gzip.c
src/core/lib/compression/stream_compression_identity.c
src/core/lib/debug/stats.c
src/core/lib/debug/stats_data.c
src/core/lib/http/format_request.c

@@ -2956,6 +2956,8 @@ LIBGRPC_SRC = \
src/core/lib/compression/compression.c \
src/core/lib/compression/message_compress.c \
src/core/lib/compression/stream_compression.c \
src/core/lib/compression/stream_compression_gzip.c \
src/core/lib/compression/stream_compression_identity.c \
src/core/lib/debug/stats.c \
src/core/lib/debug/stats_data.c \
src/core/lib/http/format_request.c \
@@ -3306,6 +3308,8 @@ LIBGRPC_CRONET_SRC = \
src/core/lib/compression/compression.c \
src/core/lib/compression/message_compress.c \
src/core/lib/compression/stream_compression.c \
src/core/lib/compression/stream_compression_gzip.c \
src/core/lib/compression/stream_compression_identity.c \
src/core/lib/debug/stats.c \
src/core/lib/debug/stats_data.c \
src/core/lib/http/format_request.c \
@@ -3623,6 +3627,8 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/lib/compression/compression.c \
src/core/lib/compression/message_compress.c \
src/core/lib/compression/stream_compression.c \
src/core/lib/compression/stream_compression_gzip.c \
src/core/lib/compression/stream_compression_identity.c \
src/core/lib/debug/stats.c \
src/core/lib/debug/stats_data.c \
src/core/lib/http/format_request.c \
@@ -3876,6 +3882,8 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \
src/core/lib/compression/compression.c \
src/core/lib/compression/message_compress.c \
src/core/lib/compression/stream_compression.c \
src/core/lib/compression/stream_compression_gzip.c \
src/core/lib/compression/stream_compression_identity.c \
src/core/lib/debug/stats.c \
src/core/lib/debug/stats_data.c \
src/core/lib/http/format_request.c \
@@ -4102,6 +4110,8 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/compression/compression.c \
src/core/lib/compression/message_compress.c \
src/core/lib/compression/stream_compression.c \
src/core/lib/compression/stream_compression_gzip.c \
src/core/lib/compression/stream_compression_identity.c \
src/core/lib/debug/stats.c \
src/core/lib/debug/stats_data.c \
src/core/lib/http/format_request.c \
@@ -4841,6 +4851,8 @@ LIBGRPC++_CRONET_SRC = \
src/core/lib/compression/compression.c \
src/core/lib/compression/message_compress.c \
src/core/lib/compression/stream_compression.c \
src/core/lib/compression/stream_compression_gzip.c \
src/core/lib/compression/stream_compression_identity.c \
src/core/lib/debug/stats.c \
src/core/lib/debug/stats_data.c \
src/core/lib/http/format_request.c \

@@ -667,6 +667,8 @@
'src/core/lib/compression/compression.c',
'src/core/lib/compression/message_compress.c',
'src/core/lib/compression/stream_compression.c',
'src/core/lib/compression/stream_compression_gzip.c',
'src/core/lib/compression/stream_compression_identity.c',
'src/core/lib/debug/stats.c',
'src/core/lib/debug/stats_data.c',
'src/core/lib/http/format_request.c',

@@ -194,6 +194,8 @@ filegroups:
- src/core/lib/compression/compression.c
- src/core/lib/compression/message_compress.c
- src/core/lib/compression/stream_compression.c
- src/core/lib/compression/stream_compression_gzip.c
- src/core/lib/compression/stream_compression_identity.c
- src/core/lib/debug/stats.c
- src/core/lib/debug/stats_data.c
- src/core/lib/http/format_request.c
@@ -346,6 +348,8 @@ filegroups:
- src/core/lib/compression/algorithm_metadata.h
- src/core/lib/compression/message_compress.h
- src/core/lib/compression/stream_compression.h
- src/core/lib/compression/stream_compression_gzip.h
- src/core/lib/compression/stream_compression_identity.h
- src/core/lib/debug/stats.h
- src/core/lib/debug/stats_data.h
- src/core/lib/http/format_request.h

@@ -96,6 +96,8 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/compression/compression.c \
src/core/lib/compression/message_compress.c \
src/core/lib/compression/stream_compression.c \
src/core/lib/compression/stream_compression_gzip.c \
src/core/lib/compression/stream_compression_identity.c \
src/core/lib/debug/stats.c \
src/core/lib/debug/stats_data.c \
src/core/lib/http/format_request.c \

@@ -73,6 +73,8 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\compression\\compression.c " +
"src\\core\\lib\\compression\\message_compress.c " +
"src\\core\\lib\\compression\\stream_compression.c " +
"src\\core\\lib\\compression\\stream_compression_gzip.c " +
"src\\core\\lib\\compression\\stream_compression_identity.c " +
"src\\core\\lib\\debug\\stats.c " +
"src\\core\\lib\\debug\\stats_data.c " +
"src\\core\\lib\\http\\format_request.c " +

@@ -58,6 +58,7 @@ some configuration as environment variables that can be set.
completion queue
- round_robin - traces the round_robin load balancing policy
- pick_first - traces the pick first load balancing policy
- plugin_credentials - traces plugin credentials
- resource_quota - traces resource quota object internals
- glb - traces the grpclb load balancer
- queue_pluck
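These tracers are switched on through the GRPC_TRACE environment variable, which grpc_init() reads. As a minimal sketch (the surrounding application code is illustrative, not part of this diff), the newly added plugin_credentials tracer could also be enabled programmatically:
// Hedged sketch: equivalent to running with GRPC_TRACE=plugin_credentials,glb
// in the shell. setenv() is POSIX; on Windows use _putenv_s() instead.
#include <cstdlib>
#include <grpc/grpc.h>
int main() {
  setenv("GRPC_TRACE", "plugin_credentials,glb", 1 /* overwrite */);
  grpc_init();  // tracer flags are parsed during initialization
  // ... application ...
  grpc_shutdown();
  return 0;
}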

@@ -329,6 +329,8 @@ Pod::Spec.new do |s|
'src/core/lib/compression/algorithm_metadata.h',
'src/core/lib/compression/message_compress.h',
'src/core/lib/compression/stream_compression.h',
'src/core/lib/compression/stream_compression_gzip.h',
'src/core/lib/compression/stream_compression_identity.h',
'src/core/lib/debug/stats.h',
'src/core/lib/debug/stats_data.h',
'src/core/lib/http/format_request.h',
@@ -479,6 +481,8 @@ Pod::Spec.new do |s|
'src/core/lib/compression/compression.c',
'src/core/lib/compression/message_compress.c',
'src/core/lib/compression/stream_compression.c',
'src/core/lib/compression/stream_compression_gzip.c',
'src/core/lib/compression/stream_compression_identity.c',
'src/core/lib/debug/stats.c',
'src/core/lib/debug/stats_data.c',
'src/core/lib/http/format_request.c',
@@ -825,6 +829,8 @@ Pod::Spec.new do |s|
'src/core/lib/compression/algorithm_metadata.h',
'src/core/lib/compression/message_compress.h',
'src/core/lib/compression/stream_compression.h',
'src/core/lib/compression/stream_compression_gzip.h',
'src/core/lib/compression/stream_compression_identity.h',
'src/core/lib/debug/stats.h',
'src/core/lib/debug/stats_data.h',
'src/core/lib/http/format_request.h',

@@ -262,6 +262,8 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/compression/algorithm_metadata.h )
s.files += %w( src/core/lib/compression/message_compress.h )
s.files += %w( src/core/lib/compression/stream_compression.h )
s.files += %w( src/core/lib/compression/stream_compression_gzip.h )
s.files += %w( src/core/lib/compression/stream_compression_identity.h )
s.files += %w( src/core/lib/debug/stats.h )
s.files += %w( src/core/lib/debug/stats_data.h )
s.files += %w( src/core/lib/http/format_request.h )
@@ -416,6 +418,8 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/compression/compression.c )
s.files += %w( src/core/lib/compression/message_compress.c )
s.files += %w( src/core/lib/compression/stream_compression.c )
s.files += %w( src/core/lib/compression/stream_compression_gzip.c )
s.files += %w( src/core/lib/compression/stream_compression_identity.c )
s.files += %w( src/core/lib/debug/stats.c )
s.files += %w( src/core/lib/debug/stats_data.c )
s.files += %w( src/core/lib/http/format_request.c )

@@ -233,6 +233,8 @@
'src/core/lib/compression/compression.c',
'src/core/lib/compression/message_compress.c',
'src/core/lib/compression/stream_compression.c',
'src/core/lib/compression/stream_compression_gzip.c',
'src/core/lib/compression/stream_compression_identity.c',
'src/core/lib/debug/stats.c',
'src/core/lib/debug/stats_data.c',
'src/core/lib/http/format_request.c',
@@ -533,6 +535,8 @@
'src/core/lib/compression/compression.c',
'src/core/lib/compression/message_compress.c',
'src/core/lib/compression/stream_compression.c',
'src/core/lib/compression/stream_compression_gzip.c',
'src/core/lib/compression/stream_compression_identity.c',
'src/core/lib/debug/stats.c',
'src/core/lib/debug/stats_data.c',
'src/core/lib/http/format_request.c',
@@ -738,6 +742,8 @@
'src/core/lib/compression/compression.c',
'src/core/lib/compression/message_compress.c',
'src/core/lib/compression/stream_compression.c',
'src/core/lib/compression/stream_compression_gzip.c',
'src/core/lib/compression/stream_compression_identity.c',
'src/core/lib/debug/stats.c',
'src/core/lib/debug/stats_data.c',
'src/core/lib/http/format_request.c',
@@ -928,6 +934,8 @@
'src/core/lib/compression/compression.c',
'src/core/lib/compression/message_compress.c',
'src/core/lib/compression/stream_compression.c',
'src/core/lib/compression/stream_compression_gzip.c',
'src/core/lib/compression/stream_compression_identity.c',
'src/core/lib/debug/stats.c',
'src/core/lib/debug/stats_data.c',
'src/core/lib/http/format_request.c',

@@ -20,6 +20,7 @@
#define GRPCXX_GENERIC_GENERIC_STUB_H
#include <grpc++/support/async_stream.h>
#include <grpc++/support/async_unary_call.h>
#include <grpc++/support/byte_buffer.h>
namespace grpc {
@@ -27,6 +28,7 @@ namespace grpc {
class CompletionQueue;
typedef ClientAsyncReaderWriter<ByteBuffer, ByteBuffer>
GenericClientAsyncReaderWriter;
typedef ClientAsyncResponseReader<ByteBuffer> GenericClientAsyncResponseReader;
/// Generic stubs provide a type-unsafe interface to call gRPC methods
/// by name.
@@ -51,6 +53,14 @@ class GenericStub final {
std::unique_ptr<GenericClientAsyncReaderWriter> PrepareCall(
ClientContext* context, const grpc::string& method, CompletionQueue* cq);
/// Set up a unary call to a named method \a method using \a context, and don't
/// start it. Let it be started explicitly with StartCall.
/// The return value only indicates whether or not registration of the call
/// succeeded (i.e. the call won't proceed if the return value is nullptr).
std::unique_ptr<GenericClientAsyncResponseReader> PrepareUnaryCall(
ClientContext* context, const grpc::string& method,
const ByteBuffer& request, CompletionQueue* cq);
private:
std::shared_ptr<ChannelInterface> channel_;
};
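To illustrate the PrepareUnaryCall contract above, here is a hedged usage sketch; the channel, completion queue, method name, and request buffer are assumptions, not part of this diff:
// Hedged sketch: register a generic unary call, then start it explicitly.
grpc::GenericStub generic_stub(channel);  // channel: std::shared_ptr<grpc::ChannelInterface>
grpc::ClientContext context;
std::unique_ptr<grpc::GenericClientAsyncResponseReader> call =
    generic_stub.PrepareUnaryCall(&context, "/pkg.Service/Method", request, &cq);
call->StartCall();                        // nothing goes on the wire before this
grpc::ByteBuffer response;
grpc::Status status;
call->Finish(&response, &status, (void*)1);  // tag 1 completes on cq
Per the doc comment, PrepareUnaryCall returns nullptr when registration fails, so callers should check the pointer before invoking StartCall().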

@@ -64,6 +64,12 @@ class ChannelArguments {
/// Set the compression algorithm for the channel.
void SetCompressionAlgorithm(grpc_compression_algorithm algorithm);
/// Set the grpclb fallback timeout (in ms) for the channel. If this amount
/// of time has passed but we have not gotten any non-empty \a serverlist from
/// the balancer, we will fall back to using the backend address(es) returned by
/// the resolver.
void SetGrpclbFallbackTimeout(int fallback_timeout);
/// Set the socket mutator for the channel.
void SetSocketMutator(grpc_socket_mutator* mutator);
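A hedged sketch of wiring this fallback timeout through channel creation (the target and credentials are placeholders):
// Hedged sketch: fall back to resolver-returned backends if the grpclb
// balancer has not produced a serverlist within 10 seconds.
grpc::ChannelArguments ch_args;
ch_args.SetGrpclbFallbackTimeout(10000);  // in milliseconds
std::shared_ptr<grpc::Channel> channel = grpc::CreateCustomChannel(
    "dns:///my-service.example.com", grpc::InsecureChannelCredentials(), ch_args);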

@@ -313,7 +313,7 @@ GRPCAPI grpc_call_error grpc_call_cancel_with_status(grpc_call *call,
void *reserved);
/** Ref a call.
THREAD SAFETY: grpc_call_unref is thread-compatible */
THREAD SAFETY: grpc_call_ref is thread-compatible */
GRPCAPI void grpc_call_ref(grpc_call *call);
/** Unref a call.

@@ -249,19 +249,40 @@ typedef struct {
void *reserved;
} grpc_auth_metadata_context;
/** Maximum number of metadata entries returnable by a credentials plugin via
a synchronous return. */
#define GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX 4
/** grpc_metadata_credentials plugin is an API user provided structure used to
create grpc_credentials objects that can be set on a channel (composed) or
a call. See grpc_credentials_metadata_create_from_plugin below.
The grpc client stack will call the get_metadata method of the plugin for
every call in scope for the credentials created from it. */
typedef struct {
/** The implementation of this method has to be non-blocking.
- context is the information that can be used by the plugin to create auth
metadata.
- cb is the callback that needs to be called when the metadata is ready.
- user_data needs to be passed as the first parameter of the callback. */
void (*get_metadata)(void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void *user_data);
/** The implementation of this method has to be non-blocking, but can
be performed synchronously or asynchronously.
If processing occurs synchronously, returns non-zero and populates
creds_md, num_creds_md, status, and error_details. In this case,
the caller takes ownership of the entries in creds_md and of
error_details. Note that if the plugin needs to return more than
GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX entries in creds_md, it must
return asynchronously.
If processing occurs asynchronously, returns zero and invokes \a cb
when processing is completed. \a user_data will be passed as the
first parameter of the callback. NOTE: \a cb MUST be invoked in a
different thread, not from the thread in which \a get_metadata() is
invoked.
\a context is the information that can be used by the plugin to create
auth metadata. */
int (*get_metadata)(
void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void *user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t *num_creds_md, grpc_status_code *status,
const char **error_details);
/** Destroys the plugin state. */
void (*destroy)(void *state);
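To make the revised contract concrete, here is a hedged sketch of a plugin taking the new synchronous path; the header name and token are illustrative only:
// Hedged sketch: return non-zero, fill creds_md in place, and do NOT invoke
// cb. An asynchronous plugin would instead return 0 and invoke cb later from
// a different thread.
static int example_get_metadata(
    void *state, grpc_auth_metadata_context context,
    grpc_credentials_plugin_metadata_cb cb, void *user_data,
    grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
    size_t *num_creds_md, grpc_status_code *status,
    const char **error_details) {
  (void)state; (void)context; (void)cb; (void)user_data;
  creds_md[0].key = grpc_slice_from_static_string("authorization");
  creds_md[0].value = grpc_slice_from_static_string("Bearer example-token");
  creds_md[0].flags = 0;
  *num_creds_md = 1;      /* must not exceed GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX */
  *status = GRPC_STATUS_OK;
  *error_details = NULL;  /* caller takes ownership when non-NULL */
  return 1;               /* non-zero: metadata was produced synchronously */
}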

@@ -288,7 +288,11 @@ typedef struct {
"grpc.experimental.tcp_max_read_chunk_size"
/* Timeout in milliseconds to use for calls to the grpclb load balancer.
If 0 or unset, the balancer calls will have no deadline. */
#define GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS "grpc.grpclb_timeout_ms"
#define GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS "grpc.grpclb_call_timeout_ms"
/* Timeout in milliseconds to wait for the serverlist from the grpclb load
balancer before using fallback backend addresses from the resolver.
If 0, fallback will never be used. */
#define GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS "grpc.grpclb_fallback_timeout_ms"
/** If non-zero, grpc server's cronet compression workaround will be enabled */
#define GRPC_ARG_WORKAROUND_CRONET_COMPRESSION \
"grpc.workaround.cronet_compression"

@@ -183,7 +183,6 @@
#define _BSD_SOURCE
#endif
#if TARGET_OS_IPHONE
#define GPR_FORBID_UNREACHABLE_CODE 1
#define GPR_PLATFORM_STRING "ios"
#define GPR_CPU_IPHONE 1
#define GPR_PTHREAD_TLS 1
@@ -292,10 +291,6 @@
#endif
#ifdef _MSC_VER
#ifdef _PYTHON_MSVC
// The Python 3.5 Windows runtime is missing InetNtop
#define GPR_WIN_INET_NTOP
#endif // _PYTHON_MSVC
#if _MSC_VER < 1700
typedef __int8 int8_t;
typedef __int16 int16_t;

@@ -274,6 +274,8 @@
<file baseinstalldir="/" name="src/core/lib/compression/algorithm_metadata.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/compression/message_compress.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/compression/stream_compression.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/compression/stream_compression_gzip.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/compression/stream_compression_identity.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/debug/stats.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/debug/stats_data.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/http/format_request.h" role="src" />
@@ -428,6 +430,8 @@
<file baseinstalldir="/" name="src/core/lib/compression/compression.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/compression/message_compress.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/compression/stream_compression.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/compression/stream_compression_gzip.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/compression/stream_compression_identity.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/debug/stats.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/debug/stats_data.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/http/format_request.c" role="src" />

@@ -110,8 +110,6 @@ if EXTRA_ENV_COMPILE_ARGS is None:
EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime32 -D_timeb=__timeb32 -D_ftime_s=_ftime32_s'
else:
EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime64 -D_timeb=__timeb64'
elif 'win32' in sys.platform:
EXTRA_ENV_COMPILE_ARGS += ' -D_PYTHON_MSVC'
elif "linux" in sys.platform:
EXTRA_ENV_COMPILE_ARGS += ' -std=c++11 -std=gnu99 -fvisibility=hidden -fno-wrapv -fno-exceptions'
elif "darwin" in sys.platform:
@@ -163,7 +161,7 @@ if "win32" in sys.platform:
# TODO(zyc): Re-enable c-ares on x64 and x86 Windows after fixing the
# ares_library_init compilation issue
DEFINE_MACROS += (('WIN32_LEAN_AND_MEAN', 1), ('CARES_STATICLIB', 1),
('GRPC_ARES', 0),)
('GRPC_ARES', 0), ('NTDDI_VERSION', 0x06000000),)
if '64bit' in platform.architecture()[0]:
DEFINE_MACROS += (('MS_WIN64', 1),)
elif sys.version_info >= (3, 5):

@@ -91,6 +91,7 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
char* user_cred = NULL;
*name_to_resolve = get_http_proxy_server(exec_ctx, &user_cred);
if (*name_to_resolve == NULL) return false;
char* no_proxy_str = NULL;
grpc_uri* uri =
grpc_uri_parse(exec_ctx, server_uri, false /* suppress_errors */);
if (uri == NULL || uri->path[0] == '\0') {
@@ -98,20 +99,14 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
"'http_proxy' environment variable set, but cannot "
"parse server URI '%s' -- not using proxy",
server_uri);
if (uri != NULL) {
gpr_free(user_cred);
grpc_uri_destroy(uri);
}
return false;
goto no_use_proxy;
}
if (strcmp(uri->scheme, "unix") == 0) {
gpr_log(GPR_INFO, "not using proxy for Unix domain socket '%s'",
server_uri);
gpr_free(user_cred);
grpc_uri_destroy(uri);
return false;
goto no_use_proxy;
}
char* no_proxy_str = gpr_getenv("no_proxy");
no_proxy_str = gpr_getenv("no_proxy");
if (no_proxy_str != NULL) {
static const char* NO_PROXY_SEPARATOR = ",";
bool use_proxy = true;
@@ -147,12 +142,7 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
gpr_free(no_proxy_hosts);
gpr_free(server_host);
gpr_free(server_port);
if (!use_proxy) {
grpc_uri_destroy(uri);
gpr_free(*name_to_resolve);
*name_to_resolve = NULL;
return false;
}
if (!use_proxy) goto no_use_proxy;
}
}
grpc_arg args_to_add[2];
@@ -173,9 +163,15 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
} else {
*new_args = grpc_channel_args_copy_and_add(args, args_to_add, 1);
}
gpr_free(user_cred);
grpc_uri_destroy(uri);
gpr_free(user_cred);
return true;
no_use_proxy:
if (uri != NULL) grpc_uri_destroy(uri);
gpr_free(*name_to_resolve);
*name_to_resolve = NULL;
gpr_free(user_cred);
return false;
}
static bool proxy_mapper_map_address(grpc_exec_ctx* exec_ctx,

@@ -123,6 +123,7 @@
#define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
#define GRPC_GRPCLB_RECONNECT_JITTER 0.2
#define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000
grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false, "glb");
@@ -299,6 +300,10 @@ typedef struct glb_lb_policy {
/** timeout in milliseconds for the LB call. 0 means no deadline. */
int lb_call_timeout_ms;
/** timeout in milliseconds before using fallback backend addresses.
* 0 means not using fallback. */
int lb_fallback_timeout_ms;
/** for communicating with the LB server */
grpc_channel *lb_channel;
@@ -325,6 +330,9 @@ typedef struct glb_lb_policy {
* Otherwise, we delegate to the RR policy. */
size_t serverlist_index;
/** stores the backend addresses from the resolver */
grpc_lb_addresses *fallback_backend_addresses;
/** list of picks that are waiting on RR's policy connectivity */
pending_pick *pending_picks;
@@ -345,6 +353,9 @@ typedef struct glb_lb_policy {
/** is \a lb_call_retry_timer active? */
bool retry_timer_active;
/** is \a lb_fallback_timer active? */
bool fallback_timer_active;
/** called upon changes to the LB channel's connectivity. */
grpc_closure lb_channel_on_connectivity_changed;
@@ -354,9 +365,6 @@ typedef struct glb_lb_policy {
/************************************************************/
/* client data associated with the LB server communication */
/************************************************************/
/* Finished sending initial request. */
grpc_closure lb_on_sent_initial_request;
/* Status from the LB server has been received. This signals the end of the LB
* call. */
grpc_closure lb_on_server_status_received;
@@ -367,6 +375,9 @@ typedef struct glb_lb_policy {
/* LB call retry timer callback. */
grpc_closure lb_on_call_retry;
/* LB fallback timer callback. */
grpc_closure lb_on_fallback;
grpc_call *lb_call; /* streaming call to the LB server, */
grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
@@ -390,7 +401,9 @@ typedef struct glb_lb_policy {
/** LB call retry timer */
grpc_timer lb_call_retry_timer;
bool initial_request_sent;
/** LB fallback timer */
grpc_timer lb_fallback_timer;
bool seen_initial_response;
/* Stats for client-side load reporting. Should be unreffed and
@@ -536,6 +549,32 @@ static grpc_lb_addresses *process_serverlist_locked(
return lb_addresses;
}
/* Returns the backend addresses extracted from the given addresses */
static grpc_lb_addresses *extract_backend_addresses_locked(
grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses) {
/* first pass: count the number of backend addresses */
size_t num_backends = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (!addresses->addresses[i].is_balancer) {
++num_backends;
}
}
/* second pass: actually populate the addresses and (empty) LB tokens */
grpc_lb_addresses *backend_addresses =
grpc_lb_addresses_create(num_backends, &lb_token_vtable);
size_t num_copied = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) continue;
const grpc_resolved_address *addr = &addresses->addresses[i].address;
grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr,
addr->len, false /* is_balancer */,
NULL /* balancer_name */,
(void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
++num_copied;
}
return backend_addresses;
}
static void update_lb_connectivity_status_locked(
grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
grpc_connectivity_state rr_state, grpc_error *rr_state_error) {
@@ -603,35 +642,38 @@ static bool pick_from_internal_rr_locked(
grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
const grpc_lb_policy_pick_args *pick_args, bool force_async,
grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
// Look at the index into the serverlist to see if we should drop this call.
grpc_grpclb_server *server =
glb_policy->serverlist->servers[glb_policy->serverlist_index++];
if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
glb_policy->serverlist_index = 0; // Wrap-around.
}
if (server->drop) {
// Not using the RR policy, so unref it.
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
(intptr_t)wc_arg->rr_policy);
// Check for drops if we are not using fallback backend addresses.
if (glb_policy->serverlist != NULL) {
// Look at the index into the serverlist to see if we should drop this call.
grpc_grpclb_server *server =
glb_policy->serverlist->servers[glb_policy->serverlist_index++];
if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
glb_policy->serverlist_index = 0; // Wrap-around.
}
GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
// Update client load reporting stats to indicate the number of
// dropped calls. Note that we have to do this here instead of in
// the client_load_reporting filter, because we do not create a
// subchannel call (and therefore no client_load_reporting filter)
// for dropped calls.
grpc_grpclb_client_stats_add_call_dropped_locked(server->load_balance_token,
wc_arg->client_stats);
grpc_grpclb_client_stats_unref(wc_arg->client_stats);
if (force_async) {
GPR_ASSERT(wc_arg->wrapped_closure != NULL);
GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
if (server->drop) {
// Not using the RR policy, so unref it.
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
(intptr_t)wc_arg->rr_policy);
}
GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
// Update client load reporting stats to indicate the number of
// dropped calls. Note that we have to do this here instead of in
// the client_load_reporting filter, because we do not create a
// subchannel call (and therefore no client_load_reporting filter)
// for dropped calls.
grpc_grpclb_client_stats_add_call_dropped_locked(
server->load_balance_token, wc_arg->client_stats);
grpc_grpclb_client_stats_unref(wc_arg->client_stats);
if (force_async) {
GPR_ASSERT(wc_arg->wrapped_closure != NULL);
GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
gpr_free(wc_arg->free_when_done);
return false;
}
gpr_free(wc_arg->free_when_done);
return false;
return true;
}
gpr_free(wc_arg->free_when_done);
return true;
}
// Pick via the RR policy.
const bool pick_done = grpc_lb_policy_pick_locked(
@@ -669,8 +711,18 @@ static bool pick_from_internal_rr_locked(
static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
grpc_lb_addresses *addresses =
process_serverlist_locked(exec_ctx, glb_policy->serverlist);
grpc_lb_addresses *addresses;
if (glb_policy->serverlist != NULL) {
GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist);
} else {
// If rr_handover_locked() is invoked when we haven't received any
// serverlist from the balancer, we use the fallback backends returned by
// the resolver. Note that the fallback backend list may be empty, in which
// case the new round_robin policy will keep the requested picks pending.
GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
}
GPR_ASSERT(addresses != NULL);
grpc_lb_policy_args *args = (grpc_lb_policy_args *)gpr_zalloc(sizeof(*args));
args->client_channel_factory = glb_policy->cc_factory;
@@ -776,8 +828,6 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
/* glb_policy->rr_policy may be NULL (initial handover) */
static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
GPR_ASSERT(glb_policy->serverlist != NULL &&
glb_policy->serverlist->num_servers > 0);
if (glb_policy->shutting_down) return;
grpc_lb_policy_args *args = lb_policy_args_create(exec_ctx, glb_policy);
GPR_ASSERT(args != NULL);
@@ -926,6 +976,9 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
if (glb_policy->serverlist != NULL) {
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
if (glb_policy->fallback_backend_addresses != NULL) {
grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
}
grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
grpc_subchannel_index_unref();
if (glb_policy->pending_update_args != NULL) {
@@ -1067,10 +1120,26 @@ static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_UNREF(error);
}
static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy);
static void start_picking_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
/* start a timer to fall back */
if (glb_policy->lb_fallback_timeout_ms > 0 &&
glb_policy->serverlist == NULL && !glb_policy->fallback_timer_active) {
grpc_millis deadline =
grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_fallback_timeout_ms;
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
glb_policy->fallback_timer_active = true;
grpc_timer_init(exec_ctx, &glb_policy->lb_fallback_timer, deadline,
&glb_policy->lb_on_fallback);
}
glb_policy->started_picking = true;
grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
query_for_backends_locked(exec_ctx, glb_policy);
@@ -1173,6 +1242,56 @@ static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
exec_ctx, &glb_policy->state_tracker, current, notify);
}
static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
glb_policy->retry_timer_active = false;
if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
(void *)glb_policy);
}
GPR_ASSERT(glb_policy->lb_call == NULL);
query_for_backends_locked(exec_ctx, glb_policy);
}
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
}
static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
if (glb_policy->started_picking && glb_policy->updating_lb_call) {
if (glb_policy->retry_timer_active) {
grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
}
if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy);
glb_policy->updating_lb_call = false;
} else if (!glb_policy->shutting_down) {
/* if we aren't shutting down, restart the LB client call after some time */
grpc_millis next_try =
grpc_backoff_step(exec_ctx, &glb_policy->lb_call_backoff_state);
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
(void *)glb_policy);
grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
if (timeout > 0) {
gpr_log(GPR_DEBUG, "... retry_timer_active in %" PRIdPTR "ms.",
timeout);
} else {
gpr_log(GPR_DEBUG, "... retry_timer_active immediately.");
}
}
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
lb_call_on_retry_timer_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
glb_policy->retry_timer_active = true;
grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
&glb_policy->lb_on_call_retry);
}
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"lb_on_server_status_received_locked");
}
static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
@@ -1202,21 +1321,6 @@ static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
schedule_next_client_load_report(exec_ctx, glb_policy);
}
static void do_send_client_load_report_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_SEND_MESSAGE;
op.data.send_message.send_message = glb_policy->client_load_report_payload;
GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
client_load_report_done_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
grpc_call_error call_error = grpc_call_start_batch_and_execute(
exec_ctx, glb_policy->lb_call, &op, 1,
&glb_policy->client_load_report_closure);
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
grpc_grpclb_dropped_call_counts *drop_entries =
(grpc_grpclb_dropped_call_counts *)
@@ -1236,6 +1340,9 @@ static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
glb_policy->client_load_report_timer_pending = false;
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"client_load_report");
if (glb_policy->lb_call == NULL) {
maybe_restart_lb_call(exec_ctx, glb_policy);
}
return;
}
// Construct message payload.
@@ -1259,17 +1366,23 @@ static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
grpc_slice_unref_internal(exec_ctx, request_payload_slice);
grpc_grpclb_request_destroy(request);
// If we've already sent the initial request, then we can go ahead and
// send the load report. Otherwise, we need to wait until the initial
// request has been sent before sending this
// (see lb_on_sent_initial_request_locked() below).
if (glb_policy->initial_request_sent) {
do_send_client_load_report_locked(exec_ctx, glb_policy);
// Send load report message.
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_SEND_MESSAGE;
op.data.send_message.send_message = glb_policy->client_load_report_payload;
GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
client_load_report_done_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
grpc_call_error call_error = grpc_call_start_batch_and_execute(
exec_ctx, glb_policy->lb_call, &op, 1,
&glb_policy->client_load_report_closure);
if (call_error != GRPC_CALL_OK) {
gpr_log(GPR_ERROR, "call_error=%d", call_error);
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
}
static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error);
static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error);
static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
@@ -1312,9 +1425,6 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
grpc_slice_unref_internal(exec_ctx, request_payload_slice);
grpc_grpclb_request_destroy(request);
GRPC_CLOSURE_INIT(&glb_policy->lb_on_sent_initial_request,
lb_on_sent_initial_request_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
GRPC_CLOSURE_INIT(&glb_policy->lb_on_server_status_received,
lb_on_server_status_received_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
@@ -1329,7 +1439,6 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
glb_policy->initial_request_sent = false;
glb_policy->seen_initial_response = false;
glb_policy->last_client_load_report_counters_were_zero = false;
}
@@ -1346,7 +1455,7 @@ static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
grpc_slice_unref_internal(exec_ctx, glb_policy->lb_call_status_details);
if (!glb_policy->client_load_report_timer_pending) {
if (glb_policy->client_load_report_timer_pending) {
grpc_timer_cancel(exec_ctx, &glb_policy->client_load_report_timer);
}
}
@@ -1370,7 +1479,7 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(glb_policy->lb_call != NULL);
grpc_call_error call_error;
grpc_op ops[4];
grpc_op ops[3];
memset(ops, 0, sizeof(ops));
grpc_op *op = ops;
@@ -1391,13 +1500,8 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
op->flags = 0;
op->reserved = NULL;
op++;
/* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
* count goes to zero) to be unref'd in lb_on_sent_initial_request_locked() */
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base,
"lb_on_sent_initial_request_locked");
call_error = grpc_call_start_batch_and_execute(
exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
&glb_policy->lb_on_sent_initial_request);
call_error = grpc_call_start_batch_and_execute(exec_ctx, glb_policy->lb_call,
ops, (size_t)(op - ops), NULL);
GPR_ASSERT(GRPC_CALL_OK == call_error);
op = ops;
@@ -1434,19 +1538,6 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
glb_policy->initial_request_sent = true;
// If we attempted to send a client load report before the initial
// request was sent, send the load report now.
if (glb_policy->client_load_report_payload != NULL) {
do_send_client_load_report_locked(exec_ctx, glb_policy);
}
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"lb_on_sent_initial_request_locked");
}
static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
@@ -1520,6 +1611,15 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (glb_policy->serverlist != NULL) {
/* dispose of the old serverlist */
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
} else {
/* or dispose of the fallback */
grpc_lb_addresses_destroy(exec_ctx,
glb_policy->fallback_backend_addresses);
glb_policy->fallback_backend_addresses = NULL;
if (glb_policy->fallback_timer_active) {
grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
glb_policy->fallback_timer_active = false;
}
}
/* and update the copy in the glb_lb_policy instance. This
* serverlist instance will be destroyed either upon the next
@@ -1530,9 +1630,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
} else {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Received empty server list. Picks will stay pending until "
"a response with > 0 servers is received");
gpr_log(GPR_INFO, "Received empty server list, ignoring.");
}
grpc_grpclb_destroy_serverlist(serverlist);
}
@@ -1567,19 +1665,25 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
}
static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
glb_policy->retry_timer_active = false;
if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
(void *)glb_policy);
glb_policy->fallback_timer_active = false;
/* If we receive a serverlist after the timer fires but before this callback
* actually runs, don't fall back. */
if (glb_policy->serverlist == NULL) {
if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Falling back to use backends from resolver (grpclb %p)",
(void *)glb_policy);
}
GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
rr_handover_locked(exec_ctx, glb_policy);
}
GPR_ASSERT(glb_policy->lb_call == NULL);
query_for_backends_locked(exec_ctx, glb_policy);
}
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"grpclb_fallback_timer");
}
static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
@@ -1598,65 +1702,30 @@ }
}
/* We need to perform cleanups no matter what. */
lb_call_destroy_locked(exec_ctx, glb_policy);
if (glb_policy->started_picking && glb_policy->updating_lb_call) {
if (glb_policy->retry_timer_active) {
grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
}
if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy);
glb_policy->updating_lb_call = false;
} else if (!glb_policy->shutting_down) {
/* if we aren't shutting down, restart the LB client call after some time */
grpc_millis next_try =
grpc_backoff_step(exec_ctx, &glb_policy->lb_call_backoff_state);
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
(void *)glb_policy);
grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
if (timeout > 0) {
gpr_log(GPR_DEBUG,
"... retry_timer_active in %" PRIdPTR " milliseconds.",
timeout);
} else {
gpr_log(GPR_DEBUG, "... retry_timer_active immediately.");
}
}
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
lb_call_on_retry_timer_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
glb_policy->retry_timer_active = true;
grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
&glb_policy->lb_on_call_retry);
// If the load report timer is still pending, we wait for it to be
// called before restarting the call. Otherwise, we restart the call
// here.
if (!glb_policy->client_load_report_timer_pending) {
maybe_restart_lb_call(exec_ctx, glb_policy);
}
}
static void fallback_update_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy,
const grpc_lb_addresses *addresses) {
GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
glb_policy->fallback_backend_addresses =
extract_backend_addresses_locked(exec_ctx, addresses);
if (glb_policy->lb_fallback_timeout_ms > 0 &&
!glb_policy->fallback_timer_active) {
rr_handover_locked(exec_ctx, glb_policy);
}
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"lb_on_server_status_received_locked");
}
static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_args *args) {
glb_lb_policy *glb_policy = (glb_lb_policy *)policy;
if (glb_policy->updating_lb_channel) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Update already in progress for grpclb %p. Deferring update.",
(void *)glb_policy);
}
if (glb_policy->pending_update_args != NULL) {
grpc_channel_args_destroy(exec_ctx,
glb_policy->pending_update_args->args);
gpr_free(glb_policy->pending_update_args);
}
glb_policy->pending_update_args = (grpc_lb_policy_args *)gpr_zalloc(
sizeof(*glb_policy->pending_update_args));
glb_policy->pending_update_args->client_channel_factory =
args->client_channel_factory;
glb_policy->pending_update_args->args = grpc_channel_args_copy(args->args);
glb_policy->pending_update_args->combiner = args->combiner;
return;
}
glb_policy->updating_lb_channel = true;
// Propagate update to lb_channel (pick first).
const grpc_arg *arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
@@ -1674,13 +1743,43 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
"ignoring.",
(void *)glb_policy);
}
return;
}
const grpc_lb_addresses *addresses =
(const grpc_lb_addresses *)arg->value.pointer.p;
if (glb_policy->serverlist == NULL) {
// If a non-empty serverlist hasn't been received from the balancer,
// propagate the update to fallback_backend_addresses.
fallback_update_locked(exec_ctx, glb_policy, addresses);
} else if (glb_policy->updating_lb_channel) {
// If we have received a serverlist from the balancer, we need to defer the
// update when there is one already in progress.
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Update already in progress for grpclb %p. Deferring update.",
(void *)glb_policy);
}
if (glb_policy->pending_update_args != NULL) {
grpc_channel_args_destroy(exec_ctx,
glb_policy->pending_update_args->args);
gpr_free(glb_policy->pending_update_args);
}
glb_policy->pending_update_args = (grpc_lb_policy_args *)gpr_zalloc(
sizeof(*glb_policy->pending_update_args));
glb_policy->pending_update_args->client_channel_factory =
args->client_channel_factory;
glb_policy->pending_update_args->args = grpc_channel_args_copy(args->args);
glb_policy->pending_update_args->combiner = args->combiner;
return;
}
glb_policy->updating_lb_channel = true;
GPR_ASSERT(glb_policy->lb_channel != NULL);
grpc_channel_args *lb_channel_args = build_lb_channel_args(
exec_ctx, addresses, glb_policy->response_generator, args->args);
/* Propagate updates to the LB channel through the fake resolver */
/* Propagate updates to the LB channel (pick first) through the fake resolver
*/
grpc_fake_resolver_response_generator_set_response(
exec_ctx, glb_policy->response_generator, lb_channel_args);
grpc_channel_args_destroy(exec_ctx, lb_channel_args);
@@ -1783,13 +1882,7 @@ static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args) {
/* Count the number of gRPC-LB addresses. There must be at least one.
* TODO(roth): For now, we ignore non-balancer addresses, but in the
* future, we may change the behavior such that we fall back to using
* the non-balancer addresses if we cannot reach any balancers. In the
* fallback case, we should use the LB policy indicated by
* GRPC_ARG_LB_POLICY_NAME (although if that specifies grpclb or is
* unset, we should default to pick_first). */
/* Count the number of gRPC-LB addresses. There must be at least one. */
const grpc_arg *arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
@@ -1825,6 +1918,11 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
glb_policy->lb_call_timeout_ms =
grpc_channel_arg_get_integer(arg, (grpc_integer_options){0, 0, INT_MAX});
arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS);
glb_policy->lb_fallback_timeout_ms = grpc_channel_arg_get_integer(
arg, (grpc_integer_options){GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0,
INT_MAX});
// Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
// since we use this to trigger the client_load_reporting filter.
grpc_arg new_arg = grpc_channel_arg_string_create(
@@ -1833,6 +1931,11 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
/* Extract the backend addresses (may be empty) from the resolver for
* fallback. */
glb_policy->fallback_backend_addresses =
extract_backend_addresses_locked(exec_ctx, addresses);
/* Create a client channel over them to communicate with a LB service */
glb_policy->response_generator =
grpc_fake_resolver_response_generator_create();

@@ -56,7 +56,7 @@ grpc_lb_addresses* grpc_lb_addresses_copy(const grpc_lb_addresses* addresses) {
}
void grpc_lb_addresses_set_address(grpc_lb_addresses* addresses, size_t index,
void* address, size_t address_len,
const void* address, size_t address_len,
bool is_balancer, const char* balancer_name,
void* user_data) {
GPR_ASSERT(index < addresses->num_addresses);

@@ -73,7 +73,7 @@ grpc_lb_addresses *grpc_lb_addresses_copy(const grpc_lb_addresses *addresses);
* \a address is a socket address of length \a address_len.
* Takes ownership of \a balancer_name. */
void grpc_lb_addresses_set_address(grpc_lb_addresses *addresses, size_t index,
void *address, size_t address_len,
const void *address, size_t address_len,
bool is_balancer, const char *balancer_name,
void *user_data);

@@ -20,6 +20,7 @@
#if GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET)
#include <ares.h>
#include <sys/ioctl.h>
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h"
@@ -37,8 +38,6 @@
typedef struct fd_node {
/** the owner of this fd node */
grpc_ares_ev_driver *ev_driver;
/** the grpc_fd owned by this fd node */
grpc_fd *fd;
/** a closure wrapping on_readable_cb, which should be invoked when the
grpc_fd in this node becomes readable. */
grpc_closure read_closure;
@@ -50,10 +49,14 @@ typedef struct fd_node {
/** mutex guarding the rest of the state */
gpr_mu mu;
/** the grpc_fd owned by this fd node */
grpc_fd *fd;
/** if the readable closure has been registered */
bool readable_registered;
/** if the writable closure has been registered */
bool writable_registered;
/** if the fd is being shut down */
bool shutting_down;
} fd_node;
struct grpc_ares_ev_driver {
@@ -100,7 +103,6 @@ static void fd_node_destroy(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
GPR_ASSERT(!fdn->readable_registered);
GPR_ASSERT(!fdn->writable_registered);
gpr_mu_destroy(&fdn->mu);
grpc_pollset_set_del_fd(exec_ctx, fdn->ev_driver->pollset_set, fdn->fd);
/* c-ares library has closed the fd inside grpc_fd. This fd may be picked up
immediately by another thread, and should not be closed by the following
grpc_fd_orphan. */
@@ -109,6 +111,19 @@ static void fd_node_destroy(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
gpr_free(fdn);
}
static void fd_node_shutdown(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
gpr_mu_lock(&fdn->mu);
fdn->shutting_down = true;
if (!fdn->readable_registered && !fdn->writable_registered) {
gpr_mu_unlock(&fdn->mu);
fd_node_destroy(exec_ctx, fdn);
} else {
grpc_fd_shutdown(exec_ctx, fdn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"c-ares fd shutdown"));
gpr_mu_unlock(&fdn->mu);
}
}
grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver,
grpc_pollset_set *pollset_set) {
*ev_driver = (grpc_ares_ev_driver *)gpr_malloc(sizeof(grpc_ares_ev_driver));
@@ -175,18 +190,33 @@ static fd_node *pop_fd_node(fd_node **head, int fd) {
return NULL;
}
/* Check if \a fd is still readable */
static bool grpc_ares_is_fd_still_readable(grpc_ares_ev_driver *ev_driver,
int fd) {
size_t bytes_available = 0;
return ioctl(fd, FIONREAD, &bytes_available) == 0 && bytes_available > 0;
}
static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
fd_node *fdn = (fd_node *)arg;
grpc_ares_ev_driver *ev_driver = fdn->ev_driver;
gpr_mu_lock(&fdn->mu);
const int fd = grpc_fd_wrapped_fd(fdn->fd);
fdn->readable_registered = false;
if (fdn->shutting_down && !fdn->writable_registered) {
gpr_mu_unlock(&fdn->mu);
fd_node_destroy(exec_ctx, fdn);
grpc_ares_ev_driver_unref(ev_driver);
return;
}
gpr_mu_unlock(&fdn->mu);
gpr_log(GPR_DEBUG, "readable on %d", grpc_fd_wrapped_fd(fdn->fd));
gpr_log(GPR_DEBUG, "readable on %d", fd);
if (error == GRPC_ERROR_NONE) {
ares_process_fd(ev_driver->channel, grpc_fd_wrapped_fd(fdn->fd),
ARES_SOCKET_BAD);
do {
ares_process_fd(ev_driver->channel, fd, ARES_SOCKET_BAD);
} while (grpc_ares_is_fd_still_readable(ev_driver, fd));
} else {
// If error is not GRPC_ERROR_NONE, it means the fd has been shut down or
// timed out. The pending lookups made on this ev_driver will be cancelled
@ -207,13 +237,19 @@ static void on_writable_cb(grpc_exec_ctx *exec_ctx, void *arg,
fd_node *fdn = (fd_node *)arg;
grpc_ares_ev_driver *ev_driver = fdn->ev_driver;
gpr_mu_lock(&fdn->mu);
const int fd = grpc_fd_wrapped_fd(fdn->fd);
fdn->writable_registered = false;
if (fdn->shutting_down && !fdn->readable_registered) {
gpr_mu_unlock(&fdn->mu);
fd_node_destroy(exec_ctx, fdn);
grpc_ares_ev_driver_unref(ev_driver);
return;
}
gpr_mu_unlock(&fdn->mu);
gpr_log(GPR_DEBUG, "writable on %d", grpc_fd_wrapped_fd(fdn->fd));
gpr_log(GPR_DEBUG, "writable on %d", fd);
if (error == GRPC_ERROR_NONE) {
ares_process_fd(ev_driver->channel, ARES_SOCKET_BAD,
grpc_fd_wrapped_fd(fdn->fd));
ares_process_fd(ev_driver->channel, ARES_SOCKET_BAD, fd);
} else {
// If error is not GRPC_ERROR_NONE, it means the fd has been shut down or
// timed out. The pending lookups made on this ev_driver will be cancelled
@ -256,6 +292,7 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
fdn->ev_driver = ev_driver;
fdn->readable_registered = false;
fdn->writable_registered = false;
fdn->shutting_down = false;
gpr_mu_init(&fdn->mu);
GRPC_CLOSURE_INIT(&fdn->read_closure, on_readable_cb, fdn,
grpc_schedule_on_exec_ctx);
@ -296,7 +333,7 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
while (ev_driver->fds != NULL) {
fd_node *cur = ev_driver->fds;
ev_driver->fds = ev_driver->fds->next;
fd_node_destroy(exec_ctx, cur);
fd_node_shutdown(exec_ctx, cur);
}
ev_driver->fds = new_list;
// If the ev driver has no working fd, all the tasks are done.
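
Note on the hunks above: fd_node teardown is now two-phase. fd_node_shutdown() destroys the node immediately only if neither closure is registered; otherwise it marks shutting_down and whichever callback fires last performs the destruction. A minimal sketch of that pattern, not part of this change, using plain pthreads in place of gpr_mu and entirely hypothetical names:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

typedef struct node {
  pthread_mutex_t mu;
  bool readable_registered;
  bool writable_registered;
  bool shutting_down;
} node;

static void node_destroy(node *n) {
  pthread_mutex_destroy(&n->mu);
  free(n);
}

/* Teardown entry point: destroy immediately only if no callback is still
   registered; otherwise defer to the last callback out. */
static void node_shutdown(node *n) {
  pthread_mutex_lock(&n->mu);
  n->shutting_down = true;
  if (!n->readable_registered && !n->writable_registered) {
    pthread_mutex_unlock(&n->mu);
    node_destroy(n);
  } else {
    /* The real change also calls grpc_fd_shutdown() here so the pending
       closures run promptly. */
    pthread_mutex_unlock(&n->mu);
  }
}

/* Each callback clears its own flag; if shutdown was requested and the
   other closure is also done, the callback performs the destruction. */
static void on_readable(node *n) {
  pthread_mutex_lock(&n->mu);
  n->readable_registered = false;
  if (n->shutting_down && !n->writable_registered) {
    pthread_mutex_unlock(&n->mu);
    node_destroy(n);
    return;
  }
  pthread_mutex_unlock(&n->mu);
  /* ... process the readable event ... */
}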

@ -682,7 +682,10 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&s->unprocessed_incoming_frames_buffer);
grpc_slice_buffer_init(&s->frame_storage);
grpc_slice_buffer_init(&s->compressed_data_buffer);
grpc_slice_buffer_init(&s->decompressed_data_buffer);
s->pending_byte_stream = false;
s->decompressed_header_bytes = 0;
GRPC_CLOSURE_INIT(&s->reset_byte_stream, reset_byte_stream, s,
grpc_combiner_scheduler(t->combiner));
@ -716,14 +719,8 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
grpc_slice_buffer_destroy_internal(exec_ctx,
&s->unprocessed_incoming_frames_buffer);
grpc_slice_buffer_destroy_internal(exec_ctx, &s->frame_storage);
if (s->compressed_data_buffer) {
grpc_slice_buffer_destroy_internal(exec_ctx, s->compressed_data_buffer);
gpr_free(s->compressed_data_buffer);
}
if (s->decompressed_data_buffer) {
grpc_slice_buffer_destroy_internal(exec_ctx, s->decompressed_data_buffer);
gpr_free(s->decompressed_data_buffer);
}
grpc_slice_buffer_destroy_internal(exec_ctx, &s->compressed_data_buffer);
grpc_slice_buffer_destroy_internal(exec_ctx, &s->decompressed_data_buffer);
grpc_chttp2_list_remove_stalled_by_transport(t, s);
grpc_chttp2_list_remove_stalled_by_stream(t, s);
@ -1420,12 +1417,14 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE;
/* Identify stream compression */
if ((s->stream_compression_send_enabled =
(op_payload->send_initial_metadata.send_initial_metadata->idx.named
.content_encoding != NULL)) == true) {
s->compressed_data_buffer =
(grpc_slice_buffer *)gpr_malloc(sizeof(grpc_slice_buffer));
grpc_slice_buffer_init(s->compressed_data_buffer);
if (op_payload->send_initial_metadata.send_initial_metadata->idx.named
.content_encoding == NULL ||
grpc_stream_compression_method_parse(
GRPC_MDVALUE(
op_payload->send_initial_metadata.send_initial_metadata->idx
.named.content_encoding->md),
true, &s->stream_compression_method) == 0) {
s->stream_compression_method = GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS;
}
s->send_initial_metadata_finished = add_closure_barrier(on_complete);
@ -1863,20 +1862,20 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx,
&s->frame_storage);
s->unprocessed_incoming_frames_decompressed = false;
}
if (s->stream_compression_recv_enabled &&
!s->unprocessed_incoming_frames_decompressed) {
GPR_ASSERT(s->decompressed_data_buffer->length == 0);
if (!s->unprocessed_incoming_frames_decompressed) {
GPR_ASSERT(s->decompressed_data_buffer.length == 0);
bool end_of_context;
if (!s->stream_decompression_ctx) {
s->stream_decompression_ctx =
grpc_stream_compression_context_create(
GRPC_STREAM_COMPRESSION_DECOMPRESS);
s->stream_decompression_method);
}
if (!grpc_stream_decompress(s->stream_decompression_ctx,
&s->unprocessed_incoming_frames_buffer,
s->decompressed_data_buffer, NULL,
GRPC_HEADER_SIZE_IN_BYTES,
&end_of_context)) {
if (!grpc_stream_decompress(
s->stream_decompression_ctx,
&s->unprocessed_incoming_frames_buffer,
&s->decompressed_data_buffer, NULL,
GRPC_HEADER_SIZE_IN_BYTES - s->decompressed_header_bytes,
&end_of_context)) {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
&s->frame_storage);
grpc_slice_buffer_reset_and_unref_internal(
@ -1884,9 +1883,13 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx,
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Stream decompression error.");
} else {
s->decompressed_header_bytes += s->decompressed_data_buffer.length;
if (s->decompressed_header_bytes == GRPC_HEADER_SIZE_IN_BYTES) {
s->decompressed_header_bytes = 0;
}
error = grpc_deframe_unprocessed_incoming_frames(
exec_ctx, &s->data_parser, s, s->decompressed_data_buffer, NULL,
s->recv_message);
exec_ctx, &s->data_parser, s, &s->decompressed_data_buffer,
NULL, s->recv_message);
if (end_of_context) {
grpc_stream_compression_context_destroy(
s->stream_decompression_ctx);
@ -1935,15 +1938,14 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
}
bool pending_data = s->pending_byte_stream ||
s->unprocessed_incoming_frames_buffer.length > 0;
if (s->stream_compression_recv_enabled && s->read_closed &&
s->frame_storage.length > 0 && !pending_data && !s->seen_error &&
s->recv_trailing_metadata_finished != NULL) {
if (s->read_closed && s->frame_storage.length > 0 && !pending_data &&
!s->seen_error && s->recv_trailing_metadata_finished != NULL) {
/* Maybe some SYNC_FLUSH data is left in frame_storage. Consume it and
* maybe decompress the next 5 bytes in the stream. */
bool end_of_context;
if (!s->stream_decompression_ctx) {
s->stream_decompression_ctx = grpc_stream_compression_context_create(
GRPC_STREAM_COMPRESSION_DECOMPRESS);
s->stream_decompression_method);
}
if (!grpc_stream_decompress(s->stream_decompression_ctx,
&s->frame_storage,
@ -1956,6 +1958,7 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
} else {
if (s->unprocessed_incoming_frames_buffer.length > 0) {
s->unprocessed_incoming_frames_decompressed = true;
pending_data = true;
}
if (end_of_context) {
grpc_stream_compression_context_destroy(s->stream_decompression_ctx);
@ -2771,7 +2774,7 @@ static void reset_byte_stream(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_UNREF(s->byte_stream_error);
s->byte_stream_error = GRPC_ERROR_NONE;
grpc_chttp2_cancel_stream(exec_ctx, s->t, s, GRPC_ERROR_REF(error));
s->byte_stream_error = error;
s->byte_stream_error = GRPC_ERROR_REF(error);
}
}
@ -2869,24 +2872,23 @@ static grpc_error *incoming_byte_stream_pull(grpc_exec_ctx *exec_ctx,
grpc_error *error;
if (s->unprocessed_incoming_frames_buffer.length > 0) {
if (s->stream_compression_recv_enabled &&
!s->unprocessed_incoming_frames_decompressed) {
if (!s->unprocessed_incoming_frames_decompressed) {
bool end_of_context;
if (!s->stream_decompression_ctx) {
s->stream_decompression_ctx = grpc_stream_compression_context_create(
GRPC_STREAM_COMPRESSION_DECOMPRESS);
s->stream_decompression_method);
}
if (!grpc_stream_decompress(s->stream_decompression_ctx,
&s->unprocessed_incoming_frames_buffer,
s->decompressed_data_buffer, NULL, MAX_SIZE_T,
&end_of_context)) {
&s->decompressed_data_buffer, NULL,
MAX_SIZE_T, &end_of_context)) {
error =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Stream decompression error.");
return error;
}
GPR_ASSERT(s->unprocessed_incoming_frames_buffer.length == 0);
grpc_slice_buffer_swap(&s->unprocessed_incoming_frames_buffer,
s->decompressed_data_buffer);
&s->decompressed_data_buffer);
s->unprocessed_incoming_frames_decompressed = true;
if (end_of_context) {
grpc_stream_compression_context_destroy(s->stream_decompression_ctx);
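
The decompressed_header_bytes counter introduced above lets the 5-byte gRPC message header come out of the decompressor in pieces: each decompress call is capped at GRPC_HEADER_SIZE_IN_BYTES minus what has already been produced, and the counter resets once a full header is complete. A sketch of just that accounting, with hypothetical helper names:

#include <assert.h>
#include <stddef.h>

#define GRPC_HEADER_SIZE_IN_BYTES 5

static size_t decompressed_header_bytes = 0; /* per-stream in the real code */

/* Cap for max_output_size on the next decompress call. */
static size_t header_bytes_wanted(void) {
  return GRPC_HEADER_SIZE_IN_BYTES - decompressed_header_bytes;
}

/* Record how many header bytes the decompressor actually produced. */
static void header_bytes_emitted(size_t n) {
  decompressed_header_bytes += n;
  assert(decompressed_header_bytes <= GRPC_HEADER_SIZE_IN_BYTES);
  if (decompressed_header_bytes == GRPC_HEADER_SIZE_IN_BYTES) {
    decompressed_header_bytes = 0; /* header complete; start the next one */
  }
}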

@ -210,7 +210,7 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames(
if (cur != end) {
grpc_slice_buffer_undo_take_first(
&s->unprocessed_incoming_frames_buffer,
slices,
grpc_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
}
grpc_slice_unref_internal(exec_ctx, slice);
@ -277,7 +277,7 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames(
p->state = GRPC_CHTTP2_DATA_FH_0;
cur += p->frame_size;
grpc_slice_buffer_undo_take_first(
&s->unprocessed_incoming_frames_buffer,
slices,
grpc_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
grpc_slice_unref_internal(exec_ctx, slice);
return GRPC_ERROR_NONE;

@ -1683,17 +1683,12 @@ static void parse_stream_compression_md(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
grpc_metadata_batch *initial_metadata) {
if (initial_metadata->idx.named.content_encoding != NULL) {
grpc_slice content_encoding =
GRPC_MDVALUE(initial_metadata->idx.named.content_encoding->md);
if (!grpc_slice_eq(content_encoding, GRPC_MDSTR_IDENTITY)) {
if (grpc_slice_eq(content_encoding, GRPC_MDSTR_GZIP)) {
s->stream_compression_recv_enabled = true;
s->decompressed_data_buffer =
(grpc_slice_buffer *)gpr_malloc(sizeof(grpc_slice_buffer));
grpc_slice_buffer_init(s->decompressed_data_buffer);
}
}
if (initial_metadata->idx.named.content_encoding == NULL ||
grpc_stream_compression_method_parse(
GRPC_MDVALUE(initial_metadata->idx.named.content_encoding->md), false,
&s->stream_decompression_method) == 0) {
s->stream_decompression_method =
GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS;
}
}
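
parse_stream_compression_md now reduces to one rule: a missing or unrecognized content-encoding falls back to identity decompression. The same rule in isolation, a sketch with plain C strings standing in for grpc_slice and hypothetical names:

#include <string.h>

typedef enum {
  METHOD_IDENTITY_DECOMPRESS,
  METHOD_GZIP_DECOMPRESS
} method;

/* Mirrors grpc_stream_compression_method_parse: 1 on success, 0 if the
   algorithm name is unknown. */
static int method_parse(const char *value, method *m) {
  if (strcmp(value, "identity") == 0) {
    *m = METHOD_IDENTITY_DECOMPRESS;
    return 1;
  }
  if (strcmp(value, "gzip") == 0) {
    *m = METHOD_GZIP_DECOMPRESS;
    return 1;
  }
  return 0;
}

static void pick_decompression_method(const char *content_encoding,
                                      method *m) {
  /* NULL (header absent) and unknown values both fall back to identity. */
  if (content_encoding == NULL || method_parse(content_encoding, m) == 0) {
    *m = METHOD_IDENTITY_DECOMPRESS;
  }
}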

@ -588,25 +588,27 @@ struct grpc_chttp2_stream {
grpc_chttp2_write_cb *finish_after_write;
size_t sending_bytes;
/** Whether stream compression recv is enabled */
bool stream_compression_recv_enabled;
/** Whether stream compression send is enabled */
bool stream_compression_send_enabled;
/** Whether bytes stored in unprocessed_incoming_frames_buffer are
decompressed */
bool unprocessed_incoming_frames_decompressed;
/* Stream compression method to be used. */
grpc_stream_compression_method stream_compression_method;
/* Stream decompression method to be used. */
grpc_stream_compression_method stream_decompression_method;
/** Stream compression decompress context */
grpc_stream_compression_context *stream_decompression_ctx;
/** Stream compression compress context */
grpc_stream_compression_context *stream_compression_ctx;
/** Buffer storing data that is compressed but not sent */
grpc_slice_buffer *compressed_data_buffer;
grpc_slice_buffer compressed_data_buffer;
/** Amount of uncompressed bytes sent out when compressed_data_buffer is
* emptied */
size_t uncompressed_data_size;
/** Temporary buffer storing decompressed data */
grpc_slice_buffer *decompressed_data_buffer;
grpc_slice_buffer decompressed_data_buffer;
/** Whether bytes stored in unprocessed_incoming_frames_buffer are
decompressed */
bool unprocessed_incoming_frames_decompressed;
/** gRPC header bytes that are already decompressed */
size_t decompressed_header_bytes;
};
/** Transport writing call flow:
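
Turning compressed_data_buffer and decompressed_data_buffer from pointers into embedded members removes two gpr_malloc calls per stream and the conditional frees seen earlier in destroy_stream_locked. A sketch of the simplified ownership, with a stand-in buffer type and hypothetical names:

#include <stddef.h>

typedef struct { size_t length; } slice_buffer; /* stand-in type */

static void slice_buffer_init(slice_buffer *b) { b->length = 0; }
static void slice_buffer_destroy(slice_buffer *b) { (void)b; }

typedef struct stream {
  slice_buffer compressed_data_buffer;   /* embedded, never malloc'd */
  slice_buffer decompressed_data_buffer;
} stream;

static void stream_init(stream *s) {
  /* Unconditional init: no allocation, no NULL checks downstream. */
  slice_buffer_init(&s->compressed_data_buffer);
  slice_buffer_init(&s->decompressed_data_buffer);
}

static void stream_destroy(stream *s) {
  /* Unconditional destroy replaces the old if-non-NULL-then-free dance. */
  slice_buffer_destroy(&s->compressed_data_buffer);
  slice_buffer_destroy(&s->decompressed_data_buffer);
}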

@ -321,8 +321,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
if (sent_initial_metadata) {
/* send any body bytes, if allowed by flow control */
if (s->flow_controlled_buffer.length > 0 ||
(s->stream_compression_send_enabled &&
s->compressed_data_buffer->length > 0)) {
s->compressed_data_buffer.length > 0) {
uint32_t stream_remote_window = (uint32_t)GPR_MAX(
0,
s->flow_control.remote_window_delta +
@ -336,56 +335,59 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
bool is_last_data_frame = false;
bool is_last_frame = false;
size_t sending_bytes_before = s->sending_bytes;
if (s->stream_compression_send_enabled) {
while ((s->flow_controlled_buffer.length > 0 ||
s->compressed_data_buffer->length > 0) &&
max_outgoing > 0) {
if (s->compressed_data_buffer->length > 0) {
uint32_t send_bytes = (uint32_t)GPR_MIN(
max_outgoing, s->compressed_data_buffer->length);
is_last_data_frame =
(send_bytes == s->compressed_data_buffer->length &&
s->flow_controlled_buffer.length == 0 &&
s->fetching_send_message == NULL);
is_last_frame =
is_last_data_frame && s->send_trailing_metadata != NULL &&
grpc_metadata_batch_is_empty(s->send_trailing_metadata);
grpc_chttp2_encode_data(s->id, s->compressed_data_buffer,
send_bytes, is_last_frame,
&s->stats.outgoing, &t->outbuf);
grpc_chttp2_flowctl_sent_data(&t->flow_control,
&s->flow_control, send_bytes);
max_outgoing -= send_bytes;
if (s->compressed_data_buffer->length == 0) {
s->sending_bytes += s->uncompressed_data_size;
while ((s->flow_controlled_buffer.length > 0 ||
s->compressed_data_buffer.length > 0) &&
max_outgoing > 0) {
if (s->compressed_data_buffer.length > 0) {
uint32_t send_bytes = (uint32_t)GPR_MIN(
max_outgoing, s->compressed_data_buffer.length);
is_last_data_frame =
(send_bytes == s->compressed_data_buffer.length &&
s->flow_controlled_buffer.length == 0 &&
s->fetching_send_message == NULL);
if (is_last_data_frame && s->send_trailing_metadata != NULL &&
s->stream_compression_ctx != NULL) {
if (!grpc_stream_compress(
s->stream_compression_ctx, &s->flow_controlled_buffer,
&s->compressed_data_buffer, NULL, MAX_SIZE_T,
GRPC_STREAM_COMPRESSION_FLUSH_FINISH)) {
gpr_log(GPR_ERROR, "Stream compression failed.");
}
} else {
if (s->stream_compression_ctx == NULL) {
s->stream_compression_ctx =
grpc_stream_compression_context_create(
GRPC_STREAM_COMPRESSION_COMPRESS);
}
s->uncompressed_data_size = s->flow_controlled_buffer.length;
GPR_ASSERT(grpc_stream_compress(
s->stream_compression_ctx, &s->flow_controlled_buffer,
s->compressed_data_buffer, NULL, MAX_SIZE_T,
GRPC_STREAM_COMPRESSION_FLUSH_SYNC));
grpc_stream_compression_context_destroy(
s->stream_compression_ctx);
s->stream_compression_ctx = NULL;
/* After finish, bytes in s->compressed_data_buffer may be
* more than max_outgoing. Start another round of the current
* while loop so that send_bytes and is_last_data_frame are
* recalculated. */
continue;
}
is_last_frame =
is_last_data_frame && s->send_trailing_metadata != NULL &&
grpc_metadata_batch_is_empty(s->send_trailing_metadata);
grpc_chttp2_encode_data(s->id, &s->compressed_data_buffer,
send_bytes, is_last_frame,
&s->stats.outgoing, &t->outbuf);
grpc_chttp2_flowctl_sent_data(&t->flow_control, &s->flow_control,
send_bytes);
max_outgoing -= send_bytes;
if (s->compressed_data_buffer.length == 0) {
s->sending_bytes += s->uncompressed_data_size;
}
} else {
if (s->stream_compression_ctx == NULL) {
s->stream_compression_ctx =
grpc_stream_compression_context_create(
s->stream_compression_method);
}
s->uncompressed_data_size = s->flow_controlled_buffer.length;
if (!grpc_stream_compress(
s->stream_compression_ctx, &s->flow_controlled_buffer,
&s->compressed_data_buffer, NULL, MAX_SIZE_T,
GRPC_STREAM_COMPRESSION_FLUSH_SYNC)) {
gpr_log(GPR_ERROR, "Stream compression failed.");
}
}
} else {
uint32_t send_bytes = (uint32_t)GPR_MIN(
max_outgoing, s->flow_controlled_buffer.length);
is_last_data_frame = s->fetching_send_message == NULL &&
send_bytes == s->flow_controlled_buffer.length;
is_last_frame =
is_last_data_frame && s->send_trailing_metadata != NULL &&
grpc_metadata_batch_is_empty(s->send_trailing_metadata);
grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer,
send_bytes, is_last_frame,
&s->stats.outgoing, &t->outbuf);
grpc_chttp2_flowctl_sent_data(&t->flow_control, &s->flow_control,
send_bytes);
s->sending_bytes += send_bytes;
}
if (!t->is_client) {
t->ping_recv_state.last_ping_recv_time = 0;
@ -409,8 +411,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
&s->flow_controlled_bytes_flowed, GRPC_ERROR_NONE);
now_writing = true;
if (s->flow_controlled_buffer.length > 0 ||
(s->stream_compression_send_enabled &&
s->compressed_data_buffer->length > 0)) {
s->compressed_data_buffer.length > 0) {
GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:fork");
grpc_chttp2_list_add_writable_stream(t, s);
}
@ -428,8 +429,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
if (s->send_trailing_metadata != NULL &&
s->fetching_send_message == NULL &&
s->flow_controlled_buffer.length == 0 &&
(!s->stream_compression_send_enabled ||
s->compressed_data_buffer->length == 0)) {
s->compressed_data_buffer.length == 0) {
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "sending trailing_metadata"));
if (grpc_metadata_batch_is_empty(s->send_trailing_metadata)) {
grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer, 0, true,
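
In the rewritten send loop above, sync flushes keep the compressed stream open between writes, while FLUSH_FINISH is issued exactly once, when the last data frame is about to go out with trailing metadata queued, so the gzip trailer lands in the final frame. A condensed model of that branch, with hypothetical names:

#include <stdbool.h>

typedef enum { FLUSH_SYNC, FLUSH_FINISH } flush_kind;

/* Mirrors the condition in grpc_chttp2_begin_write: finish only when this
   is the last data frame, trailing metadata is queued, and a compression
   context already exists; otherwise flush and keep the stream open. */
static flush_kind choose_flush(bool is_last_data_frame,
                               bool have_trailing_metadata,
                               bool ctx_exists) {
  if (is_last_data_frame && have_trailing_metadata && ctx_exists) {
    return FLUSH_FINISH;
  }
  return FLUSH_SYNC;
}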

@ -16,177 +16,62 @@
*
*/
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/lib/compression/stream_compression.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/compression/stream_compression_gzip.h"
#define OUTPUT_BLOCK_SIZE (1024)
static bool gzip_flate(grpc_stream_compression_context *ctx,
grpc_slice_buffer *in, grpc_slice_buffer *out,
size_t *output_size, size_t max_output_size, int flush,
bool *end_of_context) {
GPR_ASSERT(flush == 0 || flush == Z_SYNC_FLUSH || flush == Z_FINISH);
/* Full flush is not allowed when inflating. */
GPR_ASSERT(!(ctx->flate == inflate && (flush == Z_FINISH)));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
int r;
bool eoc = false;
size_t original_max_output_size = max_output_size;
while (max_output_size > 0 && (in->length > 0 || flush) && !eoc) {
size_t slice_size = max_output_size < OUTPUT_BLOCK_SIZE ? max_output_size
: OUTPUT_BLOCK_SIZE;
grpc_slice slice_out = GRPC_SLICE_MALLOC(slice_size);
ctx->zs.avail_out = (uInt)slice_size;
ctx->zs.next_out = GRPC_SLICE_START_PTR(slice_out);
while (ctx->zs.avail_out > 0 && in->length > 0 && !eoc) {
grpc_slice slice = grpc_slice_buffer_take_first(in);
ctx->zs.avail_in = (uInt)GRPC_SLICE_LENGTH(slice);
ctx->zs.next_in = GRPC_SLICE_START_PTR(slice);
r = ctx->flate(&ctx->zs, Z_NO_FLUSH);
if (r < 0 && r != Z_BUF_ERROR) {
gpr_log(GPR_ERROR, "zlib error (%d)", r);
grpc_slice_unref_internal(&exec_ctx, slice_out);
grpc_exec_ctx_finish(&exec_ctx);
return false;
} else if (r == Z_STREAM_END && ctx->flate == inflate) {
eoc = true;
}
if (ctx->zs.avail_in > 0) {
grpc_slice_buffer_undo_take_first(
in,
grpc_slice_sub(slice, GRPC_SLICE_LENGTH(slice) - ctx->zs.avail_in,
GRPC_SLICE_LENGTH(slice)));
}
grpc_slice_unref_internal(&exec_ctx, slice);
}
if (flush != 0 && ctx->zs.avail_out > 0 && !eoc) {
GPR_ASSERT(in->length == 0);
r = ctx->flate(&ctx->zs, flush);
if (flush == Z_SYNC_FLUSH) {
switch (r) {
case Z_OK:
/* Maybe flush is not complete; just made some partial progress. */
if (ctx->zs.avail_out > 0) {
flush = 0;
}
break;
case Z_BUF_ERROR:
case Z_STREAM_END:
flush = 0;
break;
default:
gpr_log(GPR_ERROR, "zlib error (%d)", r);
grpc_slice_unref_internal(&exec_ctx, slice_out);
grpc_exec_ctx_finish(&exec_ctx);
return false;
}
} else if (flush == Z_FINISH) {
switch (r) {
case Z_OK:
case Z_BUF_ERROR:
/* Wait for the next loop to assign additional output space. */
GPR_ASSERT(ctx->zs.avail_out == 0);
break;
case Z_STREAM_END:
flush = 0;
break;
default:
gpr_log(GPR_ERROR, "zlib error (%d)", r);
grpc_slice_unref_internal(&exec_ctx, slice_out);
grpc_exec_ctx_finish(&exec_ctx);
return false;
}
}
}
if (ctx->zs.avail_out == 0) {
grpc_slice_buffer_add(out, slice_out);
} else if (ctx->zs.avail_out < slice_size) {
slice_out.data.refcounted.length -= ctx->zs.avail_out;
grpc_slice_buffer_add(out, slice_out);
} else {
grpc_slice_unref_internal(&exec_ctx, slice_out);
}
max_output_size -= (slice_size - ctx->zs.avail_out);
}
grpc_exec_ctx_finish(&exec_ctx);
if (end_of_context) {
*end_of_context = eoc;
}
if (output_size) {
*output_size = original_max_output_size - max_output_size;
}
return true;
}
extern const grpc_stream_compression_vtable
grpc_stream_compression_identity_vtable;
bool grpc_stream_compress(grpc_stream_compression_context *ctx,
grpc_slice_buffer *in, grpc_slice_buffer *out,
size_t *output_size, size_t max_output_size,
grpc_stream_compression_flush flush) {
GPR_ASSERT(ctx->flate == deflate);
int gzip_flush;
switch (flush) {
case GRPC_STREAM_COMPRESSION_FLUSH_NONE:
gzip_flush = 0;
break;
case GRPC_STREAM_COMPRESSION_FLUSH_SYNC:
gzip_flush = Z_SYNC_FLUSH;
break;
case GRPC_STREAM_COMPRESSION_FLUSH_FINISH:
gzip_flush = Z_FINISH;
break;
default:
gzip_flush = 0;
}
return gzip_flate(ctx, in, out, output_size, max_output_size, gzip_flush,
NULL);
return ctx->vtable->compress(ctx, in, out, output_size, max_output_size,
flush);
}
bool grpc_stream_decompress(grpc_stream_compression_context *ctx,
grpc_slice_buffer *in, grpc_slice_buffer *out,
size_t *output_size, size_t max_output_size,
bool *end_of_context) {
GPR_ASSERT(ctx->flate == inflate);
return gzip_flate(ctx, in, out, output_size, max_output_size, Z_SYNC_FLUSH,
end_of_context);
return ctx->vtable->decompress(ctx, in, out, output_size, max_output_size,
end_of_context);
}
grpc_stream_compression_context *grpc_stream_compression_context_create(
grpc_stream_compression_method method) {
grpc_stream_compression_context *ctx =
(grpc_stream_compression_context *)gpr_zalloc(
sizeof(grpc_stream_compression_context));
int r;
if (ctx == NULL) {
return NULL;
}
if (method == GRPC_STREAM_COMPRESSION_DECOMPRESS) {
r = inflateInit2(&ctx->zs, 0x1F);
ctx->flate = inflate;
} else {
r = deflateInit2(&ctx->zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 0x1F, 8,
Z_DEFAULT_STRATEGY);
ctx->flate = deflate;
}
if (r != Z_OK) {
gpr_free(ctx);
return NULL;
switch (method) {
case GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS:
case GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS:
return grpc_stream_compression_identity_vtable.context_create(method);
case GRPC_STREAM_COMPRESSION_GZIP_COMPRESS:
case GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS:
return grpc_stream_compression_gzip_vtable.context_create(method);
default:
gpr_log(GPR_ERROR, "Unknown stream compression method: %d", method);
return NULL;
}
return ctx;
}
void grpc_stream_compression_context_destroy(
grpc_stream_compression_context *ctx) {
if (ctx->flate == inflate) {
inflateEnd(&ctx->zs);
ctx->vtable->context_destroy(ctx);
}
int grpc_stream_compression_method_parse(
grpc_slice value, bool is_compress,
grpc_stream_compression_method *method) {
if (grpc_slice_eq(value, GRPC_MDSTR_IDENTITY)) {
*method = is_compress ? GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS
: GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS;
return 1;
} else if (grpc_slice_eq(value, GRPC_MDSTR_GZIP)) {
*method = is_compress ? GRPC_STREAM_COMPRESSION_GZIP_COMPRESS
: GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS;
return 1;
} else {
deflateEnd(&ctx->zs);
return 0;
}
gpr_free(ctx);
}
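
After this change the public entry points carry no zlib knowledge at all; they forward through a vtable that the method-specific context constructor installed. A minimal, self-contained sketch of that dispatch shape, with hypothetical types:

#include <stdbool.h>
#include <stddef.h>

typedef struct ctx ctx;

typedef struct vtable {
  bool (*compress)(ctx *c, const char *in, size_t n);
  void (*destroy)(ctx *c);
} vtable;

struct ctx {
  const vtable *vt; /* installed by the method-specific create() */
};

/* Method-agnostic entry points, in the spirit of grpc_stream_compress()
   and grpc_stream_compression_context_destroy() above. */
static bool do_compress(ctx *c, const char *in, size_t n) {
  return c->vt->compress(c, in, n);
}

static void do_destroy(ctx *c) { c->vt->destroy(c); }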

@ -24,15 +24,20 @@
#include <grpc/slice_buffer.h>
#include <zlib.h>
#include "src/core/lib/transport/static_metadata.h"
typedef struct grpc_stream_compression_vtable grpc_stream_compression_vtable;
/* Stream compression/decompression context */
typedef struct grpc_stream_compression_context {
z_stream zs;
int (*flate)(z_stream *zs, int flush);
const grpc_stream_compression_vtable *vtable;
} grpc_stream_compression_context;
typedef enum grpc_stream_compression_method {
GRPC_STREAM_COMPRESSION_COMPRESS = 0,
GRPC_STREAM_COMPRESSION_DECOMPRESS,
GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS = 0,
GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS,
GRPC_STREAM_COMPRESSION_GZIP_COMPRESS,
GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS,
GRPC_STREAM_COMPRESSION_METHOD_COUNT
} grpc_stream_compression_method;
@ -43,6 +48,19 @@ typedef enum grpc_stream_compression_flush {
GRPC_STREAM_COMPRESSION_FLUSH_COUNT
} grpc_stream_compression_flush;
struct grpc_stream_compression_vtable {
bool (*compress)(grpc_stream_compression_context *ctx, grpc_slice_buffer *in,
grpc_slice_buffer *out, size_t *output_size,
size_t max_output_size, grpc_stream_compression_flush flush);
bool (*decompress)(grpc_stream_compression_context *ctx,
grpc_slice_buffer *in, grpc_slice_buffer *out,
size_t *output_size, size_t max_output_size,
bool *end_of_context);
grpc_stream_compression_context *(*context_create)(
grpc_stream_compression_method method);
void (*context_destroy)(grpc_stream_compression_context *ctx);
};
/**
* Compress bytes provided in \a in with a given context, with an optional flush
* at the end of compression. Emits at most \a max_output_size compressed bytes
@ -87,4 +105,10 @@ grpc_stream_compression_context *grpc_stream_compression_context_create(
void grpc_stream_compression_context_destroy(
grpc_stream_compression_context *ctx);
#endif /* GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_H */
/**
* Parse stream compression method based on algorithm name
*/
int grpc_stream_compression_method_parse(
grpc_slice value, bool is_compress, grpc_stream_compression_method *method);
#endif
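
Assuming the declarations above, a plausible end-to-end use of this API creates a context for a concrete method, runs the data through, and destroys the context. The helper name is ours, and ~(size_t)0 stands in for the MAX_SIZE_T macro used elsewhere in this change:

#include <stdbool.h>
#include <stddef.h>
#include "src/core/lib/compression/stream_compression.h"

static bool compress_all_gzip(grpc_slice_buffer *in, grpc_slice_buffer *out) {
  grpc_stream_compression_context *ctx =
      grpc_stream_compression_context_create(
          GRPC_STREAM_COMPRESSION_GZIP_COMPRESS);
  if (ctx == NULL) return false;
  /* FLUSH_FINISH terminates the compressed stream in this single call. */
  bool ok = grpc_stream_compress(ctx, in, out, NULL /* output_size */,
                                 ~(size_t)0 /* no output cap */,
                                 GRPC_STREAM_COMPRESSION_FLUSH_FINISH);
  grpc_stream_compression_context_destroy(ctx);
  return ok;
}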

@ -0,0 +1,228 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/lib/compression/stream_compression_gzip.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/slice/slice_internal.h"
#define OUTPUT_BLOCK_SIZE (1024)
typedef struct grpc_stream_compression_context_gzip {
grpc_stream_compression_context base;
z_stream zs;
int (*flate)(z_stream *zs, int flush);
} grpc_stream_compression_context_gzip;
static bool gzip_flate(grpc_stream_compression_context_gzip *ctx,
grpc_slice_buffer *in, grpc_slice_buffer *out,
size_t *output_size, size_t max_output_size, int flush,
bool *end_of_context) {
GPR_ASSERT(flush == 0 || flush == Z_SYNC_FLUSH || flush == Z_FINISH);
/* Full flush is not allowed when inflating. */
GPR_ASSERT(!(ctx->flate == inflate && (flush == Z_FINISH)));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
int r;
bool eoc = false;
size_t original_max_output_size = max_output_size;
while (max_output_size > 0 && (in->length > 0 || flush) && !eoc) {
size_t slice_size = max_output_size < OUTPUT_BLOCK_SIZE ? max_output_size
: OUTPUT_BLOCK_SIZE;
grpc_slice slice_out = GRPC_SLICE_MALLOC(slice_size);
ctx->zs.avail_out = (uInt)slice_size;
ctx->zs.next_out = GRPC_SLICE_START_PTR(slice_out);
while (ctx->zs.avail_out > 0 && in->length > 0 && !eoc) {
grpc_slice slice = grpc_slice_buffer_take_first(in);
ctx->zs.avail_in = (uInt)GRPC_SLICE_LENGTH(slice);
ctx->zs.next_in = GRPC_SLICE_START_PTR(slice);
r = ctx->flate(&ctx->zs, Z_NO_FLUSH);
if (r < 0 && r != Z_BUF_ERROR) {
gpr_log(GPR_ERROR, "zlib error (%d)", r);
grpc_slice_unref_internal(&exec_ctx, slice_out);
grpc_exec_ctx_finish(&exec_ctx);
return false;
} else if (r == Z_STREAM_END && ctx->flate == inflate) {
eoc = true;
}
if (ctx->zs.avail_in > 0) {
grpc_slice_buffer_undo_take_first(
in,
grpc_slice_sub(slice, GRPC_SLICE_LENGTH(slice) - ctx->zs.avail_in,
GRPC_SLICE_LENGTH(slice)));
}
grpc_slice_unref_internal(&exec_ctx, slice);
}
if (flush != 0 && ctx->zs.avail_out > 0 && !eoc) {
GPR_ASSERT(in->length == 0);
r = ctx->flate(&ctx->zs, flush);
if (flush == Z_SYNC_FLUSH) {
switch (r) {
case Z_OK:
/* Maybe flush is not complete; just made some partial progress. */
if (ctx->zs.avail_out > 0) {
flush = 0;
}
break;
case Z_BUF_ERROR:
case Z_STREAM_END:
flush = 0;
break;
default:
gpr_log(GPR_ERROR, "zlib error (%d)", r);
grpc_slice_unref_internal(&exec_ctx, slice_out);
grpc_exec_ctx_finish(&exec_ctx);
return false;
}
} else if (flush == Z_FINISH) {
switch (r) {
case Z_OK:
case Z_BUF_ERROR:
/* Wait for the next loop to assign additional output space. */
GPR_ASSERT(ctx->zs.avail_out == 0);
break;
case Z_STREAM_END:
flush = 0;
break;
default:
gpr_log(GPR_ERROR, "zlib error (%d)", r);
grpc_slice_unref_internal(&exec_ctx, slice_out);
grpc_exec_ctx_finish(&exec_ctx);
return false;
}
}
}
if (ctx->zs.avail_out == 0) {
grpc_slice_buffer_add(out, slice_out);
} else if (ctx->zs.avail_out < slice_size) {
slice_out.data.refcounted.length -= ctx->zs.avail_out;
grpc_slice_buffer_add(out, slice_out);
} else {
grpc_slice_unref_internal(&exec_ctx, slice_out);
}
max_output_size -= (slice_size - ctx->zs.avail_out);
}
grpc_exec_ctx_finish(&exec_ctx);
if (end_of_context) {
*end_of_context = eoc;
}
if (output_size) {
*output_size = original_max_output_size - max_output_size;
}
return true;
}
static bool grpc_stream_compress_gzip(grpc_stream_compression_context *ctx,
grpc_slice_buffer *in,
grpc_slice_buffer *out,
size_t *output_size,
size_t max_output_size,
grpc_stream_compression_flush flush) {
if (ctx == NULL) {
return false;
}
grpc_stream_compression_context_gzip *gzip_ctx =
(grpc_stream_compression_context_gzip *)ctx;
GPR_ASSERT(gzip_ctx->flate == deflate);
int gzip_flush;
switch (flush) {
case GRPC_STREAM_COMPRESSION_FLUSH_NONE:
gzip_flush = 0;
break;
case GRPC_STREAM_COMPRESSION_FLUSH_SYNC:
gzip_flush = Z_SYNC_FLUSH;
break;
case GRPC_STREAM_COMPRESSION_FLUSH_FINISH:
gzip_flush = Z_FINISH;
break;
default:
gzip_flush = 0;
}
return gzip_flate(gzip_ctx, in, out, output_size, max_output_size, gzip_flush,
NULL);
}
static bool grpc_stream_decompress_gzip(grpc_stream_compression_context *ctx,
grpc_slice_buffer *in,
grpc_slice_buffer *out,
size_t *output_size,
size_t max_output_size,
bool *end_of_context) {
if (ctx == NULL) {
return false;
}
grpc_stream_compression_context_gzip *gzip_ctx =
(grpc_stream_compression_context_gzip *)ctx;
GPR_ASSERT(gzip_ctx->flate == inflate);
return gzip_flate(gzip_ctx, in, out, output_size, max_output_size,
Z_SYNC_FLUSH, end_of_context);
}
static grpc_stream_compression_context *
grpc_stream_compression_context_create_gzip(
grpc_stream_compression_method method) {
GPR_ASSERT(method == GRPC_STREAM_COMPRESSION_GZIP_COMPRESS ||
method == GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS);
grpc_stream_compression_context_gzip *gzip_ctx =
(grpc_stream_compression_context_gzip *)gpr_zalloc(
sizeof(grpc_stream_compression_context_gzip));
int r;
if (gzip_ctx == NULL) {
return NULL;
}
if (method == GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS) {
r = inflateInit2(&gzip_ctx->zs, 0x1F);
gzip_ctx->flate = inflate;
} else {
r = deflateInit2(&gzip_ctx->zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 0x1F, 8,
Z_DEFAULT_STRATEGY);
gzip_ctx->flate = deflate;
}
if (r != Z_OK) {
gpr_free(gzip_ctx);
return NULL;
}
gzip_ctx->base.vtable = &grpc_stream_compression_gzip_vtable;
return (grpc_stream_compression_context *)gzip_ctx;
}
static void grpc_stream_compression_context_destroy_gzip(
grpc_stream_compression_context *ctx) {
if (ctx == NULL) {
return;
}
grpc_stream_compression_context_gzip *gzip_ctx =
(grpc_stream_compression_context_gzip *)ctx;
if (gzip_ctx->flate == inflate) {
inflateEnd(&gzip_ctx->zs);
} else {
deflateEnd(&gzip_ctx->zs);
}
gpr_free(ctx);
}
const grpc_stream_compression_vtable grpc_stream_compression_gzip_vtable = {
.compress = grpc_stream_compress_gzip,
.decompress = grpc_stream_decompress_gzip,
.context_create = grpc_stream_compression_context_create_gzip,
.context_destroy = grpc_stream_compression_context_destroy_gzip};

@ -0,0 +1,26 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_GZIP_H
#define GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_GZIP_H
#include "src/core/lib/compression/stream_compression.h"
extern const grpc_stream_compression_vtable grpc_stream_compression_gzip_vtable;
#endif

@ -0,0 +1,94 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/lib/compression/stream_compression_identity.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/slice/slice_internal.h"
#define OUTPUT_BLOCK_SIZE (1024)
/* Singleton context used for all identity streams. */
static grpc_stream_compression_context identity_ctx = {
.vtable = &grpc_stream_compression_identity_vtable};
static void grpc_stream_compression_pass_through(grpc_slice_buffer *in,
grpc_slice_buffer *out,
size_t *output_size,
size_t max_output_size) {
if (max_output_size >= in->length) {
if (output_size) {
*output_size = in->length;
}
grpc_slice_buffer_move_into(in, out);
} else {
if (output_size) {
*output_size = max_output_size;
}
grpc_slice_buffer_move_first(in, max_output_size, out);
}
}
static bool grpc_stream_compress_identity(grpc_stream_compression_context *ctx,
grpc_slice_buffer *in,
grpc_slice_buffer *out,
size_t *output_size,
size_t max_output_size,
grpc_stream_compression_flush flush) {
if (ctx == NULL) {
return false;
}
grpc_stream_compression_pass_through(in, out, output_size, max_output_size);
return true;
}
static bool grpc_stream_decompress_identity(
grpc_stream_compression_context *ctx, grpc_slice_buffer *in,
grpc_slice_buffer *out, size_t *output_size, size_t max_output_size,
bool *end_of_context) {
if (ctx == NULL) {
return false;
}
grpc_stream_compression_pass_through(in, out, output_size, max_output_size);
if (end_of_context) {
*end_of_context = false;
}
return true;
}
static grpc_stream_compression_context *
grpc_stream_compression_context_create_identity(
grpc_stream_compression_method method) {
GPR_ASSERT(method == GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS ||
method == GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS);
/* No context needed in this case. Use fake context instead. */
return (grpc_stream_compression_context *)&identity_ctx;
}
static void grpc_stream_compression_context_destroy_identity(
grpc_stream_compression_context *ctx) {
return;
}
const grpc_stream_compression_vtable grpc_stream_compression_identity_vtable = {
.compress = grpc_stream_compress_identity,
.decompress = grpc_stream_decompress_identity,
.context_create = grpc_stream_compression_context_create_identity,
.context_destroy = grpc_stream_compression_context_destroy_identity};
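
Because identity carries no per-stream state, context_create above hands every caller the same immutable static context and context_destroy is a no-op, so create/destroy can be called unconditionally without allocating. The pattern in isolation, with hypothetical types:

typedef struct ctx ctx;
struct vt { void (*destroy)(ctx *c); };
struct ctx { const struct vt *vtable; };

static void destroy_noop(ctx *c) { (void)c; } /* shared object: never freed */
static const struct vt identity_vt = {destroy_noop};
static ctx identity_singleton = {&identity_vt};

/* Every caller gets the same instance; no allocation, nothing to free. */
static ctx *identity_create(void) { return &identity_singleton; }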

@ -0,0 +1,27 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_IDENTITY_H
#define GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_IDENTITY_H
#include "src/core/lib/compression/stream_compression.h"
extern const grpc_stream_compression_vtable
grpc_stream_compression_identity_vtable;
#endif

@ -110,8 +110,6 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
"executor_wakeup_initiated",
"executor_queue_drained",
"executor_push_retries",
"executor_threads_created",
"executor_threads_used",
"server_requested_calls",
"server_slowpath_requests_queued",
};
@ -221,8 +219,6 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of times an executor queue was drained",
"Number of times we raced and were forced to retry pushing a closure to "
"the executor",
"Size of the backing thread pool for overflow gRPC Core work",
"How many executor threads actually got used",
"How many calls were requested (not necessarily received) by the server",
"How many times was the server slow path taken (indicates too few "
"outstanding requests)",
@ -240,7 +236,6 @@ const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = {
"http2_send_message_per_write",
"http2_send_trailing_metadata_per_write",
"http2_send_flowctl_per_write",
"executor_closures_per_wakeup",
"server_cqs_checked",
};
const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
@ -256,7 +251,6 @@ const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
"Number of streams whose payload was written per TCP write",
"Number of streams terminated per TCP write",
"Number of flow control updates written per TCP write",
"Number of closures executed each time an executor wakes up",
"How many completion queues were checked looking for a CQ that had "
"requested the incoming call",
};
@ -328,7 +322,6 @@ const uint8_t grpc_stats_table_7[102] = {
const int grpc_stats_table_8[9] = {0, 1, 2, 4, 7, 13, 23, 39, 64};
const uint8_t grpc_stats_table_9[9] = {0, 0, 1, 2, 2, 3, 4, 4, 5};
void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 262144);
if (value < 6) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
@ -354,7 +347,6 @@ void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int value) {
(exec_ctx), value, grpc_stats_table_0, 64));
}
void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 29) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
@ -381,7 +373,6 @@ void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int value) {
(exec_ctx), value, grpc_stats_table_2, 128));
}
void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 16777216);
if (value < 5) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
@ -407,7 +398,6 @@ void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
(exec_ctx), value, grpc_stats_table_4, 64));
}
void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
@ -433,7 +423,6 @@ void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) {
(exec_ctx), value, grpc_stats_table_6, 64));
}
void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 16777216);
if (value < 5) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
@ -459,7 +448,6 @@ void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) {
(exec_ctx), value, grpc_stats_table_4, 64));
}
void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 16777216);
if (value < 5) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
@ -486,7 +474,6 @@ void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) {
}
void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx,
int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
@ -514,7 +501,6 @@ void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx,
}
void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx,
int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 16777216);
if (value < 5) {
GRPC_STATS_INC_HISTOGRAM(
@ -542,7 +528,6 @@ void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx,
}
void grpc_stats_inc_http2_send_initial_metadata_per_write(
grpc_exec_ctx *exec_ctx, int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
@ -572,7 +557,6 @@ void grpc_stats_inc_http2_send_initial_metadata_per_write(
}
void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
@ -600,7 +584,6 @@ void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
}
void grpc_stats_inc_http2_send_trailing_metadata_per_write(
grpc_exec_ctx *exec_ctx, int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
@ -630,7 +613,6 @@ void grpc_stats_inc_http2_send_trailing_metadata_per_write(
}
void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
@ -656,36 +638,7 @@ void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_6, 64));
}
void grpc_stats_inc_executor_closures_per_wakeup(grpc_exec_ctx *exec_ctx,
int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP, value);
return;
}
union {
double dbl;
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_6[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP, bucket);
return;
}
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_6, 64));
}
void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 64);
if (value < 3) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
@ -710,17 +663,17 @@ void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) {
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_8, 8));
}
const int grpc_stats_histo_buckets[14] = {64, 128, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 8};
const int grpc_stats_histo_start[14] = {0, 64, 192, 256, 320, 384, 448,
512, 576, 640, 704, 768, 832, 896};
const int *const grpc_stats_histo_bucket_boundaries[14] = {
const int grpc_stats_histo_buckets[13] = {64, 128, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 8};
const int grpc_stats_histo_start[13] = {0, 64, 192, 256, 320, 384, 448,
512, 576, 640, 704, 768, 832};
const int *const grpc_stats_histo_bucket_boundaries[13] = {
grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_4,
grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_4,
grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_6,
grpc_stats_table_6, grpc_stats_table_6, grpc_stats_table_6,
grpc_stats_table_6, grpc_stats_table_8};
void (*const grpc_stats_inc_histogram[14])(grpc_exec_ctx *exec_ctx, int x) = {
grpc_stats_table_8};
void (*const grpc_stats_inc_histogram[13])(grpc_exec_ctx *exec_ctx, int x) = {
grpc_stats_inc_call_initial_size,
grpc_stats_inc_poll_events_returned,
grpc_stats_inc_tcp_write_size,
@ -733,5 +686,4 @@ void (*const grpc_stats_inc_histogram[14])(grpc_exec_ctx *exec_ctx, int x) = {
grpc_stats_inc_http2_send_message_per_write,
grpc_stats_inc_http2_send_trailing_metadata_per_write,
grpc_stats_inc_http2_send_flowctl_per_write,
grpc_stats_inc_executor_closures_per_wakeup,
grpc_stats_inc_server_cqs_checked};
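
The generated histogram helpers above (including the removed executor_closures_per_wakeup one) bucket a value without calling log(): for positive doubles the IEEE-754 bit pattern is monotonic, so subtracting the bit pattern of the range's lower bound and shifting right by 48 leaves an index into a small precomputed table. A hedged sketch of the core step, with hypothetical names; the generated code additionally corrects for an off-by-one at bucket boundaries:

#include <stdint.h>
#include <string.h>

static int bucket_for(double value, const uint8_t *table, uint64_t lo_bits,
                      int offset) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits)); /* well-defined type pun */
  return table[(bits - lo_bits) >> 48] + offset;
}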

@ -112,8 +112,6 @@ typedef enum {
GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED,
GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED,
GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES,
GRPC_STATS_COUNTER_EXECUTOR_THREADS_CREATED,
GRPC_STATS_COUNTER_EXECUTOR_THREADS_USED,
GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS,
GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED,
GRPC_STATS_COUNTER_COUNT
@ -133,7 +131,6 @@ typedef enum {
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP,
GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED,
GRPC_STATS_HISTOGRAM_COUNT
} grpc_stats_histograms;
@ -164,11 +161,9 @@ typedef enum {
GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_FIRST_SLOT = 768,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP_FIRST_SLOT = 832,
GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_FIRST_SLOT = 896,
GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_FIRST_SLOT = 832,
GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_BUCKETS = 8,
GRPC_STATS_HISTOGRAM_BUCKETS = 904
GRPC_STATS_HISTOGRAM_BUCKETS = 840
} grpc_stats_histogram_constants;
#define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED)
@ -421,11 +416,6 @@ typedef enum {
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED)
#define GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES)
#define GRPC_STATS_INC_EXECUTOR_THREADS_CREATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_EXECUTOR_THREADS_CREATED)
#define GRPC_STATS_INC_EXECUTOR_THREADS_USED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_THREADS_USED)
#define GRPC_STATS_INC_SERVER_REQUESTED_CALLS(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS)
#define GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx) \
@ -472,17 +462,13 @@ void grpc_stats_inc_http2_send_trailing_metadata_per_write(
grpc_stats_inc_http2_send_flowctl_per_write((exec_ctx), (int)(value))
void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
int x);
#define GRPC_STATS_INC_EXECUTOR_CLOSURES_PER_WAKEUP(exec_ctx, value) \
grpc_stats_inc_executor_closures_per_wakeup((exec_ctx), (int)(value))
void grpc_stats_inc_executor_closures_per_wakeup(grpc_exec_ctx *exec_ctx,
int x);
#define GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, value) \
grpc_stats_inc_server_cqs_checked((exec_ctx), (int)(value))
void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int x);
extern const int grpc_stats_histo_buckets[14];
extern const int grpc_stats_histo_start[14];
extern const int *const grpc_stats_histo_bucket_boundaries[14];
extern void (*const grpc_stats_inc_histogram[14])(grpc_exec_ctx *exec_ctx,
extern const int grpc_stats_histo_buckets[13];
extern const int grpc_stats_histo_start[13];
extern const int *const grpc_stats_histo_bucket_boundaries[13];
extern void (*const grpc_stats_inc_histogram[13])(grpc_exec_ctx *exec_ctx,
int x);
#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */

@ -261,14 +261,6 @@
- counter: executor_push_retries
doc: Number of times we raced and were forced to retry pushing a closure to
the executor
- counter: executor_threads_created
doc: Size of the backing thread pool for overflow gRPC Core work
- counter: executor_threads_used
doc: How many executor threads actually got used
- histogram: executor_closures_per_wakeup
max: 1024
buckets: 64
doc: Number of closures executed each time an executor wakes up
# server
- counter: server_requested_calls
doc: How many calls were requested (not necessarily received) by the server

@ -85,7 +85,5 @@ executor_scheduled_to_self_per_iteration:FLOAT,
executor_wakeup_initiated_per_iteration:FLOAT,
executor_queue_drained_per_iteration:FLOAT,
executor_push_retries_per_iteration:FLOAT,
executor_threads_created_per_iteration:FLOAT,
executor_threads_used_per_iteration:FLOAT,
server_requested_calls_per_iteration:FLOAT,
server_slowpath_requests_queued_per_iteration:FLOAT

@ -32,14 +32,16 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/support/spinlock.h"
#define MAX_DEPTH 2
typedef struct {
gpr_mu mu;
gpr_cv cv;
grpc_closure_list elems;
size_t depth;
bool shutdown;
bool queued_long_job;
gpr_thd_id id;
grpc_closure_list local_elems;
} thread_state;
static thread_state *g_thread_state;
@ -54,35 +56,32 @@ static grpc_tracer_flag executor_trace =
static void executor_thread(void *arg);
static void run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
int n = 0; // number of closures executed
static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
size_t n = 0;
while (!grpc_closure_list_empty(*list)) {
grpc_closure *c = list->head;
grpc_closure_list_init(list);
while (c != NULL) {
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error_data.error;
if (GRPC_TRACER_ON(executor_trace)) {
grpc_closure *c = list.head;
while (c != NULL) {
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error_data.error;
if (GRPC_TRACER_ON(executor_trace)) {
#ifndef NDEBUG
gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c,
c->file_created, c->line_created);
gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c,
c->file_created, c->line_created);
#else
gpr_log(GPR_DEBUG, "EXECUTOR: run %p", c);
gpr_log(GPR_DEBUG, "EXECUTOR: run %p", c);
#endif
}
}
#ifndef NDEBUG
c->scheduled = false;
c->scheduled = false;
#endif
n++;
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
c = next;
grpc_exec_ctx_flush(exec_ctx);
}
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
c = next;
n++;
grpc_exec_ctx_flush(exec_ctx);
}
GRPC_STATS_INC_EXECUTOR_CLOSURES_PER_WAKEUP(exec_ctx, n);
return n;
}
bool grpc_executor_is_threaded() {
@ -127,7 +126,7 @@ void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) {
for (size_t i = 0; i < g_max_threads; i++) {
gpr_mu_destroy(&g_thread_state[i].mu);
gpr_cv_destroy(&g_thread_state[i].cv);
run_closures(exec_ctx, &g_thread_state[i].elems);
run_closures(exec_ctx, g_thread_state[i].elems);
}
gpr_free(g_thread_state);
gpr_tls_destroy(&g_this_thread_state);
@ -151,14 +150,14 @@ static void executor_thread(void *arg) {
grpc_exec_ctx exec_ctx =
GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
GRPC_STATS_INC_EXECUTOR_THREADS_CREATED(&exec_ctx);
bool used = false;
size_t subtract_depth = 0;
for (;;) {
if (GRPC_TRACER_ON(executor_trace)) {
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step", (int)(ts - g_thread_state));
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step (sub_depth=%" PRIdPTR ")",
(int)(ts - g_thread_state), subtract_depth);
}
gpr_mu_lock(&ts->mu);
ts->depth -= subtract_depth;
while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
ts->queued_long_job = false;
gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
@ -171,13 +170,8 @@ static void executor_thread(void *arg) {
gpr_mu_unlock(&ts->mu);
break;
}
if (!used) {
GRPC_STATS_INC_EXECUTOR_THREADS_USED(&exec_ctx);
used = true;
}
GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(&exec_ctx);
GPR_ASSERT(grpc_closure_list_empty(ts->local_elems));
ts->local_elems = ts->elems;
grpc_closure_list exec = ts->elems;
ts->elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
gpr_mu_unlock(&ts->mu);
if (GRPC_TRACER_ON(executor_trace)) {
@ -185,7 +179,7 @@ static void executor_thread(void *arg) {
}
grpc_exec_ctx_invalidate_now(&exec_ctx);
run_closures(&exec_ctx, &ts->local_elems);
subtract_depth = run_closures(&exec_ctx, exec);
}
grpc_exec_ctx_finish(&exec_ctx);
}
@ -218,10 +212,6 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)];
} else {
GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx);
if (is_short) {
grpc_closure_list_append(&ts->local_elems, closure, error);
return;
}
}
thread_state *orig_ts = ts;
@ -261,7 +251,8 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
gpr_cv_signal(&ts->cv);
}
grpc_closure_list_append(&ts->elems, closure, error);
try_new_thread = ts->elems.head != closure &&
ts->depth++;
try_new_thread = ts->depth > MAX_DEPTH &&
cur_thread_count < g_max_threads && !ts->shutdown;
if (!is_short) ts->queued_long_job = true;
gpr_mu_unlock(&ts->mu);
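
The executor now sizes itself on an explicit per-thread queue depth instead of the old head-of-queue check: pushing a closure increments depth, a new thread is considered once depth exceeds MAX_DEPTH, and each worker subtracts however many closures run_closures() just executed. A compact model of that bookkeeping, with hypothetical names:

#include <stdbool.h>
#include <stddef.h>

#define MAX_DEPTH 2

typedef struct {
  size_t depth; /* closures queued but not yet drained */
  bool shutdown;
} queue_state;

/* Caller holds the queue lock, as executor_push does. */
static bool push_and_check_spawn(queue_state *q, size_t cur_threads,
                                 size_t max_threads) {
  q->depth++; /* account for the closure just appended */
  return q->depth > MAX_DEPTH && cur_threads < max_threads && !q->shutdown;
}

/* Worker side: run_closures() returned how many closures it executed. */
static void drained(queue_state *q, size_t n_run) {
  q->depth -= n_run;
}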

@ -26,12 +26,8 @@
#include <grpc/support/log.h>
const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size) {
#ifdef GPR_WIN_INET_NTOP
return inet_ntop(af, src, dst, size);
#else
/* Windows InetNtopA wants a mutable ip pointer */
return InetNtopA(af, (void *)src, dst, size);
#endif /* GPR_WIN_INET_NTOP */
}
#endif /* GRPC_WINDOWS_SOCKETUTILS */

@ -79,6 +79,125 @@ static timer_shard g_shards[NUM_SHARDS];
* Access to this is protected by g_shared_mutables.mu */
static timer_shard *g_shard_queue[NUM_SHARDS];
#ifndef NDEBUG
/* == Hash table for duplicate timer detection == */
#define NUM_HASH_BUCKETS 1009 /* Prime number close to 1000 */
static gpr_mu g_hash_mu[NUM_HASH_BUCKETS]; /* One mutex per bucket */
static grpc_timer *g_timer_ht[NUM_HASH_BUCKETS] = {NULL};
static void init_timer_ht() {
for (int i = 0; i < NUM_HASH_BUCKETS; i++) {
gpr_mu_init(&g_hash_mu[i]);
}
}
static bool is_in_ht(grpc_timer *t) {
size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS);
gpr_mu_lock(&g_hash_mu[i]);
grpc_timer *p = g_timer_ht[i];
while (p != NULL && p != t) {
p = p->hash_table_next;
}
gpr_mu_unlock(&g_hash_mu[i]);
return (p == t);
}
static void add_to_ht(grpc_timer *t) {
GPR_ASSERT(!t->hash_table_next);
size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS);
gpr_mu_lock(&g_hash_mu[i]);
grpc_timer *p = g_timer_ht[i];
while (p != NULL && p != t) {
p = p->hash_table_next;
}
if (p == t) {
grpc_closure *c = t->closure;
gpr_log(GPR_ERROR,
"** Duplicate timer (%p) being added. Closure: (%p), created at: "
"(%s:%d), scheduled at: (%s:%d) **",
t, c, c->file_created, c->line_created, c->file_initiated,
c->line_initiated);
abort();
}
/* Timer not present in the bucket. Insert at head of the list */
t->hash_table_next = g_timer_ht[i];
g_timer_ht[i] = t;
gpr_mu_unlock(&g_hash_mu[i]);
}
static void remove_from_ht(grpc_timer *t) {
size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS);
bool removed = false;
gpr_mu_lock(&g_hash_mu[i]);
if (g_timer_ht[i] == t) {
g_timer_ht[i] = g_timer_ht[i]->hash_table_next;
removed = true;
} else if (g_timer_ht[i] != NULL) {
grpc_timer *p = g_timer_ht[i];
while (p->hash_table_next != NULL && p->hash_table_next != t) {
p = p->hash_table_next;
}
if (p->hash_table_next == t) {
p->hash_table_next = t->hash_table_next;
removed = true;
}
}
gpr_mu_unlock(&g_hash_mu[i]);
if (!removed) {
grpc_closure *c = t->closure;
gpr_log(GPR_ERROR,
"** Removing timer (%p) that is not added to hash table. Closure "
"(%p), created at: (%s:%d), scheduled at: (%s:%d) **",
t, c, c->file_created, c->line_created, c->file_initiated,
c->line_initiated);
abort();
}
t->hash_table_next = NULL;
}
/* If a timer is added to a timer shard (either heap or a list), it must
 * be pending. A timer is added to the hash table only if it is added to a
 * timer shard.
 * Therefore, if timer->pending is false, it cannot be in the hash table. */
static void validate_non_pending_timer(grpc_timer *t) {
if (!t->pending && is_in_ht(t)) {
grpc_closure *c = t->closure;
gpr_log(GPR_ERROR,
"** gpr_timer_cancel() called on a non-pending timer (%p) which "
"is in the hash table. Closure: (%p), created at: (%s:%d), "
"scheduled at: (%s:%d) **",
t, c, c->file_created, c->line_created, c->file_initiated,
c->line_initiated);
abort();
}
}
#define INIT_TIMER_HASH_TABLE() init_timer_ht()
#define ADD_TO_HASH_TABLE(t) add_to_ht((t))
#define REMOVE_FROM_HASH_TABLE(t) remove_from_ht((t))
#define VALIDATE_NON_PENDING_TIMER(t) validate_non_pending_timer((t))
#else
#define INIT_TIMER_HASH_TABLE()
#define ADD_TO_HASH_TABLE(t)
#define REMOVE_FROM_HASH_TABLE(t)
#define VALIDATE_NON_PENDING_TIMER(t)
#endif
/* Thread local variable that stores the deadline of the next timer the thread
* has last-seen. This is an optimization to prevent the thread from checking
* shared_mutables.min_timer (which requires acquiring shared_mutables.mu lock,
@ -139,6 +258,8 @@ void grpc_timer_list_init(grpc_exec_ctx *exec_ctx) {
shard->min_deadline = compute_min_deadline(shard);
g_shard_queue[i] = shard;
}
INIT_TIMER_HASH_TABLE();
}
void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {
@ -202,6 +323,10 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
timer->closure = closure;
timer->deadline = deadline;
#ifndef NDEBUG
timer->hash_table_next = NULL;
#endif
if (GRPC_TRACER_ON(grpc_timer_trace)) {
gpr_log(GPR_DEBUG,
"TIMER %p: SET %" PRIdPTR " now %" PRIdPTR " call %p[%p]", timer,
@ -229,6 +354,9 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
grpc_time_averaged_stats_add_sample(&shard->stats,
(double)(deadline - now) / 1000.0);
ADD_TO_HASH_TABLE(timer);
if (deadline < shard->queue_deadline_cap) {
is_first_timer = grpc_timer_heap_add(&shard->heap, timer);
} else {
@ -290,7 +418,10 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
gpr_log(GPR_DEBUG, "TIMER %p: CANCEL pending=%s", timer,
timer->pending ? "true" : "false");
}
if (timer->pending) {
REMOVE_FROM_HASH_TABLE(timer);
GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
timer->pending = false;
if (timer->heap_index == INVALID_HEAP_INDEX) {
@ -298,6 +429,8 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
} else {
grpc_timer_heap_remove(&shard->heap, timer);
}
} else {
VALIDATE_NON_PENDING_TIMER(timer);
}
gpr_mu_unlock(&shard->mu);
}
@ -382,6 +515,7 @@ static size_t pop_timers(grpc_exec_ctx *exec_ctx, timer_shard *shard,
grpc_timer *timer;
gpr_mu_lock(&shard->mu);
while ((timer = pop_one(shard, now))) {
REMOVE_FROM_HASH_TABLE(timer);
GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_REF(error));
n++;
}

@ -29,6 +29,9 @@ struct grpc_timer {
struct grpc_timer *next;
struct grpc_timer *prev;
grpc_closure *closure;
#ifndef NDEBUG
struct grpc_timer *hash_table_next;
#endif
};
#endif /* GRPC_CORE_LIB_IOMGR_TIMER_GENERIC_H */
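The debug-only table above is an intrusive pointer set with one mutex per bucket, so concurrent grpc_timer_init/grpc_timer_cancel calls only contend when they hash to the same bucket. A minimal standalone sketch of the same shape, with pthreads standing in for gpr_mu and a crude pointer hash standing in for GPR_HASH_POINTER:
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#define NUM_BUCKETS 1009 /* prime, as in the patch */
typedef struct node {
  struct node *next; /* plays the role of timer->hash_table_next */
} node;
static pthread_mutex_t g_mu[NUM_BUCKETS];
static node *g_ht[NUM_BUCKETS];
static void ht_init(void) {
  for (int i = 0; i < NUM_BUCKETS; i++) pthread_mutex_init(&g_mu[i], NULL);
}
static size_t bucket_of(const node *n) {
  return ((uintptr_t)n >> 4) % NUM_BUCKETS; /* illustrative pointer hash */
}
/* Insert n; returns false if it is already present -- the duplicate that
 * add_to_ht() turns into a loud abort(). */
static bool ht_add(node *n) {
  size_t b = bucket_of(n);
  pthread_mutex_lock(&g_mu[b]);
  for (node *p = g_ht[b]; p != NULL; p = p->next) {
    if (p == n) {
      pthread_mutex_unlock(&g_mu[b]);
      return false;
    }
  }
  n->next = g_ht[b];
  g_ht[b] = n;
  pthread_mutex_unlock(&g_mu[b]);
  return true;
}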

@ -87,6 +87,7 @@ static bool composite_call_get_request_metadata(
ctx->on_request_metadata = on_request_metadata;
GRPC_CLOSURE_INIT(&ctx->internal_on_request_metadata,
composite_call_metadata_cb, ctx, grpc_schedule_on_exec_ctx);
bool synchronous = true;
while (ctx->creds_index < ctx->composite_creds->inner.num_creds) {
grpc_call_credentials *inner_creds =
ctx->composite_creds->inner.creds_array[ctx->creds_index++];
@ -95,19 +96,12 @@ static bool composite_call_get_request_metadata(
ctx->md_array, &ctx->internal_on_request_metadata, error)) {
if (*error != GRPC_ERROR_NONE) break;
} else {
synchronous = false; // Async return.
break;
}
}
// If we got through all creds synchronously or we got a synchronous
// error on one of them, return synchronously.
if (ctx->creds_index == ctx->composite_creds->inner.num_creds ||
*error != GRPC_ERROR_NONE) {
gpr_free(ctx);
return true;
}
// At least one inner cred is returning asynchronously, so we'll
// return asynchronously as well.
return false;
if (synchronous) gpr_free(ctx);
return synchronous;
}
static void composite_call_cancel_get_request_metadata(

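The rewrite above reduces to a simple return-value convention; a hypothetical standalone model (names are illustrative, not the credentials API):
#include <stdbool.h>
#include <stddef.h>
/* Each sub-operation returns true if it completed synchronously. The
 * composite is synchronous only if every sub-operation it reached was;
 * the first asynchronous one finishes the chain from its callback. */
typedef bool (*sub_op)(void *arg);
static bool run_composite(sub_op *ops, size_t n, void *arg) {
  bool synchronous = true;
  for (size_t i = 0; i < n; i++) {
    if (!ops[i](arg)) {    /* this sub-op will finish asynchronously */
      synchronous = false; /* so the composite does too */
      break;               /* its callback resumes the remaining ops */
    }
  }
  return synchronous;
}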
@ -31,6 +31,9 @@
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/validate_metadata.h"
grpc_tracer_flag grpc_plugin_credentials_trace =
GRPC_TRACER_INITIALIZER(false, "plugin_credentials");
static void plugin_destruct(grpc_exec_ctx *exec_ctx,
grpc_call_credentials *creds) {
grpc_plugin_credentials *c = (grpc_plugin_credentials *)creds;
@ -53,6 +56,62 @@ static void pending_request_remove_locked(
}
}
// Checks if the request has been cancelled.
// If not, removes it from the pending list, so that it cannot be
// cancelled out from under us.
// When this returns, r->cancelled indicates whether the request was
// cancelled before completion.
static void pending_request_complete(
grpc_exec_ctx *exec_ctx, grpc_plugin_credentials_pending_request *r) {
gpr_mu_lock(&r->creds->mu);
if (!r->cancelled) pending_request_remove_locked(r->creds, r);
gpr_mu_unlock(&r->creds->mu);
// Ref to credentials not needed anymore.
grpc_call_credentials_unref(exec_ctx, &r->creds->base);
}
static grpc_error *process_plugin_result(
grpc_exec_ctx *exec_ctx, grpc_plugin_credentials_pending_request *r,
const grpc_metadata *md, size_t num_md, grpc_status_code status,
const char *error_details) {
grpc_error *error = GRPC_ERROR_NONE;
if (status != GRPC_STATUS_OK) {
char *msg;
gpr_asprintf(&msg, "Getting metadata from plugin failed with error: %s",
error_details);
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
} else {
bool seen_illegal_header = false;
for (size_t i = 0; i < num_md; ++i) {
if (!GRPC_LOG_IF_ERROR("validate_metadata_from_plugin",
grpc_validate_header_key_is_legal(md[i].key))) {
seen_illegal_header = true;
break;
} else if (!grpc_is_binary_header(md[i].key) &&
!GRPC_LOG_IF_ERROR(
"validate_metadata_from_plugin",
grpc_validate_header_nonbin_value_is_legal(md[i].value))) {
gpr_log(GPR_ERROR, "Plugin added invalid metadata value.");
seen_illegal_header = true;
break;
}
}
if (seen_illegal_header) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal metadata");
} else {
for (size_t i = 0; i < num_md; ++i) {
grpc_mdelem mdelem = grpc_mdelem_from_slices(
exec_ctx, grpc_slice_ref_internal(md[i].key),
grpc_slice_ref_internal(md[i].value));
grpc_credentials_mdelem_array_add(r->md_array, mdelem);
GRPC_MDELEM_UNREF(exec_ctx, mdelem);
}
}
}
return error;
}
static void plugin_md_request_metadata_ready(void *request,
const grpc_metadata *md,
size_t num_md,
@ -64,54 +123,24 @@ static void plugin_md_request_metadata_ready(void *request,
NULL, NULL);
grpc_plugin_credentials_pending_request *r =
(grpc_plugin_credentials_pending_request *)request;
// Check if the request has been cancelled.
// If not, remove it from the pending list, so that it cannot be
// cancelled out from under us.
gpr_mu_lock(&r->creds->mu);
if (!r->cancelled) pending_request_remove_locked(r->creds, r);
gpr_mu_unlock(&r->creds->mu);
grpc_call_credentials_unref(&exec_ctx, &r->creds->base);
if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) {
gpr_log(GPR_INFO,
"plugin_credentials[%p]: request %p: plugin returned "
"asynchronously",
r->creds, r);
}
// Remove request from pending list if not previously cancelled.
pending_request_complete(&exec_ctx, r);
// If it has not been cancelled, process it.
if (!r->cancelled) {
if (status != GRPC_STATUS_OK) {
char *msg;
gpr_asprintf(&msg, "Getting metadata from plugin failed with error: %s",
error_details);
GRPC_CLOSURE_SCHED(&exec_ctx, r->on_request_metadata,
GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg));
gpr_free(msg);
} else {
bool seen_illegal_header = false;
for (size_t i = 0; i < num_md; ++i) {
if (!GRPC_LOG_IF_ERROR("validate_metadata_from_plugin",
grpc_validate_header_key_is_legal(md[i].key))) {
seen_illegal_header = true;
break;
} else if (!grpc_is_binary_header(md[i].key) &&
!GRPC_LOG_IF_ERROR(
"validate_metadata_from_plugin",
grpc_validate_header_nonbin_value_is_legal(
md[i].value))) {
gpr_log(GPR_ERROR, "Plugin added invalid metadata value.");
seen_illegal_header = true;
break;
}
}
if (seen_illegal_header) {
GRPC_CLOSURE_SCHED(
&exec_ctx, r->on_request_metadata,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal metadata"));
} else {
for (size_t i = 0; i < num_md; ++i) {
grpc_mdelem mdelem = grpc_mdelem_from_slices(
&exec_ctx, grpc_slice_ref_internal(md[i].key),
grpc_slice_ref_internal(md[i].value));
grpc_credentials_mdelem_array_add(r->md_array, mdelem);
GRPC_MDELEM_UNREF(&exec_ctx, mdelem);
}
GRPC_CLOSURE_SCHED(&exec_ctx, r->on_request_metadata, GRPC_ERROR_NONE);
}
}
grpc_error *error =
process_plugin_result(&exec_ctx, r, md, num_md, status, error_details);
GRPC_CLOSURE_SCHED(&exec_ctx, r->on_request_metadata, error);
} else if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) {
gpr_log(GPR_INFO,
"plugin_credentials[%p]: request %p: plugin was previously "
"cancelled",
r->creds, r);
}
gpr_free(r);
grpc_exec_ctx_finish(&exec_ctx);
@ -125,6 +154,7 @@ static bool plugin_get_request_metadata(grpc_exec_ctx *exec_ctx,
grpc_closure *on_request_metadata,
grpc_error **error) {
grpc_plugin_credentials *c = (grpc_plugin_credentials *)creds;
bool retval = true; // Synchronous return.
if (c->plugin.get_metadata != NULL) {
// Create pending_request object.
grpc_plugin_credentials_pending_request *pending_request =
@ -142,12 +172,60 @@ static bool plugin_get_request_metadata(grpc_exec_ctx *exec_ctx,
c->pending_requests = pending_request;
gpr_mu_unlock(&c->mu);
// Invoke the plugin. The callback holds a ref to us.
if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) {
gpr_log(GPR_INFO, "plugin_credentials[%p]: request %p: invoking plugin",
c, pending_request);
}
grpc_call_credentials_ref(creds);
c->plugin.get_metadata(c->plugin.state, context,
plugin_md_request_metadata_ready, pending_request);
return false;
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX];
size_t num_creds_md = 0;
grpc_status_code status = GRPC_STATUS_OK;
const char *error_details = NULL;
if (!c->plugin.get_metadata(c->plugin.state, context,
plugin_md_request_metadata_ready,
pending_request, creds_md, &num_creds_md,
&status, &error_details)) {
if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) {
gpr_log(GPR_INFO,
"plugin_credentials[%p]: request %p: plugin will return "
"asynchronously",
c, pending_request);
}
return false; // Asynchronous return.
}
// Returned synchronously.
// Remove request from pending list if not previously cancelled.
pending_request_complete(exec_ctx, pending_request);
// If the request was cancelled, the error will have been returned
// asynchronously by plugin_cancel_get_request_metadata(), so return
// false. Otherwise, process the result.
if (pending_request->cancelled) {
if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) {
gpr_log(GPR_INFO,
"plugin_credentials[%p]: request %p was cancelled, error "
"will be returned asynchronously",
c, pending_request);
}
retval = false;
} else {
if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) {
gpr_log(GPR_INFO,
"plugin_credentials[%p]: request %p: plugin returned "
"synchronously",
c, pending_request);
}
*error = process_plugin_result(exec_ctx, pending_request, creds_md,
num_creds_md, status, error_details);
}
// Clean up.
for (size_t i = 0; i < num_creds_md; ++i) {
grpc_slice_unref_internal(exec_ctx, creds_md[i].key);
grpc_slice_unref_internal(exec_ctx, creds_md[i].value);
}
gpr_free((void *)error_details);
gpr_free(pending_request);
}
return true;
return retval;
}
static void plugin_cancel_get_request_metadata(
@ -159,6 +237,10 @@ static void plugin_cancel_get_request_metadata(
c->pending_requests;
pending_request != NULL; pending_request = pending_request->next) {
if (pending_request->md_array == md_array) {
if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) {
gpr_log(GPR_INFO, "plugin_credentials[%p]: cancelling request %p", c,
pending_request);
}
pending_request->cancelled = true;
GRPC_CLOSURE_SCHED(exec_ctx, pending_request->on_request_metadata,
GRPC_ERROR_REF(error));
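To illustrate the contract consumed above: a plugin that always produces metadata synchronously fills the caller-provided array and returns nonzero, never invoking cb. The signature and GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX come from grpc_security.h as changed in this commit; the state type and token below are assumptions for the sketch:
#include <stddef.h>
#include <grpc/grpc_security.h>
#include <grpc/slice.h>
#include <grpc/status.h>
typedef struct {
  const char *token; /* hypothetical plugin state */
} my_plugin_state;
static int my_plugin_get_metadata(
    void *state, grpc_auth_metadata_context context,
    grpc_credentials_plugin_metadata_cb cb, void *user_data,
    grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
    size_t *num_creds_md, grpc_status_code *status,
    const char **error_details) {
  my_plugin_state *s = (my_plugin_state *)state;
  (void)context;
  (void)cb;        /* unused: we never take the asynchronous path */
  (void)user_data; /* unused: only needed when invoking cb later */
  /* The caller unrefs these slices once copied (see the cleanup loop in
   * plugin_get_request_metadata above). */
  creds_md[0].key = grpc_slice_from_static_string("authorization");
  creds_md[0].value = grpc_slice_from_copied_string(s->token);
  creds_md[0].flags = 0;
  *num_creds_md = 1;
  *status = GRPC_STATUS_OK;
  *error_details = NULL;
  return 1; /* nonzero: metadata produced synchronously */
}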

@ -21,6 +21,8 @@
#include "src/core/lib/security/credentials/credentials.h"
extern grpc_tracer_flag grpc_plugin_credentials_trace;
struct grpc_plugin_credentials;
typedef struct grpc_plugin_credentials_pending_request {

@ -25,6 +25,7 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/credentials/plugin/plugin_credentials.h"
#include "src/core/lib/security/transport/auth_filters.h"
#include "src/core/lib/security/transport/secure_endpoint.h"
#include "src/core/lib/security/transport/security_connector.h"
@ -84,4 +85,7 @@ void grpc_register_security_filters(void) {
maybe_prepend_server_auth_filter, NULL);
}
void grpc_security_init() { grpc_security_register_handshaker_factories(); }
void grpc_security_init() {
grpc_security_register_handshaker_factories();
grpc_register_tracer(&grpc_plugin_credentials_trace);
}
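Once registered here, the new per-request logging in plugin_credentials.c can be switched on at runtime through the existing GRPC_TRACE environment variable (GRPC_TRACE=plugin_credentials), which is presumably why doc/environment_variables.md gains a line in this change.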

@ -47,4 +47,14 @@ std::unique_ptr<GenericClientAsyncReaderWriter> GenericStub::PrepareCall(
return CallInternal(channel_.get(), context, method, cq, false, nullptr);
}
// set up a unary call to a named method
std::unique_ptr<GenericClientAsyncResponseReader> GenericStub::PrepareUnaryCall(
ClientContext* context, const grpc::string& method,
const ByteBuffer& request, CompletionQueue* cq) {
return std::unique_ptr<GenericClientAsyncResponseReader>(
GenericClientAsyncResponseReader::Create(
channel_.get(), cq, RpcMethod(method.c_str(), RpcMethod::NORMAL_RPC),
context, request, false));
}
} // namespace grpc

@ -21,6 +21,7 @@
#include <grpc++/impl/grpc_library.h>
#include <grpc++/support/channel_arguments.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "src/cpp/client/create_channel_internal.h"
#include "src/cpp/common/secure_auth_context.h"
@ -150,6 +151,18 @@ std::shared_ptr<ChannelCredentials> CompositeChannelCredentials(
return nullptr;
}
std::shared_ptr<CallCredentials> CompositeCallCredentials(
const std::shared_ptr<CallCredentials>& creds1,
const std::shared_ptr<CallCredentials>& creds2) {
SecureCallCredentials* s_creds1 = creds1->AsSecureCredentials();
SecureCallCredentials* s_creds2 = creds2->AsSecureCredentials();
if (s_creds1 != nullptr && s_creds2 != nullptr) {
return WrapCallCredentials(grpc_composite_call_credentials_create(
s_creds1->GetRawCreds(), s_creds2->GetRawCreds(), nullptr));
}
return nullptr;
}
void MetadataCredentialsPluginWrapper::Destroy(void* wrapper) {
if (wrapper == nullptr) return;
MetadataCredentialsPluginWrapper* w =
@ -157,28 +170,50 @@ void MetadataCredentialsPluginWrapper::Destroy(void* wrapper) {
delete w;
}
void MetadataCredentialsPluginWrapper::GetMetadata(
int MetadataCredentialsPluginWrapper::GetMetadata(
void* wrapper, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void* user_data) {
grpc_credentials_plugin_metadata_cb cb, void* user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t* num_creds_md, grpc_status_code* status,
const char** error_details) {
GPR_ASSERT(wrapper);
MetadataCredentialsPluginWrapper* w =
reinterpret_cast<MetadataCredentialsPluginWrapper*>(wrapper);
if (!w->plugin_) {
cb(user_data, NULL, 0, GRPC_STATUS_OK, NULL);
return;
*num_creds_md = 0;
*status = GRPC_STATUS_OK;
*error_details = nullptr;
return true;
}
if (w->plugin_->IsBlocking()) {
// Asynchronous return.
w->thread_pool_->Add(
std::bind(&MetadataCredentialsPluginWrapper::InvokePlugin, w, context,
cb, user_data));
cb, user_data, nullptr, nullptr, nullptr, nullptr));
return 0;
} else {
w->InvokePlugin(context, cb, user_data);
// Synchronous return.
w->InvokePlugin(context, cb, user_data, creds_md, num_creds_md, status,
error_details);
return 1;
}
}
namespace {
void UnrefMetadata(const std::vector<grpc_metadata>& md) {
for (auto it = md.begin(); it != md.end(); ++it) {
grpc_slice_unref(it->key);
grpc_slice_unref(it->value);
}
}
} // namespace
void MetadataCredentialsPluginWrapper::InvokePlugin(
grpc_auth_metadata_context context, grpc_credentials_plugin_metadata_cb cb,
void* user_data) {
void* user_data, grpc_metadata creds_md[4], size_t* num_creds_md,
grpc_status_code* status_code, const char** error_details) {
std::multimap<grpc::string, grpc::string> metadata;
// const_cast is safe since the SecureAuthContext does not take ownership and
@ -196,12 +231,31 @@ void MetadataCredentialsPluginWrapper::InvokePlugin(
md_entry.flags = 0;
md.push_back(md_entry);
}
cb(user_data, md.empty() ? nullptr : &md[0], md.size(),
static_cast<grpc_status_code>(status.error_code()),
status.error_message().c_str());
for (auto it = md.begin(); it != md.end(); ++it) {
grpc_slice_unref(it->key);
grpc_slice_unref(it->value);
if (creds_md != nullptr) {
// Synchronous return.
if (md.size() > GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX) {
*num_creds_md = 0;
*status_code = GRPC_STATUS_INTERNAL;
*error_details = gpr_strdup(
"blocking plugin credentials returned too many metadata keys");
UnrefMetadata(md);
} else {
for (const auto& elem : md) {
creds_md[*num_creds_md].key = elem.key;
creds_md[*num_creds_md].value = elem.value;
creds_md[*num_creds_md].flags = elem.flags;
++(*num_creds_md);
}
*status_code = static_cast<grpc_status_code>(status.error_code());
*error_details =
status.ok() ? nullptr : gpr_strdup(status.error_message().c_str());
}
} else {
// Asynchronous return.
cb(user_data, md.empty() ? nullptr : &md[0], md.size(),
static_cast<grpc_status_code>(status.error_code()),
status.error_message().c_str());
UnrefMetadata(md);
}
}

@ -58,16 +58,23 @@ class SecureCallCredentials final : public CallCredentials {
class MetadataCredentialsPluginWrapper final : private GrpcLibraryCodegen {
public:
static void Destroy(void* wrapper);
static void GetMetadata(void* wrapper, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb,
void* user_data);
static int GetMetadata(
void* wrapper, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void* user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t* num_creds_md, grpc_status_code* status,
const char** error_details);
explicit MetadataCredentialsPluginWrapper(
std::unique_ptr<MetadataCredentialsPlugin> plugin);
private:
void InvokePlugin(grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void* user_data);
void InvokePlugin(
grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void* user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t* num_creds_md, grpc_status_code* status,
const char** error_details);
std::unique_ptr<ThreadPoolInterface> thread_pool_;
std::unique_ptr<MetadataCredentialsPlugin> plugin_;
};

@ -86,6 +86,10 @@ void ChannelArguments::SetCompressionAlgorithm(
SetInt(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM, algorithm);
}
void ChannelArguments::SetGrpclbFallbackTimeout(int fallback_timeout) {
SetInt(GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS, fallback_timeout);
}
void ChannelArguments::SetSocketMutator(grpc_socket_mutator* mutator) {
if (!mutator) {
return;

@ -61,12 +61,9 @@ namespace Grpc.Core.Internal
try
{
var context = new AuthInterceptorContext(Marshal.PtrToStringAnsi(serviceUrlPtr),
Marshal.PtrToStringAnsi(methodNamePtr));
// Don't await, we are in a native callback and need to return.
#pragma warning disable 4014
GetMetadataAsync(context, callbackPtr, userDataPtr);
#pragma warning restore 4014
var context = new AuthInterceptorContext(Marshal.PtrToStringAnsi(serviceUrlPtr), Marshal.PtrToStringAnsi(methodNamePtr));
// Guarantee that credentials_notify_from_plugin is invoked asynchronously, as required by the c-core API.
ThreadPool.QueueUserWorkItem(async (stateInfo) => await GetMetadataAsync(context, callbackPtr, userDataPtr));
}
catch (Exception e)
{

@ -89,6 +89,54 @@ namespace Grpc.IntegrationTesting
client.UnaryCall(new SimpleRequest { }, new CallOptions(credentials: callCredentials));
}
[Test]
public async Task MetadataCredentials_Composed()
{
var first = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => {
// Attempt to exercise the case where async callback is inlineable/synchronously-runnable.
metadata.Add("first_authorization", "FIRST_SECRET_TOKEN");
return TaskUtils.CompletedTask;
}));
var second = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => {
metadata.Add("second_authorization", "SECOND_SECRET_TOKEN");
return TaskUtils.CompletedTask;
}));
var third = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => {
metadata.Add("third_authorization", "THIRD_SECRET_TOKEN");
return TaskUtils.CompletedTask;
}));
var channelCredentials = ChannelCredentials.Create(TestCredentials.CreateSslCredentials(),
CallCredentials.Compose(first, second, third));
channel = new Channel(Host, server.Ports.Single().BoundPort, channelCredentials, options);
var client = new TestService.TestServiceClient(channel);
var call = client.StreamingOutputCall(new StreamingOutputCallRequest { });
Assert.IsTrue(await call.ResponseStream.MoveNext());
Assert.IsFalse(await call.ResponseStream.MoveNext());
}
[Test]
public async Task MetadataCredentials_ComposedPerCall()
{
channel = new Channel(Host, server.Ports.Single().BoundPort, TestCredentials.CreateSslCredentials(), options);
var client = new TestService.TestServiceClient(channel);
var first = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => {
metadata.Add("first_authorization", "FIRST_SECRET_TOKEN");
return TaskUtils.CompletedTask;
}));
var second = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => {
metadata.Add("second_authorization", "SECOND_SECRET_TOKEN");
return TaskUtils.CompletedTask;
}));
var third = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => {
metadata.Add("third_authorization", "THIRD_SECRET_TOKEN");
return TaskUtils.CompletedTask;
}));
var call = client.StreamingOutputCall(new StreamingOutputCallRequest{ },
new CallOptions(credentials: CallCredentials.Compose(first, second, third)));
Assert.IsTrue(await call.ResponseStream.MoveNext());
Assert.IsFalse(await call.ResponseStream.MoveNext());
}
[Test]
public void MetadataCredentials_InterceptorLeavesMetadataEmpty()
{
@ -125,6 +173,17 @@ namespace Grpc.IntegrationTesting
Assert.AreEqual("SECRET_TOKEN", authToken);
return Task.FromResult(new SimpleResponse());
}
public override async Task StreamingOutputCall(StreamingOutputCallRequest request, IServerStreamWriter<StreamingOutputCallResponse> responseStream, ServerCallContext context)
{
var first = context.RequestHeaders.First((entry) => entry.Key == "first_authorization").Value;
Assert.AreEqual("FIRST_SECRET_TOKEN", first);
var second = context.RequestHeaders.First((entry) => entry.Key == "second_authorization").Value;
Assert.AreEqual("SECOND_SECRET_TOKEN", second);
var third = context.RequestHeaders.First((entry) => entry.Key == "third_authorization").Value;
Assert.AreEqual("THIRD_SECRET_TOKEN", third);
await responseStream.WriteAsync(new StreamingOutputCallResponse());
}
}
}
}

@ -1023,13 +1023,17 @@ typedef void(GPR_CALLTYPE *grpcsharp_metadata_interceptor_func)(
grpc_credentials_plugin_metadata_cb cb, void *user_data,
int32_t is_destroy);
static void grpcsharp_get_metadata_handler(
static int grpcsharp_get_metadata_handler(
void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void *user_data) {
grpc_credentials_plugin_metadata_cb cb, void *user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t *num_creds_md, grpc_status_code *status,
const char **error_details) {
grpcsharp_metadata_interceptor_func interceptor =
(grpcsharp_metadata_interceptor_func)(intptr_t)state;
interceptor(state, context.service_url, context.method_name, cb, user_data,
0);
return 0; /* Asynchronous return. */
}
static void grpcsharp_metadata_credentials_destroy_handler(void *state) {

@ -238,9 +238,12 @@ NAUV_WORK_CB(SendPluginCallback) {
}
}
void plugin_get_metadata(void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb,
void *user_data) {
int plugin_get_metadata(
void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void *user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t *num_creds_md, grpc_status_code *status,
const char **error_details) {
plugin_state *p_state = reinterpret_cast<plugin_state *>(state);
plugin_callback_data *data = new plugin_callback_data;
data->service_url = context.service_url;
@ -252,6 +255,7 @@ void plugin_get_metadata(void *state, grpc_auth_metadata_context context,
uv_mutex_unlock(&p_state->plugin_mutex);
uv_async_send(&p_state->plugin_async);
return 0; // Async processing.
}
void plugin_uv_close_cb(uv_handle_t *handle) {

@ -75,9 +75,11 @@ typedef struct plugin_state {
uv_async_t plugin_async;
} plugin_state;
void plugin_get_metadata(void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb,
void *user_data);
int plugin_get_metadata(
void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void *user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t *num_creds_md, grpc_status_code *status, const char **error_details);
void plugin_destroy_state(void *state);

@ -35,6 +35,7 @@
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include <grpc/support/string_util.h>
zend_class_entry *grpc_ce_call_credentials;
#if PHP_MAJOR_VERSION >= 7
@ -143,9 +144,12 @@ PHP_METHOD(CallCredentials, createFromPlugin) {
}
/* Callback function for plugin creds API */
void plugin_get_metadata(void *ptr, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb,
void *user_data) {
int plugin_get_metadata(
void *ptr, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void *user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t *num_creds_md, grpc_status_code *status,
const char **error_details) {
TSRMLS_FETCH();
plugin_state *state = (plugin_state *)ptr;
@ -175,15 +179,19 @@ void plugin_get_metadata(void *ptr, grpc_auth_metadata_context context,
/* call the user callback function */
zend_call_function(state->fci, state->fci_cache TSRMLS_CC);
grpc_status_code code = GRPC_STATUS_OK;
*num_creds_md = 0;
*status = GRPC_STATUS_OK;
*error_details = NULL;
grpc_metadata_array metadata;
bool cleanup = true;
if (retval == NULL || Z_TYPE_P(retval) != IS_ARRAY) {
cleanup = false;
code = GRPC_STATUS_INVALID_ARGUMENT;
} else if (!create_metadata_array(retval, &metadata)) {
code = GRPC_STATUS_INVALID_ARGUMENT;
*status = GRPC_STATUS_INVALID_ARGUMENT;
return true; // Synchronous return.
}
if (!create_metadata_array(retval, &metadata)) {
*status = GRPC_STATUS_INVALID_ARGUMENT;
return true; // Synchronous return.
}
if (retval != NULL) {
@ -197,14 +205,24 @@ void plugin_get_metadata(void *ptr, grpc_auth_metadata_context context,
#endif
}
/* Pass control back to core */
cb(user_data, metadata.metadata, metadata.count, code, NULL);
if (cleanup) {
for (int i = 0; i < metadata.count; i++) {
if (metadata.count > GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX) {
*status = GRPC_STATUS_INTERNAL;
*error_details = gpr_strdup(
"PHP plugin credentials returned too many metadata entries");
for (size_t i = 0; i < metadata.count; i++) {
// TODO(stanleycheung): Why don't we need to unref the key here?
grpc_slice_unref(metadata.metadata[i].value);
}
grpc_metadata_array_destroy(&metadata);
} else {
// Return data to core.
*num_creds_md = metadata.count;
for (size_t i = 0; i < metadata.count; ++i) {
creds_md[i] = metadata.metadata[i];
}
}
grpc_metadata_array_destroy(&metadata);
return true; // Synchronous return.
}
/* Cleanup function for plugin creds API */

@ -65,9 +65,12 @@ typedef struct plugin_state {
} plugin_state;
/* Callback function for plugin creds API */
void plugin_get_metadata(void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb,
void *user_data);
int plugin_get_metadata(
void *ptr, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void *user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t *num_creds_md, grpc_status_code *status,
const char **error_details);
/* Cleanup function for plugin creds API */
void plugin_destroy_state(void *ptr);

@ -41,7 +41,8 @@ cdef class CredentialsMetadataPlugin:
cdef object plugin_callback
cdef bytes plugin_name
cdef grpc_metadata_credentials_plugin make_c_plugin(self)
cdef grpc_metadata_credentials_plugin _c_plugin(CredentialsMetadataPlugin plugin)
cdef class AuthMetadataContext:
@ -49,8 +50,11 @@ cdef class AuthMetadataContext:
cdef grpc_auth_metadata_context context
cdef void plugin_get_metadata(
cdef int plugin_get_metadata(
void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void *user_data) with gil
grpc_credentials_plugin_metadata_cb cb, void *user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t *num_creds_md, grpc_status_code *status,
const char **error_details) with gil
cdef void plugin_destroy_c_plugin_state(void *state) with gil

@ -14,6 +14,7 @@
cimport cpython
import threading
import traceback
@ -89,20 +90,20 @@ cdef class CredentialsMetadataPlugin:
self.plugin_callback = plugin_callback
self.plugin_name = name
@staticmethod
cdef grpc_metadata_credentials_plugin make_c_plugin(self):
cdef grpc_metadata_credentials_plugin result
result.get_metadata = plugin_get_metadata
result.destroy = plugin_destroy_c_plugin_state
result.state = <void *>self
result.type = self.plugin_name
cpython.Py_INCREF(self)
return result
def __dealloc__(self):
grpc_shutdown()
cdef grpc_metadata_credentials_plugin _c_plugin(CredentialsMetadataPlugin plugin):
cdef grpc_metadata_credentials_plugin c_plugin
c_plugin.get_metadata = plugin_get_metadata
c_plugin.destroy = plugin_destroy_c_plugin_state
c_plugin.state = <void *>plugin
c_plugin.type = plugin.plugin_name
cpython.Py_INCREF(plugin)
return c_plugin
cdef class AuthMetadataContext:
def __cinit__(self):
@ -122,9 +123,12 @@ cdef class AuthMetadataContext:
grpc_shutdown()
cdef void plugin_get_metadata(
cdef int plugin_get_metadata(
void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void *user_data) with gil:
grpc_credentials_plugin_metadata_cb cb, void *user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t *num_creds_md, grpc_status_code *status,
const char **error_details) with gil:
called_flag = [False]
def python_callback(
Metadata metadata, grpc_status_code status,
@ -134,12 +138,15 @@ cdef void plugin_get_metadata(
cdef CredentialsMetadataPlugin self = <CredentialsMetadataPlugin>state
cdef AuthMetadataContext cy_context = AuthMetadataContext()
cy_context.context = context
try:
self.plugin_callback(cy_context, python_callback)
except Exception as error:
if not called_flag[0]:
cb(user_data, NULL, 0, StatusCode.unknown,
traceback.format_exc().encode())
def async_callback():
try:
self.plugin_callback(cy_context, python_callback)
except Exception as error:
if not called_flag[0]:
cb(user_data, NULL, 0, StatusCode.unknown,
traceback.format_exc().encode())
threading.Thread(group=None, target=async_callback).start()
return 0 # Asynchronous return
cdef void plugin_destroy_c_plugin_state(void *state) with gil:
cpython.Py_DECREF(<CredentialsMetadataPlugin>state)
@ -239,7 +246,7 @@ def call_credentials_google_iam(authorization_token, authority_selector):
def call_credentials_metadata_plugin(CredentialsMetadataPlugin plugin):
cdef CallCredentials credentials = CallCredentials()
cdef grpc_metadata_credentials_plugin c_plugin = plugin.make_c_plugin()
cdef grpc_metadata_credentials_plugin c_plugin = _c_plugin(plugin)
with nogil:
credentials.c_credentials = (
grpc_metadata_credentials_create_from_plugin(c_plugin, NULL))

@ -375,6 +375,10 @@ cdef extern from "grpc/grpc.h":
cdef extern from "grpc/grpc_security.h":
# Declare this as an enum, this is the only way to make it a const in
# cython
enum: GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX
ctypedef enum grpc_ssl_roots_override_result:
GRPC_SSL_ROOTS_OVERRIDE_OK
GRPC_SSL_ROOTS_OVERRIDE_FAILED_PERMANENTLY
@ -462,9 +466,12 @@ cdef extern from "grpc/grpc_security.h":
grpc_status_code status, const char *error_details)
ctypedef struct grpc_metadata_credentials_plugin:
void (*get_metadata)(
int (*get_metadata)(
void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void *user_data)
grpc_credentials_plugin_metadata_cb cb, void *user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t *num_creds_md, grpc_status_code *status,
const char **error_details)
void (*destroy)(void *state)
void *state
const char *type

@ -171,14 +171,6 @@ cdef class Timespec:
gpr_convert_clock_type(self.c_time, GPR_CLOCK_REALTIME))
return <double>real_time.seconds + <double>real_time.nanoseconds / 1e9
@staticmethod
def infinite_future():
return Timespec(float("+inf"))
@staticmethod
def infinite_past():
return Timespec(float("-inf"))
def __richcmp__(Timespec self not None, Timespec other not None, int op):
cdef gpr_timespec self_c_time = self.c_time
cdef gpr_timespec other_c_time = other.c_time
@ -454,7 +446,7 @@ cdef class _MetadataIterator:
self.i = self.i + 1
return result
else:
raise StopIteration
raise StopIteration()
# TODO(https://github.com/grpc/grpc/issues/7950): Eliminate this; just use an
@ -518,7 +510,7 @@ cdef class MetadataArray:
def __getitem__(self, size_t i):
if i >= self.c_metadata_array.count:
raise IndexError
raise IndexError()
key = _slice_bytes(self.c_metadata_array.metadata[i].key)
value = _slice_bytes(self.c_metadata_array.metadata[i].value)
return Metadatum(key=key, value=value)
@ -720,7 +712,7 @@ cdef class _OperationsIterator:
self.i = self.i + 1
return result
else:
raise StopIteration
raise StopIteration()
cdef class Operations:

@ -72,6 +72,8 @@ CORE_SOURCE_FILES = [
'src/core/lib/compression/compression.c',
'src/core/lib/compression/message_compress.c',
'src/core/lib/compression/stream_compression.c',
'src/core/lib/compression/stream_compression_gzip.c',
'src/core/lib/compression/stream_compression_identity.c',
'src/core/lib/debug/stats.c',
'src/core/lib/debug/stats_data.c',
'src/core/lib/http/format_request.c',

@ -94,7 +94,7 @@ def diagnose_attribute_error(build_ext, error):
_ERROR_DIAGNOSES = {
errors.CompileError: diagnose_compile_error,
AttributeError: diagnose_attribute_error
AttributeError: diagnose_attribute_error,
}
@ -102,8 +102,10 @@ def diagnose_build_ext_error(build_ext, error, formatted):
diagnostic = _ERROR_DIAGNOSES.get(type(error))
if diagnostic is None:
raise commands.CommandError(
"\n\nWe could not diagnose your build failure. Please file an issue at "
"http://www.github.com/grpc/grpc with `[Python install]` in the title."
"\n\n{}".format(formatted))
"\n\nWe could not diagnose your build failure. If you are unable to "
"proceed, please file an issue at http://www.github.com/grpc/grpc "
"with `[Python install]` in the title; please attach the whole log "
"(including everything that may have appeared above the Python "
"backtrace).\n\n{}".format(formatted))
else:
diagnostic(build_ext, error)

@ -29,7 +29,7 @@ _SERIALIZED_RESPONSE = b'\x49\x50\x51'
_REQUEST_SERIALIZER = lambda unused_request: _SERIALIZED_REQUEST
_REQUEST_DESERIALIZER = lambda unused_serialized_request: object()
_RESPONSE_SERIALIZER = lambda unused_response: _SERIALIZED_RESPONSE
_RESPONSE_DESERIALIZER = lambda unused_serialized_resopnse: object()
_RESPONSE_DESERIALIZER = lambda unused_serialized_response: object()
_SERVICE = 'test.TestService'
_UNARY_UNARY = 'UnaryUnary'

@ -35,7 +35,7 @@ def main
:this_channel_is_insecure)
stub.echo(Echo::EchoRequest.new(request: 'hello'))
fail 'the clients rpc in this test shouldnt complete. ' \
'expecting SIGINT to happen in the middle of the call'
'expecting SIGTERM to happen in the middle of the call'
end
thd.join
end

@ -69,9 +69,9 @@ def main
call_started_cv.wait(call_started_mu) until call_started.val
end
# SIGINT the child process now that it's
# SIGTERM the child process now that it's
# in the middle of an RPC (happening on a non-main thread)
Process.kill('SIGINT', client_pid)
Process.kill('SIGTERM', client_pid)
STDERR.puts 'sent shutdown'
begin
@ -88,8 +88,8 @@ def main
end
client_exit_code = $CHILD_STATUS
if client_exit_code.termsig != 2 # SIGINT
fail 'expected client exit from SIGINT ' \
if client_exit_code.termsig != 15 # SIGTERM
fail 'expected client exit from SIGTERM ' \
"but got child status: #{client_exit_code}"
end

@ -112,9 +112,12 @@ static void grpc_rb_call_credentials_callback_with_gil(void *param) {
gpr_free(params);
}
static void grpc_rb_call_credentials_plugin_get_metadata(
static int grpc_rb_call_credentials_plugin_get_metadata(
void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void *user_data) {
grpc_credentials_plugin_metadata_cb cb, void *user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t *num_creds_md, grpc_status_code *status,
const char **error_details) {
callback_params *params = gpr_malloc(sizeof(callback_params));
params->get_metadata = (VALUE)state;
params->context = context;
@ -123,6 +126,7 @@ static void grpc_rb_call_credentials_plugin_get_metadata(
grpc_rb_event_queue_enqueue(grpc_rb_call_credentials_callback_with_gil,
(void *)(params));
return 0; // Async return.
}
static void grpc_rb_call_credentials_plugin_destroy(void *state) {

@ -24,6 +24,7 @@ require_relative 'grpc/generic/active_call'
require_relative 'grpc/generic/client_stub'
require_relative 'grpc/generic/service'
require_relative 'grpc/generic/rpc_server'
require_relative 'grpc/generic/interceptors'
begin
file = File.open(ssl_roots_path)

@ -154,6 +154,15 @@ module GRPC
Operation.new(self)
end
##
# Returns a restricted view of this ActiveCall for use in interceptors
#
# @return [InterceptableView]
#
def interceptable
InterceptableView.new(self)
end
def receive_and_check_status
batch_result = @call.run_batch(RECV_STATUS_ON_CLIENT => nil)
set_input_stream_done
@ -515,15 +524,27 @@ module GRPC
# This does not mean that there must necessarily be one. E.g., the replies
# produced by gen_each_reply could ignore the received_msgs
#
# @param gen_each_reply [Proc] generates the BiDi stream replies
def run_server_bidi(gen_each_reply)
bd = BidiCall.new(@call,
@marshal,
@unmarshal,
metadata_received: @metadata_received,
req_view: MultiReqView.new(self))
bd.run_on_server(gen_each_reply, proc { set_input_stream_done })
# @param mth [Proc] generates the BiDi stream replies
# @param interception_ctx [InterceptionContext]
#
def run_server_bidi(mth, interception_ctx)
view = multi_req_view
bidi_call = BidiCall.new(
@call,
@marshal,
@unmarshal,
metadata_received: @metadata_received,
req_view: view
)
requests = bidi_call.read_next_loop(proc { set_input_stream_done }, false)
interception_ctx.intercept!(
:bidi_streamer,
call: view,
method: mth,
requests: requests
) do
bidi_call.run_on_server(mth, requests)
end
end
# Waits till an operation completes
@ -645,5 +666,9 @@ module GRPC
Operation = view_class(:cancel, :cancelled?, :deadline, :execute,
:metadata, :status, :start_call, :wait, :write_flag,
:write_flag=, :trailing_metadata)
# InterceptableView further limits access to an ActiveCall's methods
# for use in interceptors on the client, exposing only the deadline
InterceptableView = view_class(:deadline)
end
end

@ -87,23 +87,32 @@ module GRPC
# This does not mean that there must necessarily be one. E.g., the replies
# produced by gen_each_reply could ignore the received_msgs
#
# @param gen_each_reply [Proc] generates the BiDi stream replies.
# @param set_input_steam_done [Proc] call back to call when
# the reads have been completely read through.
def run_on_server(gen_each_reply, set_input_stream_done)
# @param [Proc] gen_each_reply generates the BiDi stream replies.
# @param [Enumerable] requests The enumerable of requests to run
def run_on_server(gen_each_reply, requests)
replies = nil
# Pass in the optional call object parameter if possible
if gen_each_reply.arity == 1
replys = gen_each_reply.call(
read_loop(set_input_stream_done, is_client: false))
replies = gen_each_reply.call(requests)
elsif gen_each_reply.arity == 2
replys = gen_each_reply.call(
read_loop(set_input_stream_done, is_client: false),
@req_view)
replies = gen_each_reply.call(requests, @req_view)
else
fail 'Illegal arity of reply generator'
end
write_loop(replys, is_client: false)
write_loop(replies, is_client: false)
end
##
# Read the next stream iteration
#
# @param [Proc] finalize_stream callback to call when the input stream has
# been completely read.
# @param [Boolean] is_client If this is a client or server request
#
def read_next_loop(finalize_stream, is_client = false)
read_loop(finalize_stream, is_client: is_client)
end
private

@ -89,17 +89,23 @@ module GRPC
# used within a gRPC server.
# @param channel_args [Hash] the channel arguments. Note: this argument is
# ignored if the channel_override argument is provided.
# @param interceptors [Array<GRPC::ClientInterceptor>] An array of
# GRPC::ClientInterceptor objects that will be used for
# intercepting calls before they are executed
# Interceptors are an EXPERIMENTAL API.
def initialize(host, creds,
channel_override: nil,
timeout: nil,
propagate_mask: nil,
channel_args: {})
channel_args: {},
interceptors: [])
@ch = ClientStub.setup_channel(channel_override, host, creds,
channel_args)
alt_host = channel_args[Core::Channel::SSL_TARGET]
@host = alt_host.nil? ? host : alt_host
@propagate_mask = propagate_mask
@timeout = timeout.nil? ? DEFAULT_TIMEOUT : timeout
@interceptors = InterceptorRegistry.new(interceptors)
end
# request_response sends a request to a GRPC server, and returns the
@ -149,16 +155,29 @@ module GRPC
deadline: deadline,
parent: parent,
credentials: credentials)
return c.request_response(req, metadata: metadata) unless return_op
# return the operation view of the active_call; define #execute as a
# new method for this instance that invokes #request_response.
c.merge_metadata_to_send(metadata)
op = c.operation
op.define_singleton_method(:execute) do
c.request_response(req, metadata: metadata)
interception_context = @interceptors.build_context
intercept_args = {
method: method,
request: req,
call: c.interceptable,
metadata: metadata
}
if return_op
# return the operation view of the active_call; define #execute as a
# new method for this instance that invokes #request_response.
c.merge_metadata_to_send(metadata)
op = c.operation
op.define_singleton_method(:execute) do
interception_context.intercept!(:request_response, intercept_args) do
c.request_response(req, metadata: metadata)
end
end
op
else
interception_context.intercept!(:request_response, intercept_args) do
c.request_response(req, metadata: metadata)
end
end
op
end
# client_streamer sends a stream of requests to a GRPC server, and
@ -213,16 +232,29 @@ module GRPC
deadline: deadline,
parent: parent,
credentials: credentials)
return c.client_streamer(requests, metadata: metadata) unless return_op
# return the operation view of the active_call; define #execute as a
# new method for this instance that invokes #client_streamer.
c.merge_metadata_to_send(metadata)
op = c.operation
op.define_singleton_method(:execute) do
c.client_streamer(requests)
interception_context = @interceptors.build_context
intercept_args = {
method: method,
requests: requests,
call: c.interceptable,
metadata: metadata
}
if return_op
# return the operation view of the active_call; define #execute as a
# new method for this instance that invokes #client_streamer.
c.merge_metadata_to_send(metadata)
op = c.operation
op.define_singleton_method(:execute) do
interception_context.intercept!(:client_streamer, intercept_args) do
c.client_streamer(requests)
end
end
op
else
interception_context.intercept!(:client_streamer, intercept_args) do
c.client_streamer(requests, metadata: metadata)
end
end
op
end
# server_streamer sends one request to the GRPC server, which yields a
@ -292,16 +324,29 @@ module GRPC
deadline: deadline,
parent: parent,
credentials: credentials)
return c.server_streamer(req, metadata: metadata, &blk) unless return_op
# return the operation view of the active_call; define #execute
# as a new method for this instance that invokes #server_streamer
c.merge_metadata_to_send(metadata)
op = c.operation
op.define_singleton_method(:execute) do
c.server_streamer(req, &blk)
interception_context = @interceptors.build_context
intercept_args = {
method: method,
request: req,
call: c.interceptable,
metadata: metadata
}
if return_op
# return the operation view of the active_call; define #execute
# as a new method for this instance that invokes #server_streamer
c.merge_metadata_to_send(metadata)
op = c.operation
op.define_singleton_method(:execute) do
interception_context.intercept!(:server_streamer, intercept_args) do
c.server_streamer(req, &blk)
end
end
op
else
interception_context.intercept!(:server_streamer, intercept_args) do
c.server_streamer(req, metadata: metadata, &blk)
end
end
op
end
# bidi_streamer sends a stream of requests to the GRPC server, and yields
@ -405,17 +450,29 @@ module GRPC
deadline: deadline,
parent: parent,
credentials: credentials)
return c.bidi_streamer(requests, metadata: metadata,
&blk) unless return_op
# return the operation view of the active_call; define #execute
# as a new method for this instance that invokes #bidi_streamer
c.merge_metadata_to_send(metadata)
op = c.operation
op.define_singleton_method(:execute) do
c.bidi_streamer(requests, &blk)
interception_context = @interceptors.build_context
intercept_args = {
method: method,
requests: requests,
call: c.interceptable,
metadata: metadata
}
if return_op
# return the operation view of the active_call; define #execute
# as a new method for this instance that invokes #bidi_streamer
c.merge_metadata_to_send(metadata)
op = c.operation
op.define_singleton_method(:execute) do
interception_context.intercept!(:bidi_streamer, intercept_args) do
c.bidi_streamer(requests, &blk)
end
end
op
else
interception_context.intercept!(:bidi_streamer, intercept_args) do
c.bidi_streamer(requests, metadata: metadata, &blk)
end
end
op
end
private

@ -0,0 +1,53 @@
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# GRPC contains the General RPC module.
module GRPC
##
# Represents a registry of added interceptors available for enumeration.
# The registry can be used for both server and client interceptors.
# This class is internal to gRPC and not meant for public usage.
#
class InterceptorRegistry
##
# An error raised when an attempt is made to add an interceptor
# that does not extend GRPC::Interceptor
#
class DescendantError < StandardError; end
##
# Initialize the registry with an empty interceptor list
# This is an EXPERIMENTAL API.
#
def initialize(interceptors = [])
@interceptors = []
interceptors.each do |i|
base = GRPC::Interceptor
unless i.class.ancestors.include?(base)
fail DescendantError, "Interceptors must descend from #{base}"
end
@interceptors << i
end
end
##
# Builds an interception context from this registry
#
# @return [InterceptionContext]
#
def build_context
InterceptionContext.new(@interceptors)
end
end
end

@ -0,0 +1,186 @@
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require_relative 'interceptor_registry'
# GRPC contains the General RPC module.
module GRPC
##
# Base class for interception in GRPC
#
class Interceptor
##
# @param [Hash] options A hash of options that will be used
# by the interceptor. This is an EXPERIMENTAL API.
#
def initialize(options = {})
@options = options || {}
end
end
##
# ClientInterceptor allows for wrapping outbound gRPC client stub requests.
# This is an EXPERIMENTAL API.
#
class ClientInterceptor < Interceptor
##
# Intercept a unary request response call
#
# @param [Object] request
# @param [GRPC::ActiveCall] call
# @param [Method] method
# @param [Hash] metadata
#
def request_response(request:, call:, method:, metadata:)
GRPC.logger.debug "Intercepting request response method #{method}" \
" for request #{request} with call #{call} and metadata: #{metadata}"
yield
end
##
# Intercept a client streaming call
#
# @param [Enumerable] requests
# @param [GRPC::ActiveCall] call
# @param [Method] method
# @param [Hash] metadata
#
def client_streamer(requests:, call:, method:, metadata:)
GRPC.logger.debug "Intercepting client streamer method #{method}" \
" for requests #{requests} with call #{call} and metadata: #{metadata}"
yield
end
##
# Intercept a server streaming call
#
# @param [Object] request
# @param [GRPC::ActiveCall] call
# @param [Method] method
# @param [Hash] metadata
#
def server_streamer(request:, call:, method:, metadata:)
GRPC.logger.debug "Intercepting server streamer method #{method}" \
" for request #{request} with call #{call} and metadata: #{metadata}"
yield
end
##
# Intercept a BiDi streaming call
#
# @param [Enumerable] requests
# @param [GRPC::ActiveCall] call
# @param [Method] method
# @param [Hash] metadata
#
def bidi_streamer(requests:, call:, method:, metadata:)
GRPC.logger.debug "Intercepting bidi streamer method #{method}" \
" for requests #{requests} with call #{call} and metadata: #{metadata}"
yield
end
end
##
# ServerInterceptor allows for wrapping gRPC server execution handling.
# This is an EXPERIMENTAL API.
#
class ServerInterceptor < Interceptor
##
# Intercept a unary request response call.
#
# @param [Object] request
# @param [GRPC::ActiveCall::SingleReqView] call
# @param [Method] method
#
def request_response(request:, call:, method:)
GRPC.logger.debug "Intercepting request response method #{method}" \
" for request #{request} with call #{call}"
yield
end
##
# Intercept a client streaming call
#
# @param [GRPC::ActiveCall::MultiReqView] call
# @param [Method] method
#
def client_streamer(call:, method:)
GRPC.logger.debug "Intercepting client streamer method #{method}" \
" with call #{call}"
yield
end
##
# Intercept a server streaming call
#
# @param [Object] request
# @param [GRPC::ActiveCall::SingleReqView] call
# @param [Method] method
#
def server_streamer(request:, call:, method:)
GRPC.logger.debug "Intercepting server streamer method #{method}" \
" for request #{request} with call #{call}"
yield
end
##
# Intercept a BiDi streaming call
#
# @param [Enumerable<Object>] requests
# @param [GRPC::ActiveCall::MultiReqView] call
# @param [Method] method
#
def bidi_streamer(requests:, call:, method:)
GRPC.logger.debug "Intercepting bidi streamer method #{method}" \
" for requests #{requests} with call #{call}"
yield
end
end
##
# Represents the context in which an interceptor runs. Used to provide an
# injectable mechanism for handling interception. This is an EXPERIMENTAL API.
#
class InterceptionContext
##
# @param [Array<GRPC::Interceptor>] interceptors
#
def initialize(interceptors = [])
@interceptors = interceptors.dup
end
##
# Intercept the call and invoke the interceptors in FIFO execution order.
# This is an EXPERIMENTAL API.
#
# @param [Symbol] type The request type
# @param [Hash] args The arguments for the call
#
def intercept!(type, args = {})
return yield if @interceptors.none?
i = @interceptors.pop
return yield unless i
i.send(type, args) do
if @interceptors.any?
intercept!(type, args) do
yield
end
else
yield
end
end
end
end
end

@ -47,43 +47,85 @@ module GRPC
proc { |o| unmarshal_class.method(unmarshal_method).call(o) }
end
def handle_request_response(active_call, mth)
def handle_request_response(active_call, mth, inter_ctx)
req = active_call.read_unary_request
resp = mth.call(req, active_call.single_req_view)
active_call.server_unary_response(
resp, trailing_metadata: active_call.output_metadata)
call = active_call.single_req_view
inter_ctx.intercept!(
:request_response,
method: mth,
call: call,
request: req
) do
resp = mth.call(req, call)
active_call.server_unary_response(
resp,
trailing_metadata: active_call.output_metadata
)
end
end
def handle_client_streamer(active_call, mth)
resp = mth.call(active_call.multi_req_view)
active_call.server_unary_response(
resp, trailing_metadata: active_call.output_metadata)
def handle_client_streamer(active_call, mth, inter_ctx)
call = active_call.multi_req_view
inter_ctx.intercept!(
:client_streamer,
method: mth,
call: call
) do
resp = mth.call(call)
active_call.server_unary_response(
resp,
trailing_metadata: active_call.output_metadata
)
end
end
def handle_server_streamer(active_call, mth)
def handle_server_streamer(active_call, mth, inter_ctx)
req = active_call.read_unary_request
replys = mth.call(req, active_call.single_req_view)
replys.each { |r| active_call.remote_send(r) }
send_status(active_call, OK, 'OK', active_call.output_metadata)
call = active_call.single_req_view
inter_ctx.intercept!(
:server_streamer,
method: mth,
call: call,
request: req
) do
replies = mth.call(req, call)
replies.each { |r| active_call.remote_send(r) }
send_status(active_call, OK, 'OK', active_call.output_metadata)
end
end
def handle_bidi_streamer(active_call, mth)
active_call.run_server_bidi(mth)
##
# @param [GRPC::ActiveCall] active_call
# @param [Method] mth
# @param [GRPC::InterceptionContext] inter_ctx
#
def handle_bidi_streamer(active_call, mth, inter_ctx)
active_call.run_server_bidi(mth, inter_ctx)
send_status(active_call, OK, 'OK', active_call.output_metadata)
end
def run_server_method(active_call, mth)
##
# @param [GRPC::ActiveCall] active_call The current active call object
# for the request
# @param [Method] mth The current RPC method being called
# @param [GRPC::InterceptionContext] inter_ctx The interception context
# being executed
#
def run_server_method(active_call, mth, inter_ctx = InterceptionContext.new)
# While a server method is running, it might be cancelled, its deadline
# might be reached, the handler could throw an unknown error, or a
# well-behaved handler could throw a StatusError.
if request_response?
handle_request_response(active_call, mth)
handle_request_response(active_call, mth, inter_ctx)
elsif client_streamer?
handle_client_streamer(active_call, mth)
handle_client_streamer(active_call, mth, inter_ctx)
elsif server_streamer?
handle_server_streamer(active_call, mth)
handle_server_streamer(active_call, mth, inter_ctx)
else # is a bidi_stream
handle_bidi_streamer(active_call, mth)
handle_bidi_streamer(active_call, mth, inter_ctx)
end
rescue BadStatus => e
# this is raised by handlers that want GRPC to send an application error

@ -196,11 +196,18 @@ module GRPC
#
# * server_args:
# A server arguments hash to be passed down to the underlying core server
#
# * interceptors:
# An array of GRPC::ServerInterceptor objects that will be used for
# intercepting server handlers to provide extra functionality.
# Interceptors are an EXPERIMENTAL API.
#
def initialize(pool_size:DEFAULT_POOL_SIZE,
max_waiting_requests:DEFAULT_MAX_WAITING_REQUESTS,
poll_period:DEFAULT_POLL_PERIOD,
connect_md_proc:nil,
server_args:{})
server_args:{},
interceptors:[])
@connect_md_proc = RpcServer.setup_connect_md_proc(connect_md_proc)
@max_waiting_requests = max_waiting_requests
@poll_period = poll_period
@ -212,6 +219,7 @@ module GRPC
# :stopped. State transitions can only proceed in that order.
@running_state = :not_started
@server = Core::Server.new(server_args)
@interceptors = InterceptorRegistry.new(interceptors)
end
# stops a running server
@ -374,7 +382,11 @@ module GRPC
@pool.schedule(active_call) do |ac|
c, mth = ac
begin
rpc_descs[mth].run_server_method(c, rpc_handlers[mth])
rpc_descs[mth].run_server_method(
c,
rpc_handlers[mth],
@interceptors.build_context
)
rescue StandardError
c.send_status(GRPC::Core::StatusCodes::INTERNAL,
'Server handler failed')
@ -382,7 +394,7 @@ module GRPC
end
end
rescue Core::CallError, RuntimeError => e
# these might happen for various reasonse. The correct behaviour of
# these might happen for various reasons. The correct behavior of
# the server is to log them and continue, if it's not shutting down.
if running_state == :running
GRPC.logger.warn("server call failed: #{e}")

@ -19,10 +19,17 @@ require 'google/rpc/status_pb'
module GRPC
# GoogleRpcStatusUtils provides utilities to convert between a
# GRPC::Core::Status and a deserialized Google::Rpc::Status proto
# Returns nil if the grpc-status-details-bin trailer could not be
# converted to a GoogleRpcStatus because the server did not provide
# the necessary trailer.
# Raises an error if the server did provide the necessary trailer
# but it fails to deserialize into a GoogleRpcStatus protobuf.
class GoogleRpcStatusUtils
def self.extract_google_rpc_status(status)
fail ArgumentError, 'bad type' unless status.is_a? Struct::Status
Google::Rpc::Status.decode(status.metadata['grpc-status-details-bin'])
grpc_status_details_bin_trailer = 'grpc-status-details-bin'
return nil if status.metadata[grpc_status_details_bin_trailer].nil?
Google::Rpc::Status.decode(status.metadata[grpc_status_details_bin_trailer])
end
end
end

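Client-side, the new behavior distinguishes a missing trailer (nil) from a malformed one (decode error). A usage sketch, assuming stub is any client stub and req its request message:

begin
  stub.an_rpc(req)
rescue GRPC::BadStatus => e
  rpc_status = GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(e.to_status)
  # rpc_status is nil when the server never set grpc-status-details-bin;
  # Google::Rpc::Status.decode still raises if the trailer is present
  # but malformed.
end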
@ -34,6 +34,7 @@ module Grpc
self.service_name = 'grpc.testing.duplicate.EchoTestService'
rpc :Echo, Grpc::Testing::EchoRequest, Grpc::Testing::EchoResponse
rpc :ResponseStream, Grpc::Testing::EchoRequest, stream(Grpc::Testing::EchoResponse)
end
Stub = Service.rpc_stub_class

@ -11,45 +11,12 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require 'grpc'
require 'spec_helper'
require 'timeout'
include Timeout
include GRPC::Core
# A test message
class EchoMsg
def self.marshal(_o)
''
end
def self.unmarshal(_o)
EchoMsg.new
end
end
# A test service with an echo implementation.
class EchoService
include GRPC::GenericService
rpc :an_rpc, EchoMsg, EchoMsg
attr_reader :received_md
def initialize(**kw)
@trailing_metadata = kw
@received_md = []
end
def an_rpc(req, call)
GRPC.logger.info('echo service received a request')
call.output_metadata.update(@trailing_metadata)
@received_md << call.metadata unless call.metadata.nil?
req
end
end
EchoStub = EchoService.rpc_stub_class
def start_server(port = 0)
@srv = GRPC::RpcServer.new(pool_size: 1)
server_port = @srv.add_http2_port("localhost:#{port}", :this_port_is_insecure)

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
require 'grpc'
require 'spec_helper'
include GRPC::Core::StatusCodes
@ -82,6 +82,16 @@ describe GRPC::ActiveCall do
end
end
end
describe '#interceptable' do
it 'exposes a fixed subset of the ActiveCall.methods' do
want = %w(deadline)
v = @client_call.interceptable
want.each do |w|
expect(v.methods).to include(w.to_sym)
end
end
end
end
describe '#remote_send' do
@ -609,9 +619,11 @@ describe GRPC::ActiveCall do
msgs
end
int_ctx = GRPC::InterceptionContext.new
@server_thread = Thread.new do
@server_call.run_server_bidi(
fake_gen_each_reply_with_no_call_param)
fake_gen_each_reply_with_no_call_param, int_ctx)
@server_call.send_status(@server_status)
end
end
@ -624,10 +636,11 @@ describe GRPC::ActiveCall do
call_param.send_initial_metadata
msgs
end
int_ctx = GRPC::InterceptionContext.new
@server_thread = Thread.new do
@server_call.run_server_bidi(
fake_gen_each_reply_with_call_param)
fake_gen_each_reply_with_call_param, int_ctx)
@server_call.send_status(@server_status)
end
end

@ -0,0 +1,153 @@
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require 'spec_helper'
describe 'Client Interceptors' do
let(:interceptor) { TestClientInterceptor.new }
let(:interceptors_opts) { { interceptors: [interceptor] } }
let(:request) { EchoMsg.new }
let(:service) { EchoService }
before(:each) do
build_rpc_server
end
context 'when a client interceptor is added' do
context 'with a request/response call' do
it 'should be called', server: true do
expect(interceptor).to receive(:request_response)
.once.and_call_original
run_services_on_server(@server, services: [service]) do
stub = build_insecure_stub(EchoStub, opts: interceptors_opts)
expect_any_instance_of(GRPC::ActiveCall).to receive(:request_response)
.once.and_call_original
expect(stub.an_rpc(request)).to be_a(EchoMsg)
end
end
it 'can modify outgoing metadata', server: true do
expect(interceptor).to receive(:request_response)
.once.and_call_original
run_services_on_server(@server, services: [service]) do
stub = build_insecure_stub(EchoStub, opts: interceptors_opts)
expect_any_instance_of(GRPC::ActiveCall).to receive(:request_response)
.with(request, metadata: { 'foo' => 'bar_from_request_response' })
.once.and_call_original
expect(stub.an_rpc(request)).to be_a(EchoMsg)
end
end
end
context 'with a client streaming call' do
it 'should be called', server: true do
expect(interceptor).to receive(:client_streamer)
.once.and_call_original
run_services_on_server(@server, services: [service]) do
stub = build_insecure_stub(EchoStub, opts: interceptors_opts)
expect_any_instance_of(GRPC::ActiveCall).to receive(:client_streamer)
.once.and_call_original
requests = [EchoMsg.new, EchoMsg.new]
expect(stub.a_client_streaming_rpc(requests)).to be_a(EchoMsg)
end
end
it 'can modify outgoing metadata', server: true do
expect(interceptor).to receive(:client_streamer)
.once.and_call_original
run_services_on_server(@server, services: [service]) do
stub = build_insecure_stub(EchoStub, opts: interceptors_opts)
requests = [EchoMsg.new, EchoMsg.new]
expect_any_instance_of(GRPC::ActiveCall).to receive(:client_streamer)
.with(requests, metadata: { 'foo' => 'bar_from_client_streamer' })
.once.and_call_original
expect(stub.a_client_streaming_rpc(requests)).to be_a(EchoMsg)
end
end
end
context 'with a server streaming call' do
it 'should be called', server: true do
expect(interceptor).to receive(:server_streamer)
.once.and_call_original
run_services_on_server(@server, services: [service]) do
stub = build_insecure_stub(EchoStub, opts: interceptors_opts)
request = EchoMsg.new
expect_any_instance_of(GRPC::ActiveCall).to receive(:server_streamer)
.once.and_call_original
responses = stub.a_server_streaming_rpc(request)
responses.each do |r|
expect(r).to be_a(EchoMsg)
end
end
end
it 'can modify outgoing metadata', server: true do
expect(interceptor).to receive(:server_streamer)
.once.and_call_original
run_services_on_server(@server, services: [service]) do
stub = build_insecure_stub(EchoStub, opts: interceptors_opts)
request = EchoMsg.new
expect_any_instance_of(GRPC::ActiveCall).to receive(:server_streamer)
.with(request, metadata: { 'foo' => 'bar_from_server_streamer' })
.once.and_call_original
responses = stub.a_server_streaming_rpc(request)
responses.each do |r|
expect(r).to be_a(EchoMsg)
end
end
end
end
context 'with a bidi call' do
it 'should be called', server: true do
expect(interceptor).to receive(:bidi_streamer)
.once.and_call_original
run_services_on_server(@server, services: [service]) do
stub = build_insecure_stub(EchoStub, opts: interceptors_opts)
expect_any_instance_of(GRPC::ActiveCall).to receive(:bidi_streamer)
.once.and_call_original
requests = [EchoMsg.new, EchoMsg.new]
responses = stub.a_bidi_rpc(requests)
responses.each do |r|
expect(r).to be_a(EchoMsg)
end
end
end
it 'can modify outgoing metadata', server: true do
expect(interceptor).to receive(:bidi_streamer)
.once.and_call_original
run_services_on_server(@server, services: [service]) do
stub = build_insecure_stub(EchoStub, opts: interceptors_opts)
requests = [EchoMsg.new, EchoMsg.new]
expect_any_instance_of(GRPC::ActiveCall).to receive(:bidi_streamer)
.with(requests, metadata: { 'foo' => 'bar_from_bidi_streamer' })
.once.and_call_original
responses = stub.a_bidi_rpc(requests)
responses.each do |r|
expect(r).to be_a(EchoMsg)
end
end
end
end
end
end

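For reference, the interceptors reach the stub through its construction options; with the helper added later in this diff, build_insecure_stub(EchoStub, opts: interceptors_opts) expands to roughly the following (address illustrative):

interceptor = TestClientInterceptor.new
stub = EchoStub.new(
  'localhost:50051',            # illustrative address
  :this_channel_is_insecure,
  interceptors: [interceptor]
)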
@ -0,0 +1,65 @@
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require 'spec_helper'
describe GRPC::InterceptorRegistry do
let(:server) { RpcServer.new }
let(:interceptor) { TestServerInterceptor.new }
let(:interceptors) { [interceptor] }
let(:registry) { described_class.new(interceptors) }
describe 'initialization' do
subject { registry }
context 'with an interceptor extending GRPC::ServerInterceptor' do
it 'should add the interceptor to the registry' do
subject
is = registry.instance_variable_get('@interceptors')
expect(is.count).to eq 1
expect(is.first).to eq interceptor
end
end
context 'with multiple interceptors' do
let(:interceptor2) { TestServerInterceptor.new }
let(:interceptor3) { TestServerInterceptor.new }
let(:interceptors) { [interceptor, interceptor2, interceptor3] }
it 'should maintain order of insertion when iterated against' do
subject
is = registry.instance_variable_get('@interceptors')
expect(is.count).to eq 3
is.each_with_index do |i, idx|
case idx
when 0
expect(i).to eq interceptor
when 1
expect(i).to eq interceptor2
when 2
expect(i).to eq interceptor3
end
end
end
end
context 'with an interceptor not extending GRPC::ServerInterceptor' do
let(:interceptor) { Class }
let(:err) { GRPC::InterceptorRegistry::DescendantError }
it 'should raise a DescendantError exception' do
expect { subject }.to raise_error(err)
end
end
end
end

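The spec pins down two behaviors: ancestry validation and stable iteration order. A registry sketch consistent with those assertions (not necessarily the shipped implementation):

require 'grpc'

module GRPC
  class SketchInterceptorRegistry
    # Raised when a supplied interceptor does not descend from
    # GRPC::ServerInterceptor, mirroring DescendantError above.
    class DescendantError < StandardError; end

    def initialize(interceptors = [])
      @interceptors = []
      interceptors.each do |i|
        unless i.is_a?(GRPC::ServerInterceptor)
          raise DescendantError,
                "#{i.class} must descend from GRPC::ServerInterceptor"
        end
        @interceptors << i # appended in order, so iteration is stable
      end
    end
  end
end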
@ -11,8 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require 'grpc'
require 'spec_helper'
def load_test_certs
test_root = File.join(File.dirname(File.dirname(__FILE__)), 'testdata')
@ -28,17 +27,6 @@ def check_md(wanted_md, received_md)
end
end
# A test message
class EchoMsg
def self.marshal(_o)
''
end
def self.unmarshal(_o)
EchoMsg.new
end
end
# A test service with no methods.
class EmptyService
include GRPC::GenericService
@ -50,27 +38,6 @@ class NoRpcImplementation
rpc :an_rpc, EchoMsg, EchoMsg
end
# A test service with an echo implementation.
class EchoService
include GRPC::GenericService
rpc :an_rpc, EchoMsg, EchoMsg
attr_reader :received_md
def initialize(**kw)
@trailing_metadata = kw
@received_md = []
end
def an_rpc(req, call)
GRPC.logger.info('echo service received a request')
call.output_metadata.update(@trailing_metadata)
@received_md << call.metadata unless call.metadata.nil?
req
end
end
EchoStub = EchoService.rpc_stub_class
# A test service with an implementation that fails with BadStatus
class FailingService
include GRPC::GenericService

@ -0,0 +1,218 @@
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require 'spec_helper'
describe 'Server Interceptors' do
let(:interceptor) { TestServerInterceptor.new }
let(:request) { EchoMsg.new }
let(:trailing_metadata) { {} }
let(:service) { EchoService.new(trailing_metadata) }
let(:interceptors) { [] }
before(:each) do
build_rpc_server(server_opts: { interceptors: interceptors })
end
context 'when a server interceptor is added' do
let(:interceptors) { [interceptor] }
let(:client_metadata) { { client_md: 'test' } }
let(:client_call_opts) { { metadata: client_metadata, return_op: true } }
context 'with a request/response call' do
let(:trailing_metadata) { { server_om: 'from_request_response' } }
it 'should be called', server: true do
expect(interceptor).to receive(:request_response)
.once.and_call_original
run_services_on_server(@server, services: [service]) do
stub = build_insecure_stub(EchoStub)
expect(stub.an_rpc(request)).to be_a(EchoMsg)
end
end
it 'can modify trailing metadata', server: true do
expect(interceptor).to receive(:request_response)
.once.and_call_original
run_services_on_server(@server, services: [service]) do
stub = build_insecure_stub(EchoStub)
expect_any_instance_of(GRPC::ActiveCall).to(
receive(:request_response).with(request, metadata: client_metadata)
.once.and_call_original
)
op = stub.an_rpc(request, client_call_opts)
msg = op.execute
expect(op.trailing_metadata).to eq(
'interc' => 'from_request_response',
'server_om' => 'from_request_response'
)
expect(msg).to be_a(EchoMsg)
end
end
end
context 'with a client streaming call' do
let(:trailing_metadata) { { server_om: 'from_client_streamer' } }
let(:requests) { [EchoMsg.new, EchoMsg.new] }
it 'should be called', server: true do
expect(interceptor).to receive(:client_streamer)
.once.and_call_original
run_services_on_server(@server, services: [service]) do
stub = build_insecure_stub(EchoStub)
expect(stub.a_client_streaming_rpc(requests)).to be_a(EchoMsg)
end
end
it 'can modify trailing metadata', server: true do
expect(interceptor).to receive(:client_streamer)
.once.and_call_original
run_services_on_server(@server, services: [service]) do
stub = build_insecure_stub(EchoStub)
expect_any_instance_of(GRPC::ActiveCall).to(
receive(:client_streamer).with(requests)
.once.and_call_original
)
op = stub.a_client_streaming_rpc(requests, client_call_opts)
msg = op.execute
expect(op.trailing_metadata).to eq(
'interc' => 'from_client_streamer',
'server_om' => 'from_client_streamer'
)
expect(msg).to be_a(EchoMsg)
end
end
end
context 'with a server streaming call' do
let(:trailing_metadata) { { server_om: 'from_server_streamer' } }
let(:request) { EchoMsg.new }
it 'should be called', server: true do
expect(interceptor).to receive(:server_streamer)
.once.and_call_original
run_services_on_server(@server, services: [service]) do
stub = build_insecure_stub(EchoStub)
responses = stub.a_server_streaming_rpc(request)
responses.each do |r|
expect(r).to be_a(EchoMsg)
end
end
end
it 'can modify trailing metadata', server: true do
expect(interceptor).to receive(:server_streamer)
.once.and_call_original
run_services_on_server(@server, services: [service]) do
stub = build_insecure_stub(EchoStub)
expect_any_instance_of(GRPC::ActiveCall).to(
receive(:server_streamer).with(request)
.once.and_call_original
)
op = stub.a_server_streaming_rpc(request, client_call_opts)
responses = op.execute
responses.each do |r|
expect(r).to be_a(EchoMsg)
end
expect(op.trailing_metadata).to eq(
'interc' => 'from_server_streamer',
'server_om' => 'from_server_streamer'
)
end
end
end
context 'with a bidi call' do
let(:trailing_metadata) { { server_om: 'from_bidi_streamer' } }
let(:requests) { [EchoMsg.new, EchoMsg.new] }
it 'should be called', server: true do
expect(interceptor).to receive(:bidi_streamer)
.once.and_call_original
run_services_on_server(@server, services: [service]) do
stub = build_insecure_stub(EchoStub)
responses = stub.a_bidi_rpc(requests)
responses.each do |r|
expect(r).to be_a(EchoMsg)
end
end
end
it 'can modify trailing metadata', server: true do
expect(interceptor).to receive(:bidi_streamer)
.once.and_call_original
run_services_on_server(@server, services: [service]) do
stub = build_insecure_stub(EchoStub)
expect_any_instance_of(GRPC::ActiveCall).to(
receive(:bidi_streamer).with(requests)
.once.and_call_original
)
op = stub.a_bidi_rpc(requests, client_call_opts)
responses = op.execute
responses.each do |r|
expect(r).to be_a(EchoMsg)
end
expect(op.trailing_metadata).to eq(
'interc' => 'from_bidi_streamer',
'server_om' => 'from_bidi_streamer'
)
end
end
end
end
context 'when multiple interceptors are added' do
let(:interceptor2) { TestServerInterceptor.new }
let(:interceptor3) { TestServerInterceptor.new }
let(:interceptors) do
[
interceptor,
interceptor2,
interceptor3
]
end
it 'each should be called', server: true do
expect(interceptor).to receive(:request_response)
.once.and_call_original
expect(interceptor2).to receive(:request_response)
.once.and_call_original
expect(interceptor3).to receive(:request_response)
.once.and_call_original
run_services_on_server(@server, services: [service]) do
stub = build_insecure_stub(EchoStub)
expect(stub.an_rpc(request)).to be_a(EchoMsg)
end
end
end
context 'when an interceptor is not added' do
it 'should not be called', server: true do
expect(interceptor).to_not receive(:call)
run_services_on_server(@server, services: [service]) do
stub = build_insecure_stub(EchoStub)
expect(stub.an_rpc(request)).to be_a(EchoMsg)
end
end
end
end

@ -31,12 +31,11 @@ describe 'conversion from a status struct to a google protobuf status' do
expect(exception.message.include?('bad type')).to be true
end
it 'fails with some error if the header key is missing' do
it 'returns nil if the header key is missing' do
status = Struct::Status.new(1, 'details', key: 'val')
expect(status.metadata.nil?).to be false
expect do
GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(status)
end.to raise_error(StandardError)
expect(GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(
status)).to be(nil)
end
it 'fails with some error if the header value fails to deserialize' do
@ -221,3 +220,73 @@ describe 'receiving a google rpc status from a remote endpoint' do
status_from_exception)).to eq(rpc_status)
end
end
# A test service that fails without explicitly setting the
# grpc-status-details-bin trailer. Tests assumptions about the value
# of grpc-status-details-bin on the client side when the trailer wasn't
# set explicitly.
class NoStatusDetailsBinTestService
include GRPC::GenericService
rpc :an_rpc, EchoMsg, EchoMsg
def an_rpc(_, _)
fail GRPC::Unknown
end
end
NoStatusDetailsBinTestServiceStub = NoStatusDetailsBinTestService.rpc_stub_class
describe 'when the endpoint does not send grpc-status-details-bin' do
def start_server
@srv = GRPC::RpcServer.new(pool_size: 1)
@server_port = @srv.add_http2_port('localhost:0',
:this_port_is_insecure)
@srv.handle(NoStatusDetailsBinTestService)
@server_thd = Thread.new { @srv.run }
@srv.wait_till_running
end
def stop_server
expect(@srv.stopped?).to be(false)
@srv.stop
@server_thd.join
expect(@srv.stopped?).to be(true)
end
before(:each) do
start_server
end
after(:each) do
stop_server
end
it 'should receive nil when we try to extract a google '\
'rpc status from a BadStatus exception that did not have it' do
stub = NoStatusDetailsBinTestServiceStub.new("localhost:#{@server_port}",
:this_channel_is_insecure)
begin
stub.an_rpc(EchoMsg.new)
rescue GRPC::Unknown => e
rpc_status = GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(
e.to_status)
end
expect(rpc_status).to be(nil)
end
it 'should receive nil when we try to extract a google '\
'rpc status from an op view status object that did not have it' do
stub = NoStatusDetailsBinTestServiceStub.new("localhost:#{@server_port}",
:this_channel_is_insecure)
op = stub.an_rpc(EchoMsg.new, return_op: true)
begin
op.execute
rescue GRPC::Unknown => e
status_from_exception = e.to_status
end
expect(GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(
status_from_exception)).to be(nil)
expect(GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(
op.status)).to be nil
end
end

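For contrast with NoStatusDetailsBinTestService, a handler that does want the trailer populated can raise a status whose metadata carries an encoded Google::Rpc::Status. A sketch under that assumption (service is hypothetical; the error/metadata plumbing follows GRPC::BadStatus conventions):

require 'grpc'
require 'google/rpc/status_pb'

class WithStatusDetailsBinTestService
  include GRPC::GenericService
  rpc :an_rpc, EchoMsg, EchoMsg

  def an_rpc(_req, _call)
    rpc_status = Google::Rpc::Status.new(code: 2, message: 'unknown')
    # Attach the serialized proto so clients can extract it again.
    fail GRPC::Unknown.new(
      'unknown',
      'grpc-status-details-bin' => Google::Rpc::Status.encode(rpc_status)
    )
  end
end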
@ -32,6 +32,9 @@ require 'rspec'
require 'logging'
require 'rspec/logging_helper'
require_relative 'support/services'
require_relative 'support/helpers'
# GRPC is the general RPC module
#
# Configure its logging for fine-grained log control during test runs
@ -49,6 +52,7 @@ Logging.logger['GRPC::BidiCall'].level = :info
RSpec.configure do |config|
include RSpec::LoggingHelper
config.capture_log_messages # comment this out to see logs during test runs
include GRPC::Spec::Helpers
end
RSpec::Expectations.configuration.warn_about_potential_false_positives = false

@ -0,0 +1,73 @@
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# GRPC contains the General RPC module.
module GRPC
##
# GRPC RSpec base module
#
module Spec
##
# A module that is used for providing generic helpers across the
# GRPC test suite
#
module Helpers
# Shortcut syntax for a GRPC RPC Server
RpcServer = GRPC::RpcServer
##
# Build an RPC server used for testing
#
def build_rpc_server(server_opts: {},
client_opts: {})
@server = RpcServer.new({ poll_period: 1 }.merge(server_opts))
@port = @server.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
@host = "0.0.0.0:#{@port}"
@client_opts = client_opts
@server
end
##
# Run services on an RPC server, yielding so tests can run against it
#
# @param [RpcServer] server
# @param [Array<Class>] services
#
def run_services_on_server(server, services: [])
services.each do |s|
server.handle(s)
end
t = Thread.new { server.run }
server.wait_till_running
yield
server.stop
t.join
end
##
# Build an insecure stub from a given stub class
#
# @param [Class] klass
# @param [String] host
#
def build_insecure_stub(klass, host: nil, opts: nil)
host ||= @host
opts ||= @client_opts
klass.new(host, :this_channel_is_insecure, **opts)
end
end
end
end

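Put together, a spec using these helpers reads roughly like this (EchoService, EchoStub, and EchoMsg come from the shared stubs added below; the helpers are mixed in via spec_helper):

require 'spec_helper'

describe 'helpers in practice' do
  it 'round-trips an echo', server: true do
    build_rpc_server
    run_services_on_server(@server, services: [EchoService]) do
      stub = build_insecure_stub(EchoStub)
      expect(stub.an_rpc(EchoMsg.new)).to be_a(EchoMsg)
    end
  end
end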
@ -0,0 +1,147 @@
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Test stubs for various scenarios
require 'grpc'
# A test message
class EchoMsg
def self.marshal(_o)
''
end
def self.unmarshal(_o)
EchoMsg.new
end
end
# A test service with an echo implementation.
class EchoService
include GRPC::GenericService
rpc :an_rpc, EchoMsg, EchoMsg
rpc :a_client_streaming_rpc, stream(EchoMsg), EchoMsg
rpc :a_server_streaming_rpc, EchoMsg, stream(EchoMsg)
rpc :a_bidi_rpc, stream(EchoMsg), stream(EchoMsg)
attr_reader :received_md
def initialize(**kw)
@trailing_metadata = kw
@received_md = []
end
def an_rpc(req, call)
GRPC.logger.info('echo service received a request')
call.output_metadata.update(@trailing_metadata)
@received_md << call.metadata unless call.metadata.nil?
req
end
def a_client_streaming_rpc(call)
# iterate through requests so call can complete
call.output_metadata.update(@trailing_metadata)
call.each_remote_read.each { |r| p r }
EchoMsg.new
end
def a_server_streaming_rpc(_req, call)
call.output_metadata.update(@trailing_metadata)
[EchoMsg.new, EchoMsg.new]
end
def a_bidi_rpc(requests, call)
call.output_metadata.update(@trailing_metadata)
requests.each { |r| p r }
[EchoMsg.new, EchoMsg.new]
end
end
EchoStub = EchoService.rpc_stub_class
# For testing server interceptors
class TestServerInterceptor < GRPC::ServerInterceptor
def request_response(request:, call:, method:)
p "Received request/response call at method #{method}" \
" with request #{request} for call #{call}"
call.output_metadata[:interc] = 'from_request_response'
p "[GRPC::Ok] (#{method.owner.name}.#{method.name})"
yield
end
def client_streamer(call:, method:)
call.output_metadata[:interc] = 'from_client_streamer'
call.each_remote_read.each do |r|
p "In interceptor: #{r}"
end
p "Received client streamer call at method #{method} for call #{call}"
yield
end
def server_streamer(request:, call:, method:)
p "Received server streamer call at method #{method} with request" \
" #{request} for call #{call}"
call.output_metadata[:interc] = 'from_server_streamer'
yield
end
def bidi_streamer(requests:, call:, method:)
requests.each do |r|
p "Bidi request: #{r}"
end
p "Received bidi streamer call at method #{method} with requests" \
" #{requests} for call #{call}"
call.output_metadata[:interc] = 'from_bidi_streamer'
yield
end
end
# For testing client interceptors
class TestClientInterceptor < GRPC::ClientInterceptor
def request_response(request:, call:, method:, metadata: {})
p "Intercepted request/response call at method #{method}" \
" with request #{request} for call #{call}" \
" and metadata: #{metadata}"
metadata['foo'] = 'bar_from_request_response'
yield
end
def client_streamer(requests:, call:, method:, metadata: {})
p "Received client streamer call at method #{method}" \
" with requests #{requests} for call #{call}" \
" and metadata: #{metadata}"
requests.each do |r|
p "In client interceptor: #{r}"
end
metadata['foo'] = 'bar_from_client_streamer'
yield
end
def server_streamer(request:, call:, method:, metadata: {})
p "Received server streamer call at method #{method}" \
" with request #{request} for call #{call}" \
" and metadata: #{metadata}"
metadata['foo'] = 'bar_from_server_streamer'
yield
end
def bidi_streamer(requests:, call:, method:, metadata: {})
p "Received bidi streamer call at method #{method}" \
"with requests #{requests} for call #{call}" \
" and metadata: #{metadata}"
requests.each do |r|
p "In client interceptor: #{r}"
end
metadata['foo'] = 'bar_from_bidi_streamer'
yield
end
end

@ -0,0 +1,4 @@
%YAML 1.2
--- |
<%namespace file="create_private_dns_zone_defs.include" import="*"/>\
${create_private_dns_zone(resolver_gce_integration_tests_zone_id, resolver_tests_common_zone_name)}

@ -0,0 +1,32 @@
<%def name="create_private_dns_zone(resolver_gce_integration_tests_zone_id, resolver_tests_common_zone_name)">#!/bin/bash
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is auto-generated
set -ex
cd $(dirname $0)/../../..
gcloud alpha dns managed-zones create \\
${resolver_gce_integration_tests_zone_id} \\
--dns-name=${resolver_tests_common_zone_name} \\
--description="GCE-DNS-private-zone-for-GRPC-testing" \\
--visibility=private \\
--networks=default</%def>

@ -0,0 +1,4 @@
%YAML 1.2
--- |
<%namespace file="private_dns_zone_init_defs.include" import="*"/>\
${private_dns_zone_init(all_integration_test_records, resolver_gce_integration_tests_zone_id, resolver_tests_common_zone_name)}

@ -0,0 +1,40 @@
<%def name="private_dns_zone_init(records,resolver_gce_integration_tests_zone_id,resolver_tests_common_zone_name)">#!/bin/bash
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is auto-generated
set -ex
cd $(dirname $0)/../../..
gcloud dns record-sets transaction start -z=${resolver_gce_integration_tests_zone_id}
% for r in records:
gcloud dns record-sets transaction add \\
-z=${resolver_gce_integration_tests_zone_id} \\
--name=${r['name']}.${resolver_tests_common_zone_name} \\
--type=${r['type']} \\
--ttl=${r['ttl']} \\
${r['data']}
% endfor
gcloud dns record-sets transaction describe -z=${resolver_gce_integration_tests_zone_id}
gcloud dns record-sets transaction execute -z=${resolver_gce_integration_tests_zone_id}
gcloud dns record-sets list -z=${resolver_gce_integration_tests_zone_id}</%def>

@ -0,0 +1,64 @@
<%def name="resolver_gce_integration_tests(tests, records, resolver_tests_common_zone_name)">#!/bin/bash
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is auto-generated
set -ex
if [[ "$GRPC_DNS_RESOLVER" == "" ]]; then
export GRPC_DNS_RESOLVER=ares
elif [[ "$GRPC_DNS_RESOLVER" != ares ]]; then
echo "Unexpected: GRPC_DNS_RESOLVER=$GRPC_DNS_RESOLVER. This test only works with c-ares resolver"
exit 1
fi
cd $(dirname $0)/../../..
if [[ "$CONFIG" == "" ]]; then
export CONFIG=opt
fi
make resolver_component_test
echo "Sanity check DNS records are resolveable with dig:"
EXIT_CODE=0
% for r in records:
ONE_FAILED=0
dig ${r['type']} ${r['name']}.${resolver_tests_common_zone_name} | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
echo "Sanity check: dig ${r['type']} ${r['name']}.${resolver_tests_common_zone_name} FAILED"
exit 1
fi
% endfor
echo "Sanity check PASSED. Run resolver tests:"
% for test in tests:
ONE_FAILED=0
bins/$CONFIG/resolver_component_test \\
--target_name='${test['target_name']}' \\
--expected_addrs='${test['expected_addrs']}' \\
--expected_chosen_service_config='${test['expected_chosen_service_config']}' \\
--expected_lb_policy='${test['expected_lb_policy']}' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
echo "Test based on target record: ${test['target_name']} FAILED"
EXIT_CODE=1
fi
% endfor
exit $EXIT_CODE</%def>

@ -0,0 +1,4 @@
%YAML 1.2
--- |
<%namespace file="resolver_gce_integration_tests_defs.include" import="*"/>\
${resolver_gce_integration_tests(resolver_gce_integration_test_cases, all_integration_test_records, resolver_tests_common_zone_name)}

@ -1,5 +1,5 @@
RUN apt-get update && apt-get -y install wget
RUN echo deb http://llvm.org/apt/wily/ llvm-toolchain-wily-3.8 main >> /etc/apt/sources.list
RUN echo deb-src http://llvm.org/apt/wily/ llvm-toolchain-wily-3.8 main >> /etc/apt/sources.list
RUN wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key| apt-key add -
RUN echo "deb http://llvm.org/apt/jessie/ llvm-toolchain-jessie-3.8 main" >> /etc/apt/sources.list
RUN echo "deb-src http://llvm.org/apt/jessie/ llvm-toolchain-jessie-3.8 main" >> /etc/apt/sources.list
RUN wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key | apt-key add -
RUN apt-get update && apt-get -y install clang-format-3.8

@ -14,9 +14,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:15.10
FROM debian:jessie
<%include file="../clang_format.include"/>
ADD clang_format_all_the_things.sh /
CMD ["echo 'Run with tools/distrib/clang_format_code.sh'"]

@ -9,6 +9,6 @@ RUN apt-get update && apt-get install -y ${'\\'}
python-pip
# Install Python packages from PyPI
RUN pip install pip --upgrade
RUN pip install --upgrade pip==9.0.1
RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0

@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:15.10
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
<%include file="../../gcp_api_libraries.include"/>
@ -33,14 +33,23 @@
#======================================
# More sanity test dependencies (bazel)
RUN apt-get install -y openjdk-8-jdk
# Check out Bazel version 0.4.1 since this version allows running
# ./compile.sh without a local protoc dependency
# TODO(mattkwong): install dependencies to support latest Bazel version if newer
# version is needed
RUN git clone https://github.com/bazelbuild/bazel.git /bazel && ${"\\"}
cd /bazel && git checkout tags/0.4.1 && ./compile.sh
RUN ln -s /bazel/output/bazel /bin/
RUN echo "deb http://http.debian.net/debian jessie-backports main" >> /etc/apt/sources.list
RUN apt-get update
RUN apt-get install -y -t jessie-backports openjdk-8-jdk
#========================
# Bazel installation
RUN echo "deb [arch=amd64] http://storage.googleapis.com/bazel-apt stable jdk1.8" > /etc/apt/sources.list.d/bazel.list
RUN curl https://bazel.build/bazel-release.pub.gpg | apt-key add -
RUN apt-get -y update
RUN apt-get -y install bazel
# Pin Bazel to 0.4.4
# Installing Bazel via apt-get first is required before installing 0.4.4 to
# allow gRPC to build without errors. See https://github.com/grpc/grpc/issues/10553
RUN curl -fSsL -O https://github.com/bazelbuild/bazel/releases/download/0.4.4/bazel-0.4.4-installer-linux-x86_64.sh
RUN chmod +x ./bazel-0.4.4-installer-linux-x86_64.sh
RUN ./bazel-0.4.4-installer-linux-x86_64.sh
<%include file="../../clang_format.include"/>
<%include file="../../run_tests_addons.include"/>

@ -134,9 +134,12 @@ void grpc_run_bad_client_test(
grpc_endpoint_write(&exec_ctx, sfd.client, &outgoing, &done_write_closure);
grpc_exec_ctx_finish(&exec_ctx);
/* Await completion */
GPR_ASSERT(
gpr_event_wait(&a.done_write, grpc_timeout_seconds_to_deadline(5)));
/* Await completion, unless the request is large and write may not finish
* before the peer shuts down. */
if (!(flags & GRPC_BAD_CLIENT_LARGE_REQUEST)) {
GPR_ASSERT(
gpr_event_wait(&a.done_write, grpc_timeout_seconds_to_deadline(5)));
}
if (flags & GRPC_BAD_CLIENT_DISCONNECT) {
grpc_endpoint_shutdown(
@ -186,6 +189,8 @@ void grpc_run_bad_client_test(
grpc_exec_ctx_finish(&exec_ctx);
}
GPR_ASSERT(
gpr_event_wait(&a.done_write, grpc_timeout_seconds_to_deadline(1)));
shutdown_cq = grpc_completion_queue_create_for_pluck(NULL);
grpc_server_shutdown_and_notify(a.server, shutdown_cq, NULL);
GPR_ASSERT(grpc_completion_queue_pluck(

@ -37,6 +37,7 @@ typedef bool (*grpc_bad_client_client_stream_validator)(
grpc_slice_buffer *incoming);
#define GRPC_BAD_CLIENT_DISCONNECT 1
#define GRPC_BAD_CLIENT_LARGE_REQUEST 2
/* Test runner.

Some files were not shown because too many files have changed in this diff