Merge remote-tracking branch 'upstream/master' into max_send_size_filter

pull/7846/head
Mark D. Roth 8 years ago
commit 7bfa1a8343
  1. BUILD (14)
  2. CMakeLists.txt (5)
  3. Makefile (7)
  4. binding.gyp (2)
  5. build.yaml (4)
  6. config.m4 (2)
  7. doc/fail_fast.md (16)
  8. doc/wait-for-ready.md (14)
  9. examples/php/run_greeter_client.sh (2)
  10. gRPC-Core.podspec (6)
  11. grpc.gemspec (5)
  12. include/grpc++/ext/reflection.grpc.pb.h (2)
  13. include/grpc++/grpc++.h (4)
  14. include/grpc++/impl/codegen/call.h (4)
  15. include/grpc++/impl/codegen/completion_queue.h (10)
  16. include/grpc++/impl/codegen/method_handler_impl.h (64)
  17. include/grpc++/impl/codegen/rpc_method.h (3)
  18. include/grpc++/impl/codegen/rpc_service_method.h (1)
  19. include/grpc++/impl/codegen/server_context.h (10)
  20. include/grpc++/impl/codegen/service_type.h (11)
  21. include/grpc++/impl/codegen/sync_stream.h (107)
  22. include/grpc++/impl/server_builder_plugin.h (5)
  23. include/grpc/impl/codegen/grpc_types.h (10)
  24. package.xml (4)
  25. src/compiler/cpp_generator.cc (77)
  26. src/core/ext/census/gen/README.md (4)
  27. src/core/ext/census/gen/trace_context.pb.c (81)
  28. src/core/ext/census/gen/trace_context.pb.h (99)
  29. src/core/ext/client_config/client_channel.c (466)
  30. src/core/ext/client_config/subchannel_call_holder.c (292)
  31. src/core/ext/client_config/subchannel_call_holder.h (99)
  32. src/core/lib/channel/http_client_filter.c (177)
  33. src/core/lib/channel/http_client_filter.h (3)
  34. src/core/lib/channel/http_server_filter.c (79)
  35. src/core/lib/surface/server.c (11)
  36. src/core/lib/transport/static_metadata.c (15)
  37. src/core/lib/transport/static_metadata.h (98)
  38. src/core/lib/transport/transport.h (1)
  39. src/cpp/server/server_builder.cc (15)
  40. src/php/lib/Grpc/AbstractCall.php (37)
  41. src/php/lib/Grpc/BidiStreamingCall.php (10)
  42. src/php/lib/Grpc/ClientStreamingCall.php (6)
  43. src/php/lib/Grpc/ServerStreamingCall.php (12)
  44. src/php/lib/Grpc/UnaryCall.php (4)
  45. src/proto/census/trace_context.options (0)
  46. src/proto/census/trace_context.proto (48)
  47. src/python/grpcio/grpc_core_dependencies.py (2)
  48. src/ruby/lib/grpc/generic/rpc_server.rb (143)
  49. src/ruby/spec/generic/rpc_server_pool_spec.rb (138)
  50. src/ruby/spec/generic/rpc_server_spec.rb (13)
  51. templates/grpc.gemspec.template (1)
  52. test/core/bad_client/tests/head_of_line_blocking.c (2)
  53. test/core/bad_client/tests/large_metadata.c (6)
  54. test/core/bad_client/tests/server_registered_method.c (4)
  55. test/core/bad_client/tests/simple_request.c (2)
  56. test/core/bad_ssl/bad_ssl_test.c (2)
  57. test/core/client_config/lb_policies_test.c (12)
  58. test/core/end2end/bad_server_response_test.c (2)
  59. test/core/end2end/cq_verifier.c (44)
  60. test/core/end2end/cq_verifier.h (5)
  61. test/core/end2end/dualstack_socket_test.c (8)
  62. test/core/end2end/end2end_nosec_tests.c (8)
  63. test/core/end2end/end2end_tests.c (8)
  64. test/core/end2end/fixtures/h2_ssl_cert.c (2)
  65. test/core/end2end/fuzzers/hpack.dictionary (1)
  66. test/core/end2end/gen_build_yaml.py (1)
  67. test/core/end2end/goaway_server_test.c (22)
  68. test/core/end2end/invalid_call_argument_test.c (10)
  69. test/core/end2end/no_server_test.c (2)
  70. test/core/end2end/tests/bad_hostname.c (2)
  71. test/core/end2end/tests/binary_metadata.c (8)
  72. test/core/end2end/tests/call_creds.c (10)
  73. test/core/end2end/tests/cancel_after_accept.c (6)
  74. test/core/end2end/tests/cancel_after_client_done.c (6)
  75. test/core/end2end/tests/cancel_after_invoke.c (2)
  76. test/core/end2end/tests/cancel_before_invoke.c (2)
  77. test/core/end2end/tests/cancel_with_status.c (2)
  78. test/core/end2end/tests/compressed_payload.c (24)
  79. test/core/end2end/tests/connectivity.c (10)
  80. test/core/end2end/tests/default_host.c (6)
  81. test/core/end2end/tests/disappearing_server.c (8)
  82. test/core/end2end/tests/empty_batch.c (2)
  83. test/core/end2end/tests/filter_call_init_fails.c (2)
  84. test/core/end2end/tests/filter_causes_close.c (2)
  85. test/core/end2end/tests/graceful_server_shutdown.c (8)
  86. test/core/end2end/tests/high_initial_seqno.c (6)
  87. test/core/end2end/tests/hpack_size.c (6)
  88. test/core/end2end/tests/idempotent_request.c (6)
  89. test/core/end2end/tests/invoke_large_request.c (8)
  90. test/core/end2end/tests/large_metadata.c (8)
  91. test/core/end2end/tests/load_reporting_hook.c (8)
  92. test/core/end2end/tests/max_concurrent_streams.c (18)
  93. test/core/end2end/tests/max_message_length.c (6)
  94. test/core/end2end/tests/negative_deadline.c (2)
  95. test/core/end2end/tests/network_status_change.c (8)
  96. test/core/end2end/tests/no_logging.c (293)
  97. test/core/end2end/tests/payload.c (8)
  98. test/core/end2end/tests/ping.c (8)
  99. test/core/end2end/tests/ping_pong_streaming.c (16)
  100. test/core/end2end/tests/registered_call.c (6)
Some files were not shown because too many files have changed in this diff.

BUILD (14)

@ -300,7 +300,6 @@ cc_library(
"src/core/ext/client_config/resolver_registry.h",
"src/core/ext/client_config/resolver_result.h",
"src/core/ext/client_config/subchannel.h",
"src/core/ext/client_config/subchannel_call_holder.h",
"src/core/ext/client_config/subchannel_index.h",
"src/core/ext/client_config/uri_parser.h",
"src/core/ext/lb_policy/grpclb/grpclb.h",
@ -313,6 +312,7 @@ cc_library(
"src/core/ext/census/census_interface.h",
"src/core/ext/census/census_rpc_stats.h",
"src/core/ext/census/gen/census.pb.h",
"src/core/ext/census/gen/trace_context.pb.h",
"src/core/ext/census/grpc_filter.h",
"src/core/ext/census/mlog.h",
"src/core/ext/census/resource.h",
@ -476,7 +476,6 @@ cc_library(
"src/core/ext/client_config/resolver_registry.c",
"src/core/ext/client_config/resolver_result.c",
"src/core/ext/client_config/subchannel.c",
"src/core/ext/client_config/subchannel_call_holder.c",
"src/core/ext/client_config/subchannel_index.c",
"src/core/ext/client_config/uri_parser.c",
"src/core/ext/transport/chttp2/server/insecure/server_chttp2.c",
@ -495,6 +494,7 @@ cc_library(
"src/core/ext/census/base_resources.c",
"src/core/ext/census/context.c",
"src/core/ext/census/gen/census.pb.c",
"src/core/ext/census/gen/trace_context.pb.c",
"src/core/ext/census/grpc_context.c",
"src/core/ext/census/grpc_filter.c",
"src/core/ext/census/grpc_plugin.c",
@ -674,7 +674,6 @@ cc_library(
"src/core/ext/client_config/resolver_registry.h",
"src/core/ext/client_config/resolver_result.h",
"src/core/ext/client_config/subchannel.h",
"src/core/ext/client_config/subchannel_call_holder.h",
"src/core/ext/client_config/subchannel_index.h",
"src/core/ext/client_config/uri_parser.h",
"src/core/lib/security/context/security_context.h",
@ -834,7 +833,6 @@ cc_library(
"src/core/ext/client_config/resolver_registry.c",
"src/core/ext/client_config/resolver_result.c",
"src/core/ext/client_config/subchannel.c",
"src/core/ext/client_config/subchannel_call_holder.c",
"src/core/ext/client_config/subchannel_index.c",
"src/core/ext/client_config/uri_parser.c",
"src/core/lib/http/httpcli_security_connector.c",
@ -1029,7 +1027,6 @@ cc_library(
"src/core/ext/client_config/resolver_registry.h",
"src/core/ext/client_config/resolver_result.h",
"src/core/ext/client_config/subchannel.h",
"src/core/ext/client_config/subchannel_call_holder.h",
"src/core/ext/client_config/subchannel_index.h",
"src/core/ext/client_config/uri_parser.h",
"src/core/ext/load_reporting/load_reporting.h",
@ -1042,6 +1039,7 @@ cc_library(
"src/core/ext/census/census_interface.h",
"src/core/ext/census/census_rpc_stats.h",
"src/core/ext/census/gen/census.pb.h",
"src/core/ext/census/gen/trace_context.pb.h",
"src/core/ext/census/grpc_filter.h",
"src/core/ext/census/mlog.h",
"src/core/ext/census/resource.h",
@ -1180,7 +1178,6 @@ cc_library(
"src/core/ext/client_config/resolver_registry.c",
"src/core/ext/client_config/resolver_result.c",
"src/core/ext/client_config/subchannel.c",
"src/core/ext/client_config/subchannel_call_holder.c",
"src/core/ext/client_config/subchannel_index.c",
"src/core/ext/client_config/uri_parser.c",
"src/core/ext/resolver/dns/native/dns_resolver.c",
@ -1195,6 +1192,7 @@ cc_library(
"src/core/ext/census/base_resources.c",
"src/core/ext/census/context.c",
"src/core/ext/census/gen/census.pb.c",
"src/core/ext/census/gen/trace_context.pb.c",
"src/core/ext/census/grpc_context.c",
"src/core/ext/census/grpc_filter.c",
"src/core/ext/census/grpc_plugin.c",
@ -2331,7 +2329,6 @@ objc_library(
"src/core/ext/client_config/resolver_registry.c",
"src/core/ext/client_config/resolver_result.c",
"src/core/ext/client_config/subchannel.c",
"src/core/ext/client_config/subchannel_call_holder.c",
"src/core/ext/client_config/subchannel_index.c",
"src/core/ext/client_config/uri_parser.c",
"src/core/ext/transport/chttp2/server/insecure/server_chttp2.c",
@ -2350,6 +2347,7 @@ objc_library(
"src/core/ext/census/base_resources.c",
"src/core/ext/census/context.c",
"src/core/ext/census/gen/census.pb.c",
"src/core/ext/census/gen/trace_context.pb.c",
"src/core/ext/census/grpc_context.c",
"src/core/ext/census/grpc_filter.c",
"src/core/ext/census/grpc_plugin.c",
@ -2531,7 +2529,6 @@ objc_library(
"src/core/ext/client_config/resolver_registry.h",
"src/core/ext/client_config/resolver_result.h",
"src/core/ext/client_config/subchannel.h",
"src/core/ext/client_config/subchannel_call_holder.h",
"src/core/ext/client_config/subchannel_index.h",
"src/core/ext/client_config/uri_parser.h",
"src/core/ext/lb_policy/grpclb/grpclb.h",
@ -2544,6 +2541,7 @@ objc_library(
"src/core/ext/census/census_interface.h",
"src/core/ext/census/census_rpc_stats.h",
"src/core/ext/census/gen/census.pb.h",
"src/core/ext/census/gen/trace_context.pb.h",
"src/core/ext/census/grpc_filter.h",
"src/core/ext/census/mlog.h",
"src/core/ext/census/resource.h",

@ -449,7 +449,6 @@ add_library(grpc
src/core/ext/client_config/resolver_registry.c
src/core/ext/client_config/resolver_result.c
src/core/ext/client_config/subchannel.c
src/core/ext/client_config/subchannel_call_holder.c
src/core/ext/client_config/subchannel_index.c
src/core/ext/client_config/uri_parser.c
src/core/ext/transport/chttp2/server/insecure/server_chttp2.c
@ -471,6 +470,7 @@ add_library(grpc
src/core/ext/census/base_resources.c
src/core/ext/census/context.c
src/core/ext/census/gen/census.pb.c
src/core/ext/census/gen/trace_context.pb.c
src/core/ext/census/grpc_context.c
src/core/ext/census/grpc_filter.c
src/core/ext/census/grpc_plugin.c
@ -682,7 +682,6 @@ add_library(grpc_cronet
src/core/ext/client_config/resolver_registry.c
src/core/ext/client_config/resolver_result.c
src/core/ext/client_config/subchannel.c
src/core/ext/client_config/subchannel_call_holder.c
src/core/ext/client_config/subchannel_index.c
src/core/ext/client_config/uri_parser.c
src/core/lib/http/httpcli_security_connector.c
@ -915,7 +914,6 @@ add_library(grpc_unsecure
src/core/ext/client_config/resolver_registry.c
src/core/ext/client_config/resolver_result.c
src/core/ext/client_config/subchannel.c
src/core/ext/client_config/subchannel_call_holder.c
src/core/ext/client_config/subchannel_index.c
src/core/ext/client_config/uri_parser.c
src/core/ext/resolver/dns/native/dns_resolver.c
@ -933,6 +931,7 @@ add_library(grpc_unsecure
src/core/ext/census/base_resources.c
src/core/ext/census/context.c
src/core/ext/census/gen/census.pb.c
src/core/ext/census/gen/trace_context.pb.c
src/core/ext/census/grpc_context.c
src/core/ext/census/grpc_filter.c
src/core/ext/census/grpc_plugin.c

@ -2665,7 +2665,6 @@ LIBGRPC_SRC = \
src/core/ext/client_config/resolver_registry.c \
src/core/ext/client_config/resolver_result.c \
src/core/ext/client_config/subchannel.c \
src/core/ext/client_config/subchannel_call_holder.c \
src/core/ext/client_config/subchannel_index.c \
src/core/ext/client_config/uri_parser.c \
src/core/ext/transport/chttp2/server/insecure/server_chttp2.c \
@ -2687,6 +2686,7 @@ LIBGRPC_SRC = \
src/core/ext/census/base_resources.c \
src/core/ext/census/context.c \
src/core/ext/census/gen/census.pb.c \
src/core/ext/census/gen/trace_context.pb.c \
src/core/ext/census/grpc_context.c \
src/core/ext/census/grpc_filter.c \
src/core/ext/census/grpc_plugin.c \
@ -2916,7 +2916,6 @@ LIBGRPC_CRONET_SRC = \
src/core/ext/client_config/resolver_registry.c \
src/core/ext/client_config/resolver_result.c \
src/core/ext/client_config/subchannel.c \
src/core/ext/client_config/subchannel_call_holder.c \
src/core/ext/client_config/subchannel_index.c \
src/core/ext/client_config/uri_parser.c \
src/core/lib/http/httpcli_security_connector.c \
@ -3377,7 +3376,6 @@ LIBGRPC_UNSECURE_SRC = \
src/core/ext/client_config/resolver_registry.c \
src/core/ext/client_config/resolver_result.c \
src/core/ext/client_config/subchannel.c \
src/core/ext/client_config/subchannel_call_holder.c \
src/core/ext/client_config/subchannel_index.c \
src/core/ext/client_config/uri_parser.c \
src/core/ext/resolver/dns/native/dns_resolver.c \
@ -3395,6 +3393,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/ext/census/base_resources.c \
src/core/ext/census/context.c \
src/core/ext/census/gen/census.pb.c \
src/core/ext/census/gen/trace_context.pb.c \
src/core/ext/census/grpc_context.c \
src/core/ext/census/grpc_filter.c \
src/core/ext/census/grpc_plugin.c \
@ -6776,6 +6775,7 @@ LIBEND2END_TESTS_SRC = \
test/core/end2end/tests/max_message_length.c \
test/core/end2end/tests/negative_deadline.c \
test/core/end2end/tests/network_status_change.c \
test/core/end2end/tests/no_logging.c \
test/core/end2end/tests/no_op.c \
test/core/end2end/tests/payload.c \
test/core/end2end/tests/ping.c \
@ -6856,6 +6856,7 @@ LIBEND2END_NOSEC_TESTS_SRC = \
test/core/end2end/tests/max_message_length.c \
test/core/end2end/tests/negative_deadline.c \
test/core/end2end/tests/network_status_change.c \
test/core/end2end/tests/no_logging.c \
test/core/end2end/tests/no_op.c \
test/core/end2end/tests/payload.c \
test/core/end2end/tests/ping.c \

@ -721,7 +721,6 @@
'src/core/ext/client_config/resolver_registry.c',
'src/core/ext/client_config/resolver_result.c',
'src/core/ext/client_config/subchannel.c',
'src/core/ext/client_config/subchannel_call_holder.c',
'src/core/ext/client_config/subchannel_index.c',
'src/core/ext/client_config/uri_parser.c',
'src/core/ext/transport/chttp2/server/insecure/server_chttp2.c',
@ -743,6 +742,7 @@
'src/core/ext/census/base_resources.c',
'src/core/ext/census/context.c',
'src/core/ext/census/gen/census.pb.c',
'src/core/ext/census/gen/trace_context.pb.c',
'src/core/ext/census/grpc_context.c',
'src/core/ext/census/grpc_filter.c',
'src/core/ext/census/grpc_plugin.c',

@ -24,6 +24,7 @@ filegroups:
- src/core/ext/census/census_interface.h
- src/core/ext/census/census_rpc_stats.h
- src/core/ext/census/gen/census.pb.h
- src/core/ext/census/gen/trace_context.pb.h
- src/core/ext/census/grpc_filter.h
- src/core/ext/census/mlog.h
- src/core/ext/census/resource.h
@ -32,6 +33,7 @@ filegroups:
- src/core/ext/census/base_resources.c
- src/core/ext/census/context.c
- src/core/ext/census/gen/census.pb.c
- src/core/ext/census/gen/trace_context.pb.c
- src/core/ext/census/grpc_context.c
- src/core/ext/census/grpc_filter.c
- src/core/ext/census/grpc_plugin.c
@ -352,7 +354,6 @@ filegroups:
- src/core/ext/client_config/resolver_registry.h
- src/core/ext/client_config/resolver_result.h
- src/core/ext/client_config/subchannel.h
- src/core/ext/client_config/subchannel_call_holder.h
- src/core/ext/client_config/subchannel_index.h
- src/core/ext/client_config/uri_parser.h
src:
@ -372,7 +373,6 @@ filegroups:
- src/core/ext/client_config/resolver_registry.c
- src/core/ext/client_config/resolver_result.c
- src/core/ext/client_config/subchannel.c
- src/core/ext/client_config/subchannel_call_holder.c
- src/core/ext/client_config/subchannel_index.c
- src/core/ext/client_config/uri_parser.c
plugin: grpc_client_config

@ -240,7 +240,6 @@ if test "$PHP_GRPC" != "no"; then
src/core/ext/client_config/resolver_registry.c \
src/core/ext/client_config/resolver_result.c \
src/core/ext/client_config/subchannel.c \
src/core/ext/client_config/subchannel_call_holder.c \
src/core/ext/client_config/subchannel_index.c \
src/core/ext/client_config/uri_parser.c \
src/core/ext/transport/chttp2/server/insecure/server_chttp2.c \
@ -262,6 +261,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/ext/census/base_resources.c \
src/core/ext/census/context.c \
src/core/ext/census/gen/census.pb.c \
src/core/ext/census/gen/trace_context.pb.c \
src/core/ext/census/grpc_context.c \
src/core/ext/census/grpc_filter.c \
src/core/ext/census/grpc_plugin.c \

@ -1,15 +1 @@
gRPC Fail Fast Semantics
========================
Fail fast requests allow terminating requests (with status UNAVAILABLE) prior
to the deadline of the request being met.
gRPC implementations of fail fast can terminate requests whenever a channel is
in the TRANSIENT_FAILURE or SHUTDOWN states. If the channel is in any other
state (CONNECTING, READY, or IDLE) the request should not be terminated.
Fail fast SHOULD be the default for gRPC implementations, with an option to
switch to non fail fast.
The opposite of fail fast is 'ignore connectivity'.
Moved to wait-for-ready.md

@ -0,0 +1,14 @@
gRPC Wait for Ready Semantics
=============================
If an RPC is issued but the channel is in `TRANSIENT_FAILURE` or `SHUTDOWN`
states, the RPC is unable to be transmitted promptly. By default, gRPC
implementations SHOULD fail such RPCs immediately. This is known as "fail fast,"
but usage of the term is historical. RPCs SHOULD NOT fail as a result of the
channel being in other states (`CONNECTING`, `READY`, or `IDLE`).
gRPC implementations MAY provide a per-RPC option to not fail RPCs as a result
of the channel being in `TRANSIENT_FAILURE` state. Instead, the implementation
queues the RPCs until the channel is `READY`. This is known as "wait for ready."
The RPCs SHOULD still fail before `READY` if there are unrelated reasons, such
as the channel being `SHUTDOWN` or the RPC's deadline being reached.
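
For illustration, here is a minimal client-side C++ sketch of opting a single RPC
into wait-for-ready. It assumes a protoc-generated `Greeter` service with a unary
`SayHello` method (as in the greeter examples elsewhere in this tree) and the
`ClientContext::set_fail_fast` toggle of this era; it is a sketch, not part of the diff.

#include <string>
#include <grpc++/grpc++.h>
// plus the hypothetical generated helloworld.grpc.pb.h for Greeter/HelloRequest/HelloReply

grpc::Status SayHelloWaitForReady(Greeter::Stub* stub, const std::string& name) {
  grpc::ClientContext context;
  // Queue the RPC while the channel is TRANSIENT_FAILURE instead of failing it
  // immediately; it still fails on SHUTDOWN or when the deadline expires.
  context.set_fail_fast(false);
  HelloRequest request;
  request.set_name(name);  // assumes a string field `name` on HelloRequest
  HelloReply reply;
  return stub->SayHello(&context, request, &reply);
}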

@ -30,5 +30,5 @@
set -e
cd $(dirname $0)
php $extension_dir -d extension=grpc.so -d max_execution_time=300 \
php -d extension=grpc.so -d max_execution_time=300 \
greeter_client.php $1

@ -391,7 +391,6 @@ Pod::Spec.new do |s|
'src/core/ext/client_config/resolver_registry.h',
'src/core/ext/client_config/resolver_result.h',
'src/core/ext/client_config/subchannel.h',
'src/core/ext/client_config/subchannel_call_holder.h',
'src/core/ext/client_config/subchannel_index.h',
'src/core/ext/client_config/uri_parser.h',
'src/core/ext/lb_policy/grpclb/grpclb.h',
@ -408,6 +407,7 @@ Pod::Spec.new do |s|
'src/core/ext/census/census_interface.h',
'src/core/ext/census/census_rpc_stats.h',
'src/core/ext/census/gen/census.pb.h',
'src/core/ext/census/gen/trace_context.pb.h',
'src/core/ext/census/grpc_filter.h',
'src/core/ext/census/mlog.h',
'src/core/ext/census/resource.h',
@ -571,7 +571,6 @@ Pod::Spec.new do |s|
'src/core/ext/client_config/resolver_registry.c',
'src/core/ext/client_config/resolver_result.c',
'src/core/ext/client_config/subchannel.c',
'src/core/ext/client_config/subchannel_call_holder.c',
'src/core/ext/client_config/subchannel_index.c',
'src/core/ext/client_config/uri_parser.c',
'src/core/ext/transport/chttp2/server/insecure/server_chttp2.c',
@ -593,6 +592,7 @@ Pod::Spec.new do |s|
'src/core/ext/census/base_resources.c',
'src/core/ext/census/context.c',
'src/core/ext/census/gen/census.pb.c',
'src/core/ext/census/gen/trace_context.pb.c',
'src/core/ext/census/grpc_context.c',
'src/core/ext/census/grpc_filter.c',
'src/core/ext/census/grpc_plugin.c',
@ -754,7 +754,6 @@ Pod::Spec.new do |s|
'src/core/ext/client_config/resolver_registry.h',
'src/core/ext/client_config/resolver_result.h',
'src/core/ext/client_config/subchannel.h',
'src/core/ext/client_config/subchannel_call_holder.h',
'src/core/ext/client_config/subchannel_index.h',
'src/core/ext/client_config/uri_parser.h',
'src/core/ext/lb_policy/grpclb/grpclb.h',
@ -771,6 +770,7 @@ Pod::Spec.new do |s|
'src/core/ext/census/census_interface.h',
'src/core/ext/census/census_rpc_stats.h',
'src/core/ext/census/gen/census.pb.h',
'src/core/ext/census/gen/trace_context.pb.h',
'src/core/ext/census/grpc_filter.h',
'src/core/ext/census/mlog.h',
'src/core/ext/census/resource.h',

@ -29,6 +29,7 @@ Gem::Specification.new do |s|
s.add_dependency 'google-protobuf', '~> 3.0'
s.add_dependency 'googleauth', '~> 0.5.1'
s.add_dependency 'concurrent-ruby'
s.add_development_dependency 'bundler', '~> 1.9'
s.add_development_dependency 'facter', '~> 2.4'
@ -310,7 +311,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/client_config/resolver_registry.h )
s.files += %w( src/core/ext/client_config/resolver_result.h )
s.files += %w( src/core/ext/client_config/subchannel.h )
s.files += %w( src/core/ext/client_config/subchannel_call_holder.h )
s.files += %w( src/core/ext/client_config/subchannel_index.h )
s.files += %w( src/core/ext/client_config/uri_parser.h )
s.files += %w( src/core/ext/lb_policy/grpclb/grpclb.h )
@ -327,6 +327,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/census/census_interface.h )
s.files += %w( src/core/ext/census/census_rpc_stats.h )
s.files += %w( src/core/ext/census/gen/census.pb.h )
s.files += %w( src/core/ext/census/gen/trace_context.pb.h )
s.files += %w( src/core/ext/census/grpc_filter.h )
s.files += %w( src/core/ext/census/mlog.h )
s.files += %w( src/core/ext/census/resource.h )
@ -490,7 +491,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/client_config/resolver_registry.c )
s.files += %w( src/core/ext/client_config/resolver_result.c )
s.files += %w( src/core/ext/client_config/subchannel.c )
s.files += %w( src/core/ext/client_config/subchannel_call_holder.c )
s.files += %w( src/core/ext/client_config/subchannel_index.c )
s.files += %w( src/core/ext/client_config/uri_parser.c )
s.files += %w( src/core/ext/transport/chttp2/server/insecure/server_chttp2.c )
@ -512,6 +512,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/census/base_resources.c )
s.files += %w( src/core/ext/census/context.c )
s.files += %w( src/core/ext/census/gen/census.pb.c )
s.files += %w( src/core/ext/census/gen/trace_context.pb.c )
s.files += %w( src/core/ext/census/grpc_context.c )
s.files += %w( src/core/ext/census/grpc_filter.c )
s.files += %w( src/core/ext/census/grpc_plugin.c )

@ -74,6 +74,7 @@
#include <grpc++/impl/codegen/async_stream.h>
#include <grpc++/impl/codegen/async_unary_call.h>
#include <grpc++/impl/codegen/method_handler_impl.h>
#include <grpc++/impl/codegen/proto_utils.h>
#include <grpc++/impl/codegen/rpc_method.h>
#include <grpc++/impl/codegen/service_type.h>
@ -174,6 +175,7 @@ class ServerReflection GRPC_FINAL {
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
};
typedef Service StreamedUnaryService;
};
} // namespace v1alpha

@ -51,6 +51,9 @@
#ifndef GRPCXX_GRPCXX_H
#define GRPCXX_GRPCXX_H
// Pragma for http://include-what-you-use.org/ tool, tells that following
// headers are not private for grpc++.h and are part of its interface.
// IWYU pragma: begin_exports
#include <grpc/grpc.h>
#include <grpc++/channel.h>
@ -62,5 +65,6 @@
#include <grpc++/server_builder.h>
#include <grpc++/server_context.h>
#include <grpc++/server_posix.h>
// IWYU pragma: end_exports
#endif // GRPCXX_GRPCXX_H

@ -669,8 +669,8 @@ class Call GRPC_FINAL {
call_hook_->PerformOpsOnCall(ops, this);
}
grpc_call* call() { return call_; }
CompletionQueue* cq() { return cq_; }
grpc_call* call() const { return call_; }
CompletionQueue* cq() const { return cq_; }
int max_receive_message_size() { return max_receive_message_size_; }

@ -68,8 +68,10 @@ template <class R>
class ServerReader;
template <class W>
class ServerWriter;
namespace internal {
template <class W, class R>
class ServerReaderWriter;
class ServerReaderWriterBody;
}
template <class ServiceType, class RequestType, class ResponseType>
class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
@ -178,15 +180,15 @@ class CompletionQueue : private GrpcLibraryCodegen {
template <class W>
friend class ::grpc::ServerWriter;
template <class W, class R>
friend class ::grpc::ServerReaderWriter;
friend class ::grpc::internal::ServerReaderWriterBody;
template <class ServiceType, class RequestType, class ResponseType>
friend class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ClientStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ServerStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class BidiStreamingHandler;
template <class Streamer, bool WriteNeeded>
friend class TemplatedBidiStreamingHandler;
friend class UnknownMethodHandler;
friend class ::grpc::Server;
friend class ::grpc::ServerContext;

@ -167,20 +167,22 @@ class ServerStreamingHandler : public MethodHandler {
};
// A wrapper class of an application provided bidi-streaming handler.
template <class ServiceType, class RequestType, class ResponseType>
class BidiStreamingHandler : public MethodHandler {
// This also applies to server-streamed implementation of a unary method
// with the additional requirement that such methods must have done a
// write for status to be ok
// Since this is used by more than 1 class, the service is not passed in.
// Instead, it is expected to be an implicitly-captured argument of func
// (through bind or something along those lines)
template <class Streamer, bool WriteNeeded>
class TemplatedBidiStreamingHandler : public MethodHandler {
public:
BidiStreamingHandler(
std::function<Status(ServiceType*, ServerContext*,
ServerReaderWriter<ResponseType, RequestType>*)>
func,
ServiceType* service)
: func_(func), service_(service) {}
TemplatedBidiStreamingHandler(
std::function<Status(ServerContext*, Streamer*)> func)
: func_(func), write_needed_(WriteNeeded) {}
void RunHandler(const HandlerParameter& param) GRPC_FINAL {
ServerReaderWriter<ResponseType, RequestType> stream(param.call,
param.server_context);
Status status = func_(service_, param.server_context, &stream);
Streamer stream(param.call, param.server_context);
Status status = func_(param.server_context, &stream);
CallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> ops;
if (!param.server_context->sent_initial_metadata_) {
@ -189,6 +191,12 @@ class BidiStreamingHandler : public MethodHandler {
if (param.server_context->compression_level_set()) {
ops.set_compression_level(param.server_context->compression_level());
}
if (write_needed_ && status.ok()) {
// If we needed a write but never did one, we need to mark the
// status as a fail
status = Status(StatusCode::INTERNAL,
"Service did not provide response message");
}
}
ops.ServerSendStatus(param.server_context->trailing_metadata_, status);
param.call->PerformOps(&ops);
@ -196,10 +204,36 @@ class BidiStreamingHandler : public MethodHandler {
}
private:
std::function<Status(ServiceType*, ServerContext*,
ServerReaderWriter<ResponseType, RequestType>*)>
func_;
ServiceType* service_;
std::function<Status(ServerContext*, Streamer*)> func_;
const bool write_needed_;
};
template <class ServiceType, class RequestType, class ResponseType>
class BidiStreamingHandler
: public TemplatedBidiStreamingHandler<
ServerReaderWriter<ResponseType, RequestType>, false> {
public:
BidiStreamingHandler(
std::function<Status(ServiceType*, ServerContext*,
ServerReaderWriter<ResponseType, RequestType>*)>
func,
ServiceType* service)
: TemplatedBidiStreamingHandler<
ServerReaderWriter<ResponseType, RequestType>, false>(std::bind(
func, service, std::placeholders::_1, std::placeholders::_2)) {}
};
template <class RequestType, class ResponseType>
class StreamedUnaryHandler
: public TemplatedBidiStreamingHandler<
ServerUnaryStreamer<RequestType, ResponseType>, true> {
public:
explicit StreamedUnaryHandler(
std::function<Status(ServerContext*,
ServerUnaryStreamer<RequestType, ResponseType>*)>
func)
: TemplatedBidiStreamingHandler<
ServerUnaryStreamer<RequestType, ResponseType>, true>(func) {}
};
// Handle unknown method by returning UNIMPLEMENTED error.

@ -60,11 +60,12 @@ class RpcMethod {
const char* name() const { return name_; }
RpcType method_type() const { return method_type_; }
void SetMethodType(RpcType type) { method_type_ = type; }
void* channel_tag() const { return channel_tag_; }
private:
const char* const name_;
const RpcType method_type_;
RpcType method_type_;
void* const channel_tag_;
};

@ -82,6 +82,7 @@ class RpcServiceMethod : public RpcMethod {
// if MethodHandler is nullptr, then this is an async method
MethodHandler* handler() const { return handler_.get(); }
void ResetHandler() { handler_.reset(); }
void SetHandler(MethodHandler* handler) { handler_.reset(handler); }
private:
void* server_tag_;

@ -65,8 +65,10 @@ template <class R>
class ServerReader;
template <class W>
class ServerWriter;
namespace internal {
template <class W, class R>
class ServerReaderWriter;
class ServerReaderWriterBody;
}
template <class ServiceType, class RequestType, class ResponseType>
class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
@ -187,15 +189,15 @@ class ServerContext {
template <class W>
friend class ::grpc::ServerWriter;
template <class W, class R>
friend class ::grpc::ServerReaderWriter;
friend class ::grpc::internal::ServerReaderWriterBody;
template <class ServiceType, class RequestType, class ResponseType>
friend class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ClientStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ServerStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class BidiStreamingHandler;
template <class Streamer, bool WriteNeeded>
friend class TemplatedBidiStreamingHandler;
friend class UnknownMethodHandler;
friend class ::grpc::ClientContext;

@ -147,6 +147,17 @@ class Service {
methods_[index].reset();
}
void MarkMethodStreamedUnary(int index,
MethodHandler* streamed_unary_method) {
GPR_CODEGEN_ASSERT(methods_[index] && methods_[index]->handler() &&
"Cannot mark an async or generic method Streamed Unary");
methods_[index]->SetHandler(streamed_unary_method);
// From the server's point of view, streamed unary is a special
// case of BIDI_STREAMING that has 1 read and 1 write, in that order.
methods_[index]->SetMethodType(::grpc::RpcMethod::BIDI_STREAMING);
}
private:
friend class Server;
friend class ServerInterface;
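
To make the new hook concrete, here is a hedged sketch of what a hand-rolled wrapper
might do with `MarkMethodStreamedUnary` and `StreamedUnaryHandler`; the code-generator
change further below emits essentially this shape. `EchoService`, `EchoRequest`, and
`EchoResponse` are hypothetical protoc-generated names, and the method index 0 is assumed.

#include <functional>
#include <grpc++/grpc++.h>
#include <grpc++/impl/codegen/method_handler_impl.h>
#include <grpc++/impl/codegen/sync_stream.h>
// plus the hypothetical generated echo.grpc.pb.h

class StreamedEchoBase : public EchoService::Service {
 public:
  StreamedEchoBase() {
    // Replace the synchronous unary handler at index 0 with a streamed-unary
    // handler; from the server's point of view the method now behaves as
    // BIDI_STREAMING with exactly one read and one write.
    ::grpc::Service::MarkMethodStreamedUnary(
        0, new ::grpc::StreamedUnaryHandler<EchoRequest, EchoResponse>(
               std::bind(&StreamedEchoBase::StreamedEcho, this,
                         std::placeholders::_1, std::placeholders::_2)));
  }
  // The application overrides this instead of the regular Echo().
  virtual ::grpc::Status StreamedEcho(
      ::grpc::ServerContext* context,
      ::grpc::ServerUnaryStreamer<EchoRequest, EchoResponse>* streamer) = 0;
};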

@ -79,6 +79,9 @@ class ReaderInterface {
public:
virtual ~ReaderInterface() {}
/// Upper bound on the next message size available for reading on this stream
virtual bool NextMessageSize(uint32_t* sz) = 0;
/// Blocking read a message and parse to \a msg. Returns \a true on success.
/// This is thread-safe with respect to \a Write or \a WritesDone methods on
/// the same stream. It should not be called concurrently with another \a
@ -157,6 +160,11 @@ class ClientReader GRPC_FINAL : public ClientReaderInterface<R> {
cq_.Pluck(&ops); /// status ignored
}
bool NextMessageSize(uint32_t* sz) GRPC_OVERRIDE {
*sz = call_.max_message_size();
return true;
}
bool Read(R* msg) GRPC_OVERRIDE {
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> ops;
if (!context_->initial_metadata_received_) {
@ -302,6 +310,11 @@ class ClientReaderWriter GRPC_FINAL : public ClientReaderWriterInterface<W, R> {
cq_.Pluck(&ops); // status ignored
}
bool NextMessageSize(uint32_t* sz) GRPC_OVERRIDE {
*sz = call_.max_message_size();
return true;
}
bool Read(R* msg) GRPC_OVERRIDE {
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> ops;
if (!context_->initial_metadata_received_) {
@ -369,6 +382,11 @@ class ServerReader GRPC_FINAL : public ServerReaderInterface<R> {
call_->cq()->Pluck(&ops);
}
bool NextMessageSize(uint32_t* sz) GRPC_OVERRIDE {
*sz = call_->max_message_size();
return true;
}
bool Read(R* msg) GRPC_OVERRIDE {
CallOpSet<CallOpRecvMessage<R>> ops;
ops.RecvMessage(msg);
@ -434,12 +452,15 @@ class ServerReaderWriterInterface : public ServerStreamingInterface,
public WriterInterface<W>,
public ReaderInterface<R> {};
// Actual implementation of bi-directional streaming
namespace internal {
template <class W, class R>
class ServerReaderWriter GRPC_FINAL : public ServerReaderWriterInterface<W, R> {
class ServerReaderWriterBody GRPC_FINAL {
public:
ServerReaderWriter(Call* call, ServerContext* ctx) : call_(call), ctx_(ctx) {}
ServerReaderWriterBody(Call* call, ServerContext* ctx)
: call_(call), ctx_(ctx) {}
void SendInitialMetadata() GRPC_OVERRIDE {
void SendInitialMetadata() {
GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
CallOpSet<CallOpSendInitialMetadata> ops;
@ -453,15 +474,19 @@ class ServerReaderWriter GRPC_FINAL : public ServerReaderWriterInterface<W, R> {
call_->cq()->Pluck(&ops);
}
bool Read(R* msg) GRPC_OVERRIDE {
bool NextMessageSize(uint32_t* sz) {
*sz = call_->max_message_size();
return true;
}
bool Read(R* msg) {
CallOpSet<CallOpRecvMessage<R>> ops;
ops.RecvMessage(msg);
call_->PerformOps(&ops);
return call_->cq()->Pluck(&ops) && ops.got_message;
}
using WriterInterface<W>::Write;
bool Write(const W& msg, const WriteOptions& options) GRPC_OVERRIDE {
bool Write(const W& msg, const WriteOptions& options) {
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage> ops;
if (!ops.SendMessage(msg, options).ok()) {
return false;
@ -482,6 +507,76 @@ class ServerReaderWriter GRPC_FINAL : public ServerReaderWriterInterface<W, R> {
Call* const call_;
ServerContext* const ctx_;
};
}
// class to represent the user API for a bidirectional streaming call
template <class W, class R>
class ServerReaderWriter GRPC_FINAL : public ServerReaderWriterInterface<W, R> {
public:
ServerReaderWriter(Call* call, ServerContext* ctx) : body_(call, ctx) {}
void SendInitialMetadata() GRPC_OVERRIDE { body_.SendInitialMetadata(); }
bool NextMessageSize(uint32_t* sz) GRPC_OVERRIDE {
return body_.NextMessageSize(sz);
}
bool Read(R* msg) GRPC_OVERRIDE { return body_.Read(msg); }
using WriterInterface<W>::Write;
bool Write(const W& msg, const WriteOptions& options) GRPC_OVERRIDE {
return body_.Write(msg, options);
}
private:
internal::ServerReaderWriterBody<W, R> body_;
};
/// A class to represent a flow-controlled unary call. This is something
/// of a hybrid between conventional unary and streaming. This is invoked
/// through a unary call on the client side, but the server responds to it
/// as though it were a single-ping-pong streaming call. The server can use
/// the \a NextMessageSize method to determine an upper-bound on the size of
/// the message.
/// A key difference relative to streaming: ServerUnaryStreamer
/// must have exactly 1 Read and exactly 1 Write, in that order, to function
/// correctly. Otherwise, the RPC is in error.
template <class RequestType, class ResponseType>
class ServerUnaryStreamer GRPC_FINAL
: public ServerReaderWriterInterface<ResponseType, RequestType> {
public:
ServerUnaryStreamer(Call* call, ServerContext* ctx)
: body_(call, ctx), read_done_(false), write_done_(false) {}
void SendInitialMetadata() GRPC_OVERRIDE { body_.SendInitialMetadata(); }
bool NextMessageSize(uint32_t* sz) GRPC_OVERRIDE {
return body_.NextMessageSize(sz);
}
bool Read(RequestType* request) GRPC_OVERRIDE {
if (read_done_) {
return false;
}
read_done_ = true;
return body_.Read(request);
}
using WriterInterface<ResponseType>::Write;
bool Write(const ResponseType& response,
const WriteOptions& options) GRPC_OVERRIDE {
if (write_done_ || !read_done_) {
return false;
}
write_done_ = true;
return body_.Write(response, options);
}
private:
internal::ServerReaderWriterBody<ResponseType, RequestType> body_;
bool read_done_;
bool write_done_;
};
} // namespace grpc
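
A hedged sketch of a handler body written against `ServerUnaryStreamer`, continuing the
hypothetical `EchoRequest`/`EchoResponse` messages from above: exactly one Read followed
by exactly one Write, otherwise `TemplatedBidiStreamingHandler` turns an OK status into
INTERNAL because the required write never happened.

#include <cstdint>
#include <grpc++/grpc++.h>
// plus the hypothetical generated echo.pb.h

::grpc::Status StreamedEcho(
    ::grpc::ServerContext* context,
    ::grpc::ServerUnaryStreamer<EchoRequest, EchoResponse>* streamer) {
  uint32_t max_size = 0;
  streamer->NextMessageSize(&max_size);  // upper bound on the incoming request size
  EchoRequest request;
  if (!streamer->Read(&request)) {  // only the first Read can succeed
    return ::grpc::Status(::grpc::StatusCode::INTERNAL, "read failed");
  }
  EchoResponse response;
  response.set_message(request.message());  // assumes a string field `message`
  if (!streamer->Write(response)) {  // only one Write, and only after the Read
    return ::grpc::Status(::grpc::StatusCode::INTERNAL, "write failed");
  }
  return ::grpc::Status::OK;
}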

@ -41,6 +41,7 @@
namespace grpc {
class ServerInitializer;
class ChannelArguments;
class ServerBuilderPlugin {
public:
@ -58,6 +59,10 @@ class ServerBuilderPlugin {
// ServerBuilderOption::UpdatePlugins
virtual void ChangeArguments(const grpc::string& name, void* value) = 0;
// UpdateChannelArguments will be called in ServerBuilder::BuildAndStart(),
// before the Server instance is created.
virtual void UpdateChannelArguments(ChannelArguments* args) {}
virtual bool has_sync_methods() const { return false; }
virtual bool has_async_methods() const { return false; }
};
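
As a sketch of the new hook: the body of an override might simply inject channel
arguments before the Server is built. `ChannelArguments::SetInt` is an existing setter;
`MyPlugin` is a hypothetical `ServerBuilderPlugin` subclass whose other members are
omitted here, and the argument key is made up for illustration.

void MyPlugin::UpdateChannelArguments(grpc::ChannelArguments* args) {
  // Runs from ServerBuilder::BuildAndStart() before the Server instance is
  // created, so anything added here ends up in the server's channel args.
  args->SetInt("grpc.my_plugin.enabled", 1);  // hypothetical argument key
}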

@ -226,10 +226,14 @@ typedef enum grpc_call_error {
#define GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST (0x00000010u)
/** Signal that the call should not return UNAVAILABLE before it has started */
#define GRPC_INITIAL_METADATA_IGNORE_CONNECTIVITY (0x00000020u)
/** Signal that the call is cacheable. GRPC is free to use GET verb */
#define GRPC_INITIAL_METADATA_CACHEABLE_REQUEST (0x00000040u)
/** Mask of all valid flags */
#define GRPC_INITIAL_METADATA_USED_MASK \
(GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST | \
GRPC_INITIAL_METADATA_IGNORE_CONNECTIVITY)
#define GRPC_INITIAL_METADATA_USED_MASK \
(GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST | \
GRPC_INITIAL_METADATA_IGNORE_CONNECTIVITY | \
GRPC_INITIAL_METADATA_CACHEABLE_REQUEST)
/** A single metadata element */
typedef struct grpc_metadata {
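
A small sketch of how a caller composes these flags on the core send-initial-metadata
op. Flag bits outside `GRPC_INITIAL_METADATA_USED_MASK` are rejected by the core when
the batch is started, which is why the mask above gains the new bit.

#include <string.h>
#include <grpc/grpc.h>
#include <grpc/support/log.h>

static void fill_send_initial_metadata(grpc_op* op) {
  memset(op, 0, sizeof(*op));
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  /* An idempotent, cacheable request: the transport is free to use the GET verb. */
  op->flags = GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST |
              GRPC_INITIAL_METADATA_CACHEABLE_REQUEST;
  /* Any bit outside the mask would make grpc_call_start_batch fail with
     GRPC_CALL_ERROR_INVALID_FLAGS. */
  GPR_ASSERT((op->flags & ~GRPC_INITIAL_METADATA_USED_MASK) == 0);
}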

@ -318,7 +318,6 @@
<file baseinstalldir="/" name="src/core/ext/client_config/resolver_registry.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_config/resolver_result.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_config/subchannel.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_config/subchannel_call_holder.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_config/subchannel_index.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_config/uri_parser.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/lb_policy/grpclb/grpclb.h" role="src" />
@ -335,6 +334,7 @@
<file baseinstalldir="/" name="src/core/ext/census/census_interface.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/census_rpc_stats.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/gen/census.pb.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/gen/trace_context.pb.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/grpc_filter.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/mlog.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/resource.h" role="src" />
@ -498,7 +498,6 @@
<file baseinstalldir="/" name="src/core/ext/client_config/resolver_registry.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_config/resolver_result.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_config/subchannel.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_config/subchannel_call_holder.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_config/subchannel_index.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_config/uri_parser.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/server/insecure/server_chttp2.c" role="src" />
@ -520,6 +519,7 @@
<file baseinstalldir="/" name="src/core/ext/census/base_resources.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/context.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/gen/census.pb.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/gen/trace_context.pb.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/grpc_context.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/grpc_filter.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/grpc_plugin.c" role="src" />

@ -130,6 +130,7 @@ grpc::string GetHeaderIncludes(File *file, const Parameters &params) {
static const char *headers_strs[] = {
"grpc++/impl/codegen/async_stream.h",
"grpc++/impl/codegen/async_unary_call.h",
"grpc++/impl/codegen/method_handler_impl.h",
"grpc++/impl/codegen/proto_utils.h",
"grpc++/impl/codegen/rpc_method.h",
"grpc++/impl/codegen/service_type.h",
@ -604,6 +605,57 @@ void PrintHeaderServerMethodAsync(Printer *printer, const Method *method,
printer->Print(*vars, "};\n");
}
void PrintHeaderServerMethodStreamedUnary(
Printer *printer, const Method *method,
std::map<grpc::string, grpc::string> *vars) {
(*vars)["Method"] = method->name();
(*vars)["Request"] = method->input_type_name();
(*vars)["Response"] = method->output_type_name();
if (method->NoStreaming()) {
printer->Print(*vars, "template <class BaseClass>\n");
printer->Print(*vars,
"class WithStreamedUnaryMethod_$Method$ : "
"public BaseClass {\n");
printer->Print(
" private:\n"
" void BaseClassMustBeDerivedFromService(const Service *service) "
"{}\n");
printer->Print(" public:\n");
printer->Indent();
printer->Print(*vars,
"WithStreamedUnaryMethod_$Method$() {\n"
" ::grpc::Service::MarkMethodStreamedUnary($Idx$,\n"
" new ::grpc::StreamedUnaryHandler< $Request$, "
"$Response$>(std::bind"
"(&WithStreamedUnaryMethod_$Method$<BaseClass>::"
"Streamed$Method$, this, std::placeholders::_1, "
"std::placeholders::_2)));\n"
"}\n");
printer->Print(*vars,
"~WithStreamedUnaryMethod_$Method$() GRPC_OVERRIDE {\n"
" BaseClassMustBeDerivedFromService(this);\n"
"}\n");
printer->Print(
*vars,
"// disable regular version of this method\n"
"::grpc::Status $Method$("
"::grpc::ServerContext* context, const $Request$* request, "
"$Response$* response) GRPC_FINAL GRPC_OVERRIDE {\n"
" abort();\n"
" return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
"}\n");
printer->Print(*vars,
"// replace default version of method with streamed unary\n"
"virtual ::grpc::Status Streamed$Method$("
"::grpc::ServerContext* context, "
"::grpc::ServerUnaryStreamer< "
"$Request$,$Response$>* server_unary_streamer)"
" = 0;\n");
printer->Outdent();
printer->Print(*vars, "};\n");
}
}
void PrintHeaderServerMethodGeneric(
Printer *printer, const Method *method,
std::map<grpc::string, grpc::string> *vars) {
@ -770,6 +822,28 @@ void PrintHeaderService(Printer *printer, const Service *service,
PrintHeaderServerMethodGeneric(printer, service->method(i).get(), vars);
}
// Server side - Streamed Unary
for (int i = 0; i < service->method_count(); ++i) {
(*vars)["Idx"] = as_string(i);
PrintHeaderServerMethodStreamedUnary(printer, service->method(i).get(),
vars);
}
printer->Print("typedef ");
for (int i = 0; i < service->method_count(); ++i) {
(*vars)["method_name"] = service->method(i).get()->name();
if (service->method(i)->NoStreaming()) {
printer->Print(*vars, "WithStreamedUnaryMethod_$method_name$<");
}
}
printer->Print("Service");
for (int i = 0; i < service->method_count(); ++i) {
if (service->method(i)->NoStreaming()) {
printer->Print(" >");
}
}
printer->Print(" StreamedUnaryService;\n");
printer->Outdent();
printer->Print("};\n");
printer->Print(service->GetTrailingComments().c_str());
@ -1080,6 +1154,9 @@ void PrintSourceService(Printer *printer, const Service *service,
(*vars)["Idx"] = as_string(i);
if (method->NoStreaming()) {
(*vars)["StreamingType"] = "NORMAL_RPC";
// NOTE: There is no reason to consider streamed-unary as a separate
// category here since this part is setting up the client-side stub
// and this appears as a NORMAL_RPC from the client-side.
} else if (method->ClientOnlyStreaming()) {
(*vars)["StreamingType"] = "CLIENT_STREAMING";
} else if (method->ServerOnlyStreaming()) {
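
Putting the generator output to use, a hedged sketch of the application side for a
hypothetical `EchoService` with one unary method `Echo`: the generated
`WithStreamedUnaryMethod_Echo<>` wrapper (and the `StreamedUnaryService` typedef emitted
above) disables the regular `Echo()` and asks for `StreamedEcho()` instead. The service
and message names are assumptions; the ServerBuilder calls are the standard C++ API.

#include <memory>
#include <grpc++/grpc++.h>
// plus the hypothetical generated echo.grpc.pb.h

class EchoStreamedImpl GRPC_FINAL
    : public EchoService::WithStreamedUnaryMethod_Echo<EchoService::Service> {
 public:
  ::grpc::Status StreamedEcho(
      ::grpc::ServerContext* context,
      ::grpc::ServerUnaryStreamer<EchoRequest, EchoResponse>* streamer)
      GRPC_OVERRIDE {
    EchoRequest request;
    if (!streamer->Read(&request)) {  // the single permitted read
      return ::grpc::Status(::grpc::StatusCode::INTERNAL, "no request");
    }
    EchoResponse response;
    response.set_message(request.message());  // assumes a `message` field
    streamer->Write(response);                // the single permitted write
    return ::grpc::Status::OK;
  }
};

void RunServer() {
  EchoStreamedImpl service;
  ::grpc::ServerBuilder builder;
  builder.AddListeningPort("0.0.0.0:50051",
                           ::grpc::InsecureServerCredentials());
  builder.RegisterService(&service);
  std::unique_ptr<::grpc::Server> server = builder.BuildAndStart();
  server->Wait();
}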

@ -4,3 +4,7 @@ Files generated for use by Census stats and trace recording subsystem.
* census.pb.{h,c} - Generated from src/core/ext/census/census.proto, using the
script `tools/codegen/core/gen_nano_proto.sh src/proto/census/census.proto
$PWD/src/core/ext/census/gen src/core/ext/census/gen`
* trace_context.pb.{h,c} - Generated from
src/core/ext/census/trace_context.proto, using the script
`tools/codegen/core/gen_nano_proto.sh src/proto/census/trace_context.proto
$PWD/src/core/ext/census/gen src/core/ext/census/gen`

@ -0,0 +1,81 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/* Automatically generated nanopb constant definitions */
/* Generated by nanopb-0.3.5-dev */
#include "src/core/ext/census/gen/trace_context.pb.h"
#if PB_PROTO_HEADER_VERSION != 30
#error Regenerate this file with the current version of nanopb generator.
#endif
const pb_field_t google_trace_TraceId_fields[3] = {
PB_FIELD( 1, FIXED64 , OPTIONAL, STATIC , FIRST, google_trace_TraceId, hi, hi, 0),
PB_FIELD( 2, FIXED64 , OPTIONAL, STATIC , OTHER, google_trace_TraceId, lo, hi, 0),
PB_LAST_FIELD
};
const pb_field_t google_trace_TraceContext_fields[4] = {
PB_FIELD( 1, MESSAGE , OPTIONAL, STATIC , FIRST, google_trace_TraceContext, trace_id, trace_id, &google_trace_TraceId_fields),
PB_FIELD( 2, FIXED64 , OPTIONAL, STATIC , OTHER, google_trace_TraceContext, span_id, trace_id, 0),
PB_FIELD( 3, BOOL , OPTIONAL, STATIC , OTHER, google_trace_TraceContext, is_sampled, span_id, 0),
PB_LAST_FIELD
};
/* Check that field information fits in pb_field_t */
#if !defined(PB_FIELD_32BIT)
/* If you get an error here, it means that you need to define PB_FIELD_32BIT
* compile-time option. You can do that in pb.h or on compiler command line.
*
* The reason you need to do this is that some of your messages contain tag
* numbers or field sizes that are larger than what can fit in 8 or 16 bit
* field descriptors.
*/
PB_STATIC_ASSERT((pb_membersize(google_trace_TraceContext, trace_id) < 65536), YOU_MUST_DEFINE_PB_FIELD_32BIT_FOR_MESSAGES_google_trace_TraceId_google_trace_TraceContext)
#endif
#if !defined(PB_FIELD_16BIT) && !defined(PB_FIELD_32BIT)
/* If you get an error here, it means that you need to define PB_FIELD_16BIT
* compile-time option. You can do that in pb.h or on compiler command line.
*
* The reason you need to do this is that some of your messages contain tag
* numbers or field sizes that are larger than what can fit in the default
* 8 bit descriptors.
*/
PB_STATIC_ASSERT((pb_membersize(google_trace_TraceContext, trace_id) < 256), YOU_MUST_DEFINE_PB_FIELD_16BIT_FOR_MESSAGES_google_trace_TraceId_google_trace_TraceContext)
#endif

@ -0,0 +1,99 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/* Automatically generated nanopb header */
/* Generated by nanopb-0.3.5-dev */
#ifndef GRPC_CORE_EXT_CENSUS_GEN_TRACE_CONTEXT_PB_H
#define GRPC_CORE_EXT_CENSUS_GEN_TRACE_CONTEXT_PB_H
#include "third_party/nanopb/pb.h"
#if PB_PROTO_HEADER_VERSION != 30
#error Regenerate this file with the current version of nanopb generator.
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* Struct definitions */
typedef struct _google_trace_TraceId {
bool has_hi;
uint64_t hi;
bool has_lo;
uint64_t lo;
} google_trace_TraceId;
typedef struct _google_trace_TraceContext {
bool has_trace_id;
google_trace_TraceId trace_id;
bool has_span_id;
uint64_t span_id;
bool has_is_sampled;
bool is_sampled;
} google_trace_TraceContext;
/* Default values for struct fields */
/* Initializer values for message structs */
#define google_trace_TraceId_init_default {false, 0, false, 0}
#define google_trace_TraceContext_init_default {false, google_trace_TraceId_init_default, false, 0, false, 0}
#define google_trace_TraceId_init_zero {false, 0, false, 0}
#define google_trace_TraceContext_init_zero {false, google_trace_TraceId_init_zero, false, 0, false, 0}
/* Field tags (for use in manual encoding/decoding) */
#define google_trace_TraceId_hi_tag 1
#define google_trace_TraceId_lo_tag 2
#define google_trace_TraceContext_trace_id_tag 1
#define google_trace_TraceContext_span_id_tag 2
#define google_trace_TraceContext_is_sampled_tag 3
/* Struct field encoding specification for nanopb */
extern const pb_field_t google_trace_TraceId_fields[3];
extern const pb_field_t google_trace_TraceContext_fields[4];
/* Maximum encoded size of messages (where known) */
#define google_trace_TraceId_size 18
#define google_trace_TraceContext_size 31
/* Message IDs (where set with "msgid" option) */
#ifdef PB_MSGID
#define TRACE_CONTEXT_MESSAGES \
#endif
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif

@ -33,6 +33,7 @@
#include "src/core/ext/client_config/client_channel.h"
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
@ -41,10 +42,11 @@
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
#include "src/core/ext/client_config/subchannel_call_holder.h"
#include "src/core/ext/client_config/subchannel.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/channel.h"
@ -52,13 +54,15 @@
/* Client channel implementation */
typedef grpc_subchannel_call_holder call_data;
/*************************************************************************
* CHANNEL-WIDE FUNCTIONS
*/
typedef struct client_channel_channel_data {
/** resolver for this channel */
grpc_resolver *resolver;
/** have we started resolving this channel */
int started_resolving;
bool started_resolving;
/** mutex protecting client configuration, including all
variables below in this data structure */
@ -74,7 +78,7 @@ typedef struct client_channel_channel_data {
/** connectivity state being tracked */
grpc_connectivity_state_tracker state_tracker;
/** when an lb_policy arrives, should we try to exit idle */
int exit_idle_when_lb_policy_arrives;
bool exit_idle_when_lb_policy_arrives;
/** owning stack */
grpc_channel_stack *owning_stack;
/** interested parties (owned) */
@ -82,10 +86,8 @@ typedef struct client_channel_channel_data {
} channel_data;
/** We create one watcher for each new lb_policy that is returned from a
resolver,
to watch for state changes from the lb_policy. When a state change is seen,
we
update the channel, and create a new watcher */
resolver, to watch for state changes from the lb_policy. When a state
change is seen, we update the channel, and create a new watcher. */
typedef struct {
channel_data *chand;
grpc_closure on_changed;
@ -93,22 +95,6 @@ typedef struct {
grpc_lb_policy *lb_policy;
} lb_policy_connectivity_watcher;
typedef struct {
grpc_closure closure;
grpc_call_element *elem;
} waiting_call;
static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
return grpc_subchannel_call_holder_get_peer(exec_ctx, elem->call_data);
}
static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
grpc_subchannel_call_holder_perform_op(exec_ctx, elem->call_data, op);
}
static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
grpc_lb_policy *lb_policy,
grpc_connectivity_state current_state);
@ -177,13 +163,13 @@ static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
&w->on_changed);
}
static void cc_on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
channel_data *chand = arg;
grpc_lb_policy *lb_policy = NULL;
grpc_lb_policy *old_lb_policy;
grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
int exit_idle = 0;
bool exit_idle = false;
grpc_error *state_error = GRPC_ERROR_CREATE("No load balancing policy");
if (chand->resolver_result != NULL) {
@ -221,8 +207,8 @@ static void cc_on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
}
if (lb_policy != NULL && chand->exit_idle_when_lb_policy_arrives) {
GRPC_LB_POLICY_REF(lb_policy, "exit_idle");
exit_idle = 1;
chand->exit_idle_when_lb_policy_arrives = 0;
exit_idle = true;
chand->exit_idle_when_lb_policy_arrives = false;
}
if (error == GRPC_ERROR_NONE && chand->resolver) {
@ -330,6 +316,188 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&chand->mu);
}
/* Constructor for channel_data */
static void cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
memset(chand, 0, sizeof(*chand));
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
gpr_mu_init(&chand->mu);
grpc_closure_init(&chand->on_resolver_result_changed,
on_resolver_result_changed, chand);
chand->owning_stack = args->channel_stack;
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
"client_channel");
chand->interested_parties = grpc_pollset_set_create();
}
/* Destructor for channel_data */
static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
channel_data *chand = elem->channel_data;
if (chand->resolver != NULL) {
grpc_resolver_shutdown(exec_ctx, chand->resolver);
GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
}
if (chand->lb_policy != NULL) {
grpc_pollset_set_del_pollset_set(exec_ctx,
chand->lb_policy->interested_parties,
chand->interested_parties);
GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
}
grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
grpc_pollset_set_destroy(chand->interested_parties);
gpr_mu_destroy(&chand->mu);
}
/*************************************************************************
* PER-CALL FUNCTIONS
*/
#define GET_CALL(call_data) \
((grpc_subchannel_call *)(gpr_atm_acq_load(&(call_data)->subchannel_call)))
#define CANCELLED_CALL ((grpc_subchannel_call *)1)
typedef enum {
GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING,
GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL
} subchannel_creation_phase;
/** Call data. Holds a pointer to grpc_subchannel_call and the
associated machinery to create such a pointer.
Handles queueing of stream ops until a call object is ready, waiting
for initial metadata before trying to create a call object,
and handling cancellation gracefully. */
typedef struct client_channel_call_data {
/** either 0 for no call, 1 for cancelled, or a pointer to a
grpc_subchannel_call */
gpr_atm subchannel_call;
gpr_mu mu;
subchannel_creation_phase creation_phase;
grpc_connected_subchannel *connected_subchannel;
grpc_polling_entity *pollent;
grpc_transport_stream_op *waiting_ops;
size_t waiting_ops_count;
size_t waiting_ops_capacity;
grpc_closure next_step;
grpc_call_stack *owning_call;
} call_data;
static void add_waiting_locked(call_data *calld, grpc_transport_stream_op *op) {
GPR_TIMER_BEGIN("add_waiting_locked", 0);
if (calld->waiting_ops_count == calld->waiting_ops_capacity) {
calld->waiting_ops_capacity = GPR_MAX(3, 2 * calld->waiting_ops_capacity);
calld->waiting_ops =
gpr_realloc(calld->waiting_ops,
calld->waiting_ops_capacity * sizeof(*calld->waiting_ops));
}
calld->waiting_ops[calld->waiting_ops_count++] = *op;
GPR_TIMER_END("add_waiting_locked", 0);
}
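/* Note: the queue above grows geometrically from a floor of three entries
   (capacities 3, 6, 12, 24, ...), and gpr_realloc preserves the ops already
   queued, so queueing stays O(1) amortized per op. */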
static void fail_locked(grpc_exec_ctx *exec_ctx, call_data *calld,
grpc_error *error) {
size_t i;
for (i = 0; i < calld->waiting_ops_count; i++) {
grpc_transport_stream_op_finish_with_failure(
exec_ctx, &calld->waiting_ops[i], GRPC_ERROR_REF(error));
}
calld->waiting_ops_count = 0;
GRPC_ERROR_UNREF(error);
}
typedef struct {
grpc_transport_stream_op *ops;
size_t nops;
grpc_subchannel_call *call;
} retry_ops_args;
static void retry_ops(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) {
retry_ops_args *a = args;
size_t i;
for (i = 0; i < a->nops; i++) {
grpc_subchannel_call_process_op(exec_ctx, a->call, &a->ops[i]);
}
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, a->call, "retry_ops");
gpr_free(a->ops);
gpr_free(a);
}
static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) {
retry_ops_args *a = gpr_malloc(sizeof(*a));
a->ops = calld->waiting_ops;
a->nops = calld->waiting_ops_count;
a->call = GET_CALL(calld);
if (a->call == CANCELLED_CALL) {
gpr_free(a);
fail_locked(exec_ctx, calld, GRPC_ERROR_CANCELLED);
return;
}
calld->waiting_ops = NULL;
calld->waiting_ops_count = 0;
calld->waiting_ops_capacity = 0;
GRPC_SUBCHANNEL_CALL_REF(a->call, "retry_ops");
grpc_exec_ctx_sched(exec_ctx, grpc_closure_create(retry_ops, a),
GRPC_ERROR_NONE, NULL);
}
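/* retry_waiting_locked hands ownership of the queued waiting_ops array to the
   retry_ops_args it allocates; retry_ops later replays each op on the now
   ready subchannel call and frees both the array and the args struct. */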
static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
call_data *calld = arg;
gpr_mu_lock(&calld->mu);
GPR_ASSERT(calld->creation_phase ==
GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
if (calld->connected_subchannel == NULL) {
gpr_atm_no_barrier_store(&calld->subchannel_call, 1);
fail_locked(exec_ctx, calld, GRPC_ERROR_CREATE_REFERENCING(
"Failed to create subchannel", &error, 1));
} else if (1 == gpr_atm_acq_load(&calld->subchannel_call)) {
/* already cancelled before subchannel became ready */
fail_locked(exec_ctx, calld,
GRPC_ERROR_CREATE_REFERENCING(
"Cancelled before creating subchannel", &error, 1));
} else {
grpc_subchannel_call *subchannel_call = NULL;
grpc_error *new_error = grpc_connected_subchannel_create_call(
exec_ctx, calld->connected_subchannel, calld->pollent,
&subchannel_call);
if (new_error != GRPC_ERROR_NONE) {
new_error = grpc_error_add_child(new_error, error);
subchannel_call = CANCELLED_CALL;
fail_locked(exec_ctx, calld, new_error);
}
gpr_atm_rel_store(&calld->subchannel_call,
(gpr_atm)(uintptr_t)subchannel_call);
retry_waiting_locked(exec_ctx, calld);
}
gpr_mu_unlock(&calld->mu);
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
}
static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
call_data *calld = elem->call_data;
grpc_subchannel_call *subchannel_call = GET_CALL(calld);
if (subchannel_call == NULL || subchannel_call == CANCELLED_CALL) {
return NULL;
} else {
return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
}
}
typedef struct {
grpc_metadata_batch *initial_metadata;
uint32_t initial_metadata_flags;
@ -339,11 +507,11 @@ typedef struct {
grpc_closure closure;
} continue_picking_args;
static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
grpc_metadata_batch *initial_metadata,
uint32_t initial_metadata_flags,
grpc_connected_subchannel **connected_subchannel,
grpc_closure *on_ready);
static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_metadata_batch *initial_metadata,
uint32_t initial_metadata_flags,
grpc_connected_subchannel **connected_subchannel,
grpc_closure *on_ready);
static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
@ -352,22 +520,21 @@ static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg,
/* cancelled, do nothing */
} else if (error != GRPC_ERROR_NONE) {
grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error), NULL);
} else if (cc_pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
cpa->initial_metadata_flags,
cpa->connected_subchannel, cpa->on_ready)) {
} else if (pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
cpa->initial_metadata_flags,
cpa->connected_subchannel, cpa->on_ready)) {
grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE, NULL);
}
gpr_free(cpa);
}
static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
grpc_metadata_batch *initial_metadata,
uint32_t initial_metadata_flags,
grpc_connected_subchannel **connected_subchannel,
grpc_closure *on_ready) {
GPR_TIMER_BEGIN("cc_pick_subchannel", 0);
static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_metadata_batch *initial_metadata,
uint32_t initial_metadata_flags,
grpc_connected_subchannel **connected_subchannel,
grpc_closure *on_ready) {
GPR_TIMER_BEGIN("pick_subchannel", 0);
grpc_call_element *elem = elemp;
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
continue_picking_args *cpa;
@ -391,23 +558,23 @@ static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
}
}
gpr_mu_unlock(&chand->mu);
GPR_TIMER_END("cc_pick_subchannel", 0);
return 1;
GPR_TIMER_END("pick_subchannel", 0);
return true;
}
if (chand->lb_policy != NULL) {
grpc_lb_policy *lb_policy = chand->lb_policy;
int r;
GRPC_LB_POLICY_REF(lb_policy, "cc_pick_subchannel");
GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel");
gpr_mu_unlock(&chand->mu);
r = grpc_lb_policy_pick(exec_ctx, lb_policy, calld->pollent,
initial_metadata, initial_metadata_flags,
connected_subchannel, on_ready);
GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "cc_pick_subchannel");
GPR_TIMER_END("cc_pick_subchannel", 0);
GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick_subchannel");
GPR_TIMER_END("pick_subchannel", 0);
return r;
}
if (chand->resolver != NULL && !chand->started_resolving) {
chand->started_resolving = 1;
chand->started_resolving = true;
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
&chand->on_resolver_result_changed);
@ -428,66 +595,143 @@ static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
}
gpr_mu_unlock(&chand->mu);
GPR_TIMER_END("cc_pick_subchannel", 0);
return 0;
GPR_TIMER_END("pick_subchannel", 0);
return false;
}
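/* Contract for pick_subchannel(): a true return means the pick completed
   synchronously and on_ready will not be scheduled; a false return means the
   pick is still pending and on_ready runs once it finishes or is cancelled. */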
// The logic here is fairly complicated, due to (a) the fact that we
// need to handle the case where we receive the send op before the
// initial metadata op, and (b) the need for efficiency, especially in
// the streaming case.
// TODO(ctiller): Explain this more thoroughly.
static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
/* try to (atomically) get the call */
grpc_subchannel_call *call = GET_CALL(calld);
GPR_TIMER_BEGIN("cc_start_transport_stream_op", 0);
if (call == CANCELLED_CALL) {
grpc_transport_stream_op_finish_with_failure(exec_ctx, op,
GRPC_ERROR_CANCELLED);
GPR_TIMER_END("cc_start_transport_stream_op", 0);
return;
}
if (call != NULL) {
grpc_subchannel_call_process_op(exec_ctx, call, op);
GPR_TIMER_END("cc_start_transport_stream_op", 0);
return;
}
/* we failed; lock and figure out what to do */
gpr_mu_lock(&calld->mu);
retry:
/* need to recheck that another thread hasn't set the call */
call = GET_CALL(calld);
if (call == CANCELLED_CALL) {
gpr_mu_unlock(&calld->mu);
grpc_transport_stream_op_finish_with_failure(exec_ctx, op,
GRPC_ERROR_CANCELLED);
GPR_TIMER_END("cc_start_transport_stream_op", 0);
return;
}
if (call != NULL) {
gpr_mu_unlock(&calld->mu);
grpc_subchannel_call_process_op(exec_ctx, call, op);
GPR_TIMER_END("cc_start_transport_stream_op", 0);
return;
}
/* if this is a cancellation, then we can raise our cancelled flag */
if (op->cancel_error != GRPC_ERROR_NONE) {
if (!gpr_atm_rel_cas(&calld->subchannel_call, 0,
(gpr_atm)(uintptr_t)CANCELLED_CALL)) {
goto retry;
} else {
switch (calld->creation_phase) {
case GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING:
fail_locked(exec_ctx, calld, GRPC_ERROR_REF(op->cancel_error));
break;
case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL:
pick_subchannel(exec_ctx, elem, NULL, 0, &calld->connected_subchannel,
NULL);
break;
}
gpr_mu_unlock(&calld->mu);
grpc_transport_stream_op_finish_with_failure(exec_ctx, op,
GRPC_ERROR_CANCELLED);
GPR_TIMER_END("cc_start_transport_stream_op", 0);
return;
}
}
/* if we don't have a subchannel, try to get one */
if (calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
calld->connected_subchannel == NULL &&
op->send_initial_metadata != NULL) {
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
grpc_closure_init(&calld->next_step, subchannel_ready, calld);
GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
if (pick_subchannel(exec_ctx, elem, op->send_initial_metadata,
op->send_initial_metadata_flags,
&calld->connected_subchannel, &calld->next_step)) {
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
}
}
/* if we've got a subchannel, then let's ask it to create a call */
if (calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
calld->connected_subchannel != NULL) {
grpc_subchannel_call *subchannel_call = NULL;
grpc_error *error = grpc_connected_subchannel_create_call(
exec_ctx, calld->connected_subchannel, calld->pollent,
&subchannel_call);
if (error != GRPC_ERROR_NONE) {
subchannel_call = CANCELLED_CALL;
fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
grpc_transport_stream_op_finish_with_failure(exec_ctx, op, error);
}
gpr_atm_rel_store(&calld->subchannel_call,
(gpr_atm)(uintptr_t)subchannel_call);
retry_waiting_locked(exec_ctx, calld);
goto retry;
}
/* nothing to be done but wait */
add_waiting_locked(calld, op);
gpr_mu_unlock(&calld->mu);
GPR_TIMER_END("cc_start_transport_stream_op", 0);
}
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_call_element_args *args) {
grpc_subchannel_call_holder_init(elem->call_data, cc_pick_subchannel, elem,
args->call_stack);
static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_call_element_args *args) {
call_data *calld = elem->call_data;
gpr_atm_rel_store(&calld->subchannel_call, 0);
gpr_mu_init(&calld->mu);
calld->connected_subchannel = NULL;
calld->waiting_ops = NULL;
calld->waiting_ops_count = 0;
calld->waiting_ops_capacity = 0;
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
calld->owning_call = args->call_stack;
calld->pollent = NULL;
return GRPC_ERROR_NONE;
}
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
void *and_free_memory) {
grpc_subchannel_call_holder_destroy(exec_ctx, elem->call_data);
gpr_free(and_free_memory);
}
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
memset(chand, 0, sizeof(*chand));
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
gpr_mu_init(&chand->mu);
grpc_closure_init(&chand->on_resolver_result_changed,
cc_on_resolver_result_changed, chand);
chand->owning_stack = args->channel_stack;
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
"client_channel");
chand->interested_parties = grpc_pollset_set_create();
}
/* Destructor for channel_data */
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
channel_data *chand = elem->channel_data;
if (chand->resolver != NULL) {
grpc_resolver_shutdown(exec_ctx, chand->resolver);
GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
}
if (chand->lb_policy != NULL) {
grpc_pollset_set_del_pollset_set(exec_ctx,
chand->lb_policy->interested_parties,
chand->interested_parties);
GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_final_info *final_info,
void *and_free_memory) {
call_data *calld = elem->call_data;
grpc_subchannel_call *call = GET_CALL(calld);
if (call != NULL && call != CANCELLED_CALL) {
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, call, "client_channel_destroy_call");
}
grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
grpc_pollset_set_destroy(chand->interested_parties);
gpr_mu_destroy(&chand->mu);
GPR_ASSERT(calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING);
gpr_mu_destroy(&calld->mu);
GPR_ASSERT(calld->waiting_ops_count == 0);
gpr_free(calld->waiting_ops);
gpr_free(and_free_memory);
}
static void cc_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
@ -497,16 +741,20 @@ static void cc_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
calld->pollent = pollent;
}
/*************************************************************************
* EXPORTED SYMBOLS
*/
const grpc_channel_filter grpc_client_channel_filter = {
cc_start_transport_stream_op,
cc_start_transport_op,
sizeof(call_data),
init_call_elem,
cc_init_call_elem,
cc_set_pollset_or_pollset_set,
destroy_call_elem,
cc_destroy_call_elem,
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
cc_init_channel_elem,
cc_destroy_channel_elem,
cc_get_peer,
"client-channel",
};
@ -523,7 +771,7 @@ void grpc_client_channel_set_resolver(grpc_exec_ctx *exec_ctx,
GRPC_RESOLVER_REF(resolver, "channel");
if (!grpc_closure_list_empty(chand->waiting_for_config_closures) ||
chand->exit_idle_when_lb_policy_arrives) {
chand->started_resolving = 1;
chand->started_resolving = true;
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
grpc_resolver_next(exec_ctx, resolver, &chand->resolver_result,
&chand->on_resolver_result_changed);
@ -541,10 +789,10 @@ grpc_connectivity_state grpc_client_channel_check_connectivity_state(
if (chand->lb_policy != NULL) {
grpc_lb_policy_exit_idle(exec_ctx, chand->lb_policy);
} else {
chand->exit_idle_when_lb_policy_arrives = 1;
chand->exit_idle_when_lb_policy_arrives = true;
if (!chand->started_resolving && chand->resolver != NULL) {
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
chand->started_resolving = 1;
chand->started_resolving = true;
grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
&chand->on_resolver_result_changed);
}

@ -1,292 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/ext/client_config/subchannel_call_holder.h"
#include <grpc/support/alloc.h>
#include "src/core/lib/profiling/timers.h"
#define GET_CALL(holder) \
((grpc_subchannel_call *)(gpr_atm_acq_load(&(holder)->subchannel_call)))
#define CANCELLED_CALL ((grpc_subchannel_call *)1)
static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *holder,
grpc_error *error);
static void retry_ops(grpc_exec_ctx *exec_ctx, void *retry_ops_args,
grpc_error *error);
static void add_waiting_locked(grpc_subchannel_call_holder *holder,
grpc_transport_stream_op *op);
static void fail_locked(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder, grpc_error *error);
static void retry_waiting_locked(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder);
void grpc_subchannel_call_holder_init(
grpc_subchannel_call_holder *holder,
grpc_subchannel_call_holder_pick_subchannel pick_subchannel,
void *pick_subchannel_arg, grpc_call_stack *owning_call) {
gpr_atm_rel_store(&holder->subchannel_call, 0);
holder->pick_subchannel = pick_subchannel;
holder->pick_subchannel_arg = pick_subchannel_arg;
gpr_mu_init(&holder->mu);
holder->connected_subchannel = NULL;
holder->waiting_ops = NULL;
holder->waiting_ops_count = 0;
holder->waiting_ops_capacity = 0;
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
holder->owning_call = owning_call;
holder->pollent = NULL;
}
void grpc_subchannel_call_holder_destroy(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder) {
grpc_subchannel_call *call = GET_CALL(holder);
if (call != NULL && call != CANCELLED_CALL) {
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, call, "holder");
}
GPR_ASSERT(holder->creation_phase ==
GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING);
gpr_mu_destroy(&holder->mu);
GPR_ASSERT(holder->waiting_ops_count == 0);
gpr_free(holder->waiting_ops);
}
// The logic here is fairly complicated, due to (a) the fact that we
// need to handle the case where we receive the send op before the
// initial metadata op, and (b) the need for efficiency, especially in
// the streaming case.
// TODO(ctiller): Explain this more thoroughly.
void grpc_subchannel_call_holder_perform_op(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder,
grpc_transport_stream_op *op) {
/* try to (atomically) get the call */
grpc_subchannel_call *call = GET_CALL(holder);
GPR_TIMER_BEGIN("grpc_subchannel_call_holder_perform_op", 0);
if (call == CANCELLED_CALL) {
grpc_transport_stream_op_finish_with_failure(exec_ctx, op,
GRPC_ERROR_CANCELLED);
GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
return;
}
if (call != NULL) {
grpc_subchannel_call_process_op(exec_ctx, call, op);
GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
return;
}
/* we failed; lock and figure out what to do */
gpr_mu_lock(&holder->mu);
retry:
/* need to recheck that another thread hasn't set the call */
call = GET_CALL(holder);
if (call == CANCELLED_CALL) {
gpr_mu_unlock(&holder->mu);
grpc_transport_stream_op_finish_with_failure(exec_ctx, op,
GRPC_ERROR_CANCELLED);
GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
return;
}
if (call != NULL) {
gpr_mu_unlock(&holder->mu);
grpc_subchannel_call_process_op(exec_ctx, call, op);
GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
return;
}
/* if this is a cancellation, then we can raise our cancelled flag */
if (op->cancel_error != GRPC_ERROR_NONE) {
if (!gpr_atm_rel_cas(&holder->subchannel_call, 0,
(gpr_atm)(uintptr_t)CANCELLED_CALL)) {
goto retry;
} else {
switch (holder->creation_phase) {
case GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING:
fail_locked(exec_ctx, holder, GRPC_ERROR_REF(op->cancel_error));
break;
case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL:
holder->pick_subchannel(exec_ctx, holder->pick_subchannel_arg, NULL,
0, &holder->connected_subchannel, NULL);
break;
}
gpr_mu_unlock(&holder->mu);
grpc_transport_stream_op_finish_with_failure(exec_ctx, op,
GRPC_ERROR_CANCELLED);
GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
return;
}
}
/* if we don't have a subchannel, try to get one */
if (holder->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
holder->connected_subchannel == NULL &&
op->send_initial_metadata != NULL) {
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
grpc_closure_init(&holder->next_step, subchannel_ready, holder);
GRPC_CALL_STACK_REF(holder->owning_call, "pick_subchannel");
if (holder->pick_subchannel(
exec_ctx, holder->pick_subchannel_arg, op->send_initial_metadata,
op->send_initial_metadata_flags, &holder->connected_subchannel,
&holder->next_step)) {
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
GRPC_CALL_STACK_UNREF(exec_ctx, holder->owning_call, "pick_subchannel");
}
}
/* if we've got a subchannel, then let's ask it to create a call */
if (holder->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
holder->connected_subchannel != NULL) {
grpc_subchannel_call *subchannel_call = NULL;
grpc_error *error = grpc_connected_subchannel_create_call(
exec_ctx, holder->connected_subchannel, holder->pollent,
&subchannel_call);
if (error != GRPC_ERROR_NONE) {
subchannel_call = CANCELLED_CALL;
fail_locked(exec_ctx, holder, GRPC_ERROR_REF(error));
grpc_transport_stream_op_finish_with_failure(exec_ctx, op, error);
}
gpr_atm_rel_store(&holder->subchannel_call,
(gpr_atm)(uintptr_t)subchannel_call);
retry_waiting_locked(exec_ctx, holder);
goto retry;
}
/* nothing to be done but wait */
add_waiting_locked(holder, op);
gpr_mu_unlock(&holder->mu);
GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
}
static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_subchannel_call_holder *holder = arg;
gpr_mu_lock(&holder->mu);
GPR_ASSERT(holder->creation_phase ==
GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
if (holder->connected_subchannel == NULL) {
gpr_atm_no_barrier_store(&holder->subchannel_call, 1);
fail_locked(exec_ctx, holder,
GRPC_ERROR_CREATE_REFERENCING("Failed to create subchannel",
&error, 1));
} else if (1 == gpr_atm_acq_load(&holder->subchannel_call)) {
/* already cancelled before subchannel became ready */
fail_locked(exec_ctx, holder,
GRPC_ERROR_CREATE_REFERENCING(
"Cancelled before creating subchannel", &error, 1));
} else {
grpc_subchannel_call *subchannel_call = NULL;
grpc_error *new_error = grpc_connected_subchannel_create_call(
exec_ctx, holder->connected_subchannel, holder->pollent,
&subchannel_call);
if (new_error != GRPC_ERROR_NONE) {
new_error = grpc_error_add_child(new_error, error);
subchannel_call = CANCELLED_CALL;
fail_locked(exec_ctx, holder, new_error);
}
gpr_atm_rel_store(&holder->subchannel_call,
(gpr_atm)(uintptr_t)subchannel_call);
retry_waiting_locked(exec_ctx, holder);
}
gpr_mu_unlock(&holder->mu);
GRPC_CALL_STACK_UNREF(exec_ctx, holder->owning_call, "pick_subchannel");
}
typedef struct {
grpc_transport_stream_op *ops;
size_t nops;
grpc_subchannel_call *call;
} retry_ops_args;
static void retry_waiting_locked(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder) {
retry_ops_args *a = gpr_malloc(sizeof(*a));
a->ops = holder->waiting_ops;
a->nops = holder->waiting_ops_count;
a->call = GET_CALL(holder);
if (a->call == CANCELLED_CALL) {
gpr_free(a);
fail_locked(exec_ctx, holder, GRPC_ERROR_CANCELLED);
return;
}
holder->waiting_ops = NULL;
holder->waiting_ops_count = 0;
holder->waiting_ops_capacity = 0;
GRPC_SUBCHANNEL_CALL_REF(a->call, "retry_ops");
grpc_exec_ctx_sched(exec_ctx, grpc_closure_create(retry_ops, a),
GRPC_ERROR_NONE, NULL);
}
static void retry_ops(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) {
retry_ops_args *a = args;
size_t i;
for (i = 0; i < a->nops; i++) {
grpc_subchannel_call_process_op(exec_ctx, a->call, &a->ops[i]);
}
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, a->call, "retry_ops");
gpr_free(a->ops);
gpr_free(a);
}
static void add_waiting_locked(grpc_subchannel_call_holder *holder,
grpc_transport_stream_op *op) {
GPR_TIMER_BEGIN("add_waiting_locked", 0);
if (holder->waiting_ops_count == holder->waiting_ops_capacity) {
holder->waiting_ops_capacity = GPR_MAX(3, 2 * holder->waiting_ops_capacity);
holder->waiting_ops =
gpr_realloc(holder->waiting_ops, holder->waiting_ops_capacity *
sizeof(*holder->waiting_ops));
}
holder->waiting_ops[holder->waiting_ops_count++] = *op;
GPR_TIMER_END("add_waiting_locked", 0);
}
static void fail_locked(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder,
grpc_error *error) {
size_t i;
for (i = 0; i < holder->waiting_ops_count; i++) {
grpc_transport_stream_op_finish_with_failure(
exec_ctx, &holder->waiting_ops[i], GRPC_ERROR_REF(error));
}
holder->waiting_ops_count = 0;
GRPC_ERROR_UNREF(error);
}
char *grpc_subchannel_call_holder_get_peer(
grpc_exec_ctx *exec_ctx, grpc_subchannel_call_holder *holder) {
grpc_subchannel_call *subchannel_call = GET_CALL(holder);
if (subchannel_call == NULL || subchannel_call == CANCELLED_CALL) {
return NULL;
} else {
return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
}
}

@ -1,99 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_SUBCHANNEL_CALL_HOLDER_H
#define GRPC_CORE_EXT_CLIENT_CONFIG_SUBCHANNEL_CALL_HOLDER_H
#include "src/core/ext/client_config/subchannel.h"
#include "src/core/lib/iomgr/polling_entity.h"
/** Pick a subchannel for grpc_subchannel_call_holder;
Return 1 if subchannel is available immediately (in which case on_ready
should not be called), or 0 otherwise (in which case on_ready should be
called when the subchannel is available) */
typedef int (*grpc_subchannel_call_holder_pick_subchannel)(
grpc_exec_ctx *exec_ctx, void *arg, grpc_metadata_batch *initial_metadata,
uint32_t initial_metadata_flags,
grpc_connected_subchannel **connected_subchannel, grpc_closure *on_ready);
typedef enum {
GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING,
GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL
} grpc_subchannel_call_holder_creation_phase;
/** Wrapper for holding a pointer to grpc_subchannel_call, and the
associated machinery to create such a pointer.
Handles queueing of stream ops until a call object is ready, waiting
for initial metadata before trying to create a call object,
and handling cancellation gracefully.
The channel filter uses this as its call_data. */
typedef struct grpc_subchannel_call_holder {
/** either 0 for no call, 1 for cancelled, or a pointer to a
grpc_subchannel_call */
gpr_atm subchannel_call;
/** Helper function to choose the subchannel on which to create
the call object. Channel filter delegates to the load
balancing policy (once it's ready). */
grpc_subchannel_call_holder_pick_subchannel pick_subchannel;
void *pick_subchannel_arg;
gpr_mu mu;
grpc_subchannel_call_holder_creation_phase creation_phase;
grpc_connected_subchannel *connected_subchannel;
grpc_polling_entity *pollent;
grpc_transport_stream_op *waiting_ops;
size_t waiting_ops_count;
size_t waiting_ops_capacity;
grpc_closure next_step;
grpc_call_stack *owning_call;
} grpc_subchannel_call_holder;
void grpc_subchannel_call_holder_init(
grpc_subchannel_call_holder *holder,
grpc_subchannel_call_holder_pick_subchannel pick_subchannel,
void *pick_subchannel_arg, grpc_call_stack *owning_call);
void grpc_subchannel_call_holder_destroy(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder);
void grpc_subchannel_call_holder_perform_op(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder,
grpc_transport_stream_op *op);
char *grpc_subchannel_call_holder_get_peer(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder);
#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_SUBCHANNEL_CALL_HOLDER_H */

@ -43,6 +43,9 @@
#define EXPECTED_CONTENT_TYPE "application/grpc"
#define EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1
/* default maximum size of payload eligible for a GET request */
static const size_t kMaxPayloadSizeForGet = 2048;
typedef struct call_data {
grpc_linked_mdelem method;
grpc_linked_mdelem scheme;
@ -50,20 +53,39 @@ typedef struct call_data {
grpc_linked_mdelem te_trailers;
grpc_linked_mdelem content_type;
grpc_linked_mdelem user_agent;
grpc_linked_mdelem payload_bin;
grpc_metadata_batch *recv_initial_metadata;
uint8_t *payload_bytes;
/* Vars to read data off of send_message */
grpc_transport_stream_op send_op;
uint32_t send_length;
uint32_t send_flags;
gpr_slice incoming_slice;
grpc_slice_buffer_stream replacement_stream;
gpr_slice_buffer slices;
/* flag indicating that not all slices of send_message are available yet */
bool send_message_blocked;
/** Closure to call when finished with the hc_on_recv hook */
grpc_closure *on_done_recv;
grpc_closure *on_complete;
grpc_closure *post_send;
/** Receive closures are chained: we inject this closure as the on_done_recv
up-call on transport_op, and remember to call our on_done_recv member
after handling it. */
grpc_closure hc_on_recv;
grpc_closure hc_on_complete;
grpc_closure got_slice;
grpc_closure send_done;
} call_data;
typedef struct channel_data {
grpc_mdelem *static_scheme;
grpc_mdelem *user_agent;
size_t max_payload_size_for_get;
} channel_data;
typedef struct {
@ -119,6 +141,24 @@ static void hc_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, error);
}
static void hc_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *error) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
if (calld->payload_bytes) {
gpr_free(calld->payload_bytes);
calld->payload_bytes = NULL;
}
calld->on_complete->cb(exec_ctx, calld->on_complete->cb_arg, error);
}
static void send_done(grpc_exec_ctx *exec_ctx, void *elemp, grpc_error *error) {
grpc_call_element *elem = elemp;
call_data *calld = elem->call_data;
gpr_slice_buffer_reset_and_unref(&calld->slices);
calld->post_send->cb(exec_ctx, calld->post_send->cb_arg, error);
}
static grpc_mdelem *client_strip_filter(void *user_data, grpc_mdelem *md) {
/* eat the things we'd like to set ourselves */
if (md->key == GRPC_MDSTR_METHOD) return NULL;
@ -129,22 +169,105 @@ static grpc_mdelem *client_strip_filter(void *user_data, grpc_mdelem *md) {
return md;
}
static void hc_mutate_op(grpc_call_element *elem,
static void continue_send_message(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
call_data *calld = elem->call_data;
uint8_t *wrptr = calld->payload_bytes;
while (grpc_byte_stream_next(exec_ctx, calld->send_op.send_message,
&calld->incoming_slice, ~(size_t)0,
&calld->got_slice)) {
memcpy(wrptr, GPR_SLICE_START_PTR(calld->incoming_slice),
GPR_SLICE_LENGTH(calld->incoming_slice));
wrptr += GPR_SLICE_LENGTH(calld->incoming_slice);
gpr_slice_buffer_add(&calld->slices, calld->incoming_slice);
if (calld->send_length == calld->slices.length) {
calld->send_message_blocked = false;
break;
}
}
}
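/* grpc_byte_stream_next returns true when the next slice is available
   synchronously (so the loop above keeps copying into payload_bytes), and
   false when got_slice will be invoked later; in that case
   send_message_blocked stays true until every slice of the payload has been
   collected. */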
static void got_slice(grpc_exec_ctx *exec_ctx, void *elemp, grpc_error *error) {
grpc_call_element *elem = elemp;
call_data *calld = elem->call_data;
calld->send_message_blocked = false;
gpr_slice_buffer_add(&calld->slices, calld->incoming_slice);
if (calld->send_length == calld->slices.length) {
/* Pass down the original send_message op that was blocked.*/
grpc_slice_buffer_stream_init(&calld->replacement_stream, &calld->slices,
calld->send_flags);
calld->send_op.send_message = &calld->replacement_stream.base;
calld->post_send = calld->send_op.on_complete;
calld->send_op.on_complete = &calld->send_done;
grpc_call_next_op(exec_ctx, elem, &calld->send_op);
} else {
continue_send_message(exec_ctx, elem);
}
}
static void hc_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op *op) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
if (op->send_initial_metadata != NULL) {
/* Decide which HTTP verb to use. We use GET if the request is marked
cacheable, the operation contains both initial metadata and a send
message, the payload is below the size threshold, and all the data
for this request is immediately available. */
grpc_mdelem *method = GRPC_MDELEM_METHOD_POST;
calld->send_message_blocked = false;
if ((op->send_initial_metadata_flags &
GRPC_INITIAL_METADATA_CACHEABLE_REQUEST) &&
op->send_message != NULL &&
op->send_message->length < channeld->max_payload_size_for_get) {
method = GRPC_MDELEM_METHOD_GET;
calld->send_message_blocked = true;
} else if (op->send_initial_metadata_flags &
GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST) {
method = GRPC_MDELEM_METHOD_PUT;
}
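/* Verb selection, summarized: GET for a cacheable request whose send_message
   is present and smaller than the configured threshold (it may still fall
   back to POST below if the payload is not fully available); PUT for an
   idempotent request; POST otherwise. */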
/* Attempt to read the data from send_message and create a header field. */
if (method == GRPC_MDELEM_METHOD_GET) {
/* allocate memory to hold the entire payload */
calld->payload_bytes = gpr_malloc(op->send_message->length);
GPR_ASSERT(calld->payload_bytes);
/* read slices of send_message and copy into payload_bytes */
calld->send_op = *op;
calld->send_length = op->send_message->length;
calld->send_flags = op->send_message->flags;
continue_send_message(exec_ctx, elem);
if (calld->send_message_blocked == false) {
/* when all the send_message data is available, create an MDELEM and
append it to the headers */
grpc_mdelem *payload_bin = grpc_mdelem_from_metadata_strings(
GRPC_MDSTR_GRPC_PAYLOAD_BIN,
grpc_mdstr_from_buffer(calld->payload_bytes,
op->send_message->length));
grpc_metadata_batch_add_tail(op->send_initial_metadata,
&calld->payload_bin, payload_bin);
calld->on_complete = op->on_complete;
op->on_complete = &calld->hc_on_complete;
op->send_message = NULL;
} else {
/* Not all data is available. Fall back to POST. */
gpr_log(GPR_DEBUG,
"Request is marked Cacheable but not all data is available.\
Falling back to POST");
method = GRPC_MDELEM_METHOD_POST;
}
}
grpc_metadata_batch_filter(op->send_initial_metadata, client_strip_filter,
elem);
/* Send : prefixed headers, which have to be before any application
layer headers. */
grpc_metadata_batch_add_head(
op->send_initial_metadata, &calld->method,
op->send_initial_metadata_flags &
GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST
? GRPC_MDELEM_METHOD_PUT
: GRPC_MDELEM_METHOD_POST);
grpc_metadata_batch_add_head(op->send_initial_metadata, &calld->method,
method);
grpc_metadata_batch_add_head(op->send_initial_metadata, &calld->scheme,
channeld->static_scheme);
grpc_metadata_batch_add_tail(op->send_initial_metadata, &calld->te_trailers,
@ -169,9 +292,16 @@ static void hc_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_transport_stream_op *op) {
GPR_TIMER_BEGIN("hc_start_transport_op", 0);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
hc_mutate_op(elem, op);
hc_mutate_op(exec_ctx, elem, op);
GPR_TIMER_END("hc_start_transport_op", 0);
grpc_call_next_op(exec_ctx, elem, op);
call_data *calld = elem->call_data;
if (op->send_message != NULL && calld->send_message_blocked) {
/* Don't forward the op: send_message contains slices that aren't ready
yet. The op will be forwarded from the got_slice callback once the
remaining slices have been read. */
} else {
grpc_call_next_op(exec_ctx, elem, op);
}
}
/* Constructor for call_data */
@ -180,14 +310,23 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element_args *args) {
call_data *calld = elem->call_data;
calld->on_done_recv = NULL;
calld->on_complete = NULL;
calld->payload_bytes = NULL;
gpr_slice_buffer_init(&calld->slices);
grpc_closure_init(&calld->hc_on_recv, hc_on_recv, elem);
grpc_closure_init(&calld->hc_on_complete, hc_on_complete, elem);
grpc_closure_init(&calld->got_slice, got_slice, elem);
grpc_closure_init(&calld->send_done, send_done, elem);
return GRPC_ERROR_NONE;
}
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
void *ignored) {}
void *ignored) {
call_data *calld = elem->call_data;
gpr_slice_buffer_destroy(&calld->slices);
}
static grpc_mdelem *scheme_from_args(const grpc_channel_args *args) {
unsigned i;
@ -210,6 +349,22 @@ static grpc_mdelem *scheme_from_args(const grpc_channel_args *args) {
return GRPC_MDELEM_SCHEME_HTTP;
}
static size_t max_payload_size_from_args(const grpc_channel_args *args) {
if (args != NULL) {
for (size_t i = 0; i < args->num_args; ++i) {
if (0 == strcmp(args->args[i].key, GRPC_ARG_MAX_PAYLOAD_SIZE_FOR_GET)) {
if (args->args[i].type != GRPC_ARG_INTEGER) {
gpr_log(GPR_ERROR, "%s: must be an integer",
GRPC_ARG_MAX_PAYLOAD_SIZE_FOR_GET);
} else {
return (size_t)args->args[i].value.integer;
}
}
}
}
return kMaxPayloadSizeForGet;
}
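/* A minimal sketch of how a client might override this limit through channel
   args when creating a channel; the target address and the 1024-byte value
   below are illustrative, not defaults taken from this change. */
static grpc_channel *create_channel_with_get_limit(void) {
  grpc_arg arg;
  arg.type = GRPC_ARG_INTEGER;
  arg.key = GRPC_ARG_MAX_PAYLOAD_SIZE_FOR_GET; /* "grpc.max_payload_size_for_get" */
  arg.value.integer = 1024;
  grpc_channel_args channel_args = {1, &arg};
  return grpc_insecure_channel_create("localhost:50051", &channel_args, NULL);
}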
static grpc_mdstr *user_agent_from_args(const grpc_channel_args *args,
const char *transport_name) {
gpr_strvec v;
@ -268,6 +423,8 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(!args->is_last);
GPR_ASSERT(args->optional_transport != NULL);
chand->static_scheme = scheme_from_args(args->channel_args);
chand->max_payload_size_for_get =
max_payload_size_from_args(args->channel_args);
chand->user_agent = grpc_mdelem_from_metadata_strings(
GRPC_MDSTR_USER_AGENT,
user_agent_from_args(args->channel_args,

@ -41,4 +41,7 @@ extern const grpc_channel_filter grpc_http_client_filter;
/* Channel arg to override the http2 :scheme header */
#define GRPC_ARG_HTTP2_SCHEME "grpc.http2_scheme"
/* Channel arg to determine maximum size of payload eligible for a GET request */
#define GRPC_ARG_MAX_PAYLOAD_SIZE_FOR_GET "grpc.max_payload_size_for_get"
#endif /* GRPC_CORE_LIB_CHANNEL_HTTP_CLIENT_FILTER_H */

@ -49,17 +49,32 @@ typedef struct call_data {
uint8_t seen_scheme;
uint8_t seen_te_trailers;
uint8_t seen_authority;
uint8_t seen_payload_bin;
grpc_linked_mdelem status;
grpc_linked_mdelem content_type;
/* flag to ensure payload_bin is delivered only once */
uint8_t payload_bin_delivered;
grpc_metadata_batch *recv_initial_metadata;
bool *recv_idempotent_request;
bool *recv_cacheable_request;
/** Closure to call when finished with the hs_on_recv hook */
grpc_closure *on_done_recv;
/** Closure to call when we retrieve the message read from the payload-bin
header */
grpc_closure *recv_message_ready;
grpc_closure *on_complete;
grpc_byte_stream **pp_recv_message;
gpr_slice_buffer read_slice_buffer;
grpc_slice_buffer_stream read_stream;
/** Receive closures are chained: we inject this closure as the on_done_recv
up-call on transport_op, and remember to call our on_done_recv member
after handling it. */
grpc_closure hs_on_recv;
grpc_closure hs_on_complete;
grpc_closure hs_recv_message_ready;
} call_data;
typedef struct channel_data { uint8_t unused; } channel_data;
@ -76,16 +91,20 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
/* Check if it is one of the headers we care about. */
if (md == GRPC_MDELEM_TE_TRAILERS || md == GRPC_MDELEM_METHOD_POST ||
md == GRPC_MDELEM_METHOD_PUT || md == GRPC_MDELEM_SCHEME_HTTP ||
md == GRPC_MDELEM_SCHEME_HTTPS ||
md == GRPC_MDELEM_METHOD_PUT || md == GRPC_MDELEM_METHOD_GET ||
md == GRPC_MDELEM_SCHEME_HTTP || md == GRPC_MDELEM_SCHEME_HTTPS ||
md == GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC) {
/* swallow it */
if (md == GRPC_MDELEM_METHOD_POST) {
calld->seen_method = 1;
*calld->recv_idempotent_request = false;
*calld->recv_cacheable_request = false;
} else if (md == GRPC_MDELEM_METHOD_PUT) {
calld->seen_method = 1;
*calld->recv_idempotent_request = true;
} else if (md == GRPC_MDELEM_METHOD_GET) {
calld->seen_method = 1;
*calld->recv_cacheable_request = true;
} else if (md->key == GRPC_MDSTR_SCHEME) {
calld->seen_scheme = 1;
} else if (md == GRPC_MDELEM_TE_TRAILERS) {
@ -137,6 +156,16 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
GRPC_MDSTR_AUTHORITY, GRPC_MDSTR_REF(md->value));
calld->seen_authority = 1;
return authority;
} else if (md->key == GRPC_MDSTR_GRPC_PAYLOAD_BIN) {
/* Retrieve the payload from the value of the 'grpc-payload-bin' header
field */
calld->seen_payload_bin = 1;
gpr_slice_buffer_init(&calld->read_slice_buffer);
gpr_slice_buffer_add(&calld->read_slice_buffer,
gpr_slice_ref(md->value->slice));
grpc_slice_buffer_stream_init(&calld->read_stream,
&calld->read_slice_buffer, 0);
return NULL;
} else {
return md;
}
@ -189,6 +218,36 @@ static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
GRPC_ERROR_UNREF(err);
}
static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *err) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
/* Call recv_message_ready if we got the payload via the header field */
if (calld->seen_payload_bin && calld->recv_message_ready != NULL) {
*calld->pp_recv_message = calld->payload_bin_delivered
? NULL
: (grpc_byte_stream *)&calld->read_stream;
calld->recv_message_ready->cb(exec_ctx, calld->recv_message_ready->cb_arg,
err);
calld->recv_message_ready = NULL;
calld->payload_bin_delivered = true;
}
calld->on_complete->cb(exec_ctx, calld->on_complete->cb_arg, err);
}
static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *err) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
if (calld->seen_payload_bin) {
/* do nothing: this is probably a GET request, and the payload will be
returned in the hs_on_complete callback. */
} else {
calld->recv_message_ready->cb(exec_ctx, calld->recv_message_ready->cb_arg,
err);
}
}
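/* Flow for a cacheable GET, as wired above: the client filter ships the whole
   request message in the grpc-payload-bin header; server_filter() strips that
   header and stashes its bytes in read_stream, hs_recv_message_ready defers
   delivery, and hs_on_complete then hands the payload to the application in
   place of the transport's usual recv_message, with payload_bin_delivered
   preventing a second delivery. */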
static void hs_mutate_op(grpc_call_element *elem,
grpc_transport_stream_op *op) {
/* grab pointers to our data from the call element */
@ -206,11 +265,25 @@ static void hs_mutate_op(grpc_call_element *elem,
if (op->recv_initial_metadata) {
/* substitute our callback for the higher callback */
GPR_ASSERT(op->recv_idempotent_request != NULL);
GPR_ASSERT(op->recv_cacheable_request != NULL);
calld->recv_initial_metadata = op->recv_initial_metadata;
calld->recv_idempotent_request = op->recv_idempotent_request;
calld->recv_cacheable_request = op->recv_cacheable_request;
calld->on_done_recv = op->recv_initial_metadata_ready;
op->recv_initial_metadata_ready = &calld->hs_on_recv;
}
if (op->recv_message) {
calld->recv_message_ready = op->recv_message_ready;
calld->pp_recv_message = op->recv_message;
if (op->recv_message_ready) {
op->recv_message_ready = &calld->hs_recv_message_ready;
}
if (op->on_complete) {
calld->on_complete = op->on_complete;
op->on_complete = &calld->hs_on_complete;
}
}
}
static void hs_start_transport_op(grpc_exec_ctx *exec_ctx,
@ -232,6 +305,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
/* initialize members */
memset(calld, 0, sizeof(*calld));
grpc_closure_init(&calld->hs_on_recv, hs_on_recv, elem);
grpc_closure_init(&calld->hs_on_complete, hs_on_complete, elem);
grpc_closure_init(&calld->hs_recv_message_ready, hs_recv_message_ready, elem);
return GRPC_ERROR_NONE;
}

@ -149,6 +149,7 @@ struct call_data {
grpc_metadata_batch *recv_initial_metadata;
bool recv_idempotent_request;
bool recv_cacheable_request;
grpc_metadata_array initial_metadata;
request_matcher *request_matcher;
@ -497,9 +498,12 @@ static void publish_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
&rc->data.batch.details->method_capacity, calld->path);
rc->data.batch.details->deadline = calld->deadline;
rc->data.batch.details->flags =
0 | (calld->recv_idempotent_request
? GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST
: 0);
(calld->recv_idempotent_request
? GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST
: 0) |
(calld->recv_cacheable_request
? GRPC_INITIAL_METADATA_CACHEABLE_REQUEST
: 0);
break;
case REGISTERED_CALL:
*rc->data.registered.deadline = calld->deadline;
@ -779,6 +783,7 @@ static void server_mutate_op(grpc_call_element *elem,
calld->on_done_recv_initial_metadata = op->recv_initial_metadata_ready;
op->recv_initial_metadata_ready = &calld->server_on_recv_initial_metadata;
op->recv_idempotent_request = &calld->recv_idempotent_request;
op->recv_cacheable_request = &calld->recv_cacheable_request;
}
}

@ -51,15 +51,15 @@ uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
const uint8_t grpc_static_metadata_elem_indices[GRPC_STATIC_MDELEM_COUNT * 2] =
{11, 33, 10, 33, 12, 33, 12, 49, 13, 33, 14, 33, 15, 33, 16, 33, 17, 33,
{11, 33, 10, 33, 12, 33, 12, 50, 13, 33, 14, 33, 15, 33, 16, 33, 17, 33,
19, 33, 20, 33, 21, 33, 22, 33, 23, 33, 24, 33, 25, 33, 26, 33, 27, 33,
28, 18, 28, 33, 29, 33, 30, 33, 34, 33, 35, 33, 36, 33, 37, 33, 40, 31,
40, 32, 40, 48, 40, 53, 40, 54, 40, 55, 40, 56, 42, 31, 42, 48, 42, 53,
45, 0, 45, 1, 45, 2, 50, 33, 57, 33, 58, 33, 59, 33, 60, 33, 61, 33,
62, 33, 63, 33, 64, 33, 65, 33, 66, 33, 67, 33, 68, 38, 68, 70, 68, 73,
69, 81, 69, 82, 71, 33, 72, 33, 74, 33, 75, 33, 76, 33, 77, 33, 78, 39,
78, 51, 78, 52, 79, 33, 80, 33, 83, 3, 83, 4, 83, 5, 83, 6, 83, 7,
83, 8, 83, 9, 84, 33, 85, 86, 87, 33, 88, 33, 89, 33, 90, 33, 91, 33};
40, 32, 40, 49, 40, 54, 40, 55, 40, 56, 40, 57, 42, 31, 42, 49, 42, 54,
46, 0, 46, 1, 46, 2, 51, 33, 58, 33, 59, 33, 60, 33, 61, 33, 62, 33,
63, 33, 64, 33, 65, 33, 66, 33, 67, 33, 68, 33, 69, 38, 69, 71, 69, 74,
70, 82, 70, 83, 72, 33, 73, 33, 75, 33, 76, 33, 77, 33, 78, 33, 79, 39,
79, 52, 79, 53, 80, 33, 81, 33, 84, 3, 84, 4, 84, 5, 84, 6, 84, 7,
84, 8, 84, 9, 85, 33, 86, 87, 88, 33, 89, 33, 90, 33, 91, 33, 92, 33};
const char *const grpc_static_metadata_strings[GRPC_STATIC_MDSTR_COUNT] = {
"0",
@ -107,6 +107,7 @@ const char *const grpc_static_metadata_strings[GRPC_STATIC_MDSTR_COUNT] = {
"grpc-encoding",
"grpc-internal-encoding-request",
"grpc-message",
"grpc-payload-bin",
"grpc-status",
"grpc-timeout",
"grpc-tracing-bin",

@ -44,7 +44,7 @@
#include "src/core/lib/transport/metadata.h"
#define GRPC_STATIC_MDSTR_COUNT 92
#define GRPC_STATIC_MDSTR_COUNT 93
extern grpc_mdstr grpc_static_mdstr_table[GRPC_STATIC_MDSTR_COUNT];
/* "0" */
#define GRPC_MDSTR_0 (&grpc_static_mdstr_table[0])
@ -136,101 +136,103 @@ extern grpc_mdstr grpc_static_mdstr_table[GRPC_STATIC_MDSTR_COUNT];
#define GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST (&grpc_static_mdstr_table[43])
/* "grpc-message" */
#define GRPC_MDSTR_GRPC_MESSAGE (&grpc_static_mdstr_table[44])
/* "grpc-payload-bin" */
#define GRPC_MDSTR_GRPC_PAYLOAD_BIN (&grpc_static_mdstr_table[45])
/* "grpc-status" */
#define GRPC_MDSTR_GRPC_STATUS (&grpc_static_mdstr_table[45])
#define GRPC_MDSTR_GRPC_STATUS (&grpc_static_mdstr_table[46])
/* "grpc-timeout" */
#define GRPC_MDSTR_GRPC_TIMEOUT (&grpc_static_mdstr_table[46])
#define GRPC_MDSTR_GRPC_TIMEOUT (&grpc_static_mdstr_table[47])
/* "grpc-tracing-bin" */
#define GRPC_MDSTR_GRPC_TRACING_BIN (&grpc_static_mdstr_table[47])
#define GRPC_MDSTR_GRPC_TRACING_BIN (&grpc_static_mdstr_table[48])
/* "gzip" */
#define GRPC_MDSTR_GZIP (&grpc_static_mdstr_table[48])
#define GRPC_MDSTR_GZIP (&grpc_static_mdstr_table[49])
/* "gzip, deflate" */
#define GRPC_MDSTR_GZIP_COMMA_DEFLATE (&grpc_static_mdstr_table[49])
#define GRPC_MDSTR_GZIP_COMMA_DEFLATE (&grpc_static_mdstr_table[50])
/* "host" */
#define GRPC_MDSTR_HOST (&grpc_static_mdstr_table[50])
#define GRPC_MDSTR_HOST (&grpc_static_mdstr_table[51])
/* "http" */
#define GRPC_MDSTR_HTTP (&grpc_static_mdstr_table[51])
#define GRPC_MDSTR_HTTP (&grpc_static_mdstr_table[52])
/* "https" */
#define GRPC_MDSTR_HTTPS (&grpc_static_mdstr_table[52])
#define GRPC_MDSTR_HTTPS (&grpc_static_mdstr_table[53])
/* "identity" */
#define GRPC_MDSTR_IDENTITY (&grpc_static_mdstr_table[53])
#define GRPC_MDSTR_IDENTITY (&grpc_static_mdstr_table[54])
/* "identity,deflate" */
#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE (&grpc_static_mdstr_table[54])
#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE (&grpc_static_mdstr_table[55])
/* "identity,deflate,gzip" */
#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE_COMMA_GZIP \
(&grpc_static_mdstr_table[55])
(&grpc_static_mdstr_table[56])
/* "identity,gzip" */
#define GRPC_MDSTR_IDENTITY_COMMA_GZIP (&grpc_static_mdstr_table[56])
#define GRPC_MDSTR_IDENTITY_COMMA_GZIP (&grpc_static_mdstr_table[57])
/* "if-match" */
#define GRPC_MDSTR_IF_MATCH (&grpc_static_mdstr_table[57])
#define GRPC_MDSTR_IF_MATCH (&grpc_static_mdstr_table[58])
/* "if-modified-since" */
#define GRPC_MDSTR_IF_MODIFIED_SINCE (&grpc_static_mdstr_table[58])
#define GRPC_MDSTR_IF_MODIFIED_SINCE (&grpc_static_mdstr_table[59])
/* "if-none-match" */
#define GRPC_MDSTR_IF_NONE_MATCH (&grpc_static_mdstr_table[59])
#define GRPC_MDSTR_IF_NONE_MATCH (&grpc_static_mdstr_table[60])
/* "if-range" */
#define GRPC_MDSTR_IF_RANGE (&grpc_static_mdstr_table[60])
#define GRPC_MDSTR_IF_RANGE (&grpc_static_mdstr_table[61])
/* "if-unmodified-since" */
#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (&grpc_static_mdstr_table[61])
#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (&grpc_static_mdstr_table[62])
/* "last-modified" */
#define GRPC_MDSTR_LAST_MODIFIED (&grpc_static_mdstr_table[62])
#define GRPC_MDSTR_LAST_MODIFIED (&grpc_static_mdstr_table[63])
/* "link" */
#define GRPC_MDSTR_LINK (&grpc_static_mdstr_table[63])
#define GRPC_MDSTR_LINK (&grpc_static_mdstr_table[64])
/* "load-reporting-initial" */
#define GRPC_MDSTR_LOAD_REPORTING_INITIAL (&grpc_static_mdstr_table[64])
#define GRPC_MDSTR_LOAD_REPORTING_INITIAL (&grpc_static_mdstr_table[65])
/* "load-reporting-trailing" */
#define GRPC_MDSTR_LOAD_REPORTING_TRAILING (&grpc_static_mdstr_table[65])
#define GRPC_MDSTR_LOAD_REPORTING_TRAILING (&grpc_static_mdstr_table[66])
/* "location" */
#define GRPC_MDSTR_LOCATION (&grpc_static_mdstr_table[66])
#define GRPC_MDSTR_LOCATION (&grpc_static_mdstr_table[67])
/* "max-forwards" */
#define GRPC_MDSTR_MAX_FORWARDS (&grpc_static_mdstr_table[67])
#define GRPC_MDSTR_MAX_FORWARDS (&grpc_static_mdstr_table[68])
/* ":method" */
#define GRPC_MDSTR_METHOD (&grpc_static_mdstr_table[68])
#define GRPC_MDSTR_METHOD (&grpc_static_mdstr_table[69])
/* ":path" */
#define GRPC_MDSTR_PATH (&grpc_static_mdstr_table[69])
#define GRPC_MDSTR_PATH (&grpc_static_mdstr_table[70])
/* "POST" */
#define GRPC_MDSTR_POST (&grpc_static_mdstr_table[70])
#define GRPC_MDSTR_POST (&grpc_static_mdstr_table[71])
/* "proxy-authenticate" */
#define GRPC_MDSTR_PROXY_AUTHENTICATE (&grpc_static_mdstr_table[71])
#define GRPC_MDSTR_PROXY_AUTHENTICATE (&grpc_static_mdstr_table[72])
/* "proxy-authorization" */
#define GRPC_MDSTR_PROXY_AUTHORIZATION (&grpc_static_mdstr_table[72])
#define GRPC_MDSTR_PROXY_AUTHORIZATION (&grpc_static_mdstr_table[73])
/* "PUT" */
#define GRPC_MDSTR_PUT (&grpc_static_mdstr_table[73])
#define GRPC_MDSTR_PUT (&grpc_static_mdstr_table[74])
/* "range" */
#define GRPC_MDSTR_RANGE (&grpc_static_mdstr_table[74])
#define GRPC_MDSTR_RANGE (&grpc_static_mdstr_table[75])
/* "referer" */
#define GRPC_MDSTR_REFERER (&grpc_static_mdstr_table[75])
#define GRPC_MDSTR_REFERER (&grpc_static_mdstr_table[76])
/* "refresh" */
#define GRPC_MDSTR_REFRESH (&grpc_static_mdstr_table[76])
#define GRPC_MDSTR_REFRESH (&grpc_static_mdstr_table[77])
/* "retry-after" */
#define GRPC_MDSTR_RETRY_AFTER (&grpc_static_mdstr_table[77])
#define GRPC_MDSTR_RETRY_AFTER (&grpc_static_mdstr_table[78])
/* ":scheme" */
#define GRPC_MDSTR_SCHEME (&grpc_static_mdstr_table[78])
#define GRPC_MDSTR_SCHEME (&grpc_static_mdstr_table[79])
/* "server" */
#define GRPC_MDSTR_SERVER (&grpc_static_mdstr_table[79])
#define GRPC_MDSTR_SERVER (&grpc_static_mdstr_table[80])
/* "set-cookie" */
#define GRPC_MDSTR_SET_COOKIE (&grpc_static_mdstr_table[80])
#define GRPC_MDSTR_SET_COOKIE (&grpc_static_mdstr_table[81])
/* "/" */
#define GRPC_MDSTR_SLASH (&grpc_static_mdstr_table[81])
#define GRPC_MDSTR_SLASH (&grpc_static_mdstr_table[82])
/* "/index.html" */
#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (&grpc_static_mdstr_table[82])
#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (&grpc_static_mdstr_table[83])
/* ":status" */
#define GRPC_MDSTR_STATUS (&grpc_static_mdstr_table[83])
#define GRPC_MDSTR_STATUS (&grpc_static_mdstr_table[84])
/* "strict-transport-security" */
#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (&grpc_static_mdstr_table[84])
#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (&grpc_static_mdstr_table[85])
/* "te" */
#define GRPC_MDSTR_TE (&grpc_static_mdstr_table[85])
#define GRPC_MDSTR_TE (&grpc_static_mdstr_table[86])
/* "trailers" */
#define GRPC_MDSTR_TRAILERS (&grpc_static_mdstr_table[86])
#define GRPC_MDSTR_TRAILERS (&grpc_static_mdstr_table[87])
/* "transfer-encoding" */
#define GRPC_MDSTR_TRANSFER_ENCODING (&grpc_static_mdstr_table[87])
#define GRPC_MDSTR_TRANSFER_ENCODING (&grpc_static_mdstr_table[88])
/* "user-agent" */
#define GRPC_MDSTR_USER_AGENT (&grpc_static_mdstr_table[88])
#define GRPC_MDSTR_USER_AGENT (&grpc_static_mdstr_table[89])
/* "vary" */
#define GRPC_MDSTR_VARY (&grpc_static_mdstr_table[89])
#define GRPC_MDSTR_VARY (&grpc_static_mdstr_table[90])
/* "via" */
#define GRPC_MDSTR_VIA (&grpc_static_mdstr_table[90])
#define GRPC_MDSTR_VIA (&grpc_static_mdstr_table[91])
/* "www-authenticate" */
#define GRPC_MDSTR_WWW_AUTHENTICATE (&grpc_static_mdstr_table[91])
#define GRPC_MDSTR_WWW_AUTHENTICATE (&grpc_static_mdstr_table[92])
#define GRPC_STATIC_MDELEM_COUNT 81
extern grpc_mdelem grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];

@ -124,6 +124,7 @@ typedef struct grpc_transport_stream_op {
/** Receive initial metadata from the stream, into provided metadata batch. */
grpc_metadata_batch *recv_initial_metadata;
bool *recv_idempotent_request;
bool *recv_cacheable_request;
/** Should be enqueued when initial metadata is ready to be processed. */
grpc_closure *recv_initial_metadata_ready;

@ -35,10 +35,9 @@
#include <grpc++/impl/service_type.h>
#include <grpc++/server.h>
#include <grpc/support/cpu.h>
#include <grpc/support/log.h>
#include <grpc/support/useful.h>
#include "include/grpc/support/useful.h"
#include "src/cpp/server/thread_pool_interface.h"
namespace grpc {
@ -156,14 +155,12 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
(*option)->UpdateArguments(&args);
(*option)->UpdatePlugins(&plugins_);
}
if (!thread_pool) {
for (auto plugin = plugins_.begin(); plugin != plugins_.end(); plugin++) {
if ((*plugin)->has_sync_methods()) {
thread_pool.reset(CreateDefaultThreadPool());
has_sync_methods = true;
break;
}
for (auto plugin = plugins_.begin(); plugin != plugins_.end(); plugin++) {
if (!thread_pool && (*plugin)->has_sync_methods()) {
thread_pool.reset(CreateDefaultThreadPool());
has_sync_methods = true;
}
(*plugin)->UpdateChannelArguments(&args);
}
if (max_receive_message_size_ > 0) {
args.SetInt(GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH, max_receive_message_size_);

@ -34,8 +34,15 @@
namespace Grpc;
/**
* Class AbstractCall
* @package Grpc
*/
abstract class AbstractCall
{
/**
* @var Call
*/
protected $call;
protected $deserialize;
protected $metadata;
@ -51,13 +58,15 @@ abstract class AbstractCall
* the response
* @param array $options Call options (optional)
*/
public function __construct(Channel $channel,
$method,
$deserialize,
$options = [])
{
if (isset($options['timeout']) &&
is_numeric($timeout = $options['timeout'])) {
public function __construct(
Channel $channel,
$method,
$deserialize,
$options = []
) {
if (array_key_exists('timeout', $options) &&
is_numeric($timeout = $options['timeout'])
) {
$now = Timeval::now();
$delta = new Timeval($timeout);
$deadline = $now->add($delta);
@ -68,17 +77,19 @@ abstract class AbstractCall
$this->deserialize = $deserialize;
$this->metadata = null;
$this->trailing_metadata = null;
if (isset($options['call_credentials_callback']) &&
if (array_key_exists('call_credentials_callback', $options) &&
is_callable($call_credentials_callback =
$options['call_credentials_callback'])) {
$options['call_credentials_callback'])
) {
$call_credentials = CallCredentials::createFromPlugin(
$call_credentials_callback);
$call_credentials_callback
);
$this->call->setCredentials($call_credentials);
}
}
/**
* @return The metadata sent by the server.
* @return mixed The metadata sent by the server.
*/
public function getMetadata()
{
@ -86,7 +97,7 @@ abstract class AbstractCall
}
/**
* @return The trailing metadata sent by the server.
* @return mixed The trailing metadata sent by the server.
*/
public function getTrailingMetadata()
{
@ -114,7 +125,7 @@ abstract class AbstractCall
*
* @param string $value The binary value to deserialize
*
* @return The deserialized value
* @return mixed The deserialized value
*/
protected function deserializeResponse($value)
{

@ -45,7 +45,7 @@ class BidiStreamingCall extends AbstractCall
*
* @param array $metadata Metadata to send with the call, if applicable
*/
public function start($metadata = [])
public function start(array $metadata = [])
{
$this->call->startBatch([
OP_SEND_INITIAL_METADATA => $metadata,
@ -55,7 +55,7 @@ class BidiStreamingCall extends AbstractCall
/**
* Reads the next value from the server.
*
* @return The next value from the server, or null if there is none
* @return mixed The next value from the server, or null if there is none
*/
public function read()
{
@ -82,7 +82,7 @@ class BidiStreamingCall extends AbstractCall
public function write($data, $options = [])
{
$message_array = ['message' => $data->serialize()];
if (isset($options['flags'])) {
if (array_key_exists('flags', $options)) {
$message_array['flags'] = $options['flags'];
}
$this->call->startBatch([
@ -103,8 +103,8 @@ class BidiStreamingCall extends AbstractCall
/**
* Wait for the server to send the status, and return it.
*
* @return object The status object, with integer $code, string $details,
* and array $metadata members
* @return \stdClass The status object, with integer $code, string $details,
* and array $metadata members
*/
public function getStatus()
{

@ -60,10 +60,10 @@ class ClientStreamingCall extends AbstractCall
* @param array $options an array of options, possible keys:
* 'flags' => a number
*/
public function write($data, $options = [])
public function write($data, array $options = [])
{
$message_array = ['message' => $data->serialize()];
if (isset($options['flags'])) {
if (array_key_exists('flags', $options)) {
$message_array['flags'] = $options['flags'];
}
$this->call->startBatch([
@ -74,7 +74,7 @@ class ClientStreamingCall extends AbstractCall
/**
* Wait for the server to respond with data and a status.
*
* @return [response data, status]
* @return array [response data, status]
*/
public function wait()
{

@ -36,14 +36,14 @@ namespace Grpc;
/**
* Represents an active call that sends a single message and then gets a stream
* of reponses.
* of responses.
*/
class ServerStreamingCall extends AbstractCall
{
/**
* Start the call.
*
* @param $data The data to send
* @param mixed $data The data to send
* @param array $metadata Metadata to send with the call, if applicable
* @param array $options an array of options, possible keys:
* 'flags' => a number
@ -51,7 +51,7 @@ class ServerStreamingCall extends AbstractCall
public function start($data, $metadata = [], $options = [])
{
$message_array = ['message' => $data->serialize()];
if (isset($options['flags'])) {
if (array_key_exists('flags', $options)) {
$message_array['flags'] = $options['flags'];
}
$event = $this->call->startBatch([
@ -64,7 +64,7 @@ class ServerStreamingCall extends AbstractCall
}
/**
* @return An iterator of response values
* @return mixed An iterator of response values
*/
public function responses()
{
@ -82,8 +82,8 @@ class ServerStreamingCall extends AbstractCall
/**
* Wait for the server to send the status, and return it.
*
* @return object The status object, with integer $code, string $details,
* and array $metadata members
* @return \stdClass The status object, with integer $code, string $details,
* and array $metadata members
*/
public function getStatus()
{

@ -43,7 +43,7 @@ class UnaryCall extends AbstractCall
/**
* Start the call.
*
* @param $data The data to send
* @param mixed $data The data to send
* @param array $metadata Metadata to send with the call, if applicable
* @param array $options an array of options, possible keys:
* 'flags' => a number
@ -66,7 +66,7 @@ class UnaryCall extends AbstractCall
/**
* Wait for the server to respond with data and a status.
*
* @return [response data, status]
* @return array [response data, status]
*/
public function wait()
{

@ -0,0 +1,48 @@
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.trace;
// A TraceId uniquely represents a single Trace. It is a 128-bit nonce.
message TraceId {
fixed64 hi = 1;
fixed64 lo = 2;
}
// Tracing information that is propagated with RPCs.
message TraceContext {
// Trace identifier. Must be present.
TraceId trace_id = 1;
// ID of parent (client) span. Must be present.
fixed64 span_id = 2;
// true if this trace is sampled.
bool is_sampled = 3;
}
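A hedged aside (not code from this change): the comment above describes the trace id as a 128-bit nonce carried in two fixed64 fields. One way to pack a 16-byte id into hi/lo is sketched below; the byte order is an assumption, since the .proto does not pin one down:

#include <stdint.h>
#include <string.h>

void split_trace_id(const uint8_t id[16], uint64_t *hi, uint64_t *lo) {
  memcpy(hi, id, 8);     /* first 8 bytes -> hi */
  memcpy(lo, id + 8, 8); /* last 8 bytes  -> lo */
}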

@ -234,7 +234,6 @@ CORE_SOURCE_FILES = [
'src/core/ext/client_config/resolver_registry.c',
'src/core/ext/client_config/resolver_result.c',
'src/core/ext/client_config/subchannel.c',
'src/core/ext/client_config/subchannel_call_holder.c',
'src/core/ext/client_config/subchannel_index.c',
'src/core/ext/client_config/uri_parser.c',
'src/core/ext/transport/chttp2/server/insecure/server_chttp2.c',
@ -256,6 +255,7 @@ CORE_SOURCE_FILES = [
'src/core/ext/census/base_resources.c',
'src/core/ext/census/context.c',
'src/core/ext/census/gen/census.pb.c',
'src/core/ext/census/gen/trace_context.pb.c',
'src/core/ext/census/grpc_context.c',
'src/core/ext/census/grpc_filter.c',
'src/core/ext/census/grpc_plugin.c',

@ -31,113 +31,10 @@ require_relative '../grpc'
require_relative 'active_call'
require_relative 'service'
require 'thread'
require 'concurrent'
# GRPC contains the General RPC module.
module GRPC
# Pool is a simple thread pool.
class Pool
# Default keep alive period is 1s
DEFAULT_KEEP_ALIVE = 1
def initialize(size, keep_alive: DEFAULT_KEEP_ALIVE)
fail 'pool size must be positive' unless size > 0
@jobs = Queue.new
@size = size
@stopped = false
@stop_mutex = Mutex.new # needs to be held when accessing @stopped
@stop_cond = ConditionVariable.new
@workers = []
@keep_alive = keep_alive
end
# Returns the number of jobs waiting
def jobs_waiting
@jobs.size
end
# Runs the given block on the queue with the provided args.
#
# @param args the args passed blk when it is called
# @param blk the block to call
def schedule(*args, &blk)
return if blk.nil?
@stop_mutex.synchronize do
if @stopped
GRPC.logger.warn('did not schedule job, already stopped')
return
end
GRPC.logger.info('schedule another job')
@jobs << [blk, args]
end
end
# Starts running the jobs in the thread pool.
def start
@stop_mutex.synchronize do
fail 'already stopped' if @stopped
end
until @workers.size == @size.to_i
next_thread = Thread.new do
catch(:exit) do # allows { throw :exit } to kill a thread
loop_execute_jobs
end
remove_current_thread
end
@workers << next_thread
end
end
# Stops the jobs in the pool
def stop
GRPC.logger.info('stopping, will wait for all the workers to exit')
@workers.size.times { schedule { throw :exit } }
@stop_mutex.synchronize do # wait @keep_alive for works to stop
@stopped = true
@stop_cond.wait(@stop_mutex, @keep_alive) if @workers.size > 0
end
forcibly_stop_workers
GRPC.logger.info('stopped, all workers are shutdown')
end
protected
# Forcibly shutdown any threads that are still alive.
def forcibly_stop_workers
return unless @workers.size > 0
GRPC.logger.info("forcibly terminating #{@workers.size} worker(s)")
@workers.each do |t|
next unless t.alive?
begin
t.exit
rescue StandardError => e
GRPC.logger.warn('error while terminating a worker')
GRPC.logger.warn(e)
end
end
end
# removes the threads from workers, and signal when all the
# threads are complete.
def remove_current_thread
@stop_mutex.synchronize do
@workers.delete(Thread.current)
@stop_cond.signal if @workers.size.zero?
end
end
def loop_execute_jobs
loop do
begin
blk, args = @jobs.pop
blk.call(*args)
rescue StandardError => e
GRPC.logger.warn('Error in worker thread')
GRPC.logger.warn(e)
end
end
end
end
# RpcServer hosts a number of services and makes them available on the
# network.
class RpcServer
@ -147,11 +44,14 @@ module GRPC
def_delegators :@server, :add_http2_port
# Default thread pool size is 3
DEFAULT_POOL_SIZE = 3
# Default max size of the thread pool is 100
DEFAULT_MAX_POOL_SIZE = 100
# Default minimum size of the thread pool is 5
DEFAULT_MIN_POOL_SIZE = 5
# Default max_waiting_requests size is 20
DEFAULT_MAX_WAITING_REQUESTS = 20
# Default max_waiting_requests size is 60
DEFAULT_MAX_WAITING_REQUESTS = 60
# Default poll period is 1s
DEFAULT_POLL_PERIOD = 1
@ -174,8 +74,8 @@ module GRPC
# There are some specific keyword args used to configure the RpcServer
# instance.
#
# * pool_size: the size of the thread pool the server uses to run its
# threads
# * pool_size: the maximum size that the server's thread pool
# can grow to.
#
# * max_waiting_requests: the maximum number of requests that are not
# being handled to allow. When this limit is exceeded, the server responds
@ -191,7 +91,8 @@ module GRPC
#
# * server_args:
# A server arguments hash to be passed down to the underlying core server
def initialize(pool_size:DEFAULT_POOL_SIZE,
def initialize(pool_size:DEFAULT_MAX_POOL_SIZE,
min_pool_size:DEFAULT_MIN_POOL_SIZE,
max_waiting_requests:DEFAULT_MAX_WAITING_REQUESTS,
poll_period:DEFAULT_POLL_PERIOD,
connect_md_proc:nil,
@ -199,8 +100,12 @@ module GRPC
@connect_md_proc = RpcServer.setup_connect_md_proc(connect_md_proc)
@max_waiting_requests = max_waiting_requests
@poll_period = poll_period
@pool_size = pool_size
@pool = Pool.new(@pool_size)
@pool = Concurrent::ThreadPoolExecutor.new(
min_threads: [min_pool_size, pool_size].min,
max_threads: pool_size,
max_queue: max_waiting_requests,
fallback_policy: :discard)
@run_cond = ConditionVariable.new
@run_mutex = Mutex.new
# running_state can take 4 values: :not_started, :running, :stopping, and
@ -221,7 +126,8 @@ module GRPC
end
deadline = from_relative_time(@poll_period)
@server.close(deadline)
@pool.stop
@pool.shutdown
@pool.wait_for_termination
end
def running_state
@ -318,7 +224,6 @@ module GRPC
def run
@run_mutex.synchronize do
fail 'cannot run without registering services' if rpc_descs.size.zero?
@pool.start
@server.start
transition_running_state(:running)
@run_cond.broadcast
@ -330,9 +235,11 @@ module GRPC
# Sends RESOURCE_EXHAUSTED if there are too many unprocessed jobs
def available?(an_rpc)
jobs_count, max = @pool.jobs_waiting, @max_waiting_requests
jobs_count, max = @pool.queue_length, @pool.max_queue
GRPC.logger.info("waiting: #{jobs_count}, max: #{max}")
return an_rpc if @pool.jobs_waiting <= @max_waiting_requests
# remaining capacity for ThreadPoolExecutors is -1 if unbounded
return an_rpc if @pool.remaining_capacity != 0
GRPC.logger.warn("NOT AVAILABLE: too many jobs_waiting: #{an_rpc}")
noop = proc { |x| x }
@ -368,7 +275,7 @@ module GRPC
break if (!an_rpc.nil?) && an_rpc.call.nil?
active_call = new_active_server_call(an_rpc)
unless active_call.nil?
@pool.schedule(active_call) do |ac|
@pool.post(active_call) do |ac|
c, mth = ac
begin
rpc_descs[mth].run_server_method(c, rpc_handlers[mth])

@ -1,138 +0,0 @@
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
require 'grpc'
describe GRPC::Pool do
Pool = GRPC::Pool
describe '#new' do
it 'raises if a non-positive size is used' do
expect { Pool.new(0) }.to raise_error
expect { Pool.new(-1) }.to raise_error
expect { Pool.new(Object.new) }.to raise_error
end
it 'is constructed OK with a positive size' do
expect { Pool.new(1) }.not_to raise_error
end
end
describe '#jobs_waiting' do
it 'at start, it is zero' do
p = Pool.new(1)
expect(p.jobs_waiting).to be(0)
end
it 'it increases, with each scheduled job if the pool is not running' do
p = Pool.new(1)
job = proc {}
expect(p.jobs_waiting).to be(0)
5.times do |i|
p.schedule(&job)
expect(p.jobs_waiting).to be(i + 1)
end
end
it 'it decreases as jobs are run' do
p = Pool.new(1)
job = proc {}
expect(p.jobs_waiting).to be(0)
3.times do
p.schedule(&job)
end
p.start
sleep 2
expect(p.jobs_waiting).to be(0)
end
end
describe '#schedule' do
it 'return if the pool is already stopped' do
p = Pool.new(1)
p.stop
job = proc {}
expect { p.schedule(&job) }.to_not raise_error
end
it 'adds jobs that get run by the pool' do
p = Pool.new(1)
p.start
o, q = Object.new, Queue.new
job = proc { q.push(o) }
p.schedule(&job)
expect(q.pop).to be(o)
p.stop
end
end
describe '#stop' do
it 'works when there are no scheduled tasks' do
p = Pool.new(1)
expect { p.stop }.not_to raise_error
end
it 'stops jobs when there are long running jobs' do
p = Pool.new(1)
p.start
o, q = Object.new, Queue.new
job = proc do
sleep(5) # long running
q.push(o)
end
p.schedule(&job)
sleep(1) # should ensure the long job gets scheduled
expect { p.stop }.not_to raise_error
end
end
describe '#start' do
it 'runs pre-scheduled jobs' do
p = Pool.new(2)
o, q = Object.new, Queue.new
n = 5 # arbitrary
n.times { p.schedule(o, &q.method(:push)) }
p.start
n.times { expect(q.pop).to be(o) }
p.stop
end
it 'runs jobs as they are scheduled ' do
p = Pool.new(2)
o, q = Object.new, Queue.new
p.start
n = 5 # arbitrary
n.times do
p.schedule(o, &q.method(:push))
expect(q.pop).to be(o)
end
p.stop
end
end
end

@ -395,9 +395,9 @@ describe GRPC::RpcServer do
it 'should return RESOURCE_EXHAUSTED on too many jobs', server: true do
opts = {
server_args: { a_channel_arg: 'an_arg' },
pool_size: 1,
pool_size: 2,
poll_period: 1,
max_waiting_requests: 0
max_waiting_requests: 1
}
alt_srv = RpcServer.new(**opts)
alt_srv.handle(SlowService)
@ -406,24 +406,23 @@ describe GRPC::RpcServer do
t = Thread.new { alt_srv.run }
alt_srv.wait_till_running
req = EchoMsg.new
n = 5 # arbitrary, use as many to ensure the server pool is exceeded
n = 20 # arbitrary, use as many to ensure the server pool is exceeded
threads = []
one_failed_as_unavailable = false
bad_status_code = nil
n.times do
threads << Thread.new do
stub = SlowStub.new(alt_host, :this_channel_is_insecure)
begin
stub.an_rpc(req)
rescue GRPC::BadStatus => e
one_failed_as_unavailable =
e.code == StatusCodes::RESOURCE_EXHAUSTED
bad_status_code = e.code
end
end
end
threads.each(&:join)
alt_srv.stop
t.join
expect(one_failed_as_unavailable).to be(true)
expect(bad_status_code).to be(StatusCodes::RESOURCE_EXHAUSTED)
end
end

@ -31,6 +31,7 @@
s.add_dependency 'google-protobuf', '~> 3.0'
s.add_dependency 'googleauth', '~> 0.5.1'
s.add_dependency 'concurrent-ruby'
s.add_development_dependency 'bundler', '~> 1.9'
s.add_development_dependency 'facter', '~> 2.4'

@ -97,7 +97,7 @@ static void verifier(grpc_server *server, grpc_completion_queue *cq,
&deadline, &request_metadata_recv,
&payload, cq, cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(101), 1);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
GPR_ASSERT(payload != NULL);

@ -121,7 +121,7 @@ static void server_verifier(grpc_server *server, grpc_completion_queue *cq,
error = grpc_server_request_call(server, &s, &call_details,
&request_metadata_recv, cq, cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(101), 1);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
GPR_ASSERT(0 == strcmp(call_details.host, "localhost"));
@ -148,7 +148,7 @@ static void server_verifier_sends_too_much_metadata(grpc_server *server,
error = grpc_server_request_call(server, &s, &call_details,
&request_metadata_recv, cq, cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(101), 1);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
GPR_ASSERT(0 == strcmp(call_details.host, "localhost"));
@ -171,7 +171,7 @@ static void server_verifier_sends_too_much_metadata(grpc_server *server,
op.reserved = NULL;
error = grpc_call_start_batch(s, &op, 1, tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(102), 0); // Operation fails.
CQ_EXPECT_COMPLETION(cqv, tag(102), 0); // Operation fails.
cq_verify(cqv);
gpr_free((char *)meta.value);

@ -70,7 +70,7 @@ static void verifier_succeeds(grpc_server *server, grpc_completion_queue *cq,
&deadline, &request_metadata_recv,
&payload, cq, cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(101), 1);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
GPR_ASSERT(payload != NULL);
@ -96,7 +96,7 @@ static void verifier_fails(grpc_server *server, grpc_completion_queue *cq,
&deadline, &request_metadata_recv,
&payload, cq, cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(101), 1);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
GPR_ASSERT(payload == NULL);

@ -114,7 +114,7 @@ static void verifier(grpc_server *server, grpc_completion_queue *cq,
error = grpc_server_request_call(server, &s, &call_details,
&request_metadata_recv, cq, cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(101), 1);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
GPR_ASSERT(0 == strcmp(call_details.host, "localhost"));

@ -111,7 +111,7 @@ static void run_test(const char *target, size_t nops) {
error = grpc_call_start_batch(c, ops, nops, tag(1), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status != GRPC_STATUS_OK);

@ -351,9 +351,9 @@ static int *perform_request(servers_fixture *f, grpc_channel *client,
ops, (size_t)(op - ops),
tag(102), NULL));
cq_expect_completion(cqv, tag(102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
if (!completed_client) {
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
}
cq_verify(cqv);
@ -376,7 +376,7 @@ static int *perform_request(servers_fixture *f, grpc_channel *client,
} else { /* no response from server */
grpc_call_cancel(c, NULL);
if (!completed_client) {
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
}
}
@ -576,7 +576,7 @@ static void test_ping() {
client = create_client(f);
grpc_channel_ping(client, f->cq, tag(0), NULL);
cq_expect_completion(cqv, tag(0), 0);
CQ_EXPECT_COMPLETION(cqv, tag(0), 0);
/* check that we're still in idle, and start connecting */
GPR_ASSERT(grpc_channel_check_connectivity_state(client, 1) ==
@ -586,7 +586,7 @@ static void test_ping() {
while (state != GRPC_CHANNEL_READY) {
grpc_channel_watch_connectivity_state(
client, state, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(3), f->cq, tag(99));
cq_expect_completion(cqv, tag(99), 1);
CQ_EXPECT_COMPLETION(cqv, tag(99), 1);
cq_verify(cqv);
state = grpc_channel_check_connectivity_state(client, 0);
GPR_ASSERT(state == GRPC_CHANNEL_READY ||
@ -596,7 +596,7 @@ static void test_ping() {
for (i = 1; i <= 5; i++) {
grpc_channel_ping(client, f->cq, tag(i), NULL);
cq_expect_completion(cqv, tag(i), 1);
CQ_EXPECT_COMPLETION(cqv, tag(i), 1);
cq_verify(cqv);
}
gpr_free(rdata.call_details);

@ -206,7 +206,7 @@ static void start_rpc(int target_port, grpc_status_code expected_status,
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
gpr_log(GPR_DEBUG, "Rpc status: %d, details: %s", status, details);

@ -61,6 +61,8 @@ typedef struct metadata {
list to detail other expectations */
typedef struct expectation {
struct expectation *next;
const char *file;
int line;
grpc_completion_type type;
void *tag;
int success;
@ -180,7 +182,8 @@ static void expectation_to_strvec(gpr_strvec *buf, expectation *e) {
switch (e->type) {
case GRPC_OP_COMPLETE:
gpr_asprintf(&tmp, "GRPC_OP_COMPLETE result=%d", e->success);
gpr_asprintf(&tmp, "GRPC_OP_COMPLETE result=%d %s:%d", e->success,
e->file, e->line);
gpr_strvec_add(buf, tmp);
break;
case GRPC_QUEUE_TIMEOUT:
@ -214,25 +217,16 @@ static void fail_no_event_received(cq_verifier *v) {
}
void cq_verify(cq_verifier *v) {
gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10);
grpc_event ev;
expectation *e;
char *s;
gpr_strvec have_tags;
gpr_strvec_init(&have_tags);
const gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10);
while (v->first_expectation != NULL) {
ev = grpc_completion_queue_next(v->cq, deadline, NULL);
grpc_event ev = grpc_completion_queue_next(v->cq, deadline, NULL);
if (ev.type == GRPC_QUEUE_TIMEOUT) {
fail_no_event_received(v);
break;
}
expectation *e;
expectation *prev = NULL;
for (e = v->first_expectation; e != NULL; e = e->next) {
gpr_asprintf(&s, " %p", e->tag);
gpr_strvec_add(&have_tags, s);
if (e->tag == ev.tag) {
verify_matches(e, &ev);
if (e == v->first_expectation) v->first_expectation = e->next;
@ -243,18 +237,19 @@ void cq_verify(cq_verifier *v) {
prev = e;
}
if (e == NULL) {
s = grpc_event_string(&ev);
char *s = grpc_event_string(&ev);
gpr_log(GPR_ERROR, "cq returned unexpected event: %s", s);
gpr_free(s);
s = gpr_strvec_flatten(&have_tags, NULL);
gpr_log(GPR_ERROR, "expected tags:%s", s);
gpr_strvec expectations;
gpr_strvec_init(&expectations);
expectations_to_strvec(&expectations, v);
s = gpr_strvec_flatten(&expectations, NULL);
gpr_strvec_destroy(&expectations);
gpr_log(GPR_ERROR, "expected tags:\n%s", s);
gpr_free(s);
gpr_strvec_destroy(&have_tags);
abort();
}
}
gpr_strvec_destroy(&have_tags);
}
void cq_verify_empty_timeout(cq_verifier *v, int timeout_sec) {
@ -276,16 +271,19 @@ void cq_verify_empty_timeout(cq_verifier *v, int timeout_sec) {
void cq_verify_empty(cq_verifier *v) { cq_verify_empty_timeout(v, 1); }
static void add(cq_verifier *v, grpc_completion_type type, void *tag,
bool success) {
static void add(cq_verifier *v, const char *file, int line,
grpc_completion_type type, void *tag, bool success) {
expectation *e = gpr_malloc(sizeof(expectation));
e->type = type;
e->file = file;
e->line = line;
e->tag = tag;
e->success = success;
e->next = v->first_expectation;
v->first_expectation = e;
}
void cq_expect_completion(cq_verifier *v, void *tag, bool success) {
add(v, GRPC_OP_COMPLETE, tag, success);
void cq_expect_completion(cq_verifier *v, const char *file, int line, void *tag,
bool success) {
add(v, file, line, GRPC_OP_COMPLETE, tag, success);
}
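A hedged aside (not code from this change): the header hunk below wraps this function in a CQ_EXPECT_COMPLETION macro so that __FILE__ and __LINE__ expand at the test's call site rather than inside cq_verifier.c; expectation_to_strvec above then prints that location when an expectation is not met. The pattern in isolation, with hypothetical names:

#include <stdio.h>

static void expect_at(const char *file, int line, int tag) {
  printf("expectation for tag %d registered at %s:%d\n", tag, file, line);
}

/* Expanding at the call site is what makes the recorded location useful. */
#define EXPECT_TAG(tag) expect_at(__FILE__, __LINE__, (tag))

int main(void) {
  EXPECT_TAG(101); /* reports this file and this line, not expect_at's */
  return 0;
}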

@ -62,7 +62,10 @@ void cq_verify_empty_timeout(cq_verifier *v, int timeout_sec);
Any functions taking ... expect a NULL terminated list of key/value pairs
(each pair using two parameter slots) of metadata that MUST be present in
the event. */
void cq_expect_completion(cq_verifier *v, void *tag, bool success);
void cq_expect_completion(cq_verifier *v, const char *file, int line, void *tag,
bool success);
#define CQ_EXPECT_COMPLETION(v, tag, success) \
cq_expect_completion(v, __FILE__, __LINE__, tag, success)
int byte_buffer_eq_string(grpc_byte_buffer *byte_buffer, const char *string);
int contains_metadata(grpc_metadata_array *array, const char *key,

@ -199,7 +199,7 @@ void test_connect(const char *server_host, const char *client_host, int port,
error = grpc_server_request_call(server, &s, &call_details,
&request_metadata_recv, cq, cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(101), 1);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
@ -221,8 +221,8 @@ void test_connect(const char *server_host, const char *client_host, int port,
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(102), 1);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
peer = grpc_call_get_peer(c);
@ -238,7 +238,7 @@ void test_connect(const char *server_host, const char *client_host, int port,
grpc_call_destroy(s);
} else {
/* Check for a failed connection. */
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_UNAVAILABLE);

@ -95,6 +95,8 @@ extern void negative_deadline(grpc_end2end_test_config config);
extern void negative_deadline_pre_init(void);
extern void network_status_change(grpc_end2end_test_config config);
extern void network_status_change_pre_init(void);
extern void no_logging(grpc_end2end_test_config config);
extern void no_logging_pre_init(void);
extern void no_op(grpc_end2end_test_config config);
extern void no_op_pre_init(void);
extern void payload(grpc_end2end_test_config config);
@ -155,6 +157,7 @@ void grpc_end2end_tests_pre_init(void) {
max_message_length_pre_init();
negative_deadline_pre_init();
network_status_change_pre_init();
no_logging_pre_init();
no_op_pre_init();
payload_pre_init();
ping_pre_init();
@ -205,6 +208,7 @@ void grpc_end2end_tests(int argc, char **argv,
max_message_length(config);
negative_deadline(config);
network_status_change(config);
no_logging(config);
no_op(config);
payload(config);
ping(config);
@ -328,6 +332,10 @@ void grpc_end2end_tests(int argc, char **argv,
network_status_change(config);
continue;
}
if (0 == strcmp("no_logging", argv[i])) {
no_logging(config);
continue;
}
if (0 == strcmp("no_op", argv[i])) {
no_op(config);
continue;

@ -97,6 +97,8 @@ extern void negative_deadline(grpc_end2end_test_config config);
extern void negative_deadline_pre_init(void);
extern void network_status_change(grpc_end2end_test_config config);
extern void network_status_change_pre_init(void);
extern void no_logging(grpc_end2end_test_config config);
extern void no_logging_pre_init(void);
extern void no_op(grpc_end2end_test_config config);
extern void no_op_pre_init(void);
extern void payload(grpc_end2end_test_config config);
@ -158,6 +160,7 @@ void grpc_end2end_tests_pre_init(void) {
max_message_length_pre_init();
negative_deadline_pre_init();
network_status_change_pre_init();
no_logging_pre_init();
no_op_pre_init();
payload_pre_init();
ping_pre_init();
@ -209,6 +212,7 @@ void grpc_end2end_tests(int argc, char **argv,
max_message_length(config);
negative_deadline(config);
network_status_change(config);
no_logging(config);
no_op(config);
payload(config);
ping(config);
@ -336,6 +340,10 @@ void grpc_end2end_tests(int argc, char **argv,
network_status_change(config);
continue;
}
if (0 == strcmp("no_logging", argv[i])) {
no_logging(config);
continue;
}
if (0 == strcmp("no_op", argv[i])) {
no_op(config);
continue;

@ -331,7 +331,7 @@ static void simple_request_body(grpc_end2end_test_fixture f,
error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(1), expected_result == SUCCESS);
CQ_EXPECT_COMPLETION(cqv, tag(1), expected_result == SUCCESS);
cq_verify(cqv);
grpc_call_destroy(c);

@ -44,6 +44,7 @@
"\x0Dgrpc-encoding"
"\x1Egrpc-internal-encoding-request"
"\x0Cgrpc-message"
"\x10grpc-payload-bin"
"\x0Bgrpc-status"
"\x0Cgrpc-timeout"
"\x10grpc-tracing-bin"

@ -114,6 +114,7 @@ END2END_TESTS = {
'max_message_length': default_test_options,
'negative_deadline': default_test_options,
'network_status_change': default_test_options,
'no_logging': default_test_options._replace(traceable=False),
'no_op': default_test_options,
'payload': default_test_options,
'load_reporting_hook': default_test_options,

@ -175,8 +175,8 @@ int main(int argc, char **argv) {
set_resolve_port(port1);
/* first call should now start */
cq_expect_completion(cqv, tag(0x101), 1);
cq_expect_completion(cqv, tag(0x301), 1);
CQ_EXPECT_COMPLETION(cqv, tag(0x101), 1);
CQ_EXPECT_COMPLETION(cqv, tag(0x301), 1);
cq_verify(cqv);
GPR_ASSERT(GRPC_CHANNEL_READY ==
@ -200,7 +200,7 @@ int main(int argc, char **argv) {
* we should see a connectivity change and then nothing */
set_resolve_port(-1);
grpc_server_shutdown_and_notify(server1, cq, tag(0xdead1));
cq_expect_completion(cqv, tag(0x9999), 1);
CQ_EXPECT_COMPLETION(cqv, tag(0x9999), 1);
cq_verify(cqv);
cq_verify_empty(cqv);
@ -250,8 +250,8 @@ int main(int argc, char **argv) {
&request_metadata2, cq, cq, tag(0x401)));
/* second call should now start */
cq_expect_completion(cqv, tag(0x201), 1);
cq_expect_completion(cqv, tag(0x401), 1);
CQ_EXPECT_COMPLETION(cqv, tag(0x201), 1);
CQ_EXPECT_COMPLETION(cqv, tag(0x401), 1);
cq_verify(cqv);
/* listen for close on the server call to probe for finishing */
@ -273,12 +273,12 @@ int main(int argc, char **argv) {
grpc_call_cancel(call2, NULL);
/* now everything else should finish */
cq_expect_completion(cqv, tag(0x102), 1);
cq_expect_completion(cqv, tag(0x202), 1);
cq_expect_completion(cqv, tag(0x302), 1);
cq_expect_completion(cqv, tag(0x402), 1);
cq_expect_completion(cqv, tag(0xdead1), 1);
cq_expect_completion(cqv, tag(0xdead2), 1);
CQ_EXPECT_COMPLETION(cqv, tag(0x102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(0x202), 1);
CQ_EXPECT_COMPLETION(cqv, tag(0x302), 1);
CQ_EXPECT_COMPLETION(cqv, tag(0x402), 1);
CQ_EXPECT_COMPLETION(cqv, tag(0xdead1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(0xdead2), 1);
cq_verify(cqv);
grpc_call_destroy(call1);

@ -116,8 +116,8 @@ static void prepare_test(int is_client) {
&g_state.call_details,
&g_state.server_initial_metadata_recv,
g_state.cq, g_state.cq, tag(101)));
cq_expect_completion(g_state.cqv, tag(101), 1);
cq_expect_completion(g_state.cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(g_state.cqv, tag(101), 1);
CQ_EXPECT_COMPLETION(g_state.cqv, tag(1), 1);
cq_verify(g_state.cqv);
}
}
@ -191,7 +191,7 @@ static void test_send_initial_metadata_more_than_once() {
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(g_state.call, g_state.ops,
(size_t)(op - g_state.ops),
tag(1), NULL));
cq_expect_completion(g_state.cqv, tag(1), 0);
CQ_EXPECT_COMPLETION(g_state.cqv, tag(1), 0);
cq_verify(g_state.cqv);
op = g_state.ops;
@ -312,7 +312,7 @@ static void test_receive_initial_metadata_twice_at_client() {
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(g_state.call, g_state.ops,
(size_t)(op - g_state.ops),
tag(1), NULL));
cq_expect_completion(g_state.cqv, tag(1), 0);
CQ_EXPECT_COMPLETION(g_state.cqv, tag(1), 0);
cq_verify(g_state.cqv);
op = g_state.ops;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
@ -405,7 +405,7 @@ static void test_recv_status_on_client_twice() {
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(g_state.call, g_state.ops,
(size_t)(op - g_state.ops),
tag(1), NULL));
cq_expect_completion(g_state.cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(g_state.cqv, tag(1), 1);
cq_verify(g_state.cqv);
op = g_state.ops;

@ -86,7 +86,7 @@ int main(int argc, char **argv) {
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(
call, ops, (size_t)(op - ops), tag(1), NULL));
/* verify that all tags get completed */
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_DEADLINE_EXCEEDED);

@ -148,7 +148,7 @@ static void simple_request_body(grpc_end2end_test_fixture f) {
error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_INTERNAL);

@ -197,7 +197,7 @@ static void test_request_response_with_metadata_and_payload(
grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(101), 1);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
@ -216,7 +216,7 @@ static void test_request_response_with_metadata_and_payload(
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
@ -241,8 +241,8 @@ static void test_request_response_with_metadata_and_payload(
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(103), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(103), 1);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(103), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_OK);

@ -232,7 +232,7 @@ static void request_response_with_payload_and_call_creds(
grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(101), 1);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
s_auth_context = grpc_call_auth_context(s);
GPR_ASSERT(s_auth_context != NULL);
@ -262,7 +262,7 @@ static void request_response_with_payload_and_call_creds(
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
@ -287,8 +287,8 @@ static void request_response_with_payload_and_call_creds(
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(103), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(103), 1);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(103), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_OK);
@ -448,7 +448,7 @@ static void test_request_with_server_rejecting_client_creds(
error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL);
GPR_ASSERT(error == GRPC_CALL_OK);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_UNAUTHENTICATED);

@ -170,7 +170,7 @@ static void test_cancel_after_accept(grpc_end2end_test_config config,
error = grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(2));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(2), 1);
CQ_EXPECT_COMPLETION(cqv, tag(2), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
@ -200,8 +200,8 @@ static void test_cancel_after_accept(grpc_end2end_test_config config,
GPR_ASSERT(GRPC_CALL_OK == mode.initiate_cancel(c, NULL));
cq_expect_completion(cqv, tag(3), 1);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(3), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == mode.expect_status || status == GRPC_STATUS_INTERNAL);

@ -174,7 +174,7 @@ static void test_cancel_after_accept_and_writes_closed(
error = grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(2));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(2), 1);
CQ_EXPECT_COMPLETION(cqv, tag(2), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
@ -204,8 +204,8 @@ static void test_cancel_after_accept_and_writes_closed(
GPR_ASSERT(GRPC_CALL_OK == mode.initiate_cancel(c, NULL));
cq_expect_completion(cqv, tag(3), 1);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(3), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == mode.expect_status || status == GRPC_STATUS_INTERNAL);

@ -168,7 +168,7 @@ static void test_cancel_after_invoke(grpc_end2end_test_config config,
GPR_ASSERT(GRPC_CALL_OK == mode.initiate_cancel(c, NULL));
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == mode.expect_status || status == GRPC_STATUS_INTERNAL);

@ -166,7 +166,7 @@ static void test_cancel_before_invoke(grpc_end2end_test_config config,
error = grpc_call_start_batch(c, ops, test_ops, tag(1), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_CANCELLED);

@ -150,7 +150,7 @@ static void simple_request_body(grpc_end2end_test_fixture f, size_t num_ops) {
grpc_call_cancel_with_status(c, GRPC_STATUS_UNIMPLEMENTED, "xyz", NULL);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED);

@ -196,7 +196,7 @@ static void request_for_disabled_algorithm(
grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(101), true);
CQ_EXPECT_COMPLETION(cqv, tag(101), true);
cq_verify(cqv);
op = ops;
@ -213,7 +213,7 @@ static void request_for_disabled_algorithm(
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(102), false);
CQ_EXPECT_COMPLETION(cqv, tag(102), false);
op = ops;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
@ -224,8 +224,8 @@ static void request_for_disabled_algorithm(
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(103), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(103), true);
cq_expect_completion(cqv, tag(1), true);
CQ_EXPECT_COMPLETION(cqv, tag(103), true);
CQ_EXPECT_COMPLETION(cqv, tag(1), true);
cq_verify(cqv);
/* call was cancelled (closed) ... */
@ -359,7 +359,7 @@ static void request_with_payload_template(
grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(100));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(100), true);
CQ_EXPECT_COMPLETION(cqv, tag(100), true);
cq_verify(cqv);
GPR_ASSERT(GPR_BITCOUNT(grpc_call_test_only_get_encodings_accepted_by_peer(
@ -419,7 +419,7 @@ static void request_with_payload_template(
op++;
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
cq_verify(cqv);
GPR_ASSERT(request_payload_recv->type == GRPC_BB_RAW);
@ -436,8 +436,8 @@ static void request_with_payload_template(
op++;
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(103), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(103), 1);
cq_expect_completion(cqv, tag(2), 1);
CQ_EXPECT_COMPLETION(cqv, tag(103), 1);
CQ_EXPECT_COMPLETION(cqv, tag(2), 1);
cq_verify(cqv);
GPR_ASSERT(response_payload_recv->type == GRPC_BB_RAW);
@ -482,10 +482,10 @@ static void request_with_payload_template(
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(104), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(1), 1);
cq_expect_completion(cqv, tag(3), 1);
cq_expect_completion(cqv, tag(101), 1);
cq_expect_completion(cqv, tag(104), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(3), 1);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
CQ_EXPECT_COMPLETION(cqv, tag(104), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_OK);

@ -102,7 +102,7 @@ static void test_connectivity(grpc_end2end_test_config config) {
f.cq, tag(2));
/* and now the watch should trigger */
cq_expect_completion(cqv, tag(2), 1);
CQ_EXPECT_COMPLETION(cqv, tag(2), 1);
cq_verify(cqv);
state = grpc_channel_check_connectivity_state(f.client, 0);
GPR_ASSERT(state == GRPC_CHANNEL_TRANSIENT_FAILURE ||
@ -112,7 +112,7 @@ static void test_connectivity(grpc_end2end_test_config config) {
grpc_channel_watch_connectivity_state(f.client, GRPC_CHANNEL_CONNECTING,
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(3),
f.cq, tag(3));
cq_expect_completion(cqv, tag(3), 1);
CQ_EXPECT_COMPLETION(cqv, tag(3), 1);
cq_verify(cqv);
state = grpc_channel_check_connectivity_state(f.client, 0);
GPR_ASSERT(state == GRPC_CHANNEL_TRANSIENT_FAILURE ||
@ -130,7 +130,7 @@ static void test_connectivity(grpc_end2end_test_config config) {
while (state != GRPC_CHANNEL_READY) {
grpc_channel_watch_connectivity_state(
f.client, state, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(3), f.cq, tag(4));
cq_expect_completion(cqv, tag(4), 1);
CQ_EXPECT_COMPLETION(cqv, tag(4), 1);
cq_verify(cqv);
state = grpc_channel_check_connectivity_state(f.client, 0);
GPR_ASSERT(state == GRPC_CHANNEL_READY ||
@ -148,8 +148,8 @@ static void test_connectivity(grpc_end2end_test_config config) {
grpc_server_shutdown_and_notify(f.server, f.cq, tag(0xdead));
cq_expect_completion(cqv, tag(5), 1);
cq_expect_completion(cqv, tag(0xdead), 1);
CQ_EXPECT_COMPLETION(cqv, tag(5), 1);
CQ_EXPECT_COMPLETION(cqv, tag(0xdead), 1);
cq_verify(cqv);
state = grpc_channel_check_connectivity_state(f.client, 0);
GPR_ASSERT(state == GRPC_CHANNEL_TRANSIENT_FAILURE ||

@ -160,7 +160,7 @@ static void simple_request_body(grpc_end2end_test_fixture f) {
grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(101));
GPR_ASSERT(error == GRPC_CALL_OK);
cq_expect_completion(cqv, tag(101), 1);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
peer = grpc_call_get_peer(s);
@ -194,8 +194,8 @@ static void simple_request_body(grpc_end2end_test_fixture f) {
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(error == GRPC_CALL_OK);
cq_expect_completion(cqv, tag(102), 1);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED);

@ -137,7 +137,7 @@ static void do_request_and_shutdown_server(grpc_end2end_test_fixture *f,
grpc_server_request_call(f->server, &s, &call_details,
&request_metadata_recv, f->cq, f->cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(101), 1);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
/* should be able to shut down the server early
@ -166,9 +166,9 @@ static void do_request_and_shutdown_server(grpc_end2end_test_fixture *f,
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(102), 1);
cq_expect_completion(cqv, tag(1), 1);
cq_expect_completion(cqv, tag(1000), 1);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1000), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED);

@ -110,7 +110,7 @@ static void empty_batch_body(grpc_end2end_test_fixture f) {
error = grpc_call_start_batch(c, op, 0, tag(1), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
grpc_call_destroy(c);

@ -174,7 +174,7 @@ static void test_request(grpc_end2end_test_config config) {
&request_metadata_recv, f.cq, f.cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_PERMISSION_DENIED);

@ -170,7 +170,7 @@ static void test_request(grpc_end2end_test_config config) {
&request_metadata_recv, f.cq, f.cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_PERMISSION_DENIED);

@ -152,7 +152,7 @@ static void test_early_server_shutdown_finishes_inflight_calls(
grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(101), 1);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
/* shutdown and destroy the server */
@ -181,9 +181,9 @@ static void test_early_server_shutdown_finishes_inflight_calls(
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(102), 1);
cq_expect_completion(cqv, tag(0xdead), 1);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(0xdead), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
grpc_call_destroy(s);

@ -157,7 +157,7 @@ static void simple_request_body(grpc_end2end_test_fixture f) {
grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(101), 1);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
@ -182,8 +182,8 @@ static void simple_request_body(grpc_end2end_test_fixture f) {
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(102), 1);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED);

@ -310,7 +310,7 @@ static void simple_request_body(grpc_end2end_test_fixture f, size_t index) {
grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(101), 1);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
@ -335,8 +335,8 @@ static void simple_request_body(grpc_end2end_test_fixture f, size_t index) {
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(102), 1);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED);

@ -161,7 +161,7 @@ static void simple_request_body(grpc_end2end_test_fixture f) {
grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(101), 1);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
peer = grpc_call_get_peer(s);
@ -195,8 +195,8 @@ static void simple_request_body(grpc_end2end_test_fixture f) {
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(102), 1);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED);

@ -177,7 +177,7 @@ static void test_invoke_large_request(grpc_end2end_test_config config) {
grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(101), 1);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
@ -195,7 +195,7 @@ static void test_invoke_large_request(grpc_end2end_test_config config) {
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
@ -220,8 +220,8 @@ static void test_invoke_large_request(grpc_end2end_test_config config) {
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(103), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(103), 1);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(103), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED);

@ -179,7 +179,7 @@ static void test_request_with_large_metadata(grpc_end2end_test_config config) {
&request_metadata_recv, f.cq, f.cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(101), 1);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
@ -198,7 +198,7 @@ static void test_request_with_large_metadata(grpc_end2end_test_config config) {
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
@ -220,8 +220,8 @@ static void test_request_with_large_metadata(grpc_end2end_test_config config) {
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(103), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(103), 1);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(103), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_OK);

@ -204,7 +204,7 @@ static void request_response_with_payload(grpc_end2end_test_fixture f,
grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(101), 1);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
@ -222,7 +222,7 @@ static void request_response_with_payload(grpc_end2end_test_fixture f,
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
@ -249,8 +249,8 @@ static void request_response_with_payload(grpc_end2end_test_fixture f,
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(103), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(103), 1);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(103), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_OK);

@ -153,7 +153,7 @@ static void simple_request_body(grpc_end2end_test_fixture f) {
grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(101), 1);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
@ -178,8 +178,8 @@ static void simple_request_body(grpc_end2end_test_fixture f) {
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(102), 1);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED);
@ -380,17 +380,17 @@ static void test_max_concurrent_streams(grpc_end2end_test_config config) {
error = grpc_call_start_batch(s1, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(102), 1);
cq_expect_completion(cqv, tag(live_call + 2), 1);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(live_call + 2), 1);
/* first request is finished, we should be able to start the second */
live_call = (live_call == 300) ? 400 : 300;
cq_expect_completion(cqv, tag(live_call + 1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(live_call + 1), 1);
cq_verify(cqv);
GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call(
f.server, &s2, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(201)));
cq_expect_completion(cqv, tag(201), 1);
CQ_EXPECT_COMPLETION(cqv, tag(201), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
@ -415,8 +415,8 @@ static void test_max_concurrent_streams(grpc_end2end_test_config config) {
error = grpc_call_start_batch(s2, ops, (size_t)(op - ops), tag(202), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(live_call + 2), 1);
cq_expect_completion(cqv, tag(202), 1);
CQ_EXPECT_COMPLETION(cqv, tag(live_call + 2), 1);
CQ_EXPECT_COMPLETION(cqv, tag(202), 1);
cq_verify(cqv);
cq_verifier_destroy(cqv);

@ -189,7 +189,7 @@ static void test_max_message_length(grpc_end2end_test_config config,
grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(101), 1);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
@ -207,8 +207,8 @@ static void test_max_message_length(grpc_end2end_test_config config,
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(102), 1);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(0 == strcmp(call_details.method, "/foo"));

@ -148,7 +148,7 @@ static void simple_request_body(grpc_end2end_test_fixture f, size_t num_ops) {
error = grpc_call_start_batch(c, ops, num_ops, tag(1), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_DEADLINE_EXCEEDED);

@ -166,7 +166,7 @@ static void test_invoke_network_status_change(grpc_end2end_test_config config) {
GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call(
f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(101)));
cq_expect_completion(cqv, tag(101), 1);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
op = ops;
@ -183,7 +183,7 @@ static void test_invoke_network_status_change(grpc_end2end_test_config config) {
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
cq_verify(cqv);
// Simulate the network loss event
@ -205,8 +205,8 @@ static void test_invoke_network_status_change(grpc_end2end_test_config config) {
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(103), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(103), 1);
cq_expect_completion(cqv, tag(1), 1);
CQ_EXPECT_COMPLETION(cqv, tag(103), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
// Expected behavior of a RPC when network is lost.

@@ -0,0 +1,293 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "test/core/end2end/end2end_tests.h"
#include <stdio.h>
#include <string.h>
#include <grpc/byte_buffer.h>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/support/string.h"
#include "test/core/end2end/cq_verifier.h"
enum { TIMEOUT = 200000 };
static void *tag(intptr_t t) { return (void *)t; }
extern void gpr_default_log(gpr_log_func_args *args);
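/* Log function installed for the strict "no logging" phase: any gpr_log call
 * is surfaced through gpr_default_log as "Unwanted log: ..." and then aborts
 * the test. */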
static void test_no_log(gpr_log_func_args *args) {
char *message = NULL;
gpr_asprintf(&message, "Unwanted log: %s", args->message);
args->message = message;
gpr_default_log(args);
gpr_free(message);
abort();
}
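/* Lenient log function: non-error messages are silently dropped, but any
 * ERROR-severity log is treated as a failure via test_no_log(). */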
static void test_no_error_log(gpr_log_func_args *args) {
if (args->severity == GPR_LOG_SEVERITY_ERROR) {
test_no_log(args);
}
}
static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config,
const char *test_name,
grpc_channel_args *client_args,
grpc_channel_args *server_args) {
grpc_end2end_test_fixture f;
gpr_log(GPR_INFO, "%s/%s", test_name, config.name);
f = config.create_fixture(client_args, server_args);
config.init_server(&f, server_args);
config.init_client(&f, client_args);
return f;
}
static gpr_timespec n_seconds_time(int n) {
return GRPC_TIMEOUT_SECONDS_TO_DEADLINE(n);
}
static gpr_timespec five_seconds_time(void) { return n_seconds_time(5); }
static void drain_cq(grpc_completion_queue *cq) {
grpc_event ev;
do {
ev = grpc_completion_queue_next(cq, five_seconds_time(), NULL);
} while (ev.type != GRPC_QUEUE_SHUTDOWN);
}
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(
f->cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5), NULL)
.type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}
static void shutdown_client(grpc_end2end_test_fixture *f) {
if (!f->client) return;
grpc_channel_destroy(f->client);
f->client = NULL;
}
static void end_test(grpc_end2end_test_fixture *f) {
shutdown_server(f);
shutdown_client(f);
grpc_completion_queue_shutdown(f->cq);
drain_cq(f->cq);
grpc_completion_queue_destroy(f->cq);
}
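/* Performs one complete unary RPC against the fixture: the client sends an
 * empty request and half-closes, the server replies with UNIMPLEMENTED and
 * details "xyz", and both sides' results are verified. This is the workload
 * that the log-checking tests below run repeatedly. */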
static void simple_request_body(grpc_end2end_test_fixture f) {
grpc_call *c;
grpc_call *s;
gpr_timespec deadline = five_seconds_time();
cq_verifier *cqv = cq_verifier_create(f.cq);
grpc_op ops[6];
grpc_op *op;
grpc_metadata_array initial_metadata_recv;
grpc_metadata_array trailing_metadata_recv;
grpc_metadata_array request_metadata_recv;
grpc_call_details call_details;
grpc_status_code status;
grpc_call_error error;
char *details = NULL;
size_t details_capacity = 0;
int was_cancelled = 2;
char *peer;
c = grpc_channel_create_call(f.client, NULL, GRPC_PROPAGATE_DEFAULTS, f.cq,
"/foo", "foo.test.google.fr:1234", deadline,
NULL);
GPR_ASSERT(c);
peer = grpc_call_get_peer(c);
GPR_ASSERT(peer != NULL);
gpr_free(peer);
grpc_metadata_array_init(&initial_metadata_recv);
grpc_metadata_array_init(&trailing_metadata_recv);
grpc_metadata_array_init(&request_metadata_recv);
grpc_call_details_init(&call_details);
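/* Client batch, tag(1): send initial metadata, half-close, then receive the
 * server's initial metadata and the final status. */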
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op->reserved = NULL;
op++;
error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
error =
grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
peer = grpc_call_get_peer(s);
GPR_ASSERT(peer != NULL);
gpr_free(peer);
peer = grpc_call_get_peer(c);
GPR_ASSERT(peer != NULL);
gpr_free(peer);
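/* Server batch, tag(102): send initial metadata, return UNIMPLEMENTED with
 * details "xyz", and receive the client's close. */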
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op->reserved = NULL;
op++;
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED);
GPR_ASSERT(0 == strcmp(details, "xyz"));
GPR_ASSERT(0 == strcmp(call_details.method, "/foo"));
GPR_ASSERT(0 == strcmp(call_details.host, "foo.test.google.fr:1234"));
GPR_ASSERT(0 == call_details.flags);
GPR_ASSERT(was_cancelled == 1);
gpr_free(details);
grpc_metadata_array_destroy(&initial_metadata_recv);
grpc_metadata_array_destroy(&trailing_metadata_recv);
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
grpc_call_destroy(s);
cq_verifier_destroy(cqv);
}
static void test_invoke_simple_request(grpc_end2end_test_config config) {
grpc_end2end_test_fixture f;
f = begin_test(config, "test_invoke_simple_request_with_no_error_logging",
NULL, NULL);
simple_request_body(f);
end_test(&f);
config.tear_down_data(&f);
}
static void test_invoke_10_simple_requests(grpc_end2end_test_config config) {
int i;
grpc_end2end_test_fixture f =
begin_test(config, "test_invoke_10_simple_requests_with_no_error_logging",
NULL, NULL);
for (i = 0; i < 10; i++) {
simple_request_body(f);
gpr_log(GPR_INFO, "Passed simple request %d", i);
}
simple_request_body(f);
end_test(&f);
config.tear_down_data(&f);
}
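/* Installs the lenient hook for the whole process: running many simple
 * requests must not produce a single ERROR-severity log line. */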
static void test_no_error_logging_in_entire_process(
grpc_end2end_test_config config) {
int i;
gpr_set_log_function(test_no_error_log);
for (i = 0; i < 10; i++) {
test_invoke_simple_request(config);
}
test_invoke_10_simple_requests(config);
gpr_set_log_function(gpr_default_log);
}
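/* Warms up with ten requests under the default logger (connection setup may
 * legitimately log), then requires the final request to emit no log at all. */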
static void test_no_logging_in_one_request(grpc_end2end_test_config config) {
int i;
grpc_end2end_test_fixture f =
begin_test(config, "test_no_logging_in_last_request", NULL, NULL);
for (i = 0; i < 10; i++) {
simple_request_body(f);
}
gpr_set_log_function(test_no_log);
simple_request_body(f);
gpr_set_log_function(gpr_default_log);
end_test(&f);
config.tear_down_data(&f);
}
void no_logging(grpc_end2end_test_config config) {
test_no_logging_in_one_request(config);
test_no_error_logging_in_entire_process(config);
}
void no_logging_pre_init(void) {}
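no_logging() and no_logging_pre_init() follow the end2end suite's convention of exposing one entry point plus a pre-init hook per test; the generated test runners are expected to dispatch on the test name and invoke these two functions, though the exact runner wiring is not part of this diff.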

@@ -170,7 +170,7 @@ static void request_response_with_payload(grpc_end2end_test_fixture f) {
grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
-cq_expect_completion(cqv, tag(101), 1);
+CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
@@ -188,7 +188,7 @@ static void request_response_with_payload(grpc_end2end_test_fixture f) {
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
-cq_expect_completion(cqv, tag(102), 1);
+CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
@@ -213,8 +213,8 @@ static void request_response_with_payload(grpc_end2end_test_fixture f) {
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(103), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
-cq_expect_completion(cqv, tag(103), 1);
-cq_expect_completion(cqv, tag(1), 1);
+CQ_EXPECT_COMPLETION(cqv, tag(103), 1);
+CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_OK);

@@ -52,7 +52,7 @@ static void test_ping(grpc_end2end_test_config config) {
config.init_server(&f, NULL);
grpc_channel_ping(f.client, f.cq, tag(0), NULL);
-cq_expect_completion(cqv, tag(0), 0);
+CQ_EXPECT_COMPLETION(cqv, tag(0), 0);
/* check that we're still in idle, and start connecting */
GPR_ASSERT(grpc_channel_check_connectivity_state(f.client, 1) ==
@@ -62,7 +62,7 @@ static void test_ping(grpc_end2end_test_config config) {
while (state != GRPC_CHANNEL_READY) {
grpc_channel_watch_connectivity_state(
f.client, state, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(3), f.cq, tag(99));
-cq_expect_completion(cqv, tag(99), 1);
+CQ_EXPECT_COMPLETION(cqv, tag(99), 1);
cq_verify(cqv);
state = grpc_channel_check_connectivity_state(f.client, 0);
GPR_ASSERT(state == GRPC_CHANNEL_READY ||
@@ -72,12 +72,12 @@ static void test_ping(grpc_end2end_test_config config) {
for (i = 1; i <= 5; i++) {
grpc_channel_ping(f.client, f.cq, tag(i), NULL);
-cq_expect_completion(cqv, tag(i), 1);
+CQ_EXPECT_COMPLETION(cqv, tag(i), 1);
cq_verify(cqv);
}
grpc_server_shutdown_and_notify(f.server, f.cq, tag(0xdead));
-cq_expect_completion(cqv, tag(0xdead), 1);
+CQ_EXPECT_COMPLETION(cqv, tag(0xdead), 1);
cq_verify(cqv);
/* cleanup server */

@@ -160,7 +160,7 @@ static void test_pingpong_streaming(grpc_end2end_test_config config,
grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(100));
GPR_ASSERT(GRPC_CALL_OK == error);
-cq_expect_completion(cqv, tag(100), 1);
+CQ_EXPECT_COMPLETION(cqv, tag(100), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
@@ -206,7 +206,7 @@ static void test_pingpong_streaming(grpc_end2end_test_config config,
op++;
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
-cq_expect_completion(cqv, tag(102), 1);
+CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
@@ -218,8 +218,8 @@ static void test_pingpong_streaming(grpc_end2end_test_config config,
op++;
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(103), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
-cq_expect_completion(cqv, tag(103), 1);
-cq_expect_completion(cqv, tag(2), 1);
+CQ_EXPECT_COMPLETION(cqv, tag(103), 1);
+CQ_EXPECT_COMPLETION(cqv, tag(2), 1);
cq_verify(cqv);
grpc_byte_buffer_destroy(request_payload);
@@ -252,10 +252,10 @@ static void test_pingpong_streaming(grpc_end2end_test_config config,
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(104), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
-cq_expect_completion(cqv, tag(1), 1);
-cq_expect_completion(cqv, tag(3), 1);
-cq_expect_completion(cqv, tag(101), 1);
-cq_expect_completion(cqv, tag(104), 1);
+CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
+CQ_EXPECT_COMPLETION(cqv, tag(3), 1);
+CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
+CQ_EXPECT_COMPLETION(cqv, tag(104), 1);
cq_verify(cqv);
grpc_call_destroy(c);

@@ -154,7 +154,7 @@ static void simple_request_body(grpc_end2end_test_fixture f, void *rc) {
grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
-cq_expect_completion(cqv, tag(101), 1);
+CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
@@ -179,8 +179,8 @@ static void simple_request_body(grpc_end2end_test_fixture f, void *rc) {
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
-cq_expect_completion(cqv, tag(102), 1);
-cq_expect_completion(cqv, tag(1), 1);
+CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
+CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED);
