Merge branch 'stats' into stats_histo

pull/12357/head
Craig Tiller 7 years ago
commit 890f542498
  1. 2
      BUILD
  2. 6
      CMakeLists.txt
  3. 6
      Makefile
  4. 16
      bazel/grpc_build_system.bzl
  5. 1
      binding.gyp
  6. 2
      build.yaml
  7. 1
      config.m4
  8. 1
      config.w32
  9. 4
      doc/compression.md
  10. 1
      doc/environment_variables.md
  11. 3
      gRPC-Core.podspec
  12. 2
      grpc.gemspec
  13. 4
      grpc.gyp
  14. 13
      include/grpc++/impl/codegen/call.h
  15. 1
      include/grpc++/impl/codegen/core_codegen.h
  16. 1
      include/grpc++/impl/codegen/core_codegen_interface.h
  17. 1
      include/grpc/impl/codegen/atm.h
  18. 5
      include/grpc/impl/codegen/grpc_types.h
  19. 2
      package.xml
  20. 2
      src/core/ext/census/grpc_filter.c
  21. 506
      src/core/ext/filters/client_channel/client_channel.c
  22. 1
      src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c
  23. 16
      src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c
  24. 16
      src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c
  25. 13
      src/core/ext/filters/client_channel/subchannel.c
  26. 5
      src/core/ext/filters/client_channel/subchannel.h
  27. 98
      src/core/ext/filters/deadline/deadline_filter.c
  28. 8
      src/core/ext/filters/deadline/deadline_filter.h
  29. 9
      src/core/ext/filters/http/client/http_client_filter.c
  30. 272
      src/core/ext/filters/http/message_compress/message_compress_filter.c
  31. 52
      src/core/ext/filters/http/server/http_server_filter.c
  32. 1
      src/core/ext/filters/load_reporting/load_reporting_filter.c
  33. 1
      src/core/ext/filters/max_age/max_age_filter.c
  34. 6
      src/core/ext/filters/message_size/message_size_filter.c
  35. 1
      src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c
  36. 29
      src/core/ext/transport/chttp2/transport/chttp2_transport.c
  37. 2
      src/core/ext/transport/chttp2/transport/internal.h
  38. 2
      src/core/ext/transport/chttp2/transport/parsing.c
  39. 5
      src/core/ext/transport/cronet/transport/cronet_transport.c
  40. 12
      src/core/ext/transport/inproc/inproc_transport.c
  41. 16
      src/core/lib/channel/channel_stack.c
  42. 11
      src/core/lib/channel/channel_stack.h
  43. 92
      src/core/lib/channel/connected_channel.c
  44. 180
      src/core/lib/iomgr/call_combiner.c
  45. 104
      src/core/lib/iomgr/call_combiner.h
  46. 147
      src/core/lib/security/transport/client_auth_filter.c
  47. 83
      src/core/lib/security/transport/server_auth_filter.c
  48. 148
      src/core/lib/surface/call.c
  49. 2
      src/core/lib/surface/init.c
  50. 19
      src/core/lib/surface/lame_client.cc
  51. 2
      src/core/lib/surface/server.c
  52. 1
      src/core/lib/transport/byte_stream.c
  53. 4
      src/core/lib/transport/byte_stream.h
  54. 19
      src/core/lib/transport/transport.c
  55. 13
      src/core/lib/transport/transport.h
  56. 3
      src/core/lib/transport/transport_impl.h
  57. 7
      src/core/lib/transport/transport_op_string.c
  58. 6
      src/cpp/common/channel_filter.cc
  59. 22
      src/cpp/common/channel_filter.h
  60. 4
      src/cpp/common/core_codegen.cc
  61. 10
      src/proto/grpc/health/v1/BUILD
  62. 10
      src/proto/grpc/lb/v1/BUILD
  63. 10
      src/proto/grpc/reflection/v1alpha/BUILD
  64. 10
      src/proto/grpc/status/BUILD
  65. 10
      src/proto/grpc/testing/BUILD
  66. 10
      src/proto/grpc/testing/duplicate/BUILD
  67. 1
      src/python/grpcio/grpc_core_dependencies.py
  68. 9
      test/core/bad_client/BUILD
  69. 9
      test/core/bad_ssl/BUILD
  70. 11
      test/core/census/BUILD
  71. 11
      test/core/channel/BUILD
  72. 5
      test/core/channel/channel_stack_test.c
  73. 14
      test/core/channel/minimal_stack_is_minimal_test.c
  74. 11
      test/core/client_channel/BUILD
  75. 11
      test/core/client_channel/resolvers/BUILD
  76. 11
      test/core/compression/BUILD
  77. 9
      test/core/end2end/BUILD
  78. 11
      test/core/end2end/fuzzers/BUILD
  79. 11
      test/core/end2end/tests/cancel_after_round_trip.c
  80. 1
      test/core/end2end/tests/filter_call_init_fails.c
  81. 3
      test/core/end2end/tests/filter_causes_close.c
  82. 2
      test/core/end2end/tests/filter_latency.c
  83. 11
      test/core/end2end/tests/resource_quota_server.c
  84. 11
      test/core/fling/BUILD
  85. 11
      test/core/handshake/BUILD
  86. 11
      test/core/http/BUILD
  87. 10
      test/core/iomgr/BUILD
  88. 11
      test/core/json/BUILD
  89. 11
      test/core/nanopb/BUILD
  90. 13
      test/core/network_benchmarks/BUILD
  91. 9
      test/core/security/BUILD
  92. 11
      test/core/slice/BUILD
  93. 9
      test/core/support/BUILD
  94. 9
      test/core/surface/BUILD
  95. 9
      test/core/transport/BUILD
  96. 9
      test/core/transport/chttp2/BUILD
  97. 9
      test/core/tsi/BUILD
  98. 10
      test/core/util/BUILD
  99. 9
      test/cpp/codegen/BUILD
  100. 9
      test/cpp/common/BUILD
  101. Some files were not shown because too many files have changed in this diff Show More

@ -577,7 +577,6 @@ grpc_cc_library(
"src/core/lib/http/format_request.c", "src/core/lib/http/format_request.c",
"src/core/lib/http/httpcli.c", "src/core/lib/http/httpcli.c",
"src/core/lib/http/parser.c", "src/core/lib/http/parser.c",
"src/core/lib/iomgr/call_combiner.c",
"src/core/lib/iomgr/closure.c", "src/core/lib/iomgr/closure.c",
"src/core/lib/iomgr/combiner.c", "src/core/lib/iomgr/combiner.c",
"src/core/lib/iomgr/endpoint.c", "src/core/lib/iomgr/endpoint.c",
@ -710,7 +709,6 @@ grpc_cc_library(
"src/core/lib/http/format_request.h", "src/core/lib/http/format_request.h",
"src/core/lib/http/httpcli.h", "src/core/lib/http/httpcli.h",
"src/core/lib/http/parser.h", "src/core/lib/http/parser.h",
"src/core/lib/iomgr/call_combiner.h",
"src/core/lib/iomgr/closure.h", "src/core/lib/iomgr/closure.h",
"src/core/lib/iomgr/combiner.h", "src/core/lib/iomgr/combiner.h",
"src/core/lib/iomgr/endpoint.h", "src/core/lib/iomgr/endpoint.h",

@ -967,7 +967,6 @@ add_library(grpc
src/core/lib/http/format_request.c src/core/lib/http/format_request.c
src/core/lib/http/httpcli.c src/core/lib/http/httpcli.c
src/core/lib/http/parser.c src/core/lib/http/parser.c
src/core/lib/iomgr/call_combiner.c
src/core/lib/iomgr/closure.c src/core/lib/iomgr/closure.c
src/core/lib/iomgr/combiner.c src/core/lib/iomgr/combiner.c
src/core/lib/iomgr/endpoint.c src/core/lib/iomgr/endpoint.c
@ -1319,7 +1318,6 @@ add_library(grpc_cronet
src/core/lib/http/format_request.c src/core/lib/http/format_request.c
src/core/lib/http/httpcli.c src/core/lib/http/httpcli.c
src/core/lib/http/parser.c src/core/lib/http/parser.c
src/core/lib/iomgr/call_combiner.c
src/core/lib/iomgr/closure.c src/core/lib/iomgr/closure.c
src/core/lib/iomgr/combiner.c src/core/lib/iomgr/combiner.c
src/core/lib/iomgr/endpoint.c src/core/lib/iomgr/endpoint.c
@ -1639,7 +1637,6 @@ add_library(grpc_test_util
src/core/lib/http/format_request.c src/core/lib/http/format_request.c
src/core/lib/http/httpcli.c src/core/lib/http/httpcli.c
src/core/lib/http/parser.c src/core/lib/http/parser.c
src/core/lib/iomgr/call_combiner.c
src/core/lib/iomgr/closure.c src/core/lib/iomgr/closure.c
src/core/lib/iomgr/combiner.c src/core/lib/iomgr/combiner.c
src/core/lib/iomgr/endpoint.c src/core/lib/iomgr/endpoint.c
@ -1903,7 +1900,6 @@ add_library(grpc_test_util_unsecure
src/core/lib/http/format_request.c src/core/lib/http/format_request.c
src/core/lib/http/httpcli.c src/core/lib/http/httpcli.c
src/core/lib/http/parser.c src/core/lib/http/parser.c
src/core/lib/iomgr/call_combiner.c
src/core/lib/iomgr/closure.c src/core/lib/iomgr/closure.c
src/core/lib/iomgr/combiner.c src/core/lib/iomgr/combiner.c
src/core/lib/iomgr/endpoint.c src/core/lib/iomgr/endpoint.c
@ -2153,7 +2149,6 @@ add_library(grpc_unsecure
src/core/lib/http/format_request.c src/core/lib/http/format_request.c
src/core/lib/http/httpcli.c src/core/lib/http/httpcli.c
src/core/lib/http/parser.c src/core/lib/http/parser.c
src/core/lib/iomgr/call_combiner.c
src/core/lib/iomgr/closure.c src/core/lib/iomgr/closure.c
src/core/lib/iomgr/combiner.c src/core/lib/iomgr/combiner.c
src/core/lib/iomgr/endpoint.c src/core/lib/iomgr/endpoint.c
@ -2855,7 +2850,6 @@ add_library(grpc++_cronet
src/core/lib/http/format_request.c src/core/lib/http/format_request.c
src/core/lib/http/httpcli.c src/core/lib/http/httpcli.c
src/core/lib/http/parser.c src/core/lib/http/parser.c
src/core/lib/iomgr/call_combiner.c
src/core/lib/iomgr/closure.c src/core/lib/iomgr/closure.c
src/core/lib/iomgr/combiner.c src/core/lib/iomgr/combiner.c
src/core/lib/iomgr/endpoint.c src/core/lib/iomgr/endpoint.c

@ -2912,7 +2912,6 @@ LIBGRPC_SRC = \
src/core/lib/http/format_request.c \ src/core/lib/http/format_request.c \
src/core/lib/http/httpcli.c \ src/core/lib/http/httpcli.c \
src/core/lib/http/parser.c \ src/core/lib/http/parser.c \
src/core/lib/iomgr/call_combiner.c \
src/core/lib/iomgr/closure.c \ src/core/lib/iomgr/closure.c \
src/core/lib/iomgr/combiner.c \ src/core/lib/iomgr/combiner.c \
src/core/lib/iomgr/endpoint.c \ src/core/lib/iomgr/endpoint.c \
@ -3262,7 +3261,6 @@ LIBGRPC_CRONET_SRC = \
src/core/lib/http/format_request.c \ src/core/lib/http/format_request.c \
src/core/lib/http/httpcli.c \ src/core/lib/http/httpcli.c \
src/core/lib/http/parser.c \ src/core/lib/http/parser.c \
src/core/lib/iomgr/call_combiner.c \
src/core/lib/iomgr/closure.c \ src/core/lib/iomgr/closure.c \
src/core/lib/iomgr/combiner.c \ src/core/lib/iomgr/combiner.c \
src/core/lib/iomgr/endpoint.c \ src/core/lib/iomgr/endpoint.c \
@ -3579,7 +3577,6 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/lib/http/format_request.c \ src/core/lib/http/format_request.c \
src/core/lib/http/httpcli.c \ src/core/lib/http/httpcli.c \
src/core/lib/http/parser.c \ src/core/lib/http/parser.c \
src/core/lib/iomgr/call_combiner.c \
src/core/lib/iomgr/closure.c \ src/core/lib/iomgr/closure.c \
src/core/lib/iomgr/combiner.c \ src/core/lib/iomgr/combiner.c \
src/core/lib/iomgr/endpoint.c \ src/core/lib/iomgr/endpoint.c \
@ -3832,7 +3829,6 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \
src/core/lib/http/format_request.c \ src/core/lib/http/format_request.c \
src/core/lib/http/httpcli.c \ src/core/lib/http/httpcli.c \
src/core/lib/http/parser.c \ src/core/lib/http/parser.c \
src/core/lib/iomgr/call_combiner.c \
src/core/lib/iomgr/closure.c \ src/core/lib/iomgr/closure.c \
src/core/lib/iomgr/combiner.c \ src/core/lib/iomgr/combiner.c \
src/core/lib/iomgr/endpoint.c \ src/core/lib/iomgr/endpoint.c \
@ -4058,7 +4054,6 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/http/format_request.c \ src/core/lib/http/format_request.c \
src/core/lib/http/httpcli.c \ src/core/lib/http/httpcli.c \
src/core/lib/http/parser.c \ src/core/lib/http/parser.c \
src/core/lib/iomgr/call_combiner.c \
src/core/lib/iomgr/closure.c \ src/core/lib/iomgr/closure.c \
src/core/lib/iomgr/combiner.c \ src/core/lib/iomgr/combiner.c \
src/core/lib/iomgr/endpoint.c \ src/core/lib/iomgr/endpoint.c \
@ -4743,7 +4738,6 @@ LIBGRPC++_CRONET_SRC = \
src/core/lib/http/format_request.c \ src/core/lib/http/format_request.c \
src/core/lib/http/httpcli.c \ src/core/lib/http/httpcli.c \
src/core/lib/http/parser.c \ src/core/lib/http/parser.c \
src/core/lib/iomgr/call_combiner.c \
src/core/lib/iomgr/closure.c \ src/core/lib/iomgr/closure.c \
src/core/lib/iomgr/combiner.c \ src/core/lib/iomgr/combiner.c \
src/core/lib/iomgr/endpoint.c \ src/core/lib/iomgr/endpoint.c \

@ -105,3 +105,19 @@ def grpc_sh_test(name, srcs, args = [], data = []):
srcs = srcs, srcs = srcs,
args = args, args = args,
data = data) data = data)
def grpc_package(name, visibility = "private", features = []):
if visibility == "tests":
visibility = ["//test:__subpackages__"]
elif visibility == "public":
visibility = ["//visibility:public"]
elif visibility == "private":
visibility = []
else:
fail("Unknown visibility " + visibility)
if len(visibility) != 0:
native.package(
default_visibility = visibility,
features = features
)

@ -672,7 +672,6 @@
'src/core/lib/http/format_request.c', 'src/core/lib/http/format_request.c',
'src/core/lib/http/httpcli.c', 'src/core/lib/http/httpcli.c',
'src/core/lib/http/parser.c', 'src/core/lib/http/parser.c',
'src/core/lib/iomgr/call_combiner.c',
'src/core/lib/iomgr/closure.c', 'src/core/lib/iomgr/closure.c',
'src/core/lib/iomgr/combiner.c', 'src/core/lib/iomgr/combiner.c',
'src/core/lib/iomgr/endpoint.c', 'src/core/lib/iomgr/endpoint.c',

@ -200,7 +200,6 @@ filegroups:
- src/core/lib/http/format_request.c - src/core/lib/http/format_request.c
- src/core/lib/http/httpcli.c - src/core/lib/http/httpcli.c
- src/core/lib/http/parser.c - src/core/lib/http/parser.c
- src/core/lib/iomgr/call_combiner.c
- src/core/lib/iomgr/closure.c - src/core/lib/iomgr/closure.c
- src/core/lib/iomgr/combiner.c - src/core/lib/iomgr/combiner.c
- src/core/lib/iomgr/endpoint.c - src/core/lib/iomgr/endpoint.c
@ -353,7 +352,6 @@ filegroups:
- src/core/lib/http/format_request.h - src/core/lib/http/format_request.h
- src/core/lib/http/httpcli.h - src/core/lib/http/httpcli.h
- src/core/lib/http/parser.h - src/core/lib/http/parser.h
- src/core/lib/iomgr/call_combiner.h
- src/core/lib/iomgr/closure.h - src/core/lib/iomgr/closure.h
- src/core/lib/iomgr/combiner.h - src/core/lib/iomgr/combiner.h
- src/core/lib/iomgr/endpoint.h - src/core/lib/iomgr/endpoint.h

@ -101,7 +101,6 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/http/format_request.c \ src/core/lib/http/format_request.c \
src/core/lib/http/httpcli.c \ src/core/lib/http/httpcli.c \
src/core/lib/http/parser.c \ src/core/lib/http/parser.c \
src/core/lib/iomgr/call_combiner.c \
src/core/lib/iomgr/closure.c \ src/core/lib/iomgr/closure.c \
src/core/lib/iomgr/combiner.c \ src/core/lib/iomgr/combiner.c \
src/core/lib/iomgr/endpoint.c \ src/core/lib/iomgr/endpoint.c \

@ -78,7 +78,6 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\http\\format_request.c " + "src\\core\\lib\\http\\format_request.c " +
"src\\core\\lib\\http\\httpcli.c " + "src\\core\\lib\\http\\httpcli.c " +
"src\\core\\lib\\http\\parser.c " + "src\\core\\lib\\http\\parser.c " +
"src\\core\\lib\\iomgr\\call_combiner.c " +
"src\\core\\lib\\iomgr\\closure.c " + "src\\core\\lib\\iomgr\\closure.c " +
"src\\core\\lib\\iomgr\\combiner.c " + "src\\core\\lib\\iomgr\\combiner.c " +
"src\\core\\lib\\iomgr\\endpoint.c " + "src\\core\\lib\\iomgr\\endpoint.c " +

@ -52,8 +52,8 @@ by the client WILL result in an `INTERNAL` error status on the client side.
Note that a peer MAY choose to not disclose all the encodings it supports. Note that a peer MAY choose to not disclose all the encodings it supports.
However, if it receives a message compressed in an undisclosed but supported However, if it receives a message compressed in an undisclosed but supported
encoding, it MUST include said encoding in the response's `grpc-accept-encoding encoding, it MUST include said encoding in the response's `grpc-accept-encoding`
h`eader. header.
For every message a server is requested to compress using an algorithm it knows For every message a server is requested to compress using an algorithm it knows
the client doesn't support (as indicated by the last `grpc-accept-encoding` the client doesn't support (as indicated by the last `grpc-accept-encoding`

@ -39,7 +39,6 @@ some configuration as environment variables that can be set.
gRPC C core is processing requests via debug logs. Available tracers include: gRPC C core is processing requests via debug logs. Available tracers include:
- api - traces api calls to the C core - api - traces api calls to the C core
- bdp_estimator - traces behavior of bdp estimation logic - bdp_estimator - traces behavior of bdp estimation logic
- call_combiner - traces call combiner state
- call_error - traces the possible errors contributing to final call status - call_error - traces the possible errors contributing to final call status
- channel - traces operations on the C core channel stack - channel - traces operations on the C core channel stack
- client_channel - traces client channel activity, including resolver - client_channel - traces client channel activity, including resolver

@ -334,7 +334,6 @@ Pod::Spec.new do |s|
'src/core/lib/http/format_request.h', 'src/core/lib/http/format_request.h',
'src/core/lib/http/httpcli.h', 'src/core/lib/http/httpcli.h',
'src/core/lib/http/parser.h', 'src/core/lib/http/parser.h',
'src/core/lib/iomgr/call_combiner.h',
'src/core/lib/iomgr/closure.h', 'src/core/lib/iomgr/closure.h',
'src/core/lib/iomgr/combiner.h', 'src/core/lib/iomgr/combiner.h',
'src/core/lib/iomgr/endpoint.h', 'src/core/lib/iomgr/endpoint.h',
@ -485,7 +484,6 @@ Pod::Spec.new do |s|
'src/core/lib/http/format_request.c', 'src/core/lib/http/format_request.c',
'src/core/lib/http/httpcli.c', 'src/core/lib/http/httpcli.c',
'src/core/lib/http/parser.c', 'src/core/lib/http/parser.c',
'src/core/lib/iomgr/call_combiner.c',
'src/core/lib/iomgr/closure.c', 'src/core/lib/iomgr/closure.c',
'src/core/lib/iomgr/combiner.c', 'src/core/lib/iomgr/combiner.c',
'src/core/lib/iomgr/endpoint.c', 'src/core/lib/iomgr/endpoint.c',
@ -833,7 +831,6 @@ Pod::Spec.new do |s|
'src/core/lib/http/format_request.h', 'src/core/lib/http/format_request.h',
'src/core/lib/http/httpcli.h', 'src/core/lib/http/httpcli.h',
'src/core/lib/http/parser.h', 'src/core/lib/http/parser.h',
'src/core/lib/iomgr/call_combiner.h',
'src/core/lib/iomgr/closure.h', 'src/core/lib/iomgr/closure.h',
'src/core/lib/iomgr/combiner.h', 'src/core/lib/iomgr/combiner.h',
'src/core/lib/iomgr/endpoint.h', 'src/core/lib/iomgr/endpoint.h',

@ -266,7 +266,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/http/format_request.h ) s.files += %w( src/core/lib/http/format_request.h )
s.files += %w( src/core/lib/http/httpcli.h ) s.files += %w( src/core/lib/http/httpcli.h )
s.files += %w( src/core/lib/http/parser.h ) s.files += %w( src/core/lib/http/parser.h )
s.files += %w( src/core/lib/iomgr/call_combiner.h )
s.files += %w( src/core/lib/iomgr/closure.h ) s.files += %w( src/core/lib/iomgr/closure.h )
s.files += %w( src/core/lib/iomgr/combiner.h ) s.files += %w( src/core/lib/iomgr/combiner.h )
s.files += %w( src/core/lib/iomgr/endpoint.h ) s.files += %w( src/core/lib/iomgr/endpoint.h )
@ -421,7 +420,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/http/format_request.c ) s.files += %w( src/core/lib/http/format_request.c )
s.files += %w( src/core/lib/http/httpcli.c ) s.files += %w( src/core/lib/http/httpcli.c )
s.files += %w( src/core/lib/http/parser.c ) s.files += %w( src/core/lib/http/parser.c )
s.files += %w( src/core/lib/iomgr/call_combiner.c )
s.files += %w( src/core/lib/iomgr/closure.c ) s.files += %w( src/core/lib/iomgr/closure.c )
s.files += %w( src/core/lib/iomgr/combiner.c ) s.files += %w( src/core/lib/iomgr/combiner.c )
s.files += %w( src/core/lib/iomgr/endpoint.c ) s.files += %w( src/core/lib/iomgr/endpoint.c )

@ -238,7 +238,6 @@
'src/core/lib/http/format_request.c', 'src/core/lib/http/format_request.c',
'src/core/lib/http/httpcli.c', 'src/core/lib/http/httpcli.c',
'src/core/lib/http/parser.c', 'src/core/lib/http/parser.c',
'src/core/lib/iomgr/call_combiner.c',
'src/core/lib/iomgr/closure.c', 'src/core/lib/iomgr/closure.c',
'src/core/lib/iomgr/combiner.c', 'src/core/lib/iomgr/combiner.c',
'src/core/lib/iomgr/endpoint.c', 'src/core/lib/iomgr/endpoint.c',
@ -539,7 +538,6 @@
'src/core/lib/http/format_request.c', 'src/core/lib/http/format_request.c',
'src/core/lib/http/httpcli.c', 'src/core/lib/http/httpcli.c',
'src/core/lib/http/parser.c', 'src/core/lib/http/parser.c',
'src/core/lib/iomgr/call_combiner.c',
'src/core/lib/iomgr/closure.c', 'src/core/lib/iomgr/closure.c',
'src/core/lib/iomgr/combiner.c', 'src/core/lib/iomgr/combiner.c',
'src/core/lib/iomgr/endpoint.c', 'src/core/lib/iomgr/endpoint.c',
@ -745,7 +743,6 @@
'src/core/lib/http/format_request.c', 'src/core/lib/http/format_request.c',
'src/core/lib/http/httpcli.c', 'src/core/lib/http/httpcli.c',
'src/core/lib/http/parser.c', 'src/core/lib/http/parser.c',
'src/core/lib/iomgr/call_combiner.c',
'src/core/lib/iomgr/closure.c', 'src/core/lib/iomgr/closure.c',
'src/core/lib/iomgr/combiner.c', 'src/core/lib/iomgr/combiner.c',
'src/core/lib/iomgr/endpoint.c', 'src/core/lib/iomgr/endpoint.c',
@ -936,7 +933,6 @@
'src/core/lib/http/format_request.c', 'src/core/lib/http/format_request.c',
'src/core/lib/http/httpcli.c', 'src/core/lib/http/httpcli.c',
'src/core/lib/http/parser.c', 'src/core/lib/http/parser.c',
'src/core/lib/iomgr/call_combiner.c',
'src/core/lib/iomgr/closure.c', 'src/core/lib/iomgr/closure.c',
'src/core/lib/iomgr/combiner.c', 'src/core/lib/iomgr/combiner.c',
'src/core/lib/iomgr/endpoint.c', 'src/core/lib/iomgr/endpoint.c',

@ -272,7 +272,7 @@ class CallOpSendInitialMetadata {
class CallOpSendMessage { class CallOpSendMessage {
public: public:
CallOpSendMessage() : send_buf_(nullptr), own_buf_(false) {} CallOpSendMessage() : send_buf_(nullptr) {}
/// Send \a message using \a options for the write. The \a options are cleared /// Send \a message using \a options for the write. The \a options are cleared
/// after use. /// after use.
@ -295,20 +295,25 @@ class CallOpSendMessage {
write_options_.Clear(); write_options_.Clear();
} }
void FinishOp(bool* status) { void FinishOp(bool* status) {
if (own_buf_) g_core_codegen_interface->grpc_byte_buffer_destroy(send_buf_); g_core_codegen_interface->grpc_byte_buffer_destroy(send_buf_);
send_buf_ = nullptr; send_buf_ = nullptr;
} }
private: private:
grpc_byte_buffer* send_buf_; grpc_byte_buffer* send_buf_;
WriteOptions write_options_; WriteOptions write_options_;
bool own_buf_;
}; };
template <class M> template <class M>
Status CallOpSendMessage::SendMessage(const M& message, WriteOptions options) { Status CallOpSendMessage::SendMessage(const M& message, WriteOptions options) {
write_options_ = options; write_options_ = options;
return SerializationTraits<M>::Serialize(message, &send_buf_, &own_buf_); bool own_buf;
Status result =
SerializationTraits<M>::Serialize(message, &send_buf_, &own_buf);
if (!own_buf) {
send_buf_ = g_core_codegen_interface->grpc_byte_buffer_copy(send_buf_);
}
return result;
} }
template <class M> template <class M>

@ -68,6 +68,7 @@ class CoreCodegen final : public CoreCodegenInterface {
void grpc_call_unref(grpc_call* call) override; void grpc_call_unref(grpc_call* call) override;
virtual void* grpc_call_arena_alloc(grpc_call* call, size_t length) override; virtual void* grpc_call_arena_alloc(grpc_call* call, size_t length) override;
grpc_byte_buffer* grpc_byte_buffer_copy(grpc_byte_buffer* bb) override;
void grpc_byte_buffer_destroy(grpc_byte_buffer* bb) override; void grpc_byte_buffer_destroy(grpc_byte_buffer* bb) override;
int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader, int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader,

@ -74,6 +74,7 @@ class CoreCodegenInterface {
virtual void gpr_cv_signal(gpr_cv* cv) = 0; virtual void gpr_cv_signal(gpr_cv* cv) = 0;
virtual void gpr_cv_broadcast(gpr_cv* cv) = 0; virtual void gpr_cv_broadcast(gpr_cv* cv) = 0;
virtual grpc_byte_buffer* grpc_byte_buffer_copy(grpc_byte_buffer* bb) = 0;
virtual void grpc_byte_buffer_destroy(grpc_byte_buffer* bb) = 0; virtual void grpc_byte_buffer_destroy(grpc_byte_buffer* bb) = 0;
virtual int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader, virtual int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader,

@ -46,7 +46,6 @@
// Atomically return *p, with acquire semantics. // Atomically return *p, with acquire semantics.
gpr_atm gpr_atm_acq_load(gpr_atm *p); gpr_atm gpr_atm_acq_load(gpr_atm *p);
gpr_atm gpr_atm_no_barrier_load(gpr_atm *p);
// Atomically set *p = value, with release semantics. // Atomically set *p = value, with release semantics.
void gpr_atm_rel_store(gpr_atm *p, gpr_atm value); void gpr_atm_rel_store(gpr_atm *p, gpr_atm value);

@ -511,6 +511,11 @@ typedef struct grpc_op {
} maybe_stream_compression_level; } maybe_stream_compression_level;
} send_initial_metadata; } send_initial_metadata;
struct grpc_op_send_message { struct grpc_op_send_message {
/** This op takes ownership of the slices in send_message. After
* a call completes, the contents of send_message are not guaranteed
* and likely empty. The original owner should still call
* grpc_byte_buffer_destroy() on this object however.
*/
struct grpc_byte_buffer *send_message; struct grpc_byte_buffer *send_message;
} send_message; } send_message;
struct grpc_op_send_status_from_server { struct grpc_op_send_status_from_server {

@ -276,7 +276,6 @@
<file baseinstalldir="/" name="src/core/lib/http/format_request.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/http/format_request.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/http/httpcli.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/http/httpcli.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/http/parser.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/http/parser.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/call_combiner.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/closure.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/iomgr/closure.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/combiner.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/iomgr/combiner.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/endpoint.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/iomgr/endpoint.h" role="src" />
@ -431,7 +430,6 @@
<file baseinstalldir="/" name="src/core/lib/http/format_request.c" role="src" /> <file baseinstalldir="/" name="src/core/lib/http/format_request.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/http/httpcli.c" role="src" /> <file baseinstalldir="/" name="src/core/lib/http/httpcli.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/http/parser.c" role="src" /> <file baseinstalldir="/" name="src/core/lib/http/parser.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/call_combiner.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/closure.c" role="src" /> <file baseinstalldir="/" name="src/core/lib/iomgr/closure.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/combiner.c" role="src" /> <file baseinstalldir="/" name="src/core/lib/iomgr/combiner.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/endpoint.c" role="src" /> <file baseinstalldir="/" name="src/core/lib/iomgr/endpoint.c" role="src" />

@ -179,6 +179,7 @@ const grpc_channel_filter grpc_client_census_filter = {
sizeof(channel_data), sizeof(channel_data),
init_channel_elem, init_channel_elem,
destroy_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer,
grpc_channel_next_get_info, grpc_channel_next_get_info,
"census-client"}; "census-client"};
@ -192,5 +193,6 @@ const grpc_channel_filter grpc_server_census_filter = {
sizeof(channel_data), sizeof(channel_data),
init_channel_elem, init_channel_elem,
destroy_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer,
grpc_channel_next_get_info, grpc_channel_next_get_info,
"census-server"}; "census-server"};

@ -796,8 +796,7 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
// send_message // send_message
// recv_trailing_metadata // recv_trailing_metadata
// send_trailing_metadata // send_trailing_metadata
// We also add room for a single cancel_stream batch. #define MAX_WAITING_BATCHES 6
#define MAX_WAITING_BATCHES 7
/** Call data. Holds a pointer to grpc_subchannel_call and the /** Call data. Holds a pointer to grpc_subchannel_call and the
associated machinery to create such a pointer. associated machinery to create such a pointer.
@ -809,25 +808,23 @@ typedef struct client_channel_call_data {
// The code in deadline_filter.c requires this to be the first field. // The code in deadline_filter.c requires this to be the first field.
// TODO(roth): This is slightly sub-optimal in that grpc_deadline_state // TODO(roth): This is slightly sub-optimal in that grpc_deadline_state
// and this struct both independently store a pointer to the call // and this struct both independently store a pointer to the call
// combiner. If/when we have time, find a way to avoid this without // stack and each has its own mutex. If/when we have time, find a way
// breaking the grpc_deadline_state abstraction. // to avoid this without breaking the grpc_deadline_state abstraction.
grpc_deadline_state deadline_state; grpc_deadline_state deadline_state;
grpc_slice path; // Request path. grpc_slice path; // Request path.
gpr_timespec call_start_time; gpr_timespec call_start_time;
gpr_timespec deadline; gpr_timespec deadline;
gpr_arena *arena;
grpc_call_combiner *call_combiner;
grpc_server_retry_throttle_data *retry_throttle_data; grpc_server_retry_throttle_data *retry_throttle_data;
method_parameters *method_params; method_parameters *method_params;
grpc_subchannel_call *subchannel_call; /** either 0 for no call, a pointer to a grpc_subchannel_call (if the lowest
grpc_error *error; bit is 0), or a pointer to an error (if the lowest bit is 1) */
gpr_atm subchannel_call_or_error;
gpr_arena *arena;
grpc_lb_policy *lb_policy; // Holds ref while LB pick is pending. grpc_lb_policy *lb_policy; // Holds ref while LB pick is pending.
grpc_closure lb_pick_closure; grpc_closure lb_pick_closure;
grpc_closure cancel_closure;
grpc_connected_subchannel *connected_subchannel; grpc_connected_subchannel *connected_subchannel;
grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT]; grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
@ -835,9 +832,10 @@ typedef struct client_channel_call_data {
grpc_transport_stream_op_batch *waiting_for_pick_batches[MAX_WAITING_BATCHES]; grpc_transport_stream_op_batch *waiting_for_pick_batches[MAX_WAITING_BATCHES];
size_t waiting_for_pick_batches_count; size_t waiting_for_pick_batches_count;
grpc_closure handle_pending_batch_in_call_combiner[MAX_WAITING_BATCHES];
grpc_transport_stream_op_batch *initial_metadata_batch; grpc_transport_stream_op_batch_payload *initial_metadata_payload;
grpc_call_stack *owning_call;
grpc_linked_mdelem lb_token_mdelem; grpc_linked_mdelem lb_token_mdelem;
@ -845,40 +843,53 @@ typedef struct client_channel_call_data {
grpc_closure *original_on_complete; grpc_closure *original_on_complete;
} call_data; } call_data;
typedef struct {
grpc_subchannel_call *subchannel_call;
grpc_error *error;
} call_or_error;
static call_or_error get_call_or_error(call_data *p) {
gpr_atm c = gpr_atm_acq_load(&p->subchannel_call_or_error);
if (c == 0)
return (call_or_error){NULL, NULL};
else if (c & 1)
return (call_or_error){NULL, (grpc_error *)((c) & ~(gpr_atm)1)};
else
return (call_or_error){(grpc_subchannel_call *)c, NULL};
}
static bool set_call_or_error(call_data *p, call_or_error coe) {
// this should always be under a lock
call_or_error existing = get_call_or_error(p);
if (existing.error != GRPC_ERROR_NONE) {
GRPC_ERROR_UNREF(coe.error);
return false;
}
GPR_ASSERT(existing.subchannel_call == NULL);
if (coe.error != GRPC_ERROR_NONE) {
GPR_ASSERT(coe.subchannel_call == NULL);
gpr_atm_rel_store(&p->subchannel_call_or_error, 1 | (gpr_atm)coe.error);
} else {
GPR_ASSERT(coe.subchannel_call != NULL);
gpr_atm_rel_store(&p->subchannel_call_or_error,
(gpr_atm)coe.subchannel_call);
}
return true;
}
grpc_subchannel_call *grpc_client_channel_get_subchannel_call( grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
grpc_call_element *elem) { grpc_call_element *call_elem) {
call_data *calld = elem->call_data; return get_call_or_error(call_elem->call_data).subchannel_call;
return calld->subchannel_call;
} }
// This is called via the call combiner, so access to calld is synchronized. static void waiting_for_pick_batches_add_locked(
static void waiting_for_pick_batches_add(
call_data *calld, grpc_transport_stream_op_batch *batch) { call_data *calld, grpc_transport_stream_op_batch *batch) {
if (batch->send_initial_metadata) {
GPR_ASSERT(calld->initial_metadata_batch == NULL);
calld->initial_metadata_batch = batch;
} else {
GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES); GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES);
calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] = calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] =
batch; batch;
} }
}
// This is called via the call combiner, so access to calld is synchronized. static void waiting_for_pick_batches_fail_locked(grpc_exec_ctx *exec_ctx,
static void fail_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
call_data *calld = arg;
if (calld->waiting_for_pick_batches_count > 0) {
--calld->waiting_for_pick_batches_count;
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx,
calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count],
GRPC_ERROR_REF(error), calld->call_combiner);
}
}
// This is called via the call combiner, so access to calld is synchronized.
static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem, grpc_call_element *elem,
grpc_error *error) { grpc_error *error) {
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
@ -889,60 +900,34 @@ static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx,
grpc_error_string(error)); grpc_error_string(error));
} }
for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) { for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
fail_pending_batch_in_call_combiner, calld,
grpc_schedule_on_exec_ctx);
GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
&calld->handle_pending_batch_in_call_combiner[i],
GRPC_ERROR_REF(error),
"waiting_for_pick_batches_fail");
}
if (calld->initial_metadata_batch != NULL) {
grpc_transport_stream_op_batch_finish_with_failure( grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, calld->initial_metadata_batch, GRPC_ERROR_REF(error), exec_ctx, calld->waiting_for_pick_batches[i], GRPC_ERROR_REF(error));
calld->call_combiner);
} else {
GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
"waiting_for_pick_batches_fail");
} }
calld->waiting_for_pick_batches_count = 0;
GRPC_ERROR_UNREF(error); GRPC_ERROR_UNREF(error);
} }
// This is called via the call combiner, so access to calld is synchronized. static void waiting_for_pick_batches_resume_locked(grpc_exec_ctx *exec_ctx,
static void run_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *ignored) {
call_data *calld = arg;
if (calld->waiting_for_pick_batches_count > 0) {
--calld->waiting_for_pick_batches_count;
grpc_subchannel_call_process_op(
exec_ctx, calld->subchannel_call,
calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count]);
}
}
// This is called via the call combiner, so access to calld is synchronized.
static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) { grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
if (calld->waiting_for_pick_batches_count == 0) return;
call_or_error coe = get_call_or_error(calld);
if (coe.error != GRPC_ERROR_NONE) {
waiting_for_pick_batches_fail_locked(exec_ctx, elem,
GRPC_ERROR_REF(coe.error));
return;
}
if (GRPC_TRACER_ON(grpc_client_channel_trace)) { if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIdPTR gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIdPTR
" pending batches to subchannel_call=%p", " pending batches to subchannel_call=%p",
chand, calld, calld->waiting_for_pick_batches_count, elem->channel_data, calld, calld->waiting_for_pick_batches_count,
calld->subchannel_call); coe.subchannel_call);
} }
for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) { for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i], grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call,
run_pending_batch_in_call_combiner, calld, calld->waiting_for_pick_batches[i]);
grpc_schedule_on_exec_ctx);
GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
&calld->handle_pending_batch_in_call_combiner[i],
GRPC_ERROR_NONE,
"waiting_for_pick_batches_resume");
} }
GPR_ASSERT(calld->initial_metadata_batch != NULL); calld->waiting_for_pick_batches_count = 0;
grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call,
calld->initial_metadata_batch);
} }
// Applies service config to the call. Must be invoked once we know // Applies service config to the call. Must be invoked once we know
@ -983,28 +968,29 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx, static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem, grpc_call_element *elem,
grpc_error *error) { grpc_error *error) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
grpc_subchannel_call *subchannel_call = NULL;
const grpc_connected_subchannel_call_args call_args = { const grpc_connected_subchannel_call_args call_args = {
.pollent = calld->pollent, .pollent = calld->pollent,
.path = calld->path, .path = calld->path,
.start_time = calld->call_start_time, .start_time = calld->call_start_time,
.deadline = calld->deadline, .deadline = calld->deadline,
.arena = calld->arena, .arena = calld->arena,
.context = calld->subchannel_call_context, .context = calld->subchannel_call_context};
.call_combiner = calld->call_combiner};
grpc_error *new_error = grpc_connected_subchannel_create_call( grpc_error *new_error = grpc_connected_subchannel_create_call(
exec_ctx, calld->connected_subchannel, &call_args, exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
&calld->subchannel_call);
if (GRPC_TRACER_ON(grpc_client_channel_trace)) { if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s", gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
chand, calld, calld->subchannel_call, grpc_error_string(new_error)); elem->channel_data, calld, subchannel_call,
grpc_error_string(new_error));
} }
GPR_ASSERT(set_call_or_error(
calld, (call_or_error){.subchannel_call = subchannel_call}));
if (new_error != GRPC_ERROR_NONE) { if (new_error != GRPC_ERROR_NONE) {
new_error = grpc_error_add_child(new_error, error); new_error = grpc_error_add_child(new_error, error);
waiting_for_pick_batches_fail(exec_ctx, elem, new_error); waiting_for_pick_batches_fail_locked(exec_ctx, elem, new_error);
} else { } else {
waiting_for_pick_batches_resume(exec_ctx, elem); waiting_for_pick_batches_resume_locked(exec_ctx, elem);
} }
GRPC_ERROR_UNREF(error); GRPC_ERROR_UNREF(error);
} }
@ -1016,10 +1002,11 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx,
channel_data *chand = elem->channel_data; channel_data *chand = elem->channel_data;
grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent, grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
chand->interested_parties); chand->interested_parties);
call_or_error coe = get_call_or_error(calld);
if (calld->connected_subchannel == NULL) { if (calld->connected_subchannel == NULL) {
// Failed to create subchannel. // Failed to create subchannel.
GRPC_ERROR_UNREF(calld->error); grpc_error *failure =
calld->error = error == GRPC_ERROR_NONE error == GRPC_ERROR_NONE
? GRPC_ERROR_CREATE_FROM_STATIC_STRING( ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Call dropped by load balancing policy") "Call dropped by load balancing policy")
: GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
@ -1027,16 +1014,48 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx,
if (GRPC_TRACER_ON(grpc_client_channel_trace)) { if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, gpr_log(GPR_DEBUG,
"chand=%p calld=%p: failed to create subchannel: error=%s", chand, "chand=%p calld=%p: failed to create subchannel: error=%s", chand,
calld, grpc_error_string(calld->error)); calld, grpc_error_string(failure));
}
set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(failure)});
waiting_for_pick_batches_fail_locked(exec_ctx, elem, failure);
} else if (coe.error != GRPC_ERROR_NONE) {
/* already cancelled before subchannel became ready */
grpc_error *child_errors[] = {error, coe.error};
grpc_error *cancellation_error =
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Cancelled before creating subchannel", child_errors,
GPR_ARRAY_SIZE(child_errors));
/* if due to deadline, attach the deadline exceeded status to the error */
if (gpr_time_cmp(calld->deadline, gpr_now(GPR_CLOCK_MONOTONIC)) < 0) {
cancellation_error =
grpc_error_set_int(cancellation_error, GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_DEADLINE_EXCEEDED);
} }
waiting_for_pick_batches_fail(exec_ctx, elem, GRPC_ERROR_REF(calld->error)); if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: cancelled before subchannel became ready: %s",
chand, calld, grpc_error_string(cancellation_error));
}
waiting_for_pick_batches_fail_locked(exec_ctx, elem, cancellation_error);
} else { } else {
/* Create call on subchannel. */ /* Create call on subchannel. */
create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_REF(error)); create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
} }
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
GRPC_ERROR_UNREF(error); GRPC_ERROR_UNREF(error);
} }
static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
call_data *calld = elem->call_data;
grpc_subchannel_call *subchannel_call =
get_call_or_error(calld).subchannel_call;
if (subchannel_call == NULL) {
return NULL;
} else {
return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
}
}
/** Return true if subchannel is available immediately (in which case /** Return true if subchannel is available immediately (in which case
subchannel_ready_locked() should not be called), or false otherwise (in subchannel_ready_locked() should not be called), or false otherwise (in
which case subchannel_ready_locked() should be called when the subchannel which case subchannel_ready_locked() should be called when the subchannel
@ -1050,44 +1069,6 @@ typedef struct {
grpc_closure closure; grpc_closure closure;
} pick_after_resolver_result_args; } pick_after_resolver_result_args;
// Note: This runs under the client_channel combiner, but will NOT be
// holding the call combiner.
static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_error *error) {
grpc_call_element *elem = arg;
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
// If we don't yet have a resolver result, then a closure for
// pick_after_resolver_result_done_locked() will have been added to
// chand->waiting_for_resolver_result_closures, and it may not be invoked
// until after this call has been destroyed. We mark the operation as
// cancelled, so that when pick_after_resolver_result_done_locked()
// is called, it will be a no-op. We also immediately invoke
// subchannel_ready_locked() to propagate the error back to the caller.
for (grpc_closure *closure = chand->waiting_for_resolver_result_closures.head;
closure != NULL; closure = closure->next_data.next) {
pick_after_resolver_result_args *args = closure->cb_arg;
if (!args->cancelled && args->elem == elem) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: "
"cancelling pick waiting for resolver result",
chand, calld);
}
args->cancelled = true;
// Note: Although we are not in the call combiner here, we are
// basically stealing the call combiner from the pending pick, so
// it's safe to call subchannel_ready_locked() here -- we are
// essentially calling it here instead of calling it in
// pick_after_resolver_result_done_locked().
subchannel_ready_locked(exec_ctx, elem,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1));
}
}
}
static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx, static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
void *arg, void *arg,
grpc_error *error) { grpc_error *error) {
@ -1098,24 +1079,21 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_DEBUG, "call cancelled before resolver result"); gpr_log(GPR_DEBUG, "call cancelled before resolver result");
} }
} else { } else {
grpc_call_element *elem = args->elem; channel_data *chand = args->elem->channel_data;
channel_data *chand = elem->channel_data; call_data *calld = args->elem->call_data;
call_data *calld = elem->call_data;
grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner,
NULL);
if (error != GRPC_ERROR_NONE) { if (error != GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) { if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data", gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
chand, calld); chand, calld);
} }
subchannel_ready_locked(exec_ctx, elem, GRPC_ERROR_REF(error)); subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_REF(error));
} else { } else {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) { if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick", gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
chand, calld); chand, calld);
} }
if (pick_subchannel_locked(exec_ctx, elem)) { if (pick_subchannel_locked(exec_ctx, args->elem)) {
subchannel_ready_locked(exec_ctx, elem, GRPC_ERROR_NONE); subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_NONE);
} }
} }
} }
@ -1138,33 +1116,41 @@ static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
args, grpc_combiner_scheduler(chand->combiner)); args, grpc_combiner_scheduler(chand->combiner));
grpc_closure_list_append(&chand->waiting_for_resolver_result_closures, grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
&args->closure, GRPC_ERROR_NONE); &args->closure, GRPC_ERROR_NONE);
grpc_call_combiner_set_notify_on_cancel(
exec_ctx, calld->call_combiner,
GRPC_CLOSURE_INIT(&calld->cancel_closure,
pick_after_resolver_result_cancel_locked, elem,
grpc_combiner_scheduler(chand->combiner)));
} }
// Note: This runs under the client_channel combiner, but will NOT be static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
// holding the call combiner. grpc_call_element *elem,
static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) { grpc_error *error) {
grpc_call_element *elem = arg;
channel_data *chand = elem->channel_data; channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
if (calld->lb_policy != NULL) { // If we don't yet have a resolver result, then a closure for
// pick_after_resolver_result_done_locked() will have been added to
// chand->waiting_for_resolver_result_closures, and it may not be invoked
// until after this call has been destroyed. We mark the operation as
// cancelled, so that when pick_after_resolver_result_done_locked()
// is called, it will be a no-op. We also immediately invoke
// subchannel_ready_locked() to propagate the error back to the caller.
for (grpc_closure *closure = chand->waiting_for_resolver_result_closures.head;
closure != NULL; closure = closure->next_data.next) {
pick_after_resolver_result_args *args = closure->cb_arg;
if (!args->cancelled && args->elem == elem) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) { if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p", gpr_log(GPR_DEBUG,
chand, calld, calld->lb_policy); "chand=%p calld=%p: "
"cancelling pick waiting for resolver result",
chand, calld);
} }
grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy, args->cancelled = true;
&calld->connected_subchannel, subchannel_ready_locked(exec_ctx, elem,
GRPC_ERROR_REF(error)); GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1));
} }
} }
GRPC_ERROR_UNREF(error);
}
// Callback invoked by grpc_lb_policy_pick_locked() for async picks. // Callback invoked by grpc_lb_policy_pick_locked() for async picks.
// Unrefs the LB policy and invokes subchannel_ready_locked(). // Unrefs the LB policy after invoking subchannel_ready_locked().
static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg, static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) { grpc_error *error) {
grpc_call_element *elem = arg; grpc_call_element *elem = arg;
@ -1174,7 +1160,6 @@ static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously", gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
chand, calld); chand, calld);
} }
grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, NULL);
GPR_ASSERT(calld->lb_policy != NULL); GPR_ASSERT(calld->lb_policy != NULL);
GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel"); GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
calld->lb_policy = NULL; calld->lb_policy = NULL;
@ -1209,15 +1194,24 @@ static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
} }
GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel"); GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
calld->lb_policy = NULL; calld->lb_policy = NULL;
} else {
grpc_call_combiner_set_notify_on_cancel(
exec_ctx, calld->call_combiner,
GRPC_CLOSURE_INIT(&calld->cancel_closure, pick_callback_cancel_locked,
elem, grpc_combiner_scheduler(chand->combiner)));
} }
return pick_done; return pick_done;
} }
static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_error *error) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
GPR_ASSERT(calld->lb_policy != NULL);
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
chand, calld, calld->lb_policy);
}
grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy,
&calld->connected_subchannel, error);
}
static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx, static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) { grpc_call_element *elem) {
GPR_TIMER_BEGIN("pick_subchannel", 0); GPR_TIMER_BEGIN("pick_subchannel", 0);
@ -1230,7 +1224,7 @@ static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
// Otherwise, if the service config specified a value for this // Otherwise, if the service config specified a value for this
// method, use that. // method, use that.
uint32_t initial_metadata_flags = uint32_t initial_metadata_flags =
calld->initial_metadata_batch->payload->send_initial_metadata calld->initial_metadata_payload->send_initial_metadata
.send_initial_metadata_flags; .send_initial_metadata_flags;
const bool wait_for_ready_set_from_api = const bool wait_for_ready_set_from_api =
initial_metadata_flags & initial_metadata_flags &
@ -1247,7 +1241,7 @@ static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
} }
} }
const grpc_lb_policy_pick_args inputs = { const grpc_lb_policy_pick_args inputs = {
calld->initial_metadata_batch->payload->send_initial_metadata calld->initial_metadata_payload->send_initial_metadata
.send_initial_metadata, .send_initial_metadata,
initial_metadata_flags, &calld->lb_token_mdelem}; initial_metadata_flags, &calld->lb_token_mdelem};
pick_done = pick_callback_start_locked(exec_ctx, elem, &inputs); pick_done = pick_callback_start_locked(exec_ctx, elem, &inputs);
@ -1264,33 +1258,91 @@ static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
return pick_done; return pick_done;
} }
static void start_pick_locked(grpc_exec_ctx *exec_ctx, void *arg, static void start_transport_stream_op_batch_locked(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_error *error_ignored) { grpc_error *error_ignored) {
GPR_TIMER_BEGIN("start_pick_locked", 0); GPR_TIMER_BEGIN("start_transport_stream_op_batch_locked", 0);
grpc_call_element *elem = (grpc_call_element *)arg; grpc_transport_stream_op_batch *batch = arg;
call_data *calld = (call_data *)elem->call_data; grpc_call_element *elem = batch->handler_private.extra_arg;
channel_data *chand = (channel_data *)elem->channel_data; call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
/* need to recheck that another thread hasn't set the call */
call_or_error coe = get_call_or_error(calld);
if (coe.error != GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
chand, calld, grpc_error_string(coe.error));
}
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, batch, GRPC_ERROR_REF(coe.error));
goto done;
}
if (coe.subchannel_call != NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
calld, coe.subchannel_call);
}
grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, batch);
goto done;
}
// Add to waiting-for-pick list. If we succeed in getting a
// subchannel call below, we'll handle this batch (along with any
// other waiting batches) in waiting_for_pick_batches_resume_locked().
waiting_for_pick_batches_add_locked(calld, batch);
// If this is a cancellation, cancel the pending pick (if any) and
// fail any pending batches.
if (batch->cancel_stream) {
grpc_error *error = batch->payload->cancel_stream.cancel_error;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
calld, grpc_error_string(error));
}
/* Stash a copy of cancel_error in our call data, so that we can use
it for subsequent operations. This ensures that if the call is
cancelled before any batches are passed down (e.g., if the deadline
is in the past when the call starts), we can return the right
error to the caller when the first batch does get passed down. */
set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(error)});
if (calld->lb_policy != NULL) {
pick_callback_cancel_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
} else {
pick_after_resolver_result_cancel_locked(exec_ctx, elem,
GRPC_ERROR_REF(error));
}
waiting_for_pick_batches_fail_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
goto done;
}
/* if we don't have a subchannel, try to get one */
if (batch->send_initial_metadata) {
GPR_ASSERT(calld->connected_subchannel == NULL); GPR_ASSERT(calld->connected_subchannel == NULL);
calld->initial_metadata_payload = batch->payload;
GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
/* If a subchannel is not available immediately, the polling entity from
call_data should be provided to channel_data's interested_parties, so
that IO of the lb_policy and resolver could be done under it. */
if (pick_subchannel_locked(exec_ctx, elem)) { if (pick_subchannel_locked(exec_ctx, elem)) {
// Pick was returned synchronously. // Pick was returned synchronously.
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
if (calld->connected_subchannel == NULL) { if (calld->connected_subchannel == NULL) {
GRPC_ERROR_UNREF(calld->error); grpc_error *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
calld->error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Call dropped by load balancing policy"); "Call dropped by load balancing policy");
waiting_for_pick_batches_fail(exec_ctx, elem, set_call_or_error(calld,
GRPC_ERROR_REF(calld->error)); (call_or_error){.error = GRPC_ERROR_REF(error)});
waiting_for_pick_batches_fail_locked(exec_ctx, elem, error);
} else { } else {
// Create subchannel call. // Create subchannel call.
create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_NONE); create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_NONE);
} }
} else { } else {
// Pick will be done asynchronously. Add the call's polling entity to
// the channel's interested_parties, so that I/O for the resolver
// and LB policy can be done under it.
grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent, grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
chand->interested_parties); chand->interested_parties);
} }
GPR_TIMER_END("start_pick_locked", 0); }
done:
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
"start_transport_stream_op_batch");
GPR_TIMER_END("start_transport_stream_op_batch_locked", 0);
} }
static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
@ -1313,49 +1365,27 @@ static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
GRPC_ERROR_REF(error)); GRPC_ERROR_REF(error));
} }
/* The logic here is fairly complicated, due to (a) the fact that we
need to handle the case where we receive the send op before the
initial metadata op, and (b) the need for efficiency, especially in
the streaming case.
We use double-checked locking to initially see if initialization has been
performed. If it has not, we acquire the combiner and perform initialization.
If it has, we proceed on the fast path. */
static void cc_start_transport_stream_op_batch( static void cc_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem, grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) { grpc_transport_stream_op_batch *batch) {
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data; channel_data *chand = elem->channel_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace) ||
GRPC_TRACER_ON(grpc_trace_channel)) {
grpc_call_log_op(GPR_INFO, elem, batch);
}
if (chand->deadline_checking_enabled) { if (chand->deadline_checking_enabled) {
grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem, grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
batch); batch);
} }
GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0);
// If we've previously been cancelled, immediately fail any new batches.
if (calld->error != GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
chand, calld, grpc_error_string(calld->error));
}
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, batch, GRPC_ERROR_REF(calld->error), calld->call_combiner);
goto done;
}
if (batch->cancel_stream) {
// Stash a copy of cancel_error in our call data, so that we can use
// it for subsequent operations. This ensures that if the call is
// cancelled before any batches are passed down (e.g., if the deadline
// is in the past when the call starts), we can return the right
// error to the caller when the first batch does get passed down.
GRPC_ERROR_UNREF(calld->error);
calld->error = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
calld, grpc_error_string(calld->error));
}
// If we have a subchannel call, send the cancellation batch down.
// Otherwise, fail all pending batches.
if (calld->subchannel_call != NULL) {
grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch);
} else {
waiting_for_pick_batches_add(calld, batch);
waiting_for_pick_batches_fail(exec_ctx, elem,
GRPC_ERROR_REF(calld->error));
}
goto done;
}
// Intercept on_complete for recv_trailing_metadata so that we can // Intercept on_complete for recv_trailing_metadata so that we can
// check retry throttle status. // check retry throttle status.
if (batch->recv_trailing_metadata) { if (batch->recv_trailing_metadata) {
@ -1365,43 +1395,38 @@ static void cc_start_transport_stream_op_batch(
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
batch->on_complete = &calld->on_complete; batch->on_complete = &calld->on_complete;
} }
// Check if we've already gotten a subchannel call. /* try to (atomically) get the call */
// Note that once we have completed the pick, we do not need to enter call_or_error coe = get_call_or_error(calld);
// the channel combiner, which is more efficient (especially for GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0);
// streaming calls). if (coe.error != GRPC_ERROR_NONE) {
if (calld->subchannel_call != NULL) { if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
chand, calld, grpc_error_string(coe.error));
}
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, batch, GRPC_ERROR_REF(coe.error));
goto done;
}
if (coe.subchannel_call != NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) { if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, gpr_log(GPR_DEBUG,
"chand=%p calld=%p: sending batch to subchannel_call=%p", chand, "chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
calld, calld->subchannel_call); calld, coe.subchannel_call);
} }
grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch); grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, batch);
goto done; goto done;
} }
// We do not yet have a subchannel call. /* we failed; lock and figure out what to do */
// Add the batch to the waiting-for-pick list.
waiting_for_pick_batches_add(calld, batch);
// For batches containing a send_initial_metadata op, enter the channel
// combiner to start a pick.
if (batch->send_initial_metadata) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) { if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering combiner", chand, calld); gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering combiner", chand, calld);
} }
GRPC_CALL_STACK_REF(calld->owning_call, "start_transport_stream_op_batch");
batch->handler_private.extra_arg = elem;
GRPC_CLOSURE_SCHED( GRPC_CLOSURE_SCHED(
exec_ctx, exec_ctx, GRPC_CLOSURE_INIT(&batch->handler_private.closure,
GRPC_CLOSURE_INIT(&batch->handler_private.closure, start_pick_locked, start_transport_stream_op_batch_locked, batch,
elem, grpc_combiner_scheduler(chand->combiner)), grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
} else {
// For all other batches, release the call combiner.
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: saved batch, yeilding call combiner", chand,
calld);
}
GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
"batch does not include send_initial_metadata");
}
done: done:
GPR_TIMER_END("cc_start_transport_stream_op_batch", 0); GPR_TIMER_END("cc_start_transport_stream_op_batch", 0);
} }
@ -1416,11 +1441,10 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
calld->path = grpc_slice_ref_internal(args->path); calld->path = grpc_slice_ref_internal(args->path);
calld->call_start_time = args->start_time; calld->call_start_time = args->start_time;
calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC); calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
calld->owning_call = args->call_stack;
calld->arena = args->arena; calld->arena = args->arena;
calld->call_combiner = args->call_combiner;
if (chand->deadline_checking_enabled) { if (chand->deadline_checking_enabled) {
grpc_deadline_state_init(exec_ctx, elem, args->call_stack, grpc_deadline_state_init(exec_ctx, elem, args->call_stack, calld->deadline);
args->call_combiner, calld->deadline);
} }
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;
} }
@ -1439,12 +1463,13 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
if (calld->method_params != NULL) { if (calld->method_params != NULL) {
method_parameters_unref(calld->method_params); method_parameters_unref(calld->method_params);
} }
GRPC_ERROR_UNREF(calld->error); call_or_error coe = get_call_or_error(calld);
if (calld->subchannel_call != NULL) { GRPC_ERROR_UNREF(coe.error);
grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call, if (coe.subchannel_call != NULL) {
grpc_subchannel_call_set_cleanup_closure(coe.subchannel_call,
then_schedule_closure); then_schedule_closure);
then_schedule_closure = NULL; then_schedule_closure = NULL;
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, calld->subchannel_call, GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, coe.subchannel_call,
"client_channel_destroy_call"); "client_channel_destroy_call");
} }
GPR_ASSERT(calld->lb_policy == NULL); GPR_ASSERT(calld->lb_policy == NULL);
@ -1483,6 +1508,7 @@ const grpc_channel_filter grpc_client_channel_filter = {
sizeof(channel_data), sizeof(channel_data),
cc_init_channel_elem, cc_init_channel_elem,
cc_destroy_channel_elem, cc_destroy_channel_elem,
cc_get_peer,
cc_get_channel_info, cc_get_channel_info,
"client-channel", "client-channel",
}; };

@ -132,5 +132,6 @@ const grpc_channel_filter grpc_client_load_reporting_filter = {
0, // sizeof(channel_data) 0, // sizeof(channel_data)
init_channel_elem, init_channel_elem,
destroy_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer,
grpc_channel_next_get_info, grpc_channel_next_get_info,
"client_load_reporting"}; "client_load_reporting"};

@ -296,8 +296,6 @@ static void stop_connectivity_watchers(grpc_exec_ctx *exec_ctx,
static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_args *args) { const grpc_lb_policy_args *args) {
pick_first_lb_policy *p = (pick_first_lb_policy *)policy; pick_first_lb_policy *p = (pick_first_lb_policy *)policy;
/* Find the number of backend addresses. We ignore balancer
* addresses, since we don't know how to handle them. */
const grpc_arg *arg = const grpc_arg *arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) { if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
@ -317,11 +315,7 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
return; return;
} }
const grpc_lb_addresses *addresses = arg->value.pointer.p; const grpc_lb_addresses *addresses = arg->value.pointer.p;
size_t num_addrs = 0; if (addresses->num_addresses == 0) {
for (size_t i = 0; i < addresses->num_addresses; i++) {
if (!addresses->addresses[i].is_balancer) ++num_addrs;
}
if (num_addrs == 0) {
// Empty update. Unsubscribe from all current subchannels and put the // Empty update. Unsubscribe from all current subchannels and put the
// channel in TRANSIENT_FAILURE. // channel in TRANSIENT_FAILURE.
grpc_connectivity_state_set( grpc_connectivity_state_set(
@ -333,9 +327,10 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
} }
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) { if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO, "Pick First %p received update with %lu addresses", gpr_log(GPR_INFO, "Pick First %p received update with %lu addresses",
(void *)p, (unsigned long)num_addrs); (void *)p, (unsigned long)addresses->num_addresses);
} }
grpc_subchannel_args *sc_args = gpr_zalloc(sizeof(*sc_args) * num_addrs); grpc_subchannel_args *sc_args =
gpr_zalloc(sizeof(*sc_args) * addresses->num_addresses);
/* We remove the following keys in order for subchannel keys belonging to /* We remove the following keys in order for subchannel keys belonging to
* subchannels point to the same address to match. */ * subchannels point to the same address to match. */
static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS, static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
@ -344,7 +339,8 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
/* Create list of subchannel args for new addresses in \a args. */ /* Create list of subchannel args for new addresses in \a args. */
for (size_t i = 0; i < addresses->num_addresses; i++) { for (size_t i = 0; i < addresses->num_addresses; i++) {
if (addresses->addresses[i].is_balancer) continue; // If there were any balancer, we would have chosen grpclb policy instead.
GPR_ASSERT(!addresses->addresses[i].is_balancer);
if (addresses->addresses[i].user_data != NULL) { if (addresses->addresses[i].user_data != NULL) {
gpr_log(GPR_ERROR, gpr_log(GPR_ERROR,
"This LB policy doesn't support user data. It will be ignored"); "This LB policy doesn't support user data. It will be ignored");

@ -737,8 +737,6 @@ static void rr_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_args *args) { const grpc_lb_policy_args *args) {
round_robin_lb_policy *p = (round_robin_lb_policy *)policy; round_robin_lb_policy *p = (round_robin_lb_policy *)policy;
/* Find the number of backend addresses. We ignore balancer addresses, since
* we don't know how to handle them. */
const grpc_arg *arg = const grpc_arg *arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) { if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
@ -757,12 +755,9 @@ static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
return; return;
} }
grpc_lb_addresses *addresses = arg->value.pointer.p; grpc_lb_addresses *addresses = arg->value.pointer.p;
size_t num_addrs = 0; rr_subchannel_list *subchannel_list =
for (size_t i = 0; i < addresses->num_addresses; i++) { rr_subchannel_list_create(p, addresses->num_addresses);
if (!addresses->addresses[i].is_balancer) ++num_addrs; if (addresses->num_addresses == 0) {
}
rr_subchannel_list *subchannel_list = rr_subchannel_list_create(p, num_addrs);
if (num_addrs == 0) {
grpc_connectivity_state_set( grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE, exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"), GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"),
@ -794,9 +789,8 @@ static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
GRPC_ARG_LB_ADDRESSES}; GRPC_ARG_LB_ADDRESSES};
/* Create subchannels for addresses in the update. */ /* Create subchannels for addresses in the update. */
for (size_t i = 0; i < addresses->num_addresses; i++) { for (size_t i = 0; i < addresses->num_addresses; i++) {
/* Skip balancer addresses, since we only know how to handle backends. */ // If there were any balancer, we would have chosen grpclb policy instead.
if (addresses->addresses[i].is_balancer) continue; GPR_ASSERT(!addresses->addresses[i].is_balancer);
GPR_ASSERT(i < num_addrs);
memset(&sc_args, 0, sizeof(grpc_subchannel_args)); memset(&sc_args, 0, sizeof(grpc_subchannel_args));
grpc_arg addr_arg = grpc_arg addr_arg =
grpc_create_subchannel_address_arg(&addresses->addresses[i].address); grpc_create_subchannel_address_arg(&addresses->addresses[i].address);

@ -724,6 +724,13 @@ void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
GRPC_CALL_STACK_UNREF(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON); GRPC_CALL_STACK_UNREF(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON);
} }
char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *call) {
grpc_call_stack *call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call);
grpc_call_element *top_elem = grpc_call_stack_element(call_stack, 0);
return top_elem->filter->get_peer(exec_ctx, top_elem);
}
void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx, void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *call, grpc_subchannel_call *call,
grpc_transport_stream_op_batch *op) { grpc_transport_stream_op_batch *op) {
@ -753,15 +760,13 @@ grpc_error *grpc_connected_subchannel_create_call(
args->arena, sizeof(grpc_subchannel_call) + chanstk->call_stack_size); args->arena, sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
grpc_call_stack *callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call); grpc_call_stack *callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
(*call)->connection = GRPC_CONNECTED_SUBCHANNEL_REF(con, "subchannel_call"); (*call)->connection = GRPC_CONNECTED_SUBCHANNEL_REF(con, "subchannel_call");
const grpc_call_element_args call_args = { const grpc_call_element_args call_args = {.call_stack = callstk,
.call_stack = callstk,
.server_transport_data = NULL, .server_transport_data = NULL,
.context = args->context, .context = args->context,
.path = args->path, .path = args->path,
.start_time = args->start_time, .start_time = args->start_time,
.deadline = args->deadline, .deadline = args->deadline,
.arena = args->arena, .arena = args->arena};
.call_combiner = args->call_combiner};
grpc_error *error = grpc_call_stack_init( grpc_error *error = grpc_call_stack_init(
exec_ctx, chanstk, 1, subchannel_call_destroy, *call, &call_args); exec_ctx, chanstk, 1, subchannel_call_destroy, *call, &call_args);
if (error != GRPC_ERROR_NONE) { if (error != GRPC_ERROR_NONE) {

@ -106,7 +106,6 @@ typedef struct {
gpr_timespec deadline; gpr_timespec deadline;
gpr_arena *arena; gpr_arena *arena;
grpc_call_context_element *context; grpc_call_context_element *context;
grpc_call_combiner *call_combiner;
} grpc_connected_subchannel_call_args; } grpc_connected_subchannel_call_args;
grpc_error *grpc_connected_subchannel_create_call( grpc_error *grpc_connected_subchannel_create_call(
@ -151,6 +150,10 @@ void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *subchannel_call, grpc_subchannel_call *subchannel_call,
grpc_transport_stream_op_batch *op); grpc_transport_stream_op_batch *op);
/** continue querying for peer */
char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *subchannel_call);
/** Must be called once per call. Sets the 'then_schedule_closure' argument for /** Must be called once per call. Sets the 'then_schedule_closure' argument for
call stack destruction. */ call stack destruction. */
void grpc_subchannel_call_set_cleanup_closure( void grpc_subchannel_call_set_cleanup_closure(

@ -34,56 +34,22 @@
// grpc_deadline_state // grpc_deadline_state
// //
// The on_complete callback used when sending a cancel_error batch down the
// filter stack. Yields the call combiner when the batch returns.
static void yield_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* ignored) {
grpc_deadline_state* deadline_state = arg;
GRPC_CALL_COMBINER_STOP(exec_ctx, deadline_state->call_combiner,
"got on_complete from cancel_stream batch");
GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer");
}
// This is called via the call combiner, so access to deadline_state is
// synchronized.
static void send_cancel_op_in_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
grpc_call_element* elem = arg;
grpc_deadline_state* deadline_state = elem->call_data;
grpc_transport_stream_op_batch* batch = grpc_make_transport_stream_op(
GRPC_CLOSURE_INIT(&deadline_state->timer_callback, yield_call_combiner,
deadline_state, grpc_schedule_on_exec_ctx));
batch->cancel_stream = true;
batch->payload->cancel_stream.cancel_error = GRPC_ERROR_REF(error);
elem->filter->start_transport_stream_op_batch(exec_ctx, elem, batch);
}
// Timer callback. // Timer callback.
static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg, static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) { grpc_error* error) {
grpc_call_element* elem = (grpc_call_element*)arg; grpc_call_element* elem = (grpc_call_element*)arg;
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data; grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
if (error != GRPC_ERROR_CANCELLED) { if (error != GRPC_ERROR_CANCELLED) {
error = grpc_error_set_int( grpc_call_element_signal_error(
exec_ctx, elem,
grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"), GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED); GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED));
grpc_call_combiner_cancel(exec_ctx, deadline_state->call_combiner,
GRPC_ERROR_REF(error));
GRPC_CLOSURE_INIT(&deadline_state->timer_callback,
send_cancel_op_in_call_combiner, elem,
grpc_schedule_on_exec_ctx);
GRPC_CALL_COMBINER_START(exec_ctx, deadline_state->call_combiner,
&deadline_state->timer_callback, error,
"deadline exceeded -- sending cancel_stream op");
} else {
GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack,
"deadline_timer");
} }
GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer");
} }
// Starts the deadline timer. // Starts the deadline timer.
// This is called via the call combiner, so access to deadline_state is
// synchronized.
static void start_timer_if_needed(grpc_exec_ctx* exec_ctx, static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem, grpc_call_element* elem,
gpr_timespec deadline) { gpr_timespec deadline) {
@ -92,39 +58,51 @@ static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
return; return;
} }
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data; grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
grpc_deadline_timer_state cur_state;
grpc_closure* closure = NULL; grpc_closure* closure = NULL;
switch (deadline_state->timer_state) { retry:
cur_state =
(grpc_deadline_timer_state)gpr_atm_acq_load(&deadline_state->timer_state);
switch (cur_state) {
case GRPC_DEADLINE_STATE_PENDING: case GRPC_DEADLINE_STATE_PENDING:
// Note: We do not start the timer if there is already a timer // Note: We do not start the timer if there is already a timer
return; return;
case GRPC_DEADLINE_STATE_FINISHED: case GRPC_DEADLINE_STATE_FINISHED:
deadline_state->timer_state = GRPC_DEADLINE_STATE_PENDING; if (gpr_atm_rel_cas(&deadline_state->timer_state,
GRPC_DEADLINE_STATE_FINISHED,
GRPC_DEADLINE_STATE_PENDING)) {
// If we've already created and destroyed a timer, we always create a // If we've already created and destroyed a timer, we always create a
// new closure: we have no other guarantee that the inlined closure is // new closure: we have no other guarantee that the inlined closure is
// not in use (it may hold a pending call to timer_callback) // not in use (it may hold a pending call to timer_callback)
closure = closure = GRPC_CLOSURE_CREATE(timer_callback, elem,
GRPC_CLOSURE_CREATE(timer_callback, elem, grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
} else {
goto retry;
}
break; break;
case GRPC_DEADLINE_STATE_INITIAL: case GRPC_DEADLINE_STATE_INITIAL:
deadline_state->timer_state = GRPC_DEADLINE_STATE_PENDING; if (gpr_atm_rel_cas(&deadline_state->timer_state,
GRPC_DEADLINE_STATE_INITIAL,
GRPC_DEADLINE_STATE_PENDING)) {
closure = closure =
GRPC_CLOSURE_INIT(&deadline_state->timer_callback, timer_callback, GRPC_CLOSURE_INIT(&deadline_state->timer_callback, timer_callback,
elem, grpc_schedule_on_exec_ctx); elem, grpc_schedule_on_exec_ctx);
} else {
goto retry;
}
break; break;
} }
GPR_ASSERT(closure != NULL); GPR_ASSERT(closure);
GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer"); GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure, grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure,
gpr_now(GPR_CLOCK_MONOTONIC)); gpr_now(GPR_CLOCK_MONOTONIC));
} }
// Cancels the deadline timer. // Cancels the deadline timer.
// This is called via the call combiner, so access to deadline_state is
// synchronized.
static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx, static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_deadline_state* deadline_state) { grpc_deadline_state* deadline_state) {
if (deadline_state->timer_state == GRPC_DEADLINE_STATE_PENDING) { if (gpr_atm_rel_cas(&deadline_state->timer_state, GRPC_DEADLINE_STATE_PENDING,
deadline_state->timer_state = GRPC_DEADLINE_STATE_FINISHED; GRPC_DEADLINE_STATE_FINISHED)) {
grpc_timer_cancel(exec_ctx, &deadline_state->timer); grpc_timer_cancel(exec_ctx, &deadline_state->timer);
} else { } else {
// timer was either in STATE_INITAL (nothing to cancel) // timer was either in STATE_INITAL (nothing to cancel)
@ -153,7 +131,6 @@ static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
// Callback and associated state for starting the timer after call stack // Callback and associated state for starting the timer after call stack
// initialization has been completed. // initialization has been completed.
struct start_timer_after_init_state { struct start_timer_after_init_state {
bool in_call_combiner;
grpc_call_element* elem; grpc_call_element* elem;
gpr_timespec deadline; gpr_timespec deadline;
grpc_closure closure; grpc_closure closure;
@ -161,29 +138,15 @@ struct start_timer_after_init_state {
static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg, static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) { grpc_error* error) {
struct start_timer_after_init_state* state = arg; struct start_timer_after_init_state* state = arg;
grpc_deadline_state* deadline_state = state->elem->call_data;
if (!state->in_call_combiner) {
// We are initially called without holding the call combiner, so we
// need to bounce ourselves into it.
state->in_call_combiner = true;
GRPC_CALL_COMBINER_START(exec_ctx, deadline_state->call_combiner,
&state->closure, GRPC_ERROR_REF(error),
"scheduling deadline timer");
return;
}
start_timer_if_needed(exec_ctx, state->elem, state->deadline); start_timer_if_needed(exec_ctx, state->elem, state->deadline);
gpr_free(state); gpr_free(state);
GRPC_CALL_COMBINER_STOP(exec_ctx, deadline_state->call_combiner,
"done scheduling deadline timer");
} }
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_call_stack* call_stack, grpc_call_stack* call_stack,
grpc_call_combiner* call_combiner,
gpr_timespec deadline) { gpr_timespec deadline) {
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data; grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
deadline_state->call_stack = call_stack; deadline_state->call_stack = call_stack;
deadline_state->call_combiner = call_combiner;
// Deadline will always be infinite on servers, so the timer will only be // Deadline will always be infinite on servers, so the timer will only be
// set on clients with a finite deadline. // set on clients with a finite deadline.
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC); deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
@ -195,7 +158,7 @@ void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
// call stack initialization is finished. To avoid that problem, we // call stack initialization is finished. To avoid that problem, we
// create a closure to start the timer, and we schedule that closure // create a closure to start the timer, and we schedule that closure
// to be run after call stack initialization is done. // to be run after call stack initialization is done.
struct start_timer_after_init_state* state = gpr_zalloc(sizeof(*state)); struct start_timer_after_init_state* state = gpr_malloc(sizeof(*state));
state->elem = elem; state->elem = elem;
state->deadline = deadline; state->deadline = deadline;
GRPC_CLOSURE_INIT(&state->closure, start_timer_after_init, state, GRPC_CLOSURE_INIT(&state->closure, start_timer_after_init, state,
@ -269,8 +232,7 @@ typedef struct server_call_data {
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx, static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem, grpc_call_element* elem,
const grpc_call_element_args* args) { const grpc_call_element_args* args) {
grpc_deadline_state_init(exec_ctx, elem, args->call_stack, grpc_deadline_state_init(exec_ctx, elem, args->call_stack, args->deadline);
args->call_combiner, args->deadline);
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;
} }
@ -348,6 +310,7 @@ const grpc_channel_filter grpc_client_deadline_filter = {
0, // sizeof(channel_data) 0, // sizeof(channel_data)
init_channel_elem, init_channel_elem,
destroy_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer,
grpc_channel_next_get_info, grpc_channel_next_get_info,
"deadline", "deadline",
}; };
@ -362,6 +325,7 @@ const grpc_channel_filter grpc_server_deadline_filter = {
0, // sizeof(channel_data) 0, // sizeof(channel_data)
init_channel_elem, init_channel_elem,
destroy_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer,
grpc_channel_next_get_info, grpc_channel_next_get_info,
"deadline", "deadline",
}; };

@ -31,8 +31,7 @@ typedef enum grpc_deadline_timer_state {
typedef struct grpc_deadline_state { typedef struct grpc_deadline_state {
// We take a reference to the call stack for the timer callback. // We take a reference to the call stack for the timer callback.
grpc_call_stack* call_stack; grpc_call_stack* call_stack;
grpc_call_combiner* call_combiner; gpr_atm timer_state;
grpc_deadline_timer_state timer_state;
grpc_timer timer; grpc_timer timer;
grpc_closure timer_callback; grpc_closure timer_callback;
// Closure to invoke when the call is complete. // Closure to invoke when the call is complete.
@ -51,7 +50,6 @@ typedef struct grpc_deadline_state {
// assumes elem->call_data is zero'd // assumes elem->call_data is zero'd
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_call_stack* call_stack, grpc_call_stack* call_stack,
grpc_call_combiner* call_combiner,
gpr_timespec deadline); gpr_timespec deadline);
void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx, void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem); grpc_call_element* elem);
@ -63,8 +61,6 @@ void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
// to ensure that the timer callback is not invoked while it is in the // to ensure that the timer callback is not invoked while it is in the
// process of being reset, which means that attempting to increase the // process of being reset, which means that attempting to increase the
// deadline may result in the timer being called twice. // deadline may result in the timer being called twice.
//
// Note: Must be called while holding the call combiner.
void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
gpr_timespec new_deadline); gpr_timespec new_deadline);
@ -74,8 +70,6 @@ void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
// //
// Note: It is the caller's responsibility to chain to the next filter if // Note: It is the caller's responsibility to chain to the next filter if
// necessary after this function returns. // necessary after this function returns.
//
// Note: Must be called while holding the call combiner.
void grpc_deadline_state_client_start_transport_stream_op_batch( void grpc_deadline_state_client_start_transport_stream_op_batch(
grpc_exec_ctx* exec_ctx, grpc_call_element* elem, grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_transport_stream_op_batch* op); grpc_transport_stream_op_batch* op);

@ -36,7 +36,6 @@
static const size_t kMaxPayloadSizeForGet = 2048; static const size_t kMaxPayloadSizeForGet = 2048;
typedef struct call_data { typedef struct call_data {
grpc_call_combiner *call_combiner;
// State for handling send_initial_metadata ops. // State for handling send_initial_metadata ops.
grpc_linked_mdelem method; grpc_linked_mdelem method;
grpc_linked_mdelem scheme; grpc_linked_mdelem scheme;
@ -216,13 +215,13 @@ static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg,
call_data *calld = (call_data *)elem->call_data; call_data *calld = (call_data *)elem->call_data;
if (error != GRPC_ERROR_NONE) { if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure( grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, calld->send_message_batch, error, calld->call_combiner); exec_ctx, calld->send_message_batch, error);
return; return;
} }
error = pull_slice_from_send_message(exec_ctx, calld); error = pull_slice_from_send_message(exec_ctx, calld);
if (error != GRPC_ERROR_NONE) { if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure( grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, calld->send_message_batch, error, calld->call_combiner); exec_ctx, calld->send_message_batch, error);
return; return;
} }
// There may or may not be more to read, but we don't care. If we got // There may or may not be more to read, but we don't care. If we got
@ -415,7 +414,7 @@ static void hc_start_transport_stream_op_batch(
done: done:
if (error != GRPC_ERROR_NONE) { if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure( grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, calld->send_message_batch, error, calld->call_combiner); exec_ctx, calld->send_message_batch, error);
} else if (!batch_will_be_handled_asynchronously) { } else if (!batch_will_be_handled_asynchronously) {
grpc_call_next_op(exec_ctx, elem, batch); grpc_call_next_op(exec_ctx, elem, batch);
} }
@ -427,7 +426,6 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem, grpc_call_element *elem,
const grpc_call_element_args *args) { const grpc_call_element_args *args) {
call_data *calld = (call_data *)elem->call_data; call_data *calld = (call_data *)elem->call_data;
calld->call_combiner = args->call_combiner;
GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready, GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
recv_initial_metadata_ready, elem, recv_initial_metadata_ready, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
@ -567,5 +565,6 @@ const grpc_channel_filter grpc_http_client_filter = {
sizeof(channel_data), sizeof(channel_data),
init_channel_elem, init_channel_elem,
destroy_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer,
grpc_channel_next_get_info, grpc_channel_next_get_info,
"http-client"}; "http-client"};

@ -35,29 +35,35 @@
#include "src/core/lib/surface/call.h" #include "src/core/lib/surface/call.h"
#include "src/core/lib/transport/static_metadata.h" #include "src/core/lib/transport/static_metadata.h"
typedef enum { #define INITIAL_METADATA_UNSEEN 0
// Initial metadata not yet seen. #define HAS_COMPRESSION_ALGORITHM 2
INITIAL_METADATA_UNSEEN = 0, #define NO_COMPRESSION_ALGORITHM 4
// Initial metadata seen; compression algorithm set.
HAS_COMPRESSION_ALGORITHM, #define CANCELLED_BIT ((gpr_atm)1)
// Initial metadata seen; no compression algorithm set.
NO_COMPRESSION_ALGORITHM,
} initial_metadata_state;
typedef struct call_data { typedef struct call_data {
grpc_call_combiner *call_combiner; grpc_slice_buffer slices; /**< Buffers up input slices to be compressed */
grpc_linked_mdelem compression_algorithm_storage; grpc_linked_mdelem compression_algorithm_storage;
grpc_linked_mdelem stream_compression_algorithm_storage; grpc_linked_mdelem stream_compression_algorithm_storage;
grpc_linked_mdelem accept_encoding_storage; grpc_linked_mdelem accept_encoding_storage;
grpc_linked_mdelem accept_stream_encoding_storage; grpc_linked_mdelem accept_stream_encoding_storage;
uint32_t remaining_slice_bytes;
/** Compression algorithm we'll try to use. It may be given by incoming /** Compression algorithm we'll try to use. It may be given by incoming
* metadata, or by the channel's default compression settings. */ * metadata, or by the channel's default compression settings. */
grpc_compression_algorithm compression_algorithm; grpc_compression_algorithm compression_algorithm;
initial_metadata_state send_initial_metadata_state;
grpc_error *cancel_error; /* Atomic recording the state of initial metadata; allowed values:
grpc_closure start_send_message_batch_in_call_combiner; INITIAL_METADATA_UNSEEN - initial metadata op not seen
HAS_COMPRESSION_ALGORITHM - initial metadata seen; compression algorithm
set
NO_COMPRESSION_ALGORITHM - initial metadata seen; no compression algorithm
set
pointer - a stalled op containing a send_message that's waiting on initial
metadata
pointer | CANCELLED_BIT - request was cancelled with error pointed to */
gpr_atm send_initial_metadata_state;
grpc_transport_stream_op_batch *send_message_batch; grpc_transport_stream_op_batch *send_message_batch;
grpc_slice_buffer slices; /**< Buffers up input slices to be compressed */
grpc_slice_buffer_stream replacement_stream; grpc_slice_buffer_stream replacement_stream;
grpc_closure *original_send_message_on_complete; grpc_closure *original_send_message_on_complete;
grpc_closure send_message_on_complete; grpc_closure send_message_on_complete;
@ -86,13 +92,13 @@ static bool skip_compression(grpc_call_element *elem, uint32_t flags,
channel_data *channeld = elem->channel_data; channel_data *channeld = elem->channel_data;
if (flags & (GRPC_WRITE_NO_COMPRESS | GRPC_WRITE_INTERNAL_COMPRESS)) { if (flags & (GRPC_WRITE_NO_COMPRESS | GRPC_WRITE_INTERNAL_COMPRESS)) {
return true; return 1;
} }
if (has_compression_algorithm) { if (has_compression_algorithm) {
if (calld->compression_algorithm == GRPC_COMPRESS_NONE) { if (calld->compression_algorithm == GRPC_COMPRESS_NONE) {
return true; return 1;
} }
return false; /* we have an actual call-specific algorithm */ return 0; /* we have an actual call-specific algorithm */
} }
/* no per-call compression override */ /* no per-call compression override */
return channeld->default_compression_algorithm == GRPC_COMPRESS_NONE; return channeld->default_compression_algorithm == GRPC_COMPRESS_NONE;
@ -220,18 +226,6 @@ static void send_message_on_complete(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_REF(error)); GRPC_ERROR_REF(error));
} }
static void send_message_batch_continue(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
call_data *calld = (call_data *)elem->call_data;
// Note: The call to grpc_call_next_op() results in yielding the
// call combiner, so we need to clear calld->send_message_batch
// before we do that.
grpc_transport_stream_op_batch *send_message_batch =
calld->send_message_batch;
calld->send_message_batch = NULL;
grpc_call_next_op(exec_ctx, elem, send_message_batch);
}
static void finish_send_message(grpc_exec_ctx *exec_ctx, static void finish_send_message(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) { grpc_call_element *elem) {
call_data *calld = (call_data *)elem->call_data; call_data *calld = (call_data *)elem->call_data;
@ -240,8 +234,8 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer_init(&tmp); grpc_slice_buffer_init(&tmp);
uint32_t send_flags = uint32_t send_flags =
calld->send_message_batch->payload->send_message.send_message->flags; calld->send_message_batch->payload->send_message.send_message->flags;
bool did_compress = grpc_msg_compress(exec_ctx, calld->compression_algorithm, const bool did_compress = grpc_msg_compress(
&calld->slices, &tmp); exec_ctx, calld->compression_algorithm, &calld->slices, &tmp);
if (did_compress) { if (did_compress) {
if (GRPC_TRACER_ON(grpc_compression_trace)) { if (GRPC_TRACER_ON(grpc_compression_trace)) {
char *algo_name; char *algo_name;
@ -279,19 +273,7 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
calld->original_send_message_on_complete = calld->original_send_message_on_complete =
calld->send_message_batch->on_complete; calld->send_message_batch->on_complete;
calld->send_message_batch->on_complete = &calld->send_message_on_complete; calld->send_message_batch->on_complete = &calld->send_message_on_complete;
send_message_batch_continue(exec_ctx, elem); grpc_call_next_op(exec_ctx, elem, calld->send_message_batch);
}
static void fail_send_message_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_error *error) {
call_data *calld = arg;
if (calld->send_message_batch != NULL) {
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, calld->send_message_batch, GRPC_ERROR_REF(error),
calld->call_combiner);
calld->send_message_batch = NULL;
}
} }
// Pulls a slice from the send_message byte stream and adds it to calld->slices. // Pulls a slice from the send_message byte stream and adds it to calld->slices.
@ -311,25 +293,21 @@ static grpc_error *pull_slice_from_send_message(grpc_exec_ctx *exec_ctx,
// If all data has been read, invokes finish_send_message(). Otherwise, // If all data has been read, invokes finish_send_message(). Otherwise,
// an async call to grpc_byte_stream_next() has been started, which will // an async call to grpc_byte_stream_next() has been started, which will
// eventually result in calling on_send_message_next_done(). // eventually result in calling on_send_message_next_done().
static void continue_reading_send_message(grpc_exec_ctx *exec_ctx, static grpc_error *continue_reading_send_message(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) { grpc_call_element *elem) {
call_data *calld = (call_data *)elem->call_data; call_data *calld = (call_data *)elem->call_data;
while (grpc_byte_stream_next( while (grpc_byte_stream_next(
exec_ctx, calld->send_message_batch->payload->send_message.send_message, exec_ctx, calld->send_message_batch->payload->send_message.send_message,
~(size_t)0, &calld->on_send_message_next_done)) { ~(size_t)0, &calld->on_send_message_next_done)) {
grpc_error *error = pull_slice_from_send_message(exec_ctx, calld); grpc_error *error = pull_slice_from_send_message(exec_ctx, calld);
if (error != GRPC_ERROR_NONE) { if (error != GRPC_ERROR_NONE) return error;
// Closure callback; does not take ownership of error.
fail_send_message_batch_in_call_combiner(exec_ctx, calld, error);
GRPC_ERROR_UNREF(error);
return;
}
if (calld->slices.length == if (calld->slices.length ==
calld->send_message_batch->payload->send_message.send_message->length) { calld->send_message_batch->payload->send_message.send_message->length) {
finish_send_message(exec_ctx, elem); finish_send_message(exec_ctx, elem);
break; break;
} }
} }
return GRPC_ERROR_NONE;
} }
// Async callback for grpc_byte_stream_next(). // Async callback for grpc_byte_stream_next().
@ -337,37 +315,46 @@ static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) { grpc_error *error) {
grpc_call_element *elem = (grpc_call_element *)arg; grpc_call_element *elem = (grpc_call_element *)arg;
call_data *calld = (call_data *)elem->call_data; call_data *calld = (call_data *)elem->call_data;
if (error != GRPC_ERROR_NONE) { if (error != GRPC_ERROR_NONE) goto fail;
// Closure callback; does not take ownership of error.
fail_send_message_batch_in_call_combiner(exec_ctx, calld, error);
return;
}
error = pull_slice_from_send_message(exec_ctx, calld); error = pull_slice_from_send_message(exec_ctx, calld);
if (error != GRPC_ERROR_NONE) { if (error != GRPC_ERROR_NONE) goto fail;
// Closure callback; does not take ownership of error.
fail_send_message_batch_in_call_combiner(exec_ctx, calld, error);
GRPC_ERROR_UNREF(error);
return;
}
if (calld->slices.length == if (calld->slices.length ==
calld->send_message_batch->payload->send_message.send_message->length) { calld->send_message_batch->payload->send_message.send_message->length) {
finish_send_message(exec_ctx, elem); finish_send_message(exec_ctx, elem);
} else { } else {
continue_reading_send_message(exec_ctx, elem); // This will either finish reading all of the data and invoke
// finish_send_message(), or else it will make an async call to
// grpc_byte_stream_next(), which will eventually result in calling
// this function again.
error = continue_reading_send_message(exec_ctx, elem);
if (error != GRPC_ERROR_NONE) goto fail;
} }
return;
fail:
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, calld->send_message_batch, error);
} }
static void start_send_message_batch(grpc_exec_ctx *exec_ctx, void *arg, static void start_send_message_batch(grpc_exec_ctx *exec_ctx,
grpc_error *unused) { grpc_call_element *elem,
grpc_call_element *elem = (grpc_call_element *)arg; grpc_transport_stream_op_batch *batch,
bool has_compression_algorithm) {
call_data *calld = (call_data *)elem->call_data; call_data *calld = (call_data *)elem->call_data;
if (skip_compression( if (!skip_compression(elem, batch->payload->send_message.send_message->flags,
elem, has_compression_algorithm)) {
calld->send_message_batch->payload->send_message.send_message->flags, calld->send_message_batch = batch;
calld->send_initial_metadata_state == HAS_COMPRESSION_ALGORITHM)) { // This will either finish reading all of the data and invoke
send_message_batch_continue(exec_ctx, elem); // finish_send_message(), or else it will make an async call to
// grpc_byte_stream_next(), which will eventually result in calling
// on_send_message_next_done().
grpc_error *error = continue_reading_send_message(exec_ctx, elem);
if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, calld->send_message_batch, error);
}
} else { } else {
continue_reading_send_message(exec_ctx, elem); /* pass control down the stack */
grpc_call_next_op(exec_ctx, elem, batch);
} }
} }
@ -375,80 +362,95 @@ static void compress_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem, grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) { grpc_transport_stream_op_batch *batch) {
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
GPR_TIMER_BEGIN("compress_start_transport_stream_op_batch", 0); GPR_TIMER_BEGIN("compress_start_transport_stream_op_batch", 0);
// Handle cancel_stream.
if (batch->cancel_stream) { if (batch->cancel_stream) {
GRPC_ERROR_UNREF(calld->cancel_error); // TODO(roth): As part of the upcoming call combiner work, change
calld->cancel_error = // this to call grpc_byte_stream_shutdown() on the incoming byte
// stream, to cancel any in-flight calls to grpc_byte_stream_next().
GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error); GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
if (calld->send_message_batch != NULL) { gpr_atm cur = gpr_atm_full_xchg(
if (calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN) { &calld->send_initial_metadata_state,
GRPC_CALL_COMBINER_START( CANCELLED_BIT | (gpr_atm)batch->payload->cancel_stream.cancel_error);
exec_ctx, calld->call_combiner, switch (cur) {
GRPC_CLOSURE_CREATE(fail_send_message_batch_in_call_combiner, calld, case HAS_COMPRESSION_ALGORITHM:
grpc_schedule_on_exec_ctx), case NO_COMPRESSION_ALGORITHM:
GRPC_ERROR_REF(calld->cancel_error), "failing send_message op"); case INITIAL_METADATA_UNSEEN:
break;
default:
if ((cur & CANCELLED_BIT) == 0) {
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, (grpc_transport_stream_op_batch *)cur,
GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error));
} else { } else {
grpc_byte_stream_shutdown( GRPC_ERROR_UNREF((grpc_error *)(cur & ~CANCELLED_BIT));
exec_ctx,
calld->send_message_batch->payload->send_message.send_message,
GRPC_ERROR_REF(calld->cancel_error));
} }
break;
} }
} else if (calld->cancel_error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, batch, GRPC_ERROR_REF(calld->cancel_error),
calld->call_combiner);
goto done;
} }
// Handle send_initial_metadata.
if (batch->send_initial_metadata) { if (batch->send_initial_metadata) {
GPR_ASSERT(calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN);
bool has_compression_algorithm; bool has_compression_algorithm;
grpc_error *error = process_send_initial_metadata( grpc_error *error = process_send_initial_metadata(
exec_ctx, elem, exec_ctx, elem,
batch->payload->send_initial_metadata.send_initial_metadata, batch->payload->send_initial_metadata.send_initial_metadata,
&has_compression_algorithm); &has_compression_algorithm);
if (error != GRPC_ERROR_NONE) { if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error, grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch,
calld->call_combiner); error);
goto done; return;
} }
calld->send_initial_metadata_state = has_compression_algorithm gpr_atm cur;
retry_send_im:
cur = gpr_atm_acq_load(&calld->send_initial_metadata_state);
GPR_ASSERT(cur != HAS_COMPRESSION_ALGORITHM &&
cur != NO_COMPRESSION_ALGORITHM);
if ((cur & CANCELLED_BIT) == 0) {
if (!gpr_atm_rel_cas(&calld->send_initial_metadata_state, cur,
has_compression_algorithm
? HAS_COMPRESSION_ALGORITHM ? HAS_COMPRESSION_ALGORITHM
: NO_COMPRESSION_ALGORITHM; : NO_COMPRESSION_ALGORITHM)) {
// If we had previously received a batch containing a send_message op, goto retry_send_im;
// handle it now. Note that we need to re-enter the call combiner }
// for this, since we can't send two batches down while holding the if (cur != INITIAL_METADATA_UNSEEN) {
// call combiner, since the connected_channel filter (at the bottom of start_send_message_batch(exec_ctx, elem,
// the call stack) will release the call combiner for each batch it sees. (grpc_transport_stream_op_batch *)cur,
if (calld->send_message_batch != NULL) { has_compression_algorithm);
GRPC_CALL_COMBINER_START( }
exec_ctx, calld->call_combiner, }
&calld->start_send_message_batch_in_call_combiner, GRPC_ERROR_NONE, }
"starting send_message after send_initial_metadata");
}
}
// Handle send_message.
if (batch->send_message) { if (batch->send_message) {
GPR_ASSERT(calld->send_message_batch == NULL); gpr_atm cur;
calld->send_message_batch = batch; retry_send:
// If we have not yet seen send_initial_metadata, then we have to cur = gpr_atm_acq_load(&calld->send_initial_metadata_state);
// wait. We save the batch in calld and then drop the call switch (cur) {
// combiner, which we'll have to pick up again later when we get case INITIAL_METADATA_UNSEEN:
// send_initial_metadata. if (!gpr_atm_rel_cas(&calld->send_initial_metadata_state, cur,
if (calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN) { (gpr_atm)batch)) {
GRPC_CALL_COMBINER_STOP( goto retry_send;
exec_ctx, calld->call_combiner, }
"send_message batch pending send_initial_metadata"); break;
goto done; case HAS_COMPRESSION_ALGORITHM:
} case NO_COMPRESSION_ALGORITHM:
start_send_message_batch(exec_ctx, elem, GRPC_ERROR_NONE); start_send_message_batch(exec_ctx, elem, batch,
cur == HAS_COMPRESSION_ALGORITHM);
break;
default:
if (cur & CANCELLED_BIT) {
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, batch,
GRPC_ERROR_REF((grpc_error *)(cur & ~CANCELLED_BIT)));
} else { } else {
// Pass control down the stack. /* >1 send_message concurrently */
GPR_UNREACHABLE_CODE(break);
}
}
} else {
/* pass control down the stack */
grpc_call_next_op(exec_ctx, elem, batch); grpc_call_next_op(exec_ctx, elem, batch);
} }
done:
GPR_TIMER_END("compress_start_transport_stream_op_batch", 0); GPR_TIMER_END("compress_start_transport_stream_op_batch", 0);
} }
@ -456,16 +458,16 @@ done:
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem, grpc_call_element *elem,
const grpc_call_element_args *args) { const grpc_call_element_args *args) {
call_data *calld = (call_data *)elem->call_data; /* grab pointers to our data from the call element */
calld->call_combiner = args->call_combiner; call_data *calld = elem->call_data;
calld->cancel_error = GRPC_ERROR_NONE;
/* initialize members */
grpc_slice_buffer_init(&calld->slices); grpc_slice_buffer_init(&calld->slices);
GRPC_CLOSURE_INIT(&calld->start_send_message_batch_in_call_combiner,
start_send_message_batch, elem, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&calld->on_send_message_next_done, GRPC_CLOSURE_INIT(&calld->on_send_message_next_done,
on_send_message_next_done, elem, grpc_schedule_on_exec_ctx); on_send_message_next_done, elem, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&calld->send_message_on_complete, send_message_on_complete, GRPC_CLOSURE_INIT(&calld->send_message_on_complete, send_message_on_complete,
elem, grpc_schedule_on_exec_ctx); elem, grpc_schedule_on_exec_ctx);
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;
} }
@ -473,9 +475,14 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info, const grpc_call_final_info *final_info,
grpc_closure *ignored) { grpc_closure *ignored) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
grpc_slice_buffer_destroy_internal(exec_ctx, &calld->slices); grpc_slice_buffer_destroy_internal(exec_ctx, &calld->slices);
GRPC_ERROR_UNREF(calld->cancel_error); gpr_atm imstate =
gpr_atm_no_barrier_load(&calld->send_initial_metadata_state);
if (imstate & CANCELLED_BIT) {
GRPC_ERROR_UNREF((grpc_error *)(imstate & ~CANCELLED_BIT));
}
} }
/* Constructor for channel_data */ /* Constructor for channel_data */
@ -543,5 +550,6 @@ const grpc_channel_filter grpc_message_compress_filter = {
sizeof(channel_data), sizeof(channel_data),
init_channel_elem, init_channel_elem,
destroy_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer,
grpc_channel_next_get_info, grpc_channel_next_get_info,
"message_compress"}; "compress"};

@ -32,8 +32,6 @@
#define EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1 #define EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1
typedef struct call_data { typedef struct call_data {
grpc_call_combiner *call_combiner;
grpc_linked_mdelem status; grpc_linked_mdelem status;
grpc_linked_mdelem content_type; grpc_linked_mdelem content_type;
@ -283,11 +281,7 @@ static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
*calld->pp_recv_message = calld->payload_bin_delivered *calld->pp_recv_message = calld->payload_bin_delivered
? NULL ? NULL
: (grpc_byte_stream *)&calld->read_stream; : (grpc_byte_stream *)&calld->read_stream;
// Re-enter call combiner for recv_message_ready, since the surface GRPC_CLOSURE_RUN(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
// code will release the call combiner for each callback it receives.
GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
calld->recv_message_ready, GRPC_ERROR_REF(err),
"resuming recv_message_ready from on_complete");
calld->recv_message_ready = NULL; calld->recv_message_ready = NULL;
calld->payload_bin_delivered = true; calld->payload_bin_delivered = true;
} }
@ -299,19 +293,14 @@ static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_call_element *elem = user_data; grpc_call_element *elem = user_data;
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
if (calld->seen_path_with_query) { if (calld->seen_path_with_query) {
// Do nothing. This is probably a GET request, and payload will be /* do nothing. This is probably a GET request, and payload will be returned
// returned in hs_on_complete callback. in hs_on_complete callback. */
// Note that we release the call combiner here, so that other
// callbacks can run.
GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
"pausing recv_message_ready until on_complete");
} else { } else {
GRPC_CLOSURE_RUN(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err)); GRPC_CLOSURE_RUN(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
} }
} }
static grpc_error *hs_mutate_op(grpc_exec_ctx *exec_ctx, static void hs_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_call_element *elem,
grpc_transport_stream_op_batch *op) { grpc_transport_stream_op_batch *op) {
/* grab pointers to our data from the call element */ /* grab pointers to our data from the call element */
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
@ -334,7 +323,10 @@ static grpc_error *hs_mutate_op(grpc_exec_ctx *exec_ctx,
server_filter_outgoing_metadata( server_filter_outgoing_metadata(
exec_ctx, elem, exec_ctx, elem,
op->payload->send_initial_metadata.send_initial_metadata)); op->payload->send_initial_metadata.send_initial_metadata));
if (error != GRPC_ERROR_NONE) return error; if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
return;
}
} }
if (op->recv_initial_metadata) { if (op->recv_initial_metadata) {
@ -367,25 +359,21 @@ static grpc_error *hs_mutate_op(grpc_exec_ctx *exec_ctx,
grpc_error *error = server_filter_outgoing_metadata( grpc_error *error = server_filter_outgoing_metadata(
exec_ctx, elem, exec_ctx, elem,
op->payload->send_trailing_metadata.send_trailing_metadata); op->payload->send_trailing_metadata.send_trailing_metadata);
if (error != GRPC_ERROR_NONE) return error; if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
return;
}
} }
return GRPC_ERROR_NONE;
} }
static void hs_start_transport_stream_op_batch( static void hs_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_exec_ctx *exec_ctx, grpc_call_element *elem, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) { grpc_transport_stream_op_batch *op) {
call_data *calld = elem->call_data; GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
GPR_TIMER_BEGIN("hs_start_transport_stream_op_batch", 0); GPR_TIMER_BEGIN("hs_start_transport_op", 0);
grpc_error *error = hs_mutate_op(exec_ctx, elem, op); hs_mutate_op(exec_ctx, elem, op);
if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error,
calld->call_combiner);
} else {
grpc_call_next_op(exec_ctx, elem, op); grpc_call_next_op(exec_ctx, elem, op);
} GPR_TIMER_END("hs_start_transport_op", 0);
GPR_TIMER_END("hs_start_transport_stream_op_batch", 0);
} }
/* Constructor for call_data */ /* Constructor for call_data */
@ -395,7 +383,6 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
/* grab pointers to our data from the call element */ /* grab pointers to our data from the call element */
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
/* initialize members */ /* initialize members */
calld->call_combiner = args->call_combiner;
GRPC_CLOSURE_INIT(&calld->hs_on_recv, hs_on_recv, elem, GRPC_CLOSURE_INIT(&calld->hs_on_recv, hs_on_recv, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&calld->hs_on_complete, hs_on_complete, elem, GRPC_CLOSURE_INIT(&calld->hs_on_complete, hs_on_complete, elem,
@ -427,7 +414,7 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {} grpc_channel_element *elem) {}
const grpc_channel_filter grpc_http_server_filter = { const grpc_channel_filter grpc_http_server_filter = {
hs_start_transport_stream_op_batch, hs_start_transport_op,
grpc_channel_next_op, grpc_channel_next_op,
sizeof(call_data), sizeof(call_data),
init_call_elem, init_call_elem,
@ -436,5 +423,6 @@ const grpc_channel_filter grpc_http_server_filter = {
sizeof(channel_data), sizeof(channel_data),
init_channel_elem, init_channel_elem,
destroy_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer,
grpc_channel_next_get_info, grpc_channel_next_get_info,
"http-server"}; "http-server"};

@ -223,5 +223,6 @@ const grpc_channel_filter grpc_load_reporting_filter = {
sizeof(channel_data), sizeof(channel_data),
init_channel_elem, init_channel_elem,
destroy_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer,
grpc_channel_next_get_info, grpc_channel_next_get_info,
"load_reporting"}; "load_reporting"};

@ -391,6 +391,7 @@ const grpc_channel_filter grpc_max_age_filter = {
sizeof(channel_data), sizeof(channel_data),
init_channel_elem, init_channel_elem,
destroy_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer,
grpc_channel_next_get_info, grpc_channel_next_get_info,
"max_age"}; "max_age"};

@ -68,7 +68,6 @@ static void* message_size_limits_create_from_json(const grpc_json* json) {
} }
typedef struct call_data { typedef struct call_data {
grpc_call_combiner* call_combiner;
message_size_limits limits; message_size_limits limits;
// Receive closures are chained: we inject this closure as the // Receive closures are chained: we inject this closure as the
// recv_message_ready up-call on transport_stream_op, and remember to // recv_message_ready up-call on transport_stream_op, and remember to
@ -132,8 +131,7 @@ static void start_transport_stream_op_batch(
exec_ctx, op, exec_ctx, op,
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(message_string), grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(message_string),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_RESOURCE_EXHAUSTED), GRPC_STATUS_RESOURCE_EXHAUSTED));
calld->call_combiner);
gpr_free(message_string); gpr_free(message_string);
return; return;
} }
@ -154,7 +152,6 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
const grpc_call_element_args* args) { const grpc_call_element_args* args) {
channel_data* chand = (channel_data*)elem->channel_data; channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data; call_data* calld = (call_data*)elem->call_data;
calld->call_combiner = args->call_combiner;
calld->next_recv_message_ready = NULL; calld->next_recv_message_ready = NULL;
GRPC_CLOSURE_INIT(&calld->recv_message_ready, recv_message_ready, elem, GRPC_CLOSURE_INIT(&calld->recv_message_ready, recv_message_ready, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
@ -262,6 +259,7 @@ const grpc_channel_filter grpc_message_size_filter = {
sizeof(channel_data), sizeof(channel_data),
init_channel_elem, init_channel_elem,
destroy_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer,
grpc_channel_next_get_info, grpc_channel_next_get_info,
"message_size"}; "message_size"};

@ -177,6 +177,7 @@ const grpc_channel_filter grpc_workaround_cronet_compression_filter = {
0, 0,
init_channel_elem, init_channel_elem,
destroy_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer,
grpc_channel_next_get_info, grpc_channel_next_get_info,
"workaround_cronet_compression"}; "workaround_cronet_compression"};

@ -1370,26 +1370,15 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
"send_initial_metadata_finished"); "send_initial_metadata_finished");
} }
} }
if (op_payload->send_initial_metadata.peer_string != NULL) {
gpr_atm_rel_store(op_payload->send_initial_metadata.peer_string,
(gpr_atm)gpr_strdup(t->peer_string));
}
} }
if (op->send_message) { if (op->send_message) {
on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE; on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE;
s->fetching_send_message_finished = add_closure_barrier(op->on_complete); s->fetching_send_message_finished = add_closure_barrier(op->on_complete);
if (s->write_closed) { if (s->write_closed) {
// Return an error unless the client has already received trailing
// metadata from the server, since an application using a
// streaming call might send another message before getting a
// recv_message failure, breaking out of its loop, and then
// starting recv_trailing_metadata.
grpc_chttp2_complete_closure_step( grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->fetching_send_message_finished, exec_ctx, t, s, &s->fetching_send_message_finished,
t->is_client && s->received_trailing_metadata GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
? GRPC_ERROR_NONE
: GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Attempt to send message after stream was closed", "Attempt to send message after stream was closed",
&s->write_closed_error, 1), &s->write_closed_error, 1),
"fetching_send_message_finished"); "fetching_send_message_finished");
@ -1477,10 +1466,6 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
op_payload->recv_initial_metadata.recv_initial_metadata; op_payload->recv_initial_metadata.recv_initial_metadata;
s->trailing_metadata_available = s->trailing_metadata_available =
op_payload->recv_initial_metadata.trailing_metadata_available; op_payload->recv_initial_metadata.trailing_metadata_available;
if (op_payload->recv_initial_metadata.peer_string != NULL) {
gpr_atm_rel_store(op_payload->recv_initial_metadata.peer_string,
(gpr_atm)gpr_strdup(t->peer_string));
}
grpc_chttp2_maybe_complete_recv_initial_metadata(exec_ctx, t, s); grpc_chttp2_maybe_complete_recv_initial_metadata(exec_ctx, t, s);
} }
@ -1839,7 +1824,8 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
} }
} }
} }
if (s->read_closed && s->frame_storage.length == 0 && !pending_data && if (s->read_closed && s->frame_storage.length == 0 &&
(!pending_data || s->seen_error) &&
s->recv_trailing_metadata_finished != NULL) { s->recv_trailing_metadata_finished != NULL) {
grpc_chttp2_incoming_metadata_buffer_publish( grpc_chttp2_incoming_metadata_buffer_publish(
exec_ctx, &s->metadata_buffer[1], s->recv_trailing_metadata); exec_ctx, &s->metadata_buffer[1], s->recv_trailing_metadata);
@ -2945,6 +2931,14 @@ static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "destructive_reclaimer"); GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "destructive_reclaimer");
} }
/*******************************************************************************
* INTEGRATION GLUE
*/
static char *chttp2_get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
return gpr_strdup(((grpc_chttp2_transport *)t)->peer_string);
}
/******************************************************************************* /*******************************************************************************
* MONITORING * MONITORING
*/ */
@ -2962,6 +2956,7 @@ static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream),
perform_transport_op, perform_transport_op,
destroy_stream, destroy_stream,
destroy_transport, destroy_transport,
chttp2_get_peer,
chttp2_get_endpoint}; chttp2_get_endpoint};
grpc_transport *grpc_create_chttp2_transport( grpc_transport *grpc_create_chttp2_transport(

@ -509,8 +509,6 @@ struct grpc_chttp2_stream {
/** Are we buffering writes on this stream? If yes, we won't become writable /** Are we buffering writes on this stream? If yes, we won't become writable
until there's enough queued up in the flow_controlled_buffer */ until there's enough queued up in the flow_controlled_buffer */
bool write_buffering; bool write_buffering;
/** Has trailing metadata been received. */
bool received_trailing_metadata;
/** the error that resulted in this stream being read-closed */ /** the error that resulted in this stream being read-closed */
grpc_error *read_closed_error; grpc_error *read_closed_error;

@ -623,7 +623,6 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
*s->trailing_metadata_available = true; *s->trailing_metadata_available = true;
} }
t->hpack_parser.on_header = on_trailing_header; t->hpack_parser.on_header = on_trailing_header;
s->received_trailing_metadata = true;
} else { } else {
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing initial_metadata")); GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing initial_metadata"));
t->hpack_parser.on_header = on_initial_header; t->hpack_parser.on_header = on_initial_header;
@ -632,7 +631,6 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
case 1: case 1:
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing trailing_metadata")); GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing trailing_metadata"));
t->hpack_parser.on_header = on_trailing_header; t->hpack_parser.on_header = on_trailing_header;
s->received_trailing_metadata = true;
break; break;
case 2: case 2:
gpr_log(GPR_ERROR, "too many header frames received"); gpr_log(GPR_ERROR, "too many header frames received");

@ -1386,6 +1386,10 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {} static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {}
static char *get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
return NULL;
}
static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx, static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx,
grpc_transport *gt) { grpc_transport *gt) {
return NULL; return NULL;
@ -1404,6 +1408,7 @@ static const grpc_transport_vtable grpc_cronet_vtable = {
perform_op, perform_op,
destroy_stream, destroy_stream,
destroy_transport, destroy_transport,
get_peer,
get_endpoint}; get_endpoint};
grpc_transport *grpc_create_cronet_transport(void *engine, const char *target, grpc_transport *grpc_create_cronet_transport(void *engine, const char *target,

@ -1251,14 +1251,20 @@ static void set_pollset_set(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
// Nothing to do here // Nothing to do here
} }
static char *get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
return gpr_strdup("inproc");
}
static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx, grpc_transport *t) { static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
return NULL; return NULL;
} }
static const grpc_transport_vtable inproc_vtable = { static const grpc_transport_vtable inproc_vtable = {
sizeof(inproc_stream), "inproc", init_stream, sizeof(inproc_stream), "inproc",
set_pollset, set_pollset_set, perform_stream_op, init_stream, set_pollset,
perform_transport_op, destroy_stream, destroy_transport, set_pollset_set, perform_stream_op,
perform_transport_op, destroy_stream,
destroy_transport, get_peer,
get_endpoint}; get_endpoint};
/******************************************************************************* /*******************************************************************************

@ -233,10 +233,15 @@ void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack,
void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) { grpc_transport_stream_op_batch *op) {
grpc_call_element *next_elem = elem + 1; grpc_call_element *next_elem = elem + 1;
GRPC_CALL_LOG_OP(GPR_INFO, next_elem, op);
next_elem->filter->start_transport_stream_op_batch(exec_ctx, next_elem, op); next_elem->filter->start_transport_stream_op_batch(exec_ctx, next_elem, op);
} }
char *grpc_call_next_get_peer(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
grpc_call_element *next_elem = elem + 1;
return next_elem->filter->get_peer(exec_ctx, next_elem);
}
void grpc_channel_next_get_info(grpc_exec_ctx *exec_ctx, void grpc_channel_next_get_info(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem, grpc_channel_element *elem,
const grpc_channel_info *channel_info) { const grpc_channel_info *channel_info) {
@ -260,3 +265,12 @@ grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem) {
return (grpc_call_stack *)((char *)(elem)-ROUND_UP_TO_ALIGNMENT_SIZE( return (grpc_call_stack *)((char *)(elem)-ROUND_UP_TO_ALIGNMENT_SIZE(
sizeof(grpc_call_stack))); sizeof(grpc_call_stack)));
} }
void grpc_call_element_signal_error(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_error *error) {
grpc_transport_stream_op_batch *op = grpc_make_transport_stream_op(NULL);
op->cancel_stream = true;
op->payload->cancel_stream.cancel_error = error;
elem->filter->start_transport_stream_op_batch(exec_ctx, elem, op);
}

@ -40,7 +40,6 @@
#include <grpc/support/time.h> #include <grpc/support/time.h>
#include "src/core/lib/debug/trace.h" #include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/polling_entity.h" #include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/support/arena.h" #include "src/core/lib/support/arena.h"
#include "src/core/lib/transport/transport.h" #include "src/core/lib/transport/transport.h"
@ -72,7 +71,6 @@ typedef struct {
gpr_timespec start_time; gpr_timespec start_time;
gpr_timespec deadline; gpr_timespec deadline;
gpr_arena *arena; gpr_arena *arena;
grpc_call_combiner *call_combiner;
} grpc_call_element_args; } grpc_call_element_args;
typedef struct { typedef struct {
@ -152,6 +150,9 @@ typedef struct {
void (*destroy_channel_elem)(grpc_exec_ctx *exec_ctx, void (*destroy_channel_elem)(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem); grpc_channel_element *elem);
/* Implement grpc_call_get_peer() */
char *(*get_peer)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
/* Implement grpc_channel_get_info() */ /* Implement grpc_channel_get_info() */
void (*get_channel_info)(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, void (*get_channel_info)(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
const grpc_channel_info *channel_info); const grpc_channel_info *channel_info);
@ -270,6 +271,8 @@ void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
stack */ stack */
void grpc_channel_next_op(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, void grpc_channel_next_op(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
grpc_transport_op *op); grpc_transport_op *op);
/* Pass through a request to get_peer to the next child element */
char *grpc_call_next_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
/* Pass through a request to get_channel_info() to the next child element */ /* Pass through a request to get_channel_info() to the next child element */
void grpc_channel_next_get_info(grpc_exec_ctx *exec_ctx, void grpc_channel_next_get_info(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem, grpc_channel_element *elem,
@ -285,6 +288,10 @@ void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
grpc_call_element *elem, grpc_call_element *elem,
grpc_transport_stream_op_batch *op); grpc_transport_stream_op_batch *op);
void grpc_call_element_signal_error(grpc_exec_ctx *exec_ctx,
grpc_call_element *cur_elem,
grpc_error *error);
extern grpc_tracer_flag grpc_trace_channel; extern grpc_tracer_flag grpc_trace_channel;
#define GRPC_CALL_LOG_OP(sev, elem, op) \ #define GRPC_CALL_LOG_OP(sev, elem, op) \

@ -36,57 +36,7 @@ typedef struct connected_channel_channel_data {
grpc_transport *transport; grpc_transport *transport;
} channel_data; } channel_data;
typedef struct { typedef struct connected_channel_call_data { void *unused; } call_data;
grpc_closure closure;
grpc_closure *original_closure;
grpc_call_combiner *call_combiner;
const char *reason;
} callback_state;
typedef struct connected_channel_call_data {
grpc_call_combiner *call_combiner;
// Closures used for returning results on the call combiner.
callback_state on_complete[6]; // Max number of pending batches.
callback_state recv_initial_metadata_ready;
callback_state recv_message_ready;
} call_data;
// Trampoline closure: re-runs the intercepted original closure under the
// call combiner, propagating a ref on \a error.  \a arg is the
// callback_state set up by intercept_callback().
static void run_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg,
                                 grpc_error *error) {
  callback_state *state = (callback_state *)arg;
  GRPC_CALL_COMBINER_START(exec_ctx, state->call_combiner,
                           state->original_closure, GRPC_ERROR_REF(error),
                           state->reason);
}
// Variant of run_in_call_combiner() for heap-allocated callback_state
// (used for cancel_stream batches): frees \a arg after dispatching.
static void run_cancel_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg,
                                        grpc_error *error) {
  run_in_call_combiner(exec_ctx, arg, error);
  gpr_free(arg);
}
// Replaces *original_closure with a trampoline that will run the original
// closure under calld's call combiner.  If \a free_when_done is true,
// \a state is heap-allocated and will be freed after the trampoline runs.
// \a reason is a debug label recorded for combiner tracing.
static void intercept_callback(call_data *calld, callback_state *state,
                               bool free_when_done, const char *reason,
                               grpc_closure **original_closure) {
  state->original_closure = *original_closure;
  state->call_combiner = calld->call_combiner;
  state->reason = reason;
  *original_closure = GRPC_CLOSURE_INIT(
      &state->closure,
      free_when_done ? run_cancel_in_call_combiner : run_in_call_combiner,
      state, grpc_schedule_on_exec_ctx);
}
/* Maps a batch to its dedicated on_complete callback_state slot.  Each of
   the six op kinds owns a fixed index in calld->on_complete, so at most one
   batch per kind can be in flight at a time.  An unrecognized batch is a
   programming error. */
static callback_state *get_state_for_batch(
    call_data *calld, grpc_transport_stream_op_batch *batch) {
  int slot = -1;
  if (batch->send_initial_metadata) {
    slot = 0;
  } else if (batch->send_message) {
    slot = 1;
  } else if (batch->send_trailing_metadata) {
    slot = 2;
  } else if (batch->recv_initial_metadata) {
    slot = 3;
  } else if (batch->recv_message) {
    slot = 4;
  } else if (batch->recv_trailing_metadata) {
    slot = 5;
  }
  if (slot >= 0) return &calld->on_complete[slot];
  GPR_UNREACHABLE_CODE(return NULL);
}
/* We perform a small hack to locate transport data alongside the connected /* We perform a small hack to locate transport data alongside the connected
channel data in call allocations, to allow everything to be pulled in minimal channel data in call allocations, to allow everything to be pulled in minimal
@ -99,38 +49,13 @@ static callback_state *get_state_for_batch(
into transport stream operations */ into transport stream operations */
static void con_start_transport_stream_op_batch( static void con_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem, grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) { grpc_transport_stream_op_batch *op) {
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data; channel_data *chand = elem->channel_data;
if (batch->recv_initial_metadata) { GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
callback_state *state = &calld->recv_initial_metadata_ready;
intercept_callback(
calld, state, false, "recv_initial_metadata_ready",
&batch->payload->recv_initial_metadata.recv_initial_metadata_ready);
}
if (batch->recv_message) {
callback_state *state = &calld->recv_message_ready;
intercept_callback(calld, state, false, "recv_message_ready",
&batch->payload->recv_message.recv_message_ready);
}
if (batch->cancel_stream) {
// There can be more than one cancellation batch in flight at any
// given time, so we can't just pick out a fixed index into
// calld->on_complete like we can for the other ops. However,
// cancellation isn't in the fast path, so we just allocate a new
// closure for each one.
callback_state *state = (callback_state *)gpr_malloc(sizeof(*state));
intercept_callback(calld, state, true, "on_complete (cancel_stream)",
&batch->on_complete);
} else {
callback_state *state = get_state_for_batch(calld, batch);
intercept_callback(calld, state, false, "on_complete", &batch->on_complete);
}
grpc_transport_perform_stream_op(exec_ctx, chand->transport, grpc_transport_perform_stream_op(exec_ctx, chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld), TRANSPORT_STREAM_FROM_CALL_DATA(calld), op);
batch);
GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
"passed batch to transport");
} }
static void con_start_transport_op(grpc_exec_ctx *exec_ctx, static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
@ -146,7 +71,6 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
const grpc_call_element_args *args) { const grpc_call_element_args *args) {
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data; channel_data *chand = elem->channel_data;
calld->call_combiner = args->call_combiner;
int r = grpc_transport_init_stream( int r = grpc_transport_init_stream(
exec_ctx, chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld), exec_ctx, chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld),
&args->call_stack->refcount, args->server_transport_data, args->arena); &args->call_stack->refcount, args->server_transport_data, args->arena);
@ -194,6 +118,11 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
} }
} }
static char *con_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
return grpc_transport_get_peer(exec_ctx, chand->transport);
}
/* No-op. */ /* No-op. */
static void con_get_channel_info(grpc_exec_ctx *exec_ctx, static void con_get_channel_info(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem, grpc_channel_element *elem,
@ -209,6 +138,7 @@ const grpc_channel_filter grpc_connected_filter = {
sizeof(channel_data), sizeof(channel_data),
init_channel_elem, init_channel_elem,
destroy_channel_elem, destroy_channel_elem,
con_get_peer,
con_get_channel_info, con_get_channel_info,
"connected", "connected",
}; };

@ -1,180 +0,0 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "src/core/lib/iomgr/call_combiner.h"
#include <grpc/support/log.h>
grpc_tracer_flag grpc_call_combiner_trace =
GRPC_TRACER_INITIALIZER(false, "call_combiner");
/* cancel_state stores a grpc_error* tagged with a 1 in its low bit; strip
   the tag and return the error, or GRPC_ERROR_NONE when no error is set. */
static grpc_error* decode_cancel_state_error(gpr_atm cancel_state) {
  return (cancel_state & 1) ? (grpc_error*)(cancel_state & ~(gpr_atm)1)
                            : GRPC_ERROR_NONE;
}
// Tags \a error with a 1 in the low bit so decode_cancel_state_error()
// can distinguish it from a grpc_closure* stored in the same word.
// Relies on grpc_error* being at least 2-byte aligned.
static gpr_atm encode_cancel_state_error(grpc_error* error) {
  return (gpr_atm)1 | (gpr_atm)error;
}
// Initializes the combiner's closure queue.  Per the header contract, the
// rest of the struct (size, cancel_state) is assumed zero-initialized.
void grpc_call_combiner_init(grpc_call_combiner* call_combiner) {
  gpr_mpscq_init(&call_combiner->queue);
}
// Tears down the queue and releases any cancellation error still stored
// in cancel_state (a no-op unref when no error was recorded).
void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner) {
  gpr_mpscq_destroy(&call_combiner->queue);
  GRPC_ERROR_UNREF(decode_cancel_state_error(call_combiner->cancel_state));
}
#ifndef NDEBUG
#define DEBUG_ARGS , const char *file, int line
#define DEBUG_FMT_STR "%s:%d: "
#define DEBUG_FMT_ARGS , file, line
#else
#define DEBUG_ARGS
#define DEBUG_FMT_STR
#define DEBUG_FMT_ARGS
#endif
/* Begins processing \a closure on \a call_combiner.  Takes ownership of
   \a error.  If the combiner was idle (size was 0), the closure is
   scheduled immediately; otherwise it is queued and will run when a prior
   holder yields via grpc_call_combiner_stop(). */
void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx,
                              grpc_call_combiner* call_combiner,
                              grpc_closure* closure,
                              grpc_error* error DEBUG_ARGS,
                              const char* reason) {
  if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
    gpr_log(GPR_DEBUG,
            "==> grpc_call_combiner_start() [%p] closure=%p [" DEBUG_FMT_STR
            "%s] error=%s",
            call_combiner, closure DEBUG_FMT_ARGS, reason,
            grpc_error_string(error));
  }
  // size counts the closure currently executing plus all queued closures.
  size_t prev_size =
      (size_t)gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)1);
  if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
    gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
            prev_size + 1);
  }
  if (prev_size == 0) {
    if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
      gpr_log(GPR_DEBUG, " EXECUTING IMMEDIATELY");
    }
    // Queue was empty, so execute this closure immediately.
    GRPC_CLOSURE_SCHED(exec_ctx, closure, error);
  } else {
    if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
      // Fixed: was GPR_INFO; every other trace statement in this file logs
      // at GPR_DEBUG, so use the same severity for consistency.
      gpr_log(GPR_DEBUG, " QUEUING");
    }
    // Queue was not empty, so add closure to queue.  Ownership of error is
    // parked in the closure until grpc_call_combiner_stop() dispatches it.
    closure->error_data.error = error;
    gpr_mpscq_push(&call_combiner->queue, (gpr_mpscq_node*)closure);
  }
}
/* Yields the combiner: decrements size and, if other closures are pending,
   pops and schedules the next one.  Must be called exactly once per
   grpc_call_combiner_start() when the started work is finished. */
void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx,
                             grpc_call_combiner* call_combiner DEBUG_ARGS,
                             const char* reason) {
  if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
    gpr_log(GPR_DEBUG,
            "==> grpc_call_combiner_stop() [%p] [" DEBUG_FMT_STR "%s]",
            call_combiner DEBUG_FMT_ARGS, reason);
  }
  size_t prev_size =
      (size_t)gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)-1);
  if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
    gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
            prev_size - 1);
  }
  // A stop without a matching start is a bug.
  GPR_ASSERT(prev_size >= 1);
  if (prev_size > 1) {
    // At least one closure is queued; spin until the queue yields it.
    while (true) {
      if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
        gpr_log(GPR_DEBUG, " checking queue");
      }
      bool empty;
      grpc_closure* closure = (grpc_closure*)gpr_mpscq_pop_and_check_end(
          &call_combiner->queue, &empty);
      if (closure == NULL) {
        // This can happen either due to a race condition within the mpscq
        // code or because of a race with grpc_call_combiner_start().
        if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
          gpr_log(GPR_DEBUG, " queue returned no result; checking again");
        }
        continue;
      }
      if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
        gpr_log(GPR_DEBUG, " EXECUTING FROM QUEUE: closure=%p error=%s",
                closure, grpc_error_string(closure->error_data.error));
      }
      // Hand off the error ref that grpc_call_combiner_start() parked here.
      GRPC_CLOSURE_SCHED(exec_ctx, closure, closure->error_data.error);
      break;
    }
  } else if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
    gpr_log(GPR_DEBUG, " queue empty");
  }
}
/* Registers \a closure to run when the call is cancelled.  cancel_state is
   a tagged word: 0 (nothing set), a grpc_closure* (low bit 0), or a
   grpc_error* (low bit 1, meaning cancellation already happened).  The CAS
   loop resolves the race against grpc_call_combiner_cancel(). */
void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx,
                                             grpc_call_combiner* call_combiner,
                                             grpc_closure* closure) {
  while (true) {
    // Decode original state.
    gpr_atm original_state = gpr_atm_acq_load(&call_combiner->cancel_state);
    grpc_error* original_error = decode_cancel_state_error(original_state);
    // If error is set, invoke the cancellation closure immediately.
    // Otherwise, store the new closure.
    if (original_error != GRPC_ERROR_NONE) {
      GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_REF(original_error));
      break;
    } else {
      if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state,
                           (gpr_atm)closure)) {
        break;
      }
    }
    // cas failed, try again.
  }
}
/* Records cancellation of the call.  Takes ownership of \a error.  If a
   cancellation error was already stored, \a error is dropped; otherwise the
   error is published into cancel_state and any previously registered
   notify_on_cancel closure is scheduled with a ref on the error. */
void grpc_call_combiner_cancel(grpc_exec_ctx* exec_ctx,
                               grpc_call_combiner* call_combiner,
                               grpc_error* error) {
  while (true) {
    gpr_atm original_state = gpr_atm_acq_load(&call_combiner->cancel_state);
    grpc_error* original_error = decode_cancel_state_error(original_state);
    if (original_error != GRPC_ERROR_NONE) {
      // Already cancelled: first cancellation wins; drop our ref.
      GRPC_ERROR_UNREF(error);
      break;
    }
    if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state,
                         encode_cancel_state_error(error))) {
      // Non-zero prior state with low bit clear is a registered closure.
      if (original_state != 0) {
        grpc_closure* notify_on_cancel = (grpc_closure*)original_state;
        if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
          gpr_log(GPR_DEBUG,
                  "call_combiner=%p: scheduling notify_on_cancel callback=%p",
                  call_combiner, notify_on_cancel);
        }
        GRPC_CLOSURE_SCHED(exec_ctx, notify_on_cancel, GRPC_ERROR_REF(error));
      }
      break;
    }
    // cas failed, try again.
  }
}

@ -1,104 +0,0 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_LIB_IOMGR_CALL_COMBINER_H
#define GRPC_CORE_LIB_IOMGR_CALL_COMBINER_H
#include <stddef.h>
#include <grpc/support/atm.h>
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/support/mpscq.h"
// A simple, lock-free mechanism for serializing activity related to a
// single call. This is similar to a combiner but is more lightweight.
//
// It requires the callback (or, in the common case where the callback
// actually kicks off a chain of callbacks, the last callback in that
// chain) to explicitly indicate (by calling GRPC_CALL_COMBINER_STOP())
// when it is done with the action that was kicked off by the original
// callback.
extern grpc_tracer_flag grpc_call_combiner_trace;
// Lock-free serializer for per-call work; see file comment above.
typedef struct {
  gpr_atm size;  // size_t, num closures in queue or currently executing
  // MPSC queue of closures waiting their turn on the combiner.
  gpr_mpscq queue;
  // Either 0 (if not cancelled and no cancellation closure set),
  // a grpc_closure* (if the lowest bit is 0),
  // or a grpc_error* (if the lowest bit is 1).
  gpr_atm cancel_state;
} grpc_call_combiner;
// Assumes memory was initialized to zero.
void grpc_call_combiner_init(grpc_call_combiner* call_combiner);
void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner);
#ifndef NDEBUG
#define GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, closure, error, \
reason) \
grpc_call_combiner_start((exec_ctx), (call_combiner), (closure), (error), \
__FILE__, __LINE__, (reason))
#define GRPC_CALL_COMBINER_STOP(exec_ctx, call_combiner, reason) \
grpc_call_combiner_stop((exec_ctx), (call_combiner), __FILE__, __LINE__, \
(reason))
/// Starts processing \a closure on \a call_combiner.
void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx,
grpc_call_combiner* call_combiner,
grpc_closure* closure, grpc_error* error,
const char* file, int line, const char* reason);
/// Yields the call combiner to the next closure in the queue, if any.
void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx,
grpc_call_combiner* call_combiner,
const char* file, int line, const char* reason);
#else
#define GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, closure, error, \
reason) \
grpc_call_combiner_start((exec_ctx), (call_combiner), (closure), (error), \
(reason))
#define GRPC_CALL_COMBINER_STOP(exec_ctx, call_combiner, reason) \
grpc_call_combiner_stop((exec_ctx), (call_combiner), (reason))
/// Starts processing \a closure on \a call_combiner.
void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx,
grpc_call_combiner* call_combiner,
grpc_closure* closure, grpc_error* error,
const char* reason);
/// Yields the call combiner to the next closure in the queue, if any.
void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx,
grpc_call_combiner* call_combiner,
const char* reason);
#endif
/// Tells \a call_combiner to invoke \a closure when
/// grpc_call_combiner_cancel() is called. If grpc_call_combiner_cancel()
/// was previously called, \a closure will be invoked immediately.
/// If \a closure is NULL, then no closure will be invoked on
/// cancellation; this effectively unregisters the previously set closure.
void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx,
grpc_call_combiner* call_combiner,
grpc_closure* closure);
/// Indicates that the call has been cancelled.
void grpc_call_combiner_cancel(grpc_exec_ctx* exec_ctx,
grpc_call_combiner* call_combiner,
grpc_error* error);
#endif /* GRPC_CORE_LIB_IOMGR_CALL_COMBINER_H */

@ -39,7 +39,6 @@
/* We can have a per-call credentials. */ /* We can have a per-call credentials. */
typedef struct { typedef struct {
grpc_call_combiner *call_combiner;
grpc_call_credentials *creds; grpc_call_credentials *creds;
bool have_host; bool have_host;
bool have_method; bool have_method;
@ -50,11 +49,17 @@ typedef struct {
pollset_set so that work can progress when this call wants work to progress pollset_set so that work can progress when this call wants work to progress
*/ */
grpc_polling_entity *pollent; grpc_polling_entity *pollent;
gpr_atm security_context_set;
gpr_mu security_context_mu;
grpc_credentials_mdelem_array md_array; grpc_credentials_mdelem_array md_array;
grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT]; grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT];
grpc_auth_metadata_context auth_md_context; grpc_auth_metadata_context auth_md_context;
grpc_closure async_cancel_closure; grpc_closure closure;
grpc_closure async_result_closure; // Either 0 (no cancellation and no async operation in flight),
// a grpc_closure* (if the lowest bit is 0),
// or a grpc_error* (if the lowest bit is 1).
gpr_atm cancellation_state;
grpc_closure cancel_closure;
} call_data; } call_data;
/* We can have a per-channel credentials. */ /* We can have a per-channel credentials. */
@ -63,6 +68,43 @@ typedef struct {
grpc_auth_context *auth_context; grpc_auth_context *auth_context;
} channel_data; } channel_data;
// Unpacks the tagged cancellation word into at most one of *error or
// *func; outputs are left untouched when cancel_state is 0.
static void decode_cancel_state(gpr_atm cancel_state, grpc_closure **func,
                                grpc_error **error) {
  // If the lowest bit is 1, the value is a grpc_error*.
  // Otherwise, if non-zero, the value is a grpc_closure*.
  if (cancel_state & 1) {
    *error = (grpc_error *)(cancel_state & ~(gpr_atm)1);
  } else if (cancel_state != 0) {
    *func = (grpc_closure *)cancel_state;
  }
}
static gpr_atm encode_cancel_state_error(grpc_error *error) {
// Set the lowest bit to 1 to indicate that it's an error.
return (gpr_atm)1 | (gpr_atm)error;
}
// Returns an error if the call has been cancelled. Otherwise, sets the
// cancellation function to be called upon cancellation.
// NOTE(review): load-then-store here is not a CAS, so it presumably relies
// on callers serializing calls to set_cancel_func — confirm against callers.
static grpc_error *set_cancel_func(grpc_call_element *elem,
                                   grpc_iomgr_cb_func func) {
  call_data *calld = (call_data *)elem->call_data;
  // Decode original state.
  gpr_atm original_state = gpr_atm_acq_load(&calld->cancellation_state);
  grpc_error *original_error = GRPC_ERROR_NONE;
  grpc_closure *original_func = NULL;
  decode_cancel_state(original_state, &original_func, &original_error);
  // If error is set, return it.
  if (original_error != GRPC_ERROR_NONE) return GRPC_ERROR_REF(original_error);
  // Otherwise, store func.
  GRPC_CLOSURE_INIT(&calld->cancel_closure, func, elem,
                    grpc_schedule_on_exec_ctx);
  // The closure address doubles as the tagged word; its low bit must be 0.
  GPR_ASSERT(((gpr_atm)&calld->cancel_closure & (gpr_atm)1) == 0);
  gpr_atm_rel_store(&calld->cancellation_state,
                    (gpr_atm)&calld->cancel_closure);
  return GRPC_ERROR_NONE;
}
static void reset_auth_metadata_context( static void reset_auth_metadata_context(
grpc_auth_metadata_context *auth_md_context) { grpc_auth_metadata_context *auth_md_context) {
if (auth_md_context->service_url != NULL) { if (auth_md_context->service_url != NULL) {
@ -93,7 +135,6 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *arg,
grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg; grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg;
grpc_call_element *elem = batch->handler_private.extra_arg; grpc_call_element *elem = batch->handler_private.extra_arg;
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, NULL);
reset_auth_metadata_context(&calld->auth_md_context); reset_auth_metadata_context(&calld->auth_md_context);
grpc_error *error = GRPC_ERROR_REF(input_error); grpc_error *error = GRPC_ERROR_REF(input_error);
if (error == GRPC_ERROR_NONE) { if (error == GRPC_ERROR_NONE) {
@ -112,8 +153,7 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *arg,
} else { } else {
error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS, error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_UNAUTHENTICATED); GRPC_STATUS_UNAUTHENTICATED);
grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error, grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error);
calld->call_combiner);
} }
} }
@ -183,8 +223,7 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx,
grpc_error_set_int( grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING( GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Incompatible credentials set on channel and call."), "Incompatible credentials set on channel and call."),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED), GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED));
calld->call_combiner);
return; return;
} }
} else { } else {
@ -195,23 +234,22 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx,
build_auth_metadata_context(&chand->security_connector->base, build_auth_metadata_context(&chand->security_connector->base,
chand->auth_context, calld); chand->auth_context, calld);
grpc_error *cancel_error = set_cancel_func(elem, cancel_get_request_metadata);
if (cancel_error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch,
cancel_error);
return;
}
GPR_ASSERT(calld->pollent != NULL); GPR_ASSERT(calld->pollent != NULL);
GRPC_CLOSURE_INIT(&calld->closure, on_credentials_metadata, batch,
GRPC_CLOSURE_INIT(&calld->async_result_closure, on_credentials_metadata, grpc_schedule_on_exec_ctx);
batch, grpc_schedule_on_exec_ctx);
grpc_error *error = GRPC_ERROR_NONE; grpc_error *error = GRPC_ERROR_NONE;
if (grpc_call_credentials_get_request_metadata( if (grpc_call_credentials_get_request_metadata(
exec_ctx, calld->creds, calld->pollent, calld->auth_md_context, exec_ctx, calld->creds, calld->pollent, calld->auth_md_context,
&calld->md_array, &calld->async_result_closure, &error)) { &calld->md_array, &calld->closure, &error)) {
// Synchronous return; invoke on_credentials_metadata() directly. // Synchronous return; invoke on_credentials_metadata() directly.
on_credentials_metadata(exec_ctx, batch, error); on_credentials_metadata(exec_ctx, batch, error);
GRPC_ERROR_UNREF(error); GRPC_ERROR_UNREF(error);
} else {
// Async return; register cancellation closure with call combiner.
GRPC_CLOSURE_INIT(&calld->async_cancel_closure, cancel_get_request_metadata,
elem, grpc_schedule_on_exec_ctx);
grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner,
&calld->async_cancel_closure);
} }
} }
@ -220,7 +258,7 @@ static void on_host_checked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg; grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg;
grpc_call_element *elem = batch->handler_private.extra_arg; grpc_call_element *elem = batch->handler_private.extra_arg;
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, NULL);
if (error == GRPC_ERROR_NONE) { if (error == GRPC_ERROR_NONE) {
send_security_metadata(exec_ctx, elem, batch); send_security_metadata(exec_ctx, elem, batch);
} else { } else {
@ -233,8 +271,7 @@ static void on_host_checked(grpc_exec_ctx *exec_ctx, void *arg,
exec_ctx, batch, exec_ctx, batch,
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg), grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_UNAUTHENTICATED), GRPC_STATUS_UNAUTHENTICATED));
calld->call_combiner);
gpr_free(error_msg); gpr_free(error_msg);
} }
} }
@ -245,7 +282,7 @@ static void cancel_check_call_host(grpc_exec_ctx *exec_ctx, void *arg,
call_data *calld = (call_data *)elem->call_data; call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data; channel_data *chand = (channel_data *)elem->channel_data;
grpc_channel_security_connector_cancel_check_call_host( grpc_channel_security_connector_cancel_check_call_host(
exec_ctx, chand->security_connector, &calld->async_result_closure, exec_ctx, chand->security_connector, &calld->closure,
GRPC_ERROR_REF(error)); GRPC_ERROR_REF(error));
} }
@ -258,7 +295,36 @@ static void auth_start_transport_stream_op_batch(
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data; channel_data *chand = elem->channel_data;
if (!batch->cancel_stream) { if (batch->cancel_stream) {
while (true) {
// Decode the original cancellation state.
gpr_atm original_state = gpr_atm_acq_load(&calld->cancellation_state);
grpc_error *cancel_error = GRPC_ERROR_NONE;
grpc_closure *func = NULL;
decode_cancel_state(original_state, &func, &cancel_error);
// If we had already set a cancellation error, there's nothing
// more to do.
if (cancel_error != GRPC_ERROR_NONE) break;
// If there's a cancel func, call it.
// Note that even if the cancel func has been changed by some
// other thread between when we decoded it and now, it will just
// be a no-op.
cancel_error = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
if (func != NULL) {
GRPC_CLOSURE_SCHED(exec_ctx, func, GRPC_ERROR_REF(cancel_error));
}
// Encode the new error into cancellation state.
if (gpr_atm_full_cas(&calld->cancellation_state, original_state,
encode_cancel_state_error(cancel_error))) {
break; // Success.
}
// The cas failed, so try again.
}
} else {
/* double checked lock over security context to ensure it's set once */
if (gpr_atm_acq_load(&calld->security_context_set) == 0) {
gpr_mu_lock(&calld->security_context_mu);
if (gpr_atm_acq_load(&calld->security_context_set) == 0) {
GPR_ASSERT(batch->payload->context != NULL); GPR_ASSERT(batch->payload->context != NULL);
if (batch->payload->context[GRPC_CONTEXT_SECURITY].value == NULL) { if (batch->payload->context[GRPC_CONTEXT_SECURITY].value == NULL) {
batch->payload->context[GRPC_CONTEXT_SECURITY].value = batch->payload->context[GRPC_CONTEXT_SECURITY].value =
@ -271,6 +337,10 @@ static void auth_start_transport_stream_op_batch(
GRPC_AUTH_CONTEXT_UNREF(sec_ctx->auth_context, "client auth filter"); GRPC_AUTH_CONTEXT_UNREF(sec_ctx->auth_context, "client auth filter");
sec_ctx->auth_context = sec_ctx->auth_context =
GRPC_AUTH_CONTEXT_REF(chand->auth_context, "client_auth_filter"); GRPC_AUTH_CONTEXT_REF(chand->auth_context, "client_auth_filter");
gpr_atm_rel_store(&calld->security_context_set, 1);
}
gpr_mu_unlock(&calld->security_context_mu);
}
} }
if (batch->send_initial_metadata) { if (batch->send_initial_metadata) {
@ -295,25 +365,26 @@ static void auth_start_transport_stream_op_batch(
} }
} }
if (calld->have_host) { if (calld->have_host) {
batch->handler_private.extra_arg = elem; grpc_error *cancel_error = set_cancel_func(elem, cancel_check_call_host);
GRPC_CLOSURE_INIT(&calld->async_result_closure, on_host_checked, batch, if (cancel_error != GRPC_ERROR_NONE) {
grpc_schedule_on_exec_ctx); grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch,
cancel_error);
} else {
char *call_host = grpc_slice_to_c_string(calld->host); char *call_host = grpc_slice_to_c_string(calld->host);
batch->handler_private.extra_arg = elem;
grpc_error *error = GRPC_ERROR_NONE; grpc_error *error = GRPC_ERROR_NONE;
if (grpc_channel_security_connector_check_call_host( if (grpc_channel_security_connector_check_call_host(
exec_ctx, chand->security_connector, call_host, exec_ctx, chand->security_connector, call_host,
chand->auth_context, &calld->async_result_closure, &error)) { chand->auth_context,
GRPC_CLOSURE_INIT(&calld->closure, on_host_checked, batch,
grpc_schedule_on_exec_ctx),
&error)) {
// Synchronous return; invoke on_host_checked() directly. // Synchronous return; invoke on_host_checked() directly.
on_host_checked(exec_ctx, batch, error); on_host_checked(exec_ctx, batch, error);
GRPC_ERROR_UNREF(error); GRPC_ERROR_UNREF(error);
} else {
// Async return; register cancellation closure with call combiner.
GRPC_CLOSURE_INIT(&calld->async_cancel_closure, cancel_check_call_host,
elem, grpc_schedule_on_exec_ctx);
grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner,
&calld->async_cancel_closure);
} }
gpr_free(call_host); gpr_free(call_host);
}
GPR_TIMER_END("auth_start_transport_stream_op_batch", 0); GPR_TIMER_END("auth_start_transport_stream_op_batch", 0);
return; /* early exit */ return; /* early exit */
} }
@ -329,7 +400,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem, grpc_call_element *elem,
const grpc_call_element_args *args) { const grpc_call_element_args *args) {
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
calld->call_combiner = args->call_combiner; memset(calld, 0, sizeof(*calld));
gpr_mu_init(&calld->security_context_mu);
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;
} }
@ -354,6 +426,12 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_slice_unref_internal(exec_ctx, calld->method); grpc_slice_unref_internal(exec_ctx, calld->method);
} }
reset_auth_metadata_context(&calld->auth_md_context); reset_auth_metadata_context(&calld->auth_md_context);
gpr_mu_destroy(&calld->security_context_mu);
gpr_atm cancel_state = gpr_atm_acq_load(&calld->cancellation_state);
grpc_error *cancel_error = GRPC_ERROR_NONE;
grpc_closure *cancel_func = NULL;
decode_cancel_state(cancel_state, &cancel_func, &cancel_error);
GRPC_ERROR_UNREF(cancel_error);
} }
/* Constructor for channel_data */ /* Constructor for channel_data */
@ -412,5 +490,6 @@ const grpc_channel_filter grpc_client_auth_filter = {
sizeof(channel_data), sizeof(channel_data),
init_channel_elem, init_channel_elem,
destroy_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer,
grpc_channel_next_get_info, grpc_channel_next_get_info,
"client-auth"}; "client-auth"};

@ -26,15 +26,7 @@
#include "src/core/lib/security/transport/auth_filters.h" #include "src/core/lib/security/transport/auth_filters.h"
#include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_internal.h"
typedef enum {
STATE_INIT = 0,
STATE_DONE,
STATE_CANCELLED,
} async_state;
typedef struct call_data { typedef struct call_data {
grpc_call_combiner *call_combiner;
grpc_call_stack *owning_call;
grpc_transport_stream_op_batch *recv_initial_metadata_batch; grpc_transport_stream_op_batch *recv_initial_metadata_batch;
grpc_closure *original_recv_initial_metadata_ready; grpc_closure *original_recv_initial_metadata_ready;
grpc_closure recv_initial_metadata_ready; grpc_closure recv_initial_metadata_ready;
@ -42,8 +34,6 @@ typedef struct call_data {
const grpc_metadata *consumed_md; const grpc_metadata *consumed_md;
size_t num_consumed_md; size_t num_consumed_md;
grpc_auth_context *auth_context; grpc_auth_context *auth_context;
grpc_closure cancel_closure;
gpr_atm state; // async_state
} call_data; } call_data;
typedef struct channel_data { typedef struct channel_data {
@ -88,92 +78,54 @@ static grpc_filtered_mdelem remove_consumed_md(grpc_exec_ctx *exec_ctx,
return GRPC_FILTERED_MDELEM(md); return GRPC_FILTERED_MDELEM(md);
} }
static void on_md_processing_done_inner(grpc_exec_ctx *exec_ctx, /* called from application code */
grpc_call_element *elem, static void on_md_processing_done(
const grpc_metadata *consumed_md, void *user_data, const grpc_metadata *consumed_md, size_t num_consumed_md,
size_t num_consumed_md, const grpc_metadata *response_md, size_t num_response_md,
const grpc_metadata *response_md, grpc_status_code status, const char *error_details) {
size_t num_response_md, grpc_call_element *elem = user_data;
grpc_error *error) {
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
grpc_transport_stream_op_batch *batch = calld->recv_initial_metadata_batch; grpc_transport_stream_op_batch *batch = calld->recv_initial_metadata_batch;
grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, NULL); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
/* TODO(jboeuf): Implement support for response_md. */ /* TODO(jboeuf): Implement support for response_md. */
if (response_md != NULL && num_response_md > 0) { if (response_md != NULL && num_response_md > 0) {
gpr_log(GPR_INFO, gpr_log(GPR_INFO,
"response_md in auth metadata processing not supported for now. " "response_md in auth metadata processing not supported for now. "
"Ignoring..."); "Ignoring...");
} }
if (error == GRPC_ERROR_NONE) { grpc_error *error = GRPC_ERROR_NONE;
if (status == GRPC_STATUS_OK) {
calld->consumed_md = consumed_md; calld->consumed_md = consumed_md;
calld->num_consumed_md = num_consumed_md; calld->num_consumed_md = num_consumed_md;
error = grpc_metadata_batch_filter( error = grpc_metadata_batch_filter(
exec_ctx, batch->payload->recv_initial_metadata.recv_initial_metadata, &exec_ctx, batch->payload->recv_initial_metadata.recv_initial_metadata,
remove_consumed_md, elem, "Response metadata filtering error"); remove_consumed_md, elem, "Response metadata filtering error");
} } else {
GRPC_CLOSURE_SCHED(exec_ctx, calld->original_recv_initial_metadata_ready,
error);
}
// Called from application code.
static void on_md_processing_done(
void *user_data, const grpc_metadata *consumed_md, size_t num_consumed_md,
const grpc_metadata *response_md, size_t num_response_md,
grpc_status_code status, const char *error_details) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
// If the call was not cancelled while we were in flight, process the result.
if (gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT,
(gpr_atm)STATE_DONE)) {
grpc_error *error = GRPC_ERROR_NONE;
if (status != GRPC_STATUS_OK) {
if (error_details == NULL) { if (error_details == NULL) {
error_details = "Authentication metadata processing failed."; error_details = "Authentication metadata processing failed.";
} }
error = grpc_error_set_int( error =
GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_details), grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_details),
GRPC_ERROR_INT_GRPC_STATUS, status); GRPC_ERROR_INT_GRPC_STATUS, status);
} }
on_md_processing_done_inner(&exec_ctx, elem, consumed_md, num_consumed_md,
response_md, num_response_md, error);
}
// Clean up.
for (size_t i = 0; i < calld->md.count; i++) { for (size_t i = 0; i < calld->md.count; i++) {
grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].key); grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].key);
grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].value); grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].value);
} }
grpc_metadata_array_destroy(&calld->md); grpc_metadata_array_destroy(&calld->md);
GRPC_CALL_STACK_UNREF(&exec_ctx, calld->owning_call, "server_auth_metadata"); GRPC_CLOSURE_SCHED(&exec_ctx, calld->original_recv_initial_metadata_ready,
error);
grpc_exec_ctx_finish(&exec_ctx); grpc_exec_ctx_finish(&exec_ctx);
} }
static void cancel_call(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
grpc_call_element *elem = (grpc_call_element *)arg;
call_data *calld = elem->call_data;
// If the result was not already processed, invoke the callback now.
if (gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT,
(gpr_atm)STATE_CANCELLED)) {
on_md_processing_done_inner(exec_ctx, elem, NULL, 0, NULL, 0,
GRPC_ERROR_REF(error));
}
}
static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg, static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) { grpc_error *error) {
grpc_call_element *elem = (grpc_call_element *)arg; grpc_call_element *elem = arg;
channel_data *chand = elem->channel_data; channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
grpc_transport_stream_op_batch *batch = calld->recv_initial_metadata_batch; grpc_transport_stream_op_batch *batch = calld->recv_initial_metadata_batch;
if (error == GRPC_ERROR_NONE) { if (error == GRPC_ERROR_NONE) {
if (chand->creds != NULL && chand->creds->processor.process != NULL) { if (chand->creds != NULL && chand->creds->processor.process != NULL) {
// We're calling out to the application, so we need to make sure
// to drop the call combiner early if we get cancelled.
GRPC_CLOSURE_INIT(&calld->cancel_closure, cancel_call, elem,
grpc_schedule_on_exec_ctx);
grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner,
&calld->cancel_closure);
GRPC_CALL_STACK_REF(calld->owning_call, "server_auth_metadata");
calld->md = metadata_batch_to_md_array( calld->md = metadata_batch_to_md_array(
batch->payload->recv_initial_metadata.recv_initial_metadata); batch->payload->recv_initial_metadata.recv_initial_metadata);
chand->creds->processor.process( chand->creds->processor.process(
@ -207,8 +159,6 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
const grpc_call_element_args *args) { const grpc_call_element_args *args) {
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data; channel_data *chand = elem->channel_data;
calld->call_combiner = args->call_combiner;
calld->owning_call = args->call_stack;
GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready, GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
recv_initial_metadata_ready, elem, recv_initial_metadata_ready, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
@ -268,5 +218,6 @@ const grpc_channel_filter grpc_server_auth_filter = {
sizeof(channel_data), sizeof(channel_data),
init_channel_elem, init_channel_elem,
destroy_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer,
grpc_channel_next_get_info, grpc_channel_next_get_info,
"server-auth"}; "server-auth"};

@ -122,7 +122,6 @@ typedef struct batch_control {
bool is_closure; bool is_closure;
} notify_tag; } notify_tag;
} completion_data; } completion_data;
grpc_closure start_batch;
grpc_closure finish_batch; grpc_closure finish_batch;
gpr_refcount steps_to_complete; gpr_refcount steps_to_complete;
@ -152,7 +151,6 @@ typedef struct {
struct grpc_call { struct grpc_call {
gpr_refcount ext_ref; gpr_refcount ext_ref;
gpr_arena *arena; gpr_arena *arena;
grpc_call_combiner call_combiner;
grpc_completion_queue *cq; grpc_completion_queue *cq;
grpc_polling_entity pollent; grpc_polling_entity pollent;
grpc_channel *channel; grpc_channel *channel;
@ -186,11 +184,6 @@ struct grpc_call {
Element 0 is initial metadata, element 1 is trailing metadata. */ Element 0 is initial metadata, element 1 is trailing metadata. */
grpc_metadata_array *buffered_metadata[2]; grpc_metadata_array *buffered_metadata[2];
grpc_metadata compression_md;
// A char* indicating the peer name.
gpr_atm peer_string;
/* Packed received call statuses from various sources */ /* Packed received call statuses from various sources */
gpr_atm status[STATUS_SOURCE_COUNT]; gpr_atm status[STATUS_SOURCE_COUNT];
@ -269,9 +262,8 @@ grpc_tracer_flag grpc_compression_trace =
#define CALL_FROM_TOP_ELEM(top_elem) \ #define CALL_FROM_TOP_ELEM(top_elem) \
CALL_FROM_CALL_STACK(grpc_call_stack_from_top_element(top_elem)) CALL_FROM_CALL_STACK(grpc_call_stack_from_top_element(top_elem))
static void execute_batch(grpc_exec_ctx *exec_ctx, grpc_call *call, static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_transport_stream_op_batch *op, grpc_transport_stream_op_batch *op);
grpc_closure *start_batch_closure);
static void cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c, static void cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
status_source source, grpc_status_code status, status_source source, grpc_status_code status,
const char *description); const char *description);
@ -336,7 +328,6 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
sizeof(grpc_call) + channel_stack->call_stack_size); sizeof(grpc_call) + channel_stack->call_stack_size);
gpr_ref_init(&call->ext_ref, 1); gpr_ref_init(&call->ext_ref, 1);
call->arena = arena; call->arena = arena;
grpc_call_combiner_init(&call->call_combiner);
*out_call = call; *out_call = call;
call->channel = args->channel; call->channel = args->channel;
call->cq = args->cq; call->cq = args->cq;
@ -445,8 +436,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
.path = path, .path = path,
.start_time = call->start_time, .start_time = call->start_time,
.deadline = send_deadline, .deadline = send_deadline,
.arena = call->arena, .arena = call->arena};
.call_combiner = &call->call_combiner};
add_init_error(&error, grpc_call_stack_init(exec_ctx, channel_stack, 1, add_init_error(&error, grpc_call_stack_init(exec_ctx, channel_stack, 1,
destroy_call, call, &call_args)); destroy_call, call, &call_args));
if (error != GRPC_ERROR_NONE) { if (error != GRPC_ERROR_NONE) {
@ -513,8 +503,6 @@ static void release_call(grpc_exec_ctx *exec_ctx, void *call,
grpc_error *error) { grpc_error *error) {
grpc_call *c = call; grpc_call *c = call;
grpc_channel *channel = c->channel; grpc_channel *channel = c->channel;
grpc_call_combiner_destroy(&c->call_combiner);
gpr_free((char *)c->peer_string);
grpc_channel_update_call_size_estimate(channel, gpr_arena_destroy(c->arena)); grpc_channel_update_call_size_estimate(channel, gpr_arena_destroy(c->arena));
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, "call"); GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, "call");
} }
@ -614,37 +602,30 @@ grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) {
return GRPC_CALL_OK; return GRPC_CALL_OK;
} }
// This is called via the call combiner to start sending a batch down static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
// the filter stack. grpc_transport_stream_op_batch *op) {
static void execute_batch_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg, grpc_call_element *elem;
grpc_error *ignored) {
grpc_transport_stream_op_batch *batch = arg; GPR_TIMER_BEGIN("execute_op", 0);
grpc_call *call = batch->handler_private.extra_arg; elem = CALL_ELEM_FROM_CALL(call, 0);
GPR_TIMER_BEGIN("execute_batch", 0); elem->filter->start_transport_stream_op_batch(exec_ctx, elem, op);
grpc_call_element *elem = CALL_ELEM_FROM_CALL(call, 0); GPR_TIMER_END("execute_op", 0);
GRPC_CALL_LOG_OP(GPR_INFO, elem, batch);
elem->filter->start_transport_stream_op_batch(exec_ctx, elem, batch);
GPR_TIMER_END("execute_batch", 0);
}
// start_batch_closure points to a caller-allocated closure to be used
// for entering the call combiner.
static void execute_batch(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_transport_stream_op_batch *batch,
grpc_closure *start_batch_closure) {
batch->handler_private.extra_arg = call;
GRPC_CLOSURE_INIT(start_batch_closure, execute_batch_in_call_combiner, batch,
grpc_schedule_on_exec_ctx);
GRPC_CALL_COMBINER_START(exec_ctx, &call->call_combiner, start_batch_closure,
GRPC_ERROR_NONE, "executing batch");
} }
char *grpc_call_get_peer(grpc_call *call) { char *grpc_call_get_peer(grpc_call *call) {
char *peer_string = (char *)gpr_atm_acq_load(&call->peer_string); grpc_call_element *elem = CALL_ELEM_FROM_CALL(call, 0);
if (peer_string != NULL) return gpr_strdup(peer_string); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
peer_string = grpc_channel_get_target(call->channel); char *result;
if (peer_string != NULL) return peer_string; GRPC_API_TRACE("grpc_call_get_peer(%p)", 1, (call));
return gpr_strdup("unknown"); result = elem->filter->get_peer(&exec_ctx, elem);
if (result == NULL) {
result = grpc_channel_get_target(call->channel);
}
if (result == NULL) {
result = gpr_strdup("unknown");
}
grpc_exec_ctx_finish(&exec_ctx);
return result;
} }
grpc_call *grpc_call_from_top_element(grpc_call_element *elem) { grpc_call *grpc_call_from_top_element(grpc_call_element *elem) {
@ -671,41 +652,20 @@ grpc_call_error grpc_call_cancel_with_status(grpc_call *c,
return GRPC_CALL_OK; return GRPC_CALL_OK;
} }
typedef struct { static void done_termination(grpc_exec_ctx *exec_ctx, void *call,
grpc_call *call;
grpc_closure start_batch;
grpc_closure finish_batch;
} cancel_state;
// The on_complete callback used when sending a cancel_stream batch down
// the filter stack. Yields the call combiner when the batch is done.
static void done_termination(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) { grpc_error *error) {
cancel_state *state = (cancel_state *)arg; GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "termination");
GRPC_CALL_COMBINER_STOP(exec_ctx, &state->call->call_combiner,
"on_complete for cancel_stream op");
GRPC_CALL_INTERNAL_UNREF(exec_ctx, state->call, "termination");
gpr_free(state);
} }
static void cancel_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c, static void cancel_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c,
status_source source, grpc_error *error) { status_source source, grpc_error *error) {
GRPC_CALL_INTERNAL_REF(c, "termination"); GRPC_CALL_INTERNAL_REF(c, "termination");
// Inform the call combiner of the cancellation, so that it can cancel
// any in-flight asynchronous actions that may be holding the call
// combiner. This ensures that the cancel_stream batch can be sent
// down the filter stack in a timely manner.
grpc_call_combiner_cancel(exec_ctx, &c->call_combiner, GRPC_ERROR_REF(error));
set_status_from_error(exec_ctx, c, source, GRPC_ERROR_REF(error)); set_status_from_error(exec_ctx, c, source, GRPC_ERROR_REF(error));
cancel_state *state = (cancel_state *)gpr_malloc(sizeof(*state)); grpc_transport_stream_op_batch *op = grpc_make_transport_stream_op(
state->call = c; GRPC_CLOSURE_CREATE(done_termination, c, grpc_schedule_on_exec_ctx));
GRPC_CLOSURE_INIT(&state->finish_batch, done_termination, state,
grpc_schedule_on_exec_ctx);
grpc_transport_stream_op_batch *op =
grpc_make_transport_stream_op(&state->finish_batch);
op->cancel_stream = true; op->cancel_stream = true;
op->payload->cancel_stream.cancel_error = error; op->payload->cancel_stream.cancel_error = error;
execute_batch(exec_ctx, c, op, &state->start_batch); execute_op(exec_ctx, c, op);
} }
static grpc_error *error_from_status(grpc_status_code status, static grpc_error *error_from_status(grpc_status_code status,
@ -1471,18 +1431,6 @@ static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
} }
} }
// The recv_message_ready callback used when sending a batch containing
// a recv_message op down the filter stack. Yields the call combiner
// before processing the received message.
static void receiving_stream_ready_in_call_combiner(grpc_exec_ctx *exec_ctx,
void *bctlp,
grpc_error *error) {
batch_control *bctl = bctlp;
grpc_call *call = bctl->call;
GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, "recv_message_ready");
receiving_stream_ready(exec_ctx, bctlp, error);
}
static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx, static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
batch_control *bctl) { batch_control *bctl) {
grpc_call *call = bctl->call; grpc_call *call = bctl->call;
@ -1589,9 +1537,6 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
batch_control *bctl = bctlp; batch_control *bctl = bctlp;
grpc_call *call = bctl->call; grpc_call *call = bctl->call;
GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner,
"recv_initial_metadata_ready");
add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false); add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false);
if (error == GRPC_ERROR_NONE) { if (error == GRPC_ERROR_NONE) {
grpc_metadata_batch *md = grpc_metadata_batch *md =
@ -1645,8 +1590,7 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp, static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error) { grpc_error *error) {
batch_control *bctl = bctlp; batch_control *bctl = bctlp;
grpc_call *call = bctl->call;
GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, "on_complete");
add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false); add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false);
finish_batch_step(exec_ctx, bctl); finish_batch_step(exec_ctx, bctl);
} }
@ -1666,6 +1610,9 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
int num_completion_callbacks_needed = 1; int num_completion_callbacks_needed = 1;
grpc_call_error error = GRPC_CALL_OK; grpc_call_error error = GRPC_CALL_OK;
// sent_initial_metadata guards against variable reuse.
grpc_metadata compression_md;
GPR_TIMER_BEGIN("grpc_call_start_batch", 0); GPR_TIMER_BEGIN("grpc_call_start_batch", 0);
GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, notify_tag); GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, notify_tag);
@ -1713,7 +1660,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
goto done_with_error; goto done_with_error;
} }
/* process compression level */ /* process compression level */
memset(&call->compression_md, 0, sizeof(call->compression_md)); memset(&compression_md, 0, sizeof(compression_md));
size_t additional_metadata_count = 0; size_t additional_metadata_count = 0;
grpc_compression_level effective_compression_level = grpc_compression_level effective_compression_level =
GRPC_COMPRESS_LEVEL_NONE; GRPC_COMPRESS_LEVEL_NONE;
@ -1751,9 +1698,9 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
const grpc_stream_compression_algorithm calgo = const grpc_stream_compression_algorithm calgo =
stream_compression_algorithm_for_level_locked( stream_compression_algorithm_for_level_locked(
call, effective_stream_compression_level); call, effective_stream_compression_level);
call->compression_md.key = compression_md.key =
GRPC_MDSTR_GRPC_INTERNAL_STREAM_ENCODING_REQUEST; GRPC_MDSTR_GRPC_INTERNAL_STREAM_ENCODING_REQUEST;
call->compression_md.value = compression_md.value =
grpc_stream_compression_algorithm_slice(calgo); grpc_stream_compression_algorithm_slice(calgo);
} else { } else {
const grpc_compression_algorithm calgo = const grpc_compression_algorithm calgo =
@ -1761,10 +1708,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
call, effective_compression_level); call, effective_compression_level);
/* the following will be picked up by the compress filter and used /* the following will be picked up by the compress filter and used
* as the call's compression algorithm. */ * as the call's compression algorithm. */
call->compression_md.key = compression_md.key = GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST;
GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST; compression_md.value = grpc_compression_algorithm_slice(calgo);
call->compression_md.value =
grpc_compression_algorithm_slice(calgo);
additional_metadata_count++; additional_metadata_count++;
} }
} }
@ -1779,7 +1724,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
if (!prepare_application_metadata( if (!prepare_application_metadata(
exec_ctx, call, (int)op->data.send_initial_metadata.count, exec_ctx, call, (int)op->data.send_initial_metadata.count,
op->data.send_initial_metadata.metadata, 0, call->is_client, op->data.send_initial_metadata.metadata, 0, call->is_client,
&call->compression_md, (int)additional_metadata_count)) { &compression_md, (int)additional_metadata_count)) {
error = GRPC_CALL_ERROR_INVALID_METADATA; error = GRPC_CALL_ERROR_INVALID_METADATA;
goto done_with_error; goto done_with_error;
} }
@ -1791,10 +1736,6 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
&call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */]; &call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */];
stream_op_payload->send_initial_metadata.send_initial_metadata_flags = stream_op_payload->send_initial_metadata.send_initial_metadata_flags =
op->flags; op->flags;
if (call->is_client) {
stream_op_payload->send_initial_metadata.peer_string =
&call->peer_string;
}
break; break;
case GRPC_OP_SEND_MESSAGE: case GRPC_OP_SEND_MESSAGE:
if (!are_write_flags_valid(op->flags)) { if (!are_write_flags_valid(op->flags)) {
@ -1927,10 +1868,6 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
&call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */]; &call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
stream_op_payload->recv_initial_metadata.recv_initial_metadata_ready = stream_op_payload->recv_initial_metadata.recv_initial_metadata_ready =
&call->receiving_initial_metadata_ready; &call->receiving_initial_metadata_ready;
if (!call->is_client) {
stream_op_payload->recv_initial_metadata.peer_string =
&call->peer_string;
}
num_completion_callbacks_needed++; num_completion_callbacks_needed++;
break; break;
case GRPC_OP_RECV_MESSAGE: case GRPC_OP_RECV_MESSAGE:
@ -1947,9 +1884,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op->recv_message = true; stream_op->recv_message = true;
call->receiving_buffer = op->data.recv_message.recv_message; call->receiving_buffer = op->data.recv_message.recv_message;
stream_op_payload->recv_message.recv_message = &call->receiving_stream; stream_op_payload->recv_message.recv_message = &call->receiving_stream;
GRPC_CLOSURE_INIT(&call->receiving_stream_ready, GRPC_CLOSURE_INIT(&call->receiving_stream_ready, receiving_stream_ready,
receiving_stream_ready_in_call_combiner, bctl, bctl, grpc_schedule_on_exec_ctx);
grpc_schedule_on_exec_ctx);
stream_op_payload->recv_message.recv_message_ready = stream_op_payload->recv_message.recv_message_ready =
&call->receiving_stream_ready; &call->receiving_stream_ready;
num_completion_callbacks_needed++; num_completion_callbacks_needed++;
@ -2019,7 +1955,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op->on_complete = &bctl->finish_batch; stream_op->on_complete = &bctl->finish_batch;
gpr_atm_rel_store(&call->any_ops_sent_atm, 1); gpr_atm_rel_store(&call->any_ops_sent_atm, 1);
execute_batch(exec_ctx, call, stream_op, &bctl->start_batch); execute_op(exec_ctx, call, stream_op);
done: done:
GPR_TIMER_END("grpc_call_start_batch", 0); GPR_TIMER_END("grpc_call_start_batch", 0);

@ -31,7 +31,6 @@
#include "src/core/lib/debug/stats.h" #include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/trace.h" #include "src/core/lib/debug/trace.h"
#include "src/core/lib/http/parser.h" #include "src/core/lib/http/parser.h"
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/combiner.h" #include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/executor.h" #include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr.h" #include "src/core/lib/iomgr/iomgr.h"
@ -130,7 +129,6 @@ void grpc_init(void) {
grpc_register_tracer(&grpc_trace_channel_stack_builder); grpc_register_tracer(&grpc_trace_channel_stack_builder);
grpc_register_tracer(&grpc_http1_trace); grpc_register_tracer(&grpc_http1_trace);
grpc_register_tracer(&grpc_cq_pluck_trace); // default on grpc_register_tracer(&grpc_cq_pluck_trace); // default on
grpc_register_tracer(&grpc_call_combiner_trace);
grpc_register_tracer(&grpc_combiner_trace); grpc_register_tracer(&grpc_combiner_trace);
grpc_register_tracer(&grpc_server_channel_trace); grpc_register_tracer(&grpc_server_channel_trace);
grpc_register_tracer(&grpc_bdp_estimator_trace); grpc_register_tracer(&grpc_bdp_estimator_trace);

@ -40,7 +40,6 @@ namespace grpc_core {
namespace { namespace {
struct CallData { struct CallData {
grpc_call_combiner *call_combiner;
grpc_linked_mdelem status; grpc_linked_mdelem status;
grpc_linked_mdelem details; grpc_linked_mdelem details;
grpc_core::atomic<bool> filled_metadata; grpc_core::atomic<bool> filled_metadata;
@ -53,14 +52,14 @@ struct ChannelData {
static void fill_metadata(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, static void fill_metadata(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_metadata_batch *mdb) { grpc_metadata_batch *mdb) {
CallData *calld = reinterpret_cast<CallData *>(elem->call_data); CallData *calld = static_cast<CallData *>(elem->call_data);
bool expected = false; bool expected = false;
if (!calld->filled_metadata.compare_exchange_strong( if (!calld->filled_metadata.compare_exchange_strong(
expected, true, grpc_core::memory_order_relaxed, expected, true, grpc_core::memory_order_relaxed,
grpc_core::memory_order_relaxed)) { grpc_core::memory_order_relaxed)) {
return; return;
} }
ChannelData *chand = reinterpret_cast<ChannelData *>(elem->channel_data); ChannelData *chand = static_cast<ChannelData *>(elem->channel_data);
char tmp[GPR_LTOA_MIN_BUFSIZE]; char tmp[GPR_LTOA_MIN_BUFSIZE];
gpr_ltoa(chand->error_code, tmp); gpr_ltoa(chand->error_code, tmp);
calld->status.md = grpc_mdelem_from_slices( calld->status.md = grpc_mdelem_from_slices(
@ -80,7 +79,6 @@ static void fill_metadata(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
static void lame_start_transport_stream_op_batch( static void lame_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem, grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) { grpc_transport_stream_op_batch *op) {
CallData *calld = reinterpret_cast<CallData *>(elem->call_data);
if (op->recv_initial_metadata) { if (op->recv_initial_metadata) {
fill_metadata(exec_ctx, elem, fill_metadata(exec_ctx, elem,
op->payload->recv_initial_metadata.recv_initial_metadata); op->payload->recv_initial_metadata.recv_initial_metadata);
@ -89,8 +87,12 @@ static void lame_start_transport_stream_op_batch(
op->payload->recv_trailing_metadata.recv_trailing_metadata); op->payload->recv_trailing_metadata.recv_trailing_metadata);
} }
grpc_transport_stream_op_batch_finish_with_failure( grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, op, GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"), exec_ctx, op,
calld->call_combiner); GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"));
}
static char *lame_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
return NULL;
} }
static void lame_get_channel_info(grpc_exec_ctx *exec_ctx, static void lame_get_channel_info(grpc_exec_ctx *exec_ctx,
@ -120,8 +122,6 @@ static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem, grpc_call_element *elem,
const grpc_call_element_args *args) { const grpc_call_element_args *args) {
CallData *calld = reinterpret_cast<CallData *>(elem->call_data);
calld->call_combiner = args->call_combiner;
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;
} }
@ -156,6 +156,7 @@ extern "C" const grpc_channel_filter grpc_lame_filter = {
sizeof(grpc_core::ChannelData), sizeof(grpc_core::ChannelData),
grpc_core::init_channel_elem, grpc_core::init_channel_elem,
grpc_core::destroy_channel_elem, grpc_core::destroy_channel_elem,
grpc_core::lame_get_peer,
grpc_core::lame_get_channel_info, grpc_core::lame_get_channel_info,
"lame-client", "lame-client",
}; };
@ -175,7 +176,7 @@ grpc_channel *grpc_lame_client_channel_create(const char *target,
"error_message=%s)", "error_message=%s)",
3, (target, (int)error_code, error_message)); 3, (target, (int)error_code, error_message));
GPR_ASSERT(elem->filter == &grpc_lame_filter); GPR_ASSERT(elem->filter == &grpc_lame_filter);
auto chand = reinterpret_cast<grpc_core::ChannelData *>(elem->channel_data); auto chand = static_cast<grpc_core::ChannelData *>(elem->channel_data);
chand->error_code = error_code; chand->error_code = error_code;
chand->error_message = error_message; chand->error_message = error_message;
grpc_exec_ctx_finish(&exec_ctx); grpc_exec_ctx_finish(&exec_ctx);

@ -789,6 +789,7 @@ static void server_mutate_op(grpc_call_element *elem,
static void server_start_transport_stream_op_batch( static void server_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem, grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) { grpc_transport_stream_op_batch *op) {
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
server_mutate_op(elem, op); server_mutate_op(elem, op);
grpc_call_next_op(exec_ctx, elem, op); grpc_call_next_op(exec_ctx, elem, op);
} }
@ -961,6 +962,7 @@ const grpc_channel_filter grpc_server_top_filter = {
sizeof(channel_data), sizeof(channel_data),
init_channel_elem, init_channel_elem,
destroy_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer,
grpc_channel_next_get_info, grpc_channel_next_get_info,
"server", "server",
}; };

@ -85,6 +85,7 @@ static void slice_buffer_stream_shutdown(grpc_exec_ctx *exec_ctx,
static void slice_buffer_stream_destroy(grpc_exec_ctx *exec_ctx, static void slice_buffer_stream_destroy(grpc_exec_ctx *exec_ctx,
grpc_byte_stream *byte_stream) { grpc_byte_stream *byte_stream) {
grpc_slice_buffer_stream *stream = (grpc_slice_buffer_stream *)byte_stream; grpc_slice_buffer_stream *stream = (grpc_slice_buffer_stream *)byte_stream;
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, stream->backing_buffer);
GRPC_ERROR_UNREF(stream->shutdown_error); GRPC_ERROR_UNREF(stream->shutdown_error);
} }

@ -81,7 +81,9 @@ void grpc_byte_stream_destroy(grpc_exec_ctx *exec_ctx,
// grpc_slice_buffer_stream // grpc_slice_buffer_stream
// //
// A grpc_byte_stream that wraps a slice buffer. // A grpc_byte_stream that wraps a slice buffer. The stream takes
// ownership of the slices in the buffer, and on destruction will
// reset the contents of the buffer.
typedef struct grpc_slice_buffer_stream { typedef struct grpc_slice_buffer_stream {
grpc_byte_stream base; grpc_byte_stream base;

@ -197,6 +197,11 @@ void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx,
then_schedule_closure); then_schedule_closure);
} }
char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
grpc_transport *transport) {
return transport->vtable->get_peer(exec_ctx, transport);
}
grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx, grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
grpc_transport *transport) { grpc_transport *transport) {
return transport->vtable->get_endpoint(exec_ctx, transport); return transport->vtable->get_endpoint(exec_ctx, transport);
@ -209,24 +214,24 @@ grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
// is a function that must always unref cancel_error // is a function that must always unref cancel_error
// though it lives in lib, it handles transport stream ops sure // though it lives in lib, it handles transport stream ops sure
// it's grpc_transport_stream_op_batch_finish_with_failure // it's grpc_transport_stream_op_batch_finish_with_failure
void grpc_transport_stream_op_batch_finish_with_failure( void grpc_transport_stream_op_batch_finish_with_failure(
grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *batch, grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *batch,
grpc_error *error, grpc_call_combiner *call_combiner) { grpc_error *error) {
if (batch->send_message) { if (batch->send_message) {
grpc_byte_stream_destroy(exec_ctx, grpc_byte_stream_destroy(exec_ctx,
batch->payload->send_message.send_message); batch->payload->send_message.send_message);
} }
if (batch->recv_message) { if (batch->recv_message) {
GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, GRPC_CLOSURE_SCHED(exec_ctx,
batch->payload->recv_message.recv_message_ready, batch->payload->recv_message.recv_message_ready,
GRPC_ERROR_REF(error), GRPC_ERROR_REF(error));
"failing recv_message_ready");
} }
if (batch->recv_initial_metadata) { if (batch->recv_initial_metadata) {
GRPC_CALL_COMBINER_START( GRPC_CLOSURE_SCHED(
exec_ctx, call_combiner, exec_ctx,
batch->payload->recv_initial_metadata.recv_initial_metadata_ready, batch->payload->recv_initial_metadata.recv_initial_metadata_ready,
GRPC_ERROR_REF(error), "failing recv_initial_metadata_ready"); GRPC_ERROR_REF(error));
} }
GRPC_CLOSURE_SCHED(exec_ctx, batch->on_complete, error); GRPC_CLOSURE_SCHED(exec_ctx, batch->on_complete, error);
if (batch->cancel_stream) { if (batch->cancel_stream) {

@ -22,7 +22,6 @@
#include <stddef.h> #include <stddef.h>
#include "src/core/lib/channel/context.h" #include "src/core/lib/channel/context.h"
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/endpoint.h" #include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/polling_entity.h" #include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/pollset.h" #include "src/core/lib/iomgr/pollset.h"
@ -153,9 +152,6 @@ struct grpc_transport_stream_op_batch_payload {
/** Iff send_initial_metadata != NULL, flags associated with /** Iff send_initial_metadata != NULL, flags associated with
send_initial_metadata: a bitfield of GRPC_INITIAL_METADATA_xxx */ send_initial_metadata: a bitfield of GRPC_INITIAL_METADATA_xxx */
uint32_t send_initial_metadata_flags; uint32_t send_initial_metadata_flags;
// If non-NULL, will be set by the transport to the peer string
// (a char*, which the caller takes ownership of).
gpr_atm *peer_string;
} send_initial_metadata; } send_initial_metadata;
struct { struct {
@ -180,9 +176,6 @@ struct grpc_transport_stream_op_batch_payload {
// immediately available. This may be a signal that we received a // immediately available. This may be a signal that we received a
// Trailers-Only response. // Trailers-Only response.
bool *trailing_metadata_available; bool *trailing_metadata_available;
// If non-NULL, will be set by the transport to the peer string
// (a char*, which the caller takes ownership of).
gpr_atm *peer_string;
} recv_initial_metadata; } recv_initial_metadata;
struct { struct {
@ -300,7 +293,7 @@ void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx,
void grpc_transport_stream_op_batch_finish_with_failure( void grpc_transport_stream_op_batch_finish_with_failure(
grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *op, grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *op,
grpc_error *error, grpc_call_combiner *call_combiner); grpc_error *error);
char *grpc_transport_stream_op_batch_string(grpc_transport_stream_op_batch *op); char *grpc_transport_stream_op_batch_string(grpc_transport_stream_op_batch *op);
char *grpc_transport_op_string(grpc_transport_op *op); char *grpc_transport_op_string(grpc_transport_op *op);
@ -339,6 +332,10 @@ void grpc_transport_close(grpc_transport *transport);
/* Destroy the transport */ /* Destroy the transport */
void grpc_transport_destroy(grpc_exec_ctx *exec_ctx, grpc_transport *transport); void grpc_transport_destroy(grpc_exec_ctx *exec_ctx, grpc_transport *transport);
/* Get the transports peer */
char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
grpc_transport *transport);
/* Get the endpoint used by \a transport */ /* Get the endpoint used by \a transport */
grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx, grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
grpc_transport *transport); grpc_transport *transport);

@ -59,6 +59,9 @@ typedef struct grpc_transport_vtable {
/* implementation of grpc_transport_destroy */ /* implementation of grpc_transport_destroy */
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_transport *self); void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
/* implementation of grpc_transport_get_peer */
char *(*get_peer)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
/* implementation of grpc_transport_get_endpoint */ /* implementation of grpc_transport_get_endpoint */
grpc_endpoint *(*get_endpoint)(grpc_exec_ctx *exec_ctx, grpc_transport *self); grpc_endpoint *(*get_endpoint)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
} grpc_transport_vtable; } grpc_transport_vtable;

@ -112,13 +112,6 @@ char *grpc_transport_stream_op_batch_string(
gpr_strvec_add(&b, tmp); gpr_strvec_add(&b, tmp);
} }
if (op->collect_stats) {
gpr_strvec_add(&b, gpr_strdup(" "));
gpr_asprintf(&tmp, "COLLECT_STATS:%p",
op->payload->collect_stats.collect_stats);
gpr_strvec_add(&b, tmp);
}
out = gpr_strvec_flatten(&b, NULL); out = gpr_strvec_flatten(&b, NULL);
gpr_strvec_destroy(&b); gpr_strvec_destroy(&b);

@ -18,7 +18,9 @@
#include <string.h> #include <string.h>
extern "C" {
#include "src/core/lib/channel/channel_stack.h" #include "src/core/lib/channel/channel_stack.h"
}
#include "src/cpp/common/channel_filter.h" #include "src/cpp/common/channel_filter.h"
#include <grpc++/impl/codegen/slice.h> #include <grpc++/impl/codegen/slice.h>
@ -66,6 +68,10 @@ void CallData::SetPollsetOrPollsetSet(grpc_exec_ctx *exec_ctx,
grpc_call_stack_ignore_set_pollset_or_pollset_set(exec_ctx, elem, pollent); grpc_call_stack_ignore_set_pollset_or_pollset_set(exec_ctx, elem, pollent);
} }
char *CallData::GetPeer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
return grpc_call_next_get_peer(exec_ctx, elem);
}
// internal code used by RegisterChannelFilter() // internal code used by RegisterChannelFilter()
namespace internal { namespace internal {

@ -26,9 +26,11 @@
#include <functional> #include <functional>
#include <vector> #include <vector>
extern "C" {
#include "src/core/lib/channel/channel_stack.h" #include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/surface/channel_init.h" #include "src/core/lib/surface/channel_init.h"
#include "src/core/lib/transport/metadata_batch.h" #include "src/core/lib/transport/metadata_batch.h"
}
/// An interface to define filters. /// An interface to define filters.
/// ///
@ -193,6 +195,15 @@ class TransportStreamOpBatch {
op_->payload->send_message.send_message = send_message; op_->payload->send_message.send_message = send_message;
} }
grpc_byte_stream **recv_message() const {
return op_->recv_message ? op_->payload->recv_message.recv_message
: nullptr;
}
void set_recv_message(grpc_byte_stream **recv_message) {
op_->recv_message = true;
op_->payload->recv_message.recv_message = recv_message;
}
census_context *get_census_context() const { census_context *get_census_context() const {
return (census_context *)op_->payload->context[GRPC_CONTEXT_TRACING].value; return (census_context *)op_->payload->context[GRPC_CONTEXT_TRACING].value;
} }
@ -257,6 +268,9 @@ class CallData {
virtual void SetPollsetOrPollsetSet(grpc_exec_ctx *exec_ctx, virtual void SetPollsetOrPollsetSet(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem, grpc_call_element *elem,
grpc_polling_entity *pollent); grpc_polling_entity *pollent);
/// Gets the peer name.
virtual char *GetPeer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
}; };
namespace internal { namespace internal {
@ -335,6 +349,11 @@ class ChannelFilter final {
CallDataType *call_data = reinterpret_cast<CallDataType *>(elem->call_data); CallDataType *call_data = reinterpret_cast<CallDataType *>(elem->call_data);
call_data->SetPollsetOrPollsetSet(exec_ctx, elem, pollent); call_data->SetPollsetOrPollsetSet(exec_ctx, elem, pollent);
} }
static char *GetPeer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
CallDataType *call_data = reinterpret_cast<CallDataType *>(elem->call_data);
return call_data->GetPeer(exec_ctx, elem);
}
}; };
struct FilterRecord { struct FilterRecord {
@ -377,7 +396,8 @@ void RegisterChannelFilter(
FilterType::call_data_size, FilterType::InitCallElement, FilterType::call_data_size, FilterType::InitCallElement,
FilterType::SetPollsetOrPollsetSet, FilterType::DestroyCallElement, FilterType::SetPollsetOrPollsetSet, FilterType::DestroyCallElement,
FilterType::channel_data_size, FilterType::InitChannelElement, FilterType::channel_data_size, FilterType::InitChannelElement,
FilterType::DestroyChannelElement, FilterType::GetChannelInfo, name}}; FilterType::DestroyChannelElement, FilterType::GetPeer,
FilterType::GetChannelInfo, name}};
internal::channel_filters->push_back(filter_record); internal::channel_filters->push_back(filter_record);
} }

@ -89,6 +89,10 @@ int CoreCodegen::gpr_cv_wait(gpr_cv* cv, gpr_mu* mu,
void CoreCodegen::gpr_cv_signal(gpr_cv* cv) { ::gpr_cv_signal(cv); } void CoreCodegen::gpr_cv_signal(gpr_cv* cv) { ::gpr_cv_signal(cv); }
void CoreCodegen::gpr_cv_broadcast(gpr_cv* cv) { ::gpr_cv_broadcast(cv); } void CoreCodegen::gpr_cv_broadcast(gpr_cv* cv) { ::gpr_cv_broadcast(cv); }
grpc_byte_buffer* CoreCodegen::grpc_byte_buffer_copy(grpc_byte_buffer* bb) {
return ::grpc_byte_buffer_copy(bb);
}
void CoreCodegen::grpc_byte_buffer_destroy(grpc_byte_buffer* bb) { void CoreCodegen::grpc_byte_buffer_destroy(grpc_byte_buffer* bb) {
::grpc_byte_buffer_destroy(bb); ::grpc_byte_buffer_destroy(bb);
} }

@ -14,15 +14,9 @@
licenses(["notice"]) # Apache v2 licenses(["notice"]) # Apache v2
package( load("//bazel:grpc_build_system.bzl", "grpc_proto_library", "grpc_package")
default_visibility = ["//visibility:public"],
features = [
"-layering_check",
"-parse_headers",
],
)
load("//bazel:grpc_build_system.bzl", "grpc_proto_library") grpc_package(name = "health", visibility = "public")
grpc_proto_library( grpc_proto_library(
name = "health_proto", name = "health_proto",

@ -14,15 +14,9 @@
licenses(["notice"]) # Apache v2 licenses(["notice"]) # Apache v2
package( load("//bazel:grpc_build_system.bzl", "grpc_proto_library", "grpc_package")
default_visibility = ["//visibility:public"],
features = [
"-layering_check",
"-parse_headers",
],
)
load("//bazel:grpc_build_system.bzl", "grpc_proto_library") grpc_package(name = "lb", visibility = "public")
grpc_proto_library( grpc_proto_library(
name = "load_balancer_proto", name = "load_balancer_proto",

@ -14,15 +14,9 @@
licenses(["notice"]) # Apache v2 licenses(["notice"]) # Apache v2
package( load("//bazel:grpc_build_system.bzl", "grpc_proto_library", "grpc_package")
default_visibility = ["//visibility:public"],
features = [
"-layering_check",
"-parse_headers",
],
)
load("//bazel:grpc_build_system.bzl", "grpc_proto_library") grpc_package(name = "reflection", visibility = "public")
grpc_proto_library( grpc_proto_library(
name = "reflection_proto", name = "reflection_proto",

@ -14,15 +14,9 @@
licenses(["notice"]) # Apache v2 licenses(["notice"]) # Apache v2
package( load("//bazel:grpc_build_system.bzl", "grpc_proto_library", "grpc_package")
default_visibility = ["//visibility:public"],
features = [
"-layering_check",
"-parse_headers",
],
)
load("//bazel:grpc_build_system.bzl", "grpc_proto_library") grpc_package(name = "status", visibility = "public")
grpc_proto_library( grpc_proto_library(
name = "status_proto", name = "status_proto",

@ -14,15 +14,9 @@
licenses(["notice"]) # Apache v2 licenses(["notice"]) # Apache v2
package( load("//bazel:grpc_build_system.bzl", "grpc_proto_library", "grpc_package")
default_visibility = ["//visibility:public"],
features = [
"-layering_check",
"-parse_headers",
],
)
load("//bazel:grpc_build_system.bzl", "grpc_proto_library") grpc_package(name = "testing", visibility = "public")
grpc_proto_library( grpc_proto_library(
name = "compiler_test_proto", name = "compiler_test_proto",

@ -14,15 +14,9 @@
licenses(["notice"]) # Apache v2 licenses(["notice"]) # Apache v2
package( load("//bazel:grpc_build_system.bzl", "grpc_proto_library", "grpc_package")
default_visibility = ["//visibility:public"],
features = [
"-layering_check",
"-parse_headers",
],
)
load("//bazel:grpc_build_system.bzl", "grpc_proto_library") grpc_package(name = "duplicate", visibility = "public")
grpc_proto_library( grpc_proto_library(
name = "echo_duplicate_proto", name = "echo_duplicate_proto",

@ -77,7 +77,6 @@ CORE_SOURCE_FILES = [
'src/core/lib/http/format_request.c', 'src/core/lib/http/format_request.c',
'src/core/lib/http/httpcli.c', 'src/core/lib/http/httpcli.c',
'src/core/lib/http/parser.c', 'src/core/lib/http/parser.c',
'src/core/lib/iomgr/call_combiner.c',
'src/core/lib/iomgr/closure.c', 'src/core/lib/iomgr/closure.c',
'src/core/lib/iomgr/combiner.c', 'src/core/lib/iomgr/combiner.c',
'src/core/lib/iomgr/endpoint.c', 'src/core/lib/iomgr/endpoint.c',

@ -12,14 +12,9 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary", "grpc_package")
package( grpc_package(name = "test/core/bad_client")
features = [
"-layering_check",
"-parse_headers",
],
)
licenses(["notice"]) # Apache v2 licenses(["notice"]) # Apache v2

@ -12,14 +12,9 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary", "grpc_package")
package( grpc_package(name = "test/core/bad_ssl")
features = [
"-layering_check",
"-parse_headers",
],
)
licenses(["notice"]) # Apache v2 licenses(["notice"]) # Apache v2

@ -12,16 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary", "grpc_package")
licenses(["notice"]) # Apache v2 grpc_package(name = "test/core/census")
package( licenses(["notice"]) # Apache v2
features = [
"-layering_check",
"-parse_headers",
],
)
grpc_cc_test( grpc_cc_test(
name = "context_test", name = "context_test",

@ -12,16 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary", "grpc_package")
licenses(["notice"]) # Apache v2 grpc_package(name = "test/core/channel")
package( licenses(["notice"]) # Apache v2
features = [
"-layering_check",
"-parse_headers",
],
)
grpc_cc_test( grpc_cc_test(
name = "channel_args_test", name = "channel_args_test",

@ -67,6 +67,10 @@ static void channel_func(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
++*(int *)(elem->channel_data); ++*(int *)(elem->channel_data);
} }
static char *get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
return gpr_strdup("peer");
}
static void free_channel(grpc_exec_ctx *exec_ctx, void *arg, static void free_channel(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) { grpc_error *error) {
grpc_channel_stack_destroy(exec_ctx, arg); grpc_channel_stack_destroy(exec_ctx, arg);
@ -89,6 +93,7 @@ static void test_create_channel_stack(void) {
sizeof(int), sizeof(int),
channel_init_func, channel_init_func,
channel_destroy_func, channel_destroy_func,
get_peer,
grpc_channel_next_get_info, grpc_channel_next_get_info,
"some_test_filter"}; "some_test_filter"};
const grpc_channel_filter *filters = &filter; const grpc_channel_filter *filters = &filter;

@ -89,14 +89,14 @@ int main(int argc, char **argv) {
"connected", NULL); "connected", NULL);
errors += CHECK_STACK("unknown", NULL, GRPC_SERVER_CHANNEL, "server", errors += CHECK_STACK("unknown", NULL, GRPC_SERVER_CHANNEL, "server",
"message_size", "deadline", "connected", NULL); "message_size", "deadline", "connected", NULL);
errors += CHECK_STACK("chttp2", NULL, GRPC_CLIENT_DIRECT_CHANNEL, errors +=
"message_size", "deadline", "http-client", CHECK_STACK("chttp2", NULL, GRPC_CLIENT_DIRECT_CHANNEL, "message_size",
"message_compress", "connected", NULL); "deadline", "http-client", "compress", "connected", NULL);
errors += CHECK_STACK("chttp2", NULL, GRPC_CLIENT_SUBCHANNEL, "message_size", errors += CHECK_STACK("chttp2", NULL, GRPC_CLIENT_SUBCHANNEL, "message_size",
"http-client", "message_compress", "connected", NULL); "http-client", "compress", "connected", NULL);
errors += CHECK_STACK("chttp2", NULL, GRPC_SERVER_CHANNEL, "server", errors +=
"message_size", "deadline", "http-server", CHECK_STACK("chttp2", NULL, GRPC_SERVER_CHANNEL, "server", "message_size",
"message_compress", "connected", NULL); "deadline", "http-server", "compress", "connected", NULL);
errors += errors +=
CHECK_STACK(NULL, NULL, GRPC_CLIENT_CHANNEL, "client-channel", NULL); CHECK_STACK(NULL, NULL, GRPC_CLIENT_CHANNEL, "client-channel", NULL);

@ -12,16 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary", "grpc_package")
licenses(["notice"]) # Apache v2 grpc_package(name = "test/core/client_channel")
package( licenses(["notice"]) # Apache v2
features = [
"-layering_check",
"-parse_headers",
],
)
load("//test/core/util:grpc_fuzzer.bzl", "grpc_fuzzer") load("//test/core/util:grpc_fuzzer.bzl", "grpc_fuzzer")

@ -12,16 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary", "grpc_package")
licenses(["notice"]) # Apache v2 grpc_package(name = "test/core/client_channel_resolvers")
package( licenses(["notice"]) # Apache v2
features = [
"-layering_check",
"-parse_headers",
],
)
grpc_cc_test( grpc_cc_test(
name = "dns_resolver_connectivity_test", name = "dns_resolver_connectivity_test",

@ -12,16 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary", "grpc_package")
licenses(["notice"]) # Apache v2 grpc_package(name = "test/core/compression")
package( licenses(["notice"]) # Apache v2
features = [
"-layering_check",
"-parse_headers",
],
)
grpc_cc_test( grpc_cc_test(
name = "algorithm_test", name = "algorithm_test",

@ -12,16 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary", "grpc_package")
licenses(["notice"]) # Apache v2 licenses(["notice"]) # Apache v2
package( grpc_package(name = "test/core/end2end")
features = [
"-layering_check",
"-parse_headers",
],
)
load(":generate_tests.bzl", "grpc_end2end_tests") load(":generate_tests.bzl", "grpc_end2end_tests")

@ -12,16 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary", "grpc_package")
licenses(["notice"]) # Apache v2 grpc_package(name = "test/core/end2end/fuzzers")
package( licenses(["notice"]) # Apache v2
features = [
"-layering_check",
"-parse_headers",
],
)
load("//test/core/util:grpc_fuzzer.bzl", "grpc_fuzzer") load("//test/core/util:grpc_fuzzer.bzl", "grpc_fuzzer")

@ -114,7 +114,9 @@ static void test_cancel_after_round_trip(grpc_end2end_test_config config,
grpc_slice_from_copied_string("hello you"); grpc_slice_from_copied_string("hello you");
grpc_byte_buffer *request_payload = grpc_byte_buffer *request_payload =
grpc_raw_byte_buffer_create(&request_payload_slice, 1); grpc_raw_byte_buffer_create(&request_payload_slice, 1);
grpc_byte_buffer *response_payload = grpc_byte_buffer *response_payload1 =
grpc_raw_byte_buffer_create(&response_payload_slice, 1);
grpc_byte_buffer *response_payload2 =
grpc_raw_byte_buffer_create(&response_payload_slice, 1); grpc_raw_byte_buffer_create(&response_payload_slice, 1);
int was_cancelled = 2; int was_cancelled = 2;
@ -199,7 +201,7 @@ static void test_cancel_after_round_trip(grpc_end2end_test_config config,
op->reserved = NULL; op->reserved = NULL;
op++; op++;
op->op = GRPC_OP_SEND_MESSAGE; op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message.send_message = response_payload; op->data.send_message.send_message = response_payload1;
op->flags = 0; op->flags = 0;
op->reserved = NULL; op->reserved = NULL;
op++; op++;
@ -242,7 +244,7 @@ static void test_cancel_after_round_trip(grpc_end2end_test_config config,
op->reserved = NULL; op->reserved = NULL;
op++; op++;
op->op = GRPC_OP_SEND_MESSAGE; op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message.send_message = response_payload; op->data.send_message.send_message = response_payload2;
op->flags = 0; op->flags = 0;
op->reserved = NULL; op->reserved = NULL;
op++; op++;
@ -262,7 +264,8 @@ static void test_cancel_after_round_trip(grpc_end2end_test_config config,
grpc_call_details_destroy(&call_details); grpc_call_details_destroy(&call_details);
grpc_byte_buffer_destroy(request_payload); grpc_byte_buffer_destroy(request_payload);
grpc_byte_buffer_destroy(response_payload); grpc_byte_buffer_destroy(response_payload1);
grpc_byte_buffer_destroy(response_payload2);
grpc_byte_buffer_destroy(request_payload_recv); grpc_byte_buffer_destroy(request_payload_recv);
grpc_byte_buffer_destroy(response_payload_recv); grpc_byte_buffer_destroy(response_payload_recv);
grpc_slice_unref(details); grpc_slice_unref(details);

@ -430,6 +430,7 @@ static const grpc_channel_filter test_filter = {
0, 0,
init_channel_elem, init_channel_elem,
destroy_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer,
grpc_channel_next_get_info, grpc_channel_next_get_info,
"filter_call_init_fails"}; "filter_call_init_fails"};

@ -197,7 +197,7 @@ static void recv_im_ready(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) { grpc_error *error) {
grpc_call_element *elem = arg; grpc_call_element *elem = arg;
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
GRPC_CLOSURE_RUN( GRPC_CLOSURE_SCHED(
exec_ctx, calld->recv_im_ready, exec_ctx, calld->recv_im_ready,
grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Failure that's not preventable.", &error, 1), "Failure that's not preventable.", &error, 1),
@ -247,6 +247,7 @@ static const grpc_channel_filter test_filter = {
sizeof(channel_data), sizeof(channel_data),
init_channel_elem, init_channel_elem,
destroy_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer,
grpc_channel_next_get_info, grpc_channel_next_get_info,
"filter_causes_close"}; "filter_causes_close"};

@ -290,6 +290,7 @@ static const grpc_channel_filter test_client_filter = {
0, 0,
init_channel_elem, init_channel_elem,
destroy_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer,
grpc_channel_next_get_info, grpc_channel_next_get_info,
"client_filter_latency"}; "client_filter_latency"};
@ -303,6 +304,7 @@ static const grpc_channel_filter test_server_filter = {
0, 0,
init_channel_elem, init_channel_elem,
destroy_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer,
grpc_channel_next_get_info, grpc_channel_next_get_info,
"server_filter_latency"}; "server_filter_latency"};

@ -143,6 +143,8 @@ void resource_quota_server(grpc_end2end_test_config config) {
malloc(sizeof(grpc_call_details) * NUM_CALLS); malloc(sizeof(grpc_call_details) * NUM_CALLS);
grpc_status_code *status = malloc(sizeof(grpc_status_code) * NUM_CALLS); grpc_status_code *status = malloc(sizeof(grpc_status_code) * NUM_CALLS);
grpc_slice *details = malloc(sizeof(grpc_slice) * NUM_CALLS); grpc_slice *details = malloc(sizeof(grpc_slice) * NUM_CALLS);
grpc_byte_buffer **request_payload =
malloc(sizeof(grpc_byte_buffer *) * NUM_CALLS);
grpc_byte_buffer **request_payload_recv = grpc_byte_buffer **request_payload_recv =
malloc(sizeof(grpc_byte_buffer *) * NUM_CALLS); malloc(sizeof(grpc_byte_buffer *) * NUM_CALLS);
int *was_cancelled = malloc(sizeof(int) * NUM_CALLS); int *was_cancelled = malloc(sizeof(int) * NUM_CALLS);
@ -156,9 +158,6 @@ void resource_quota_server(grpc_end2end_test_config config) {
int deadline_exceeded = 0; int deadline_exceeded = 0;
int unavailable = 0; int unavailable = 0;
grpc_byte_buffer *request_payload =
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
grpc_op ops[6]; grpc_op ops[6];
grpc_op *op; grpc_op *op;
@ -167,6 +166,7 @@ void resource_quota_server(grpc_end2end_test_config config) {
grpc_metadata_array_init(&trailing_metadata_recv[i]); grpc_metadata_array_init(&trailing_metadata_recv[i]);
grpc_metadata_array_init(&request_metadata_recv[i]); grpc_metadata_array_init(&request_metadata_recv[i]);
grpc_call_details_init(&call_details[i]); grpc_call_details_init(&call_details[i]);
request_payload[i] = grpc_raw_byte_buffer_create(&request_payload_slice, 1);
request_payload_recv[i] = NULL; request_payload_recv[i] = NULL;
was_cancelled[i] = 0; was_cancelled[i] = 0;
} }
@ -195,7 +195,7 @@ void resource_quota_server(grpc_end2end_test_config config) {
op->reserved = NULL; op->reserved = NULL;
op++; op++;
op->op = GRPC_OP_SEND_MESSAGE; op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message.send_message = request_payload; op->data.send_message.send_message = request_payload[i];
op->flags = 0; op->flags = 0;
op->reserved = NULL; op->reserved = NULL;
op++; op++;
@ -261,6 +261,7 @@ void resource_quota_server(grpc_end2end_test_config config) {
grpc_metadata_array_destroy(&trailing_metadata_recv[call_id]); grpc_metadata_array_destroy(&trailing_metadata_recv[call_id]);
grpc_call_unref(client_calls[call_id]); grpc_call_unref(client_calls[call_id]);
grpc_slice_unref(details[call_id]); grpc_slice_unref(details[call_id]);
grpc_byte_buffer_destroy(request_payload[call_id]);
pending_client_calls--; pending_client_calls--;
} else if (ev_tag < SERVER_RECV_BASE_TAG) { } else if (ev_tag < SERVER_RECV_BASE_TAG) {
@ -351,7 +352,6 @@ void resource_quota_server(grpc_end2end_test_config config) {
NUM_CALLS, cancelled_calls_on_server, cancelled_calls_on_client, NUM_CALLS, cancelled_calls_on_server, cancelled_calls_on_client,
deadline_exceeded, unavailable); deadline_exceeded, unavailable);
grpc_byte_buffer_destroy(request_payload);
grpc_slice_unref(request_payload_slice); grpc_slice_unref(request_payload_slice);
grpc_resource_quota_unref(resource_quota); grpc_resource_quota_unref(resource_quota);
@ -366,6 +366,7 @@ void resource_quota_server(grpc_end2end_test_config config) {
free(call_details); free(call_details);
free(status); free(status);
free(details); free(details);
free(request_payload);
free(request_payload_recv); free(request_payload_recv);
free(was_cancelled); free(was_cancelled);
} }

@ -12,16 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary", "grpc_package")
licenses(["notice"]) # Apache v2 grpc_package(name = "test/core/fling")
package( licenses(["notice"]) # Apache v2
features = [
"-layering_check",
"-parse_headers",
],
)
load("//test/core/util:grpc_fuzzer.bzl", "grpc_fuzzer") load("//test/core/util:grpc_fuzzer.bzl", "grpc_fuzzer")

@ -12,16 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary", "grpc_package")
licenses(["notice"]) # Apache v2 grpc_package(name = "test/core/handshake")
package( licenses(["notice"]) # Apache v2
features = [
"-layering_check",
"-parse_headers",
],
)
grpc_cc_test( grpc_cc_test(
name = "client_ssl", name = "client_ssl",

@ -12,16 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary", "grpc_package")
licenses(["notice"]) # Apache v2 grpc_package(name = "test/core/http")
package( licenses(["notice"]) # Apache v2
features = [
"-layering_check",
"-parse_headers",
],
)
load("//test/core/util:grpc_fuzzer.bzl", "grpc_fuzzer") load("//test/core/util:grpc_fuzzer.bzl", "grpc_fuzzer")

@ -12,19 +12,13 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary", "grpc_package")
licenses(["notice"]) # Apache v2 licenses(["notice"]) # Apache v2
load("//test/core/util:grpc_fuzzer.bzl", "grpc_fuzzer") load("//test/core/util:grpc_fuzzer.bzl", "grpc_fuzzer")
package( grpc_package(name = "test/core/iomgr", visibility = "public") # Useful for third party devs to test their io manager implementation.
default_visibility = ["//visibility:public"], # Useful for third party devs to test their io manager implementation.
features = [
"-layering_check",
"-parse_headers",
],
)
grpc_cc_library( grpc_cc_library(
name = "endpoint_tests", name = "endpoint_tests",

@ -12,16 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary", "grpc_package")
licenses(["notice"]) # Apache v2 grpc_package(name = "test/core/json")
package( licenses(["notice"]) # Apache v2
features = [
"-layering_check",
"-parse_headers",
],
)
load("//test/core/util:grpc_fuzzer.bzl", "grpc_fuzzer") load("//test/core/util:grpc_fuzzer.bzl", "grpc_fuzzer")

@ -12,16 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary", "grpc_package")
licenses(["notice"]) # Apache v2 grpc_package(name = "test/core/nanopb")
package( licenses(["notice"]) # Apache v2
features = [
"-layering_check",
"-parse_headers",
],
)
load("//test/core/util:grpc_fuzzer.bzl", "grpc_fuzzer") load("//test/core/util:grpc_fuzzer.bzl", "grpc_fuzzer")

@ -12,17 +12,14 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary", "grpc_package")
licenses(["notice"]) # Apache v2 grpc_package(name = "test/core/network_benchmarks",
features = ["-layering_check", "-parse_headers" ]
package(
features = [
"-layering_check",
"-parse_headers",
],
) )
licenses(["notice"]) # Apache v2
grpc_cc_binary( grpc_cc_binary(
name = "low_level_ping_pong", name = "low_level_ping_pong",
srcs = ["low_level_ping_pong.c"], srcs = ["low_level_ping_pong.c"],

@ -12,16 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary", "grpc_package")
licenses(["notice"]) # Apache v2 licenses(["notice"]) # Apache v2
package( grpc_package(name = "test/core/security")
features = [
"-layering_check",
"-parse_headers",
],
)
load("//test/core/util:grpc_fuzzer.bzl", "grpc_fuzzer") load("//test/core/util:grpc_fuzzer.bzl", "grpc_fuzzer")

@ -12,16 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary", "grpc_package")
licenses(["notice"]) # Apache v2 grpc_package(name = "test/core/slice")
package( licenses(["notice"]) # Apache v2
features = [
"-layering_check",
"-parse_headers",
],
)
load("//test/core/util:grpc_fuzzer.bzl", "grpc_fuzzer") load("//test/core/util:grpc_fuzzer.bzl", "grpc_fuzzer")

@ -12,16 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary", "grpc_package")
licenses(["notice"]) # Apache v2 licenses(["notice"]) # Apache v2
package( grpc_package(name = "test/core/support")
features = [
"-layering_check",
"-parse_headers",
],
)
grpc_cc_test( grpc_cc_test(
name = "alloc_test", name = "alloc_test",

@ -12,16 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary", "grpc_package")
licenses(["notice"]) # Apache v2 licenses(["notice"]) # Apache v2
package( grpc_package(name = "test/core/surface")
features = [
"-layering_check",
"-parse_headers",
],
)
grpc_cc_test( grpc_cc_test(
name = "alarm_test", name = "alarm_test",

@ -12,16 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary", "grpc_package")
licenses(["notice"]) # Apache v2 licenses(["notice"]) # Apache v2
package( grpc_package(name = "test/core/transport")
features = [
"-layering_check",
"-parse_headers",
],
)
grpc_cc_test( grpc_cc_test(
name = "bdp_estimator_test", name = "bdp_estimator_test",

@ -12,16 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary", "grpc_package")
licenses(["notice"]) # Apache v2 licenses(["notice"]) # Apache v2
package( grpc_package(name = "test/core/transport/chttp2")
features = [
"-layering_check",
"-parse_headers",
],
)
load("//test/core/util:grpc_fuzzer.bzl", "grpc_fuzzer") load("//test/core/util:grpc_fuzzer.bzl", "grpc_fuzzer")

@ -12,16 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary", "grpc_package")
licenses(["notice"]) # Apache v2 licenses(["notice"]) # Apache v2
package( grpc_package(name = "test/core/tsi")
features = [
"-layering_check",
"-parse_headers",
],
)
grpc_cc_test( grpc_cc_test(
name = "transport_security_test", name = "transport_security_test",

@ -12,17 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary", "grpc_package")
licenses(["notice"]) # Apache v2 licenses(["notice"]) # Apache v2
package( grpc_package(name = "test/core/util", visibility = "public")
default_visibility = ["//visibility:public"],
features = [
"-layering_check",
"-parse_headers",
],
)
grpc_cc_library( grpc_cc_library(
name = "gpr_test_util", name = "gpr_test_util",

@ -14,14 +14,9 @@
licenses(["notice"]) # Apache v2 licenses(["notice"]) # Apache v2
load("//bazel:grpc_build_system.bzl", "grpc_cc_test") load("//bazel:grpc_build_system.bzl", "grpc_cc_test", "grpc_package")
package( grpc_package(name = "test/cpp/codegen")
features = [
"-layering_check",
"-parse_headers",
],
)
grpc_cc_test( grpc_cc_test(
name = "codegen_test_full", name = "codegen_test_full",

@ -14,14 +14,9 @@
licenses(["notice"]) # Apache v2 licenses(["notice"]) # Apache v2
load("//bazel:grpc_build_system.bzl", "grpc_cc_test") load("//bazel:grpc_build_system.bzl", "grpc_cc_test", "grpc_package")
package( grpc_package(name = "test/cpp/common")
features = [
"-layering_check",
"-parse_headers",
],
)
grpc_cc_test( grpc_cc_test(
name = "alarm_cpp_test", name = "alarm_cpp_test",

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save