Merge branch 'master' of https://github.com/grpc/grpc into pull_request_interval_script

pull/12647/head
Author: Nicolas "Pixel" Noble
Commit: 8354fa32ba
Changed files, with per-file change counts in parentheses (100 shown; some files are not shown because too many files changed in this diff):
  1. .github/CODEOWNERS (1)
  2. BUILD (6)
  3. CMakeLists.txt (27)
  4. Makefile (27)
  5. binding.gyp (2)
  6. build.yaml (6)
  7. config.m4 (2)
  8. config.w32 (2)
  9. doc/environment_variables.md (1)
  10. gRPC-Core.podspec (7)
  11. grpc.def (20)
  12. grpc.gemspec (5)
  13. grpc.gyp (8)
  14. include/grpc++/generic/generic_stub.h (10)
  15. include/grpc++/impl/codegen/byte_buffer.h (156)
  16. include/grpc++/impl/codegen/call.h (56)
  17. include/grpc++/impl/codegen/method_handler_impl.h (9)
  18. include/grpc++/impl/codegen/rpc_service_method.h (14)
  19. include/grpc++/impl/codegen/serialization_traits.h (29)
  20. include/grpc++/impl/codegen/slice.h (78)
  21. include/grpc++/support/byte_buffer.h (68)
  22. include/grpc++/support/channel_arguments.h (6)
  23. include/grpc++/support/slice.h (80)
  24. include/grpc/byte_buffer.h (64)
  25. include/grpc/grpc_security.h (35)
  26. include/grpc/impl/codegen/byte_buffer.h (86)
  27. include/grpc/impl/codegen/grpc_types.h (6)
  28. include/grpc/impl/codegen/port_platform.h (5)
  29. include/grpc/slice.h (6)
  30. package.xml (5)
  31. setup.py (4)
  32. src/compiler/OWNERS (1)
  33. src/compiler/php_generator.cc (32)
  34. src/core/ext/filters/client_channel/http_proxy.c (32)
  35. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c (422)
  36. src/core/ext/filters/client_channel/lb_policy_factory.c (2)
  37. src/core/ext/filters/client_channel/lb_policy_factory.h (2)
  38. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c (57)
  39. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c (25)
  40. src/core/ext/transport/chttp2/server/chttp2_server.c (3)
  41. src/core/ext/transport/chttp2/transport/chttp2_transport.c (88)
  42. src/core/ext/transport/chttp2/transport/frame_data.c (4)
  43. src/core/ext/transport/chttp2/transport/hpack_encoder.c (98)
  44. src/core/ext/transport/chttp2/transport/hpack_parser.c (48)
  45. src/core/ext/transport/chttp2/transport/internal.h (20)
  46. src/core/ext/transport/chttp2/transport/writing.c (106)
  47. src/core/lib/compression/stream_compression.c (179)
  48. src/core/lib/compression/stream_compression.h (32)
  49. src/core/lib/compression/stream_compression_gzip.c (228)
  50. src/core/lib/compression/stream_compression_gzip.h (26)
  51. src/core/lib/compression/stream_compression_identity.c (94)
  52. src/core/lib/compression/stream_compression_identity.h (27)
  53. src/core/lib/debug/stats_data.c (110)
  54. src/core/lib/debug/stats_data.h (106)
  55. src/core/lib/debug/stats_data.yaml (52)
  56. src/core/lib/debug/stats_data_bq_schema.sql (24)
  57. src/core/lib/http/httpcli.c (2)
  58. src/core/lib/iomgr/ev_epoll1_linux.c (71)
  59. src/core/lib/iomgr/ev_epollex_linux.c (72)
  60. src/core/lib/iomgr/ev_epollsig_linux.c (59)
  61. src/core/lib/iomgr/ev_poll_posix.c (56)
  62. src/core/lib/iomgr/executor.c (69)
  63. src/core/lib/iomgr/iomgr.c (2)
  64. src/core/lib/iomgr/is_epollexclusive_available.c (12)
  65. src/core/lib/iomgr/pollset_uv.c (2)
  66. src/core/lib/iomgr/resource_quota.c (1)
  67. src/core/lib/iomgr/socket_factory_posix.c (4)
  68. src/core/lib/iomgr/socket_mutator.c (4)
  69. src/core/lib/iomgr/socket_utils_windows.c (4)
  70. src/core/lib/iomgr/tcp_server_posix.c (4)
  71. src/core/lib/iomgr/timer_generic.c (139)
  72. src/core/lib/iomgr/timer_generic.h (3)
  73. src/core/lib/security/credentials/composite/composite_credentials.c (14)
  74. src/core/lib/security/credentials/plugin/plugin_credentials.c (182)
  75. src/core/lib/security/credentials/plugin/plugin_credentials.h (2)
  76. src/core/lib/surface/call.c (31)
  77. src/core/lib/surface/completion_queue.c (23)
  78. src/core/lib/surface/init_secure.c (6)
  79. src/core/lib/transport/static_metadata.c (816)
  80. src/core/lib/transport/status_conversion.c (2)
  81. src/core/lib/transport/transport.c (8)
  82. src/cpp/client/generic_stub.cc (10)
  83. src/cpp/client/secure_credentials.cc (80)
  84. src/cpp/client/secure_credentials.h (17)
  85. src/cpp/common/channel_arguments.cc (4)
  86. src/cpp/server/health/default_health_check_service.cc (1)
  87. src/cpp/util/byte_buffer_cc.cc (31)
  88. src/cpp/util/slice_cc.cc (2)
  89. src/csharp/Grpc.Core/Internal/NativeMetadataCredentialsPlugin.cs (9)
  90. src/csharp/Grpc.IntegrationTesting/MetadataCredentialsTest.cs (59)
  91. src/csharp/ext/grpc_csharp_ext.c (8)
  92. src/node/ext/call_credentials.cc (10)
  93. src/node/ext/call_credentials.h (8)
  94. src/php/ext/grpc/call_credentials.c (46)
  95. src/php/ext/grpc/call_credentials.h (9)
  96. src/php/tests/qps/composer.json (3)
  97. src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi (10)
  98. src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi (45)
  99. src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi (13)
  100. src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi (16)

@ -3,5 +3,4 @@
# repository as the source of truth for module ownership.
/**/OWNERS @markdroth @nicolasnoble @ctiller
/bazel/** @nicolasnoble @dgquintas @ctiller
/src/compiler/cpp_generator.cc @vjpai
/src/core/ext/filters/client_channel/** @markdroth @dgquintas @ctiller

@ -574,6 +574,8 @@ grpc_cc_library(
"src/core/lib/compression/compression.c",
"src/core/lib/compression/message_compress.c",
"src/core/lib/compression/stream_compression.c",
"src/core/lib/compression/stream_compression_gzip.c",
"src/core/lib/compression/stream_compression_identity.c",
"src/core/lib/debug/stats.c",
"src/core/lib/debug/stats_data.c",
"src/core/lib/http/format_request.c",
@ -705,6 +707,8 @@ grpc_cc_library(
"src/core/lib/compression/algorithm_metadata.h",
"src/core/lib/compression/message_compress.h",
"src/core/lib/compression/stream_compression.h",
"src/core/lib/compression/stream_compression_gzip.h",
"src/core/lib/compression/stream_compression_identity.h",
"src/core/lib/debug/stats.h",
"src/core/lib/debug/stats_data.h",
"src/core/lib/http/format_request.h",
@ -989,6 +993,7 @@ grpc_cc_library(
name = "grpc_codegen",
language = "c",
public_hdrs = [
"include/grpc/impl/codegen/byte_buffer.h",
"include/grpc/impl/codegen/byte_buffer_reader.h",
"include/grpc/impl/codegen/compression_types.h",
"include/grpc/impl/codegen/connectivity_state.h",
@ -1487,6 +1492,7 @@ grpc_cc_library(
public_hdrs = [
"include/grpc++/impl/codegen/async_stream.h",
"include/grpc++/impl/codegen/async_unary_call.h",
"include/grpc++/impl/codegen/byte_buffer.h",
"include/grpc++/impl/codegen/call.h",
"include/grpc++/impl/codegen/call_hook.h",
"include/grpc++/impl/codegen/channel_interface.h",

@ -965,6 +965,8 @@ add_library(grpc
src/core/lib/compression/compression.c
src/core/lib/compression/message_compress.c
src/core/lib/compression/stream_compression.c
src/core/lib/compression/stream_compression_gzip.c
src/core/lib/compression/stream_compression_identity.c
src/core/lib/debug/stats.c
src/core/lib/debug/stats_data.c
src/core/lib/http/format_request.c
@ -1250,6 +1252,7 @@ target_link_libraries(grpc
)
foreach(_hdr
include/grpc/impl/codegen/byte_buffer.h
include/grpc/impl/codegen/byte_buffer_reader.h
include/grpc/impl/codegen/compression_types.h
include/grpc/impl/codegen/connectivity_state.h
@ -1313,6 +1316,8 @@ add_library(grpc_cronet
src/core/lib/compression/compression.c
src/core/lib/compression/message_compress.c
src/core/lib/compression/stream_compression.c
src/core/lib/compression/stream_compression_gzip.c
src/core/lib/compression/stream_compression_identity.c
src/core/lib/debug/stats.c
src/core/lib/debug/stats_data.c
src/core/lib/http/format_request.c
@ -1556,6 +1561,7 @@ target_link_libraries(grpc_cronet
)
foreach(_hdr
include/grpc/impl/codegen/byte_buffer.h
include/grpc/impl/codegen/byte_buffer_reader.h
include/grpc/impl/codegen/compression_types.h
include/grpc/impl/codegen/connectivity_state.h
@ -1629,6 +1635,8 @@ add_library(grpc_test_util
src/core/lib/compression/compression.c
src/core/lib/compression/message_compress.c
src/core/lib/compression/stream_compression.c
src/core/lib/compression/stream_compression_gzip.c
src/core/lib/compression/stream_compression_identity.c
src/core/lib/debug/stats.c
src/core/lib/debug/stats_data.c
src/core/lib/http/format_request.c
@ -1832,6 +1840,7 @@ target_link_libraries(grpc_test_util
)
foreach(_hdr
include/grpc/impl/codegen/byte_buffer.h
include/grpc/impl/codegen/byte_buffer_reader.h
include/grpc/impl/codegen/compression_types.h
include/grpc/impl/codegen/connectivity_state.h
@ -1889,6 +1898,8 @@ add_library(grpc_test_util_unsecure
src/core/lib/compression/compression.c
src/core/lib/compression/message_compress.c
src/core/lib/compression/stream_compression.c
src/core/lib/compression/stream_compression_gzip.c
src/core/lib/compression/stream_compression_identity.c
src/core/lib/debug/stats.c
src/core/lib/debug/stats_data.c
src/core/lib/http/format_request.c
@ -2092,6 +2103,7 @@ target_link_libraries(grpc_test_util_unsecure
)
foreach(_hdr
include/grpc/impl/codegen/byte_buffer.h
include/grpc/impl/codegen/byte_buffer_reader.h
include/grpc/impl/codegen/compression_types.h
include/grpc/impl/codegen/connectivity_state.h
@ -2135,6 +2147,8 @@ add_library(grpc_unsecure
src/core/lib/compression/compression.c
src/core/lib/compression/message_compress.c
src/core/lib/compression/stream_compression.c
src/core/lib/compression/stream_compression_gzip.c
src/core/lib/compression/stream_compression_identity.c
src/core/lib/debug/stats.c
src/core/lib/debug/stats_data.c
src/core/lib/http/format_request.c
@ -2387,6 +2401,7 @@ target_link_libraries(grpc_unsecure
)
foreach(_hdr
include/grpc/impl/codegen/byte_buffer.h
include/grpc/impl/codegen/byte_buffer_reader.h
include/grpc/impl/codegen/compression_types.h
include/grpc/impl/codegen/connectivity_state.h
@ -2696,6 +2711,7 @@ foreach(_hdr
include/grpc/slice_buffer.h
include/grpc/status.h
include/grpc/support/workaround_list.h
include/grpc/impl/codegen/byte_buffer.h
include/grpc/impl/codegen/byte_buffer_reader.h
include/grpc/impl/codegen/compression_types.h
include/grpc/impl/codegen/connectivity_state.h
@ -2706,6 +2722,7 @@ foreach(_hdr
include/grpc/impl/codegen/status.h
include/grpc++/impl/codegen/async_stream.h
include/grpc++/impl/codegen/async_unary_call.h
include/grpc++/impl/codegen/byte_buffer.h
include/grpc++/impl/codegen/call.h
include/grpc++/impl/codegen/call_hook.h
include/grpc++/impl/codegen/channel_interface.h
@ -2887,6 +2904,8 @@ add_library(grpc++_cronet
src/core/lib/compression/compression.c
src/core/lib/compression/message_compress.c
src/core/lib/compression/stream_compression.c
src/core/lib/compression/stream_compression_gzip.c
src/core/lib/compression/stream_compression_identity.c
src/core/lib/debug/stats.c
src/core/lib/debug/stats_data.c
src/core/lib/http/format_request.c
@ -3188,6 +3207,7 @@ foreach(_hdr
include/grpc/slice_buffer.h
include/grpc/status.h
include/grpc/support/workaround_list.h
include/grpc/impl/codegen/byte_buffer.h
include/grpc/impl/codegen/byte_buffer_reader.h
include/grpc/impl/codegen/compression_types.h
include/grpc/impl/codegen/connectivity_state.h
@ -3198,6 +3218,7 @@ foreach(_hdr
include/grpc/impl/codegen/status.h
include/grpc++/impl/codegen/async_stream.h
include/grpc++/impl/codegen/async_unary_call.h
include/grpc++/impl/codegen/byte_buffer.h
include/grpc++/impl/codegen/call.h
include/grpc++/impl/codegen/call_hook.h
include/grpc++/impl/codegen/channel_interface.h
@ -3558,6 +3579,7 @@ target_link_libraries(grpc++_test_util
foreach(_hdr
include/grpc++/impl/codegen/async_stream.h
include/grpc++/impl/codegen/async_unary_call.h
include/grpc++/impl/codegen/byte_buffer.h
include/grpc++/impl/codegen/call.h
include/grpc++/impl/codegen/call_hook.h
include/grpc++/impl/codegen/channel_interface.h
@ -3585,6 +3607,7 @@ foreach(_hdr
include/grpc++/impl/codegen/stub_options.h
include/grpc++/impl/codegen/sync_stream.h
include/grpc++/impl/codegen/time.h
include/grpc/impl/codegen/byte_buffer.h
include/grpc/impl/codegen/byte_buffer_reader.h
include/grpc/impl/codegen/compression_types.h
include/grpc/impl/codegen/connectivity_state.h
@ -3696,6 +3719,7 @@ target_link_libraries(grpc++_test_util_unsecure
foreach(_hdr
include/grpc++/impl/codegen/async_stream.h
include/grpc++/impl/codegen/async_unary_call.h
include/grpc++/impl/codegen/byte_buffer.h
include/grpc++/impl/codegen/call.h
include/grpc++/impl/codegen/call_hook.h
include/grpc++/impl/codegen/channel_interface.h
@ -3723,6 +3747,7 @@ foreach(_hdr
include/grpc++/impl/codegen/stub_options.h
include/grpc++/impl/codegen/sync_stream.h
include/grpc++/impl/codegen/time.h
include/grpc/impl/codegen/byte_buffer.h
include/grpc/impl/codegen/byte_buffer_reader.h
include/grpc/impl/codegen/compression_types.h
include/grpc/impl/codegen/connectivity_state.h
@ -3925,6 +3950,7 @@ foreach(_hdr
include/grpc/slice_buffer.h
include/grpc/status.h
include/grpc/support/workaround_list.h
include/grpc/impl/codegen/byte_buffer.h
include/grpc/impl/codegen/byte_buffer_reader.h
include/grpc/impl/codegen/compression_types.h
include/grpc/impl/codegen/connectivity_state.h
@ -3935,6 +3961,7 @@ foreach(_hdr
include/grpc/impl/codegen/status.h
include/grpc++/impl/codegen/async_stream.h
include/grpc++/impl/codegen/async_unary_call.h
include/grpc++/impl/codegen/byte_buffer.h
include/grpc++/impl/codegen/call.h
include/grpc++/impl/codegen/call_hook.h
include/grpc++/impl/codegen/channel_interface.h

@ -2956,6 +2956,8 @@ LIBGRPC_SRC = \
src/core/lib/compression/compression.c \
src/core/lib/compression/message_compress.c \
src/core/lib/compression/stream_compression.c \
src/core/lib/compression/stream_compression_gzip.c \
src/core/lib/compression/stream_compression_identity.c \
src/core/lib/debug/stats.c \
src/core/lib/debug/stats_data.c \
src/core/lib/http/format_request.c \
@ -3206,6 +3208,7 @@ LIBGRPC_SRC = \
src/core/plugin_registry/grpc_plugin_registry.c \
PUBLIC_HEADERS_C += \
include/grpc/impl/codegen/byte_buffer.h \
include/grpc/impl/codegen/byte_buffer_reader.h \
include/grpc/impl/codegen/compression_types.h \
include/grpc/impl/codegen/connectivity_state.h \
@ -3304,6 +3307,8 @@ LIBGRPC_CRONET_SRC = \
src/core/lib/compression/compression.c \
src/core/lib/compression/message_compress.c \
src/core/lib/compression/stream_compression.c \
src/core/lib/compression/stream_compression_gzip.c \
src/core/lib/compression/stream_compression_identity.c \
src/core/lib/debug/stats.c \
src/core/lib/debug/stats_data.c \
src/core/lib/http/format_request.c \
@ -3512,6 +3517,7 @@ LIBGRPC_CRONET_SRC = \
src/core/plugin_registry/grpc_cronet_plugin_registry.c \
PUBLIC_HEADERS_C += \
include/grpc/impl/codegen/byte_buffer.h \
include/grpc/impl/codegen/byte_buffer_reader.h \
include/grpc/impl/codegen/compression_types.h \
include/grpc/impl/codegen/connectivity_state.h \
@ -3619,6 +3625,8 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/lib/compression/compression.c \
src/core/lib/compression/message_compress.c \
src/core/lib/compression/stream_compression.c \
src/core/lib/compression/stream_compression_gzip.c \
src/core/lib/compression/stream_compression_identity.c \
src/core/lib/debug/stats.c \
src/core/lib/debug/stats_data.c \
src/core/lib/http/format_request.c \
@ -3789,6 +3797,7 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/ext/filters/http/server/http_server_filter.c \
PUBLIC_HEADERS_C += \
include/grpc/impl/codegen/byte_buffer.h \
include/grpc/impl/codegen/byte_buffer_reader.h \
include/grpc/impl/codegen/compression_types.h \
include/grpc/impl/codegen/connectivity_state.h \
@ -3870,6 +3879,8 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \
src/core/lib/compression/compression.c \
src/core/lib/compression/message_compress.c \
src/core/lib/compression/stream_compression.c \
src/core/lib/compression/stream_compression_gzip.c \
src/core/lib/compression/stream_compression_identity.c \
src/core/lib/debug/stats.c \
src/core/lib/debug/stats_data.c \
src/core/lib/http/format_request.c \
@ -4040,6 +4051,7 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \
src/core/ext/filters/http/server/http_server_filter.c \
PUBLIC_HEADERS_C += \
include/grpc/impl/codegen/byte_buffer.h \
include/grpc/impl/codegen/byte_buffer_reader.h \
include/grpc/impl/codegen/compression_types.h \
include/grpc/impl/codegen/connectivity_state.h \
@ -4094,6 +4106,8 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/compression/compression.c \
src/core/lib/compression/message_compress.c \
src/core/lib/compression/stream_compression.c \
src/core/lib/compression/stream_compression_gzip.c \
src/core/lib/compression/stream_compression_identity.c \
src/core/lib/debug/stats.c \
src/core/lib/debug/stats_data.c \
src/core/lib/http/format_request.c \
@ -4312,6 +4326,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/plugin_registry/grpc_unsecure_plugin_registry.c \
PUBLIC_HEADERS_C += \
include/grpc/impl/codegen/byte_buffer.h \
include/grpc/impl/codegen/byte_buffer_reader.h \
include/grpc/impl/codegen/compression_types.h \
include/grpc/impl/codegen/connectivity_state.h \
@ -4600,6 +4615,7 @@ PUBLIC_HEADERS_CXX += \
include/grpc/slice_buffer.h \
include/grpc/status.h \
include/grpc/support/workaround_list.h \
include/grpc/impl/codegen/byte_buffer.h \
include/grpc/impl/codegen/byte_buffer_reader.h \
include/grpc/impl/codegen/compression_types.h \
include/grpc/impl/codegen/connectivity_state.h \
@ -4610,6 +4626,7 @@ PUBLIC_HEADERS_CXX += \
include/grpc/impl/codegen/status.h \
include/grpc++/impl/codegen/async_stream.h \
include/grpc++/impl/codegen/async_unary_call.h \
include/grpc++/impl/codegen/byte_buffer.h \
include/grpc++/impl/codegen/call.h \
include/grpc++/impl/codegen/call_hook.h \
include/grpc++/impl/codegen/channel_interface.h \
@ -4829,6 +4846,8 @@ LIBGRPC++_CRONET_SRC = \
src/core/lib/compression/compression.c \
src/core/lib/compression/message_compress.c \
src/core/lib/compression/stream_compression.c \
src/core/lib/compression/stream_compression_gzip.c \
src/core/lib/compression/stream_compression_identity.c \
src/core/lib/debug/stats.c \
src/core/lib/debug/stats_data.c \
src/core/lib/http/format_request.c \
@ -5093,6 +5112,7 @@ PUBLIC_HEADERS_CXX += \
include/grpc/slice_buffer.h \
include/grpc/status.h \
include/grpc/support/workaround_list.h \
include/grpc/impl/codegen/byte_buffer.h \
include/grpc/impl/codegen/byte_buffer_reader.h \
include/grpc/impl/codegen/compression_types.h \
include/grpc/impl/codegen/connectivity_state.h \
@ -5103,6 +5123,7 @@ PUBLIC_HEADERS_CXX += \
include/grpc/impl/codegen/status.h \
include/grpc++/impl/codegen/async_stream.h \
include/grpc++/impl/codegen/async_unary_call.h \
include/grpc++/impl/codegen/byte_buffer.h \
include/grpc++/impl/codegen/call.h \
include/grpc++/impl/codegen/call_hook.h \
include/grpc++/impl/codegen/channel_interface.h \
@ -5456,6 +5477,7 @@ LIBGRPC++_TEST_UTIL_SRC = \
PUBLIC_HEADERS_CXX += \
include/grpc++/impl/codegen/async_stream.h \
include/grpc++/impl/codegen/async_unary_call.h \
include/grpc++/impl/codegen/byte_buffer.h \
include/grpc++/impl/codegen/call.h \
include/grpc++/impl/codegen/call_hook.h \
include/grpc++/impl/codegen/channel_interface.h \
@ -5483,6 +5505,7 @@ PUBLIC_HEADERS_CXX += \
include/grpc++/impl/codegen/stub_options.h \
include/grpc++/impl/codegen/sync_stream.h \
include/grpc++/impl/codegen/time.h \
include/grpc/impl/codegen/byte_buffer.h \
include/grpc/impl/codegen/byte_buffer_reader.h \
include/grpc/impl/codegen/compression_types.h \
include/grpc/impl/codegen/connectivity_state.h \
@ -5571,6 +5594,7 @@ LIBGRPC++_TEST_UTIL_UNSECURE_SRC = \
PUBLIC_HEADERS_CXX += \
include/grpc++/impl/codegen/async_stream.h \
include/grpc++/impl/codegen/async_unary_call.h \
include/grpc++/impl/codegen/byte_buffer.h \
include/grpc++/impl/codegen/call.h \
include/grpc++/impl/codegen/call_hook.h \
include/grpc++/impl/codegen/channel_interface.h \
@ -5598,6 +5622,7 @@ PUBLIC_HEADERS_CXX += \
include/grpc++/impl/codegen/stub_options.h \
include/grpc++/impl/codegen/sync_stream.h \
include/grpc++/impl/codegen/time.h \
include/grpc/impl/codegen/byte_buffer.h \
include/grpc/impl/codegen/byte_buffer_reader.h \
include/grpc/impl/codegen/compression_types.h \
include/grpc/impl/codegen/connectivity_state.h \
@ -5805,6 +5830,7 @@ PUBLIC_HEADERS_CXX += \
include/grpc/slice_buffer.h \
include/grpc/status.h \
include/grpc/support/workaround_list.h \
include/grpc/impl/codegen/byte_buffer.h \
include/grpc/impl/codegen/byte_buffer_reader.h \
include/grpc/impl/codegen/compression_types.h \
include/grpc/impl/codegen/connectivity_state.h \
@ -5815,6 +5841,7 @@ PUBLIC_HEADERS_CXX += \
include/grpc/impl/codegen/status.h \
include/grpc++/impl/codegen/async_stream.h \
include/grpc++/impl/codegen/async_unary_call.h \
include/grpc++/impl/codegen/byte_buffer.h \
include/grpc++/impl/codegen/call.h \
include/grpc++/impl/codegen/call_hook.h \
include/grpc++/impl/codegen/channel_interface.h \

@ -667,6 +667,8 @@
'src/core/lib/compression/compression.c',
'src/core/lib/compression/message_compress.c',
'src/core/lib/compression/stream_compression.c',
'src/core/lib/compression/stream_compression_gzip.c',
'src/core/lib/compression/stream_compression_identity.c',
'src/core/lib/debug/stats.c',
'src/core/lib/debug/stats_data.c',
'src/core/lib/http/format_request.c',

@ -195,6 +195,8 @@ filegroups:
- src/core/lib/compression/compression.c
- src/core/lib/compression/message_compress.c
- src/core/lib/compression/stream_compression.c
- src/core/lib/compression/stream_compression_gzip.c
- src/core/lib/compression/stream_compression_identity.c
- src/core/lib/debug/stats.c
- src/core/lib/debug/stats_data.c
- src/core/lib/http/format_request.c
@ -346,6 +348,8 @@ filegroups:
- src/core/lib/compression/algorithm_metadata.h
- src/core/lib/compression/message_compress.h
- src/core/lib/compression/stream_compression.h
- src/core/lib/compression/stream_compression_gzip.h
- src/core/lib/compression/stream_compression_identity.h
- src/core/lib/debug/stats.h
- src/core/lib/debug/stats_data.h
- src/core/lib/http/format_request.h
@ -502,6 +506,7 @@ filegroups:
- grpc_deadline_filter
- name: grpc_codegen
public_headers:
- include/grpc/impl/codegen/byte_buffer.h
- include/grpc/impl/codegen/byte_buffer_reader.h
- include/grpc/impl/codegen/compression_types.h
- include/grpc/impl/codegen/connectivity_state.h
@ -969,6 +974,7 @@ filegroups:
public_headers:
- include/grpc++/impl/codegen/async_stream.h
- include/grpc++/impl/codegen/async_unary_call.h
- include/grpc++/impl/codegen/byte_buffer.h
- include/grpc++/impl/codegen/call.h
- include/grpc++/impl/codegen/call_hook.h
- include/grpc++/impl/codegen/channel_interface.h

@ -96,6 +96,8 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/compression/compression.c \
src/core/lib/compression/message_compress.c \
src/core/lib/compression/stream_compression.c \
src/core/lib/compression/stream_compression_gzip.c \
src/core/lib/compression/stream_compression_identity.c \
src/core/lib/debug/stats.c \
src/core/lib/debug/stats_data.c \
src/core/lib/http/format_request.c \

@ -73,6 +73,8 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\compression\\compression.c " +
"src\\core\\lib\\compression\\message_compress.c " +
"src\\core\\lib\\compression\\stream_compression.c " +
"src\\core\\lib\\compression\\stream_compression_gzip.c " +
"src\\core\\lib\\compression\\stream_compression_identity.c " +
"src\\core\\lib\\debug\\stats.c " +
"src\\core\\lib\\debug\\stats_data.c " +
"src\\core\\lib\\http\\format_request.c " +

@ -58,6 +58,7 @@ some configuration as environment variables that can be set.
completion queue
- round_robin - traces the round_robin load balancing policy
- pick_first - traces the pick first load balancing policy
- plugin_credentials - traces plugin credentials
- resource_quota - trace resource quota objects internals
- glb - traces the grpclb load balancer
- queue_pluck
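
(As with the other tracers in this list, the new plugin_credentials tracer is enabled by adding it to the comma-separated GRPC_TRACE environment variable described at the top of this file.)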

@ -141,6 +141,7 @@ Pod::Spec.new do |s|
'include/grpc/impl/codegen/sync_generic.h',
'include/grpc/impl/codegen/sync_posix.h',
'include/grpc/impl/codegen/sync_windows.h',
'include/grpc/impl/codegen/byte_buffer.h',
'include/grpc/impl/codegen/byte_buffer_reader.h',
'include/grpc/impl/codegen/compression_types.h',
'include/grpc/impl/codegen/connectivity_state.h',
@ -329,6 +330,8 @@ Pod::Spec.new do |s|
'src/core/lib/compression/algorithm_metadata.h',
'src/core/lib/compression/message_compress.h',
'src/core/lib/compression/stream_compression.h',
'src/core/lib/compression/stream_compression_gzip.h',
'src/core/lib/compression/stream_compression_identity.h',
'src/core/lib/debug/stats.h',
'src/core/lib/debug/stats_data.h',
'src/core/lib/http/format_request.h',
@ -478,6 +481,8 @@ Pod::Spec.new do |s|
'src/core/lib/compression/compression.c',
'src/core/lib/compression/message_compress.c',
'src/core/lib/compression/stream_compression.c',
'src/core/lib/compression/stream_compression_gzip.c',
'src/core/lib/compression/stream_compression_identity.c',
'src/core/lib/debug/stats.c',
'src/core/lib/debug/stats_data.c',
'src/core/lib/http/format_request.c',
@ -824,6 +829,8 @@ Pod::Spec.new do |s|
'src/core/lib/compression/algorithm_metadata.h',
'src/core/lib/compression/message_compress.h',
'src/core/lib/compression/stream_compression.h',
'src/core/lib/compression/stream_compression_gzip.h',
'src/core/lib/compression/stream_compression_identity.h',
'src/core/lib/debug/stats.h',
'src/core/lib/debug/stats_data.h',
'src/core/lib/http/format_request.h',

@ -1,14 +1,4 @@
EXPORTS
grpc_raw_byte_buffer_create
grpc_raw_compressed_byte_buffer_create
grpc_byte_buffer_copy
grpc_byte_buffer_length
grpc_byte_buffer_destroy
grpc_byte_buffer_reader_init
grpc_byte_buffer_reader_destroy
grpc_byte_buffer_reader_next
grpc_byte_buffer_reader_readall
grpc_raw_byte_buffer_from_reader
census_initialize
census_shutdown
census_supported
@ -145,6 +135,16 @@ EXPORTS
grpc_server_add_secure_http2_port
grpc_call_set_credentials
grpc_server_credentials_set_auth_metadata_processor
grpc_raw_byte_buffer_create
grpc_raw_compressed_byte_buffer_create
grpc_byte_buffer_copy
grpc_byte_buffer_length
grpc_byte_buffer_destroy
grpc_byte_buffer_reader_init
grpc_byte_buffer_reader_destroy
grpc_byte_buffer_reader_next
grpc_byte_buffer_reader_readall
grpc_raw_byte_buffer_from_reader
grpc_slice_ref
grpc_slice_unref
grpc_slice_copy

@ -146,6 +146,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/support/tmpfile_posix.c )
s.files += %w( src/core/lib/support/tmpfile_windows.c )
s.files += %w( src/core/lib/support/wrap_memcpy.c )
s.files += %w( include/grpc/impl/codegen/byte_buffer.h )
s.files += %w( include/grpc/impl/codegen/byte_buffer_reader.h )
s.files += %w( include/grpc/impl/codegen/compression_types.h )
s.files += %w( include/grpc/impl/codegen/connectivity_state.h )
@ -262,6 +263,8 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/compression/algorithm_metadata.h )
s.files += %w( src/core/lib/compression/message_compress.h )
s.files += %w( src/core/lib/compression/stream_compression.h )
s.files += %w( src/core/lib/compression/stream_compression_gzip.h )
s.files += %w( src/core/lib/compression/stream_compression_identity.h )
s.files += %w( src/core/lib/debug/stats.h )
s.files += %w( src/core/lib/debug/stats_data.h )
s.files += %w( src/core/lib/http/format_request.h )
@ -415,6 +418,8 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/compression/compression.c )
s.files += %w( src/core/lib/compression/message_compress.c )
s.files += %w( src/core/lib/compression/stream_compression.c )
s.files += %w( src/core/lib/compression/stream_compression_gzip.c )
s.files += %w( src/core/lib/compression/stream_compression_identity.c )
s.files += %w( src/core/lib/debug/stats.c )
s.files += %w( src/core/lib/debug/stats_data.c )
s.files += %w( src/core/lib/http/format_request.c )

@ -233,6 +233,8 @@
'src/core/lib/compression/compression.c',
'src/core/lib/compression/message_compress.c',
'src/core/lib/compression/stream_compression.c',
'src/core/lib/compression/stream_compression_gzip.c',
'src/core/lib/compression/stream_compression_identity.c',
'src/core/lib/debug/stats.c',
'src/core/lib/debug/stats_data.c',
'src/core/lib/http/format_request.c',
@ -532,6 +534,8 @@
'src/core/lib/compression/compression.c',
'src/core/lib/compression/message_compress.c',
'src/core/lib/compression/stream_compression.c',
'src/core/lib/compression/stream_compression_gzip.c',
'src/core/lib/compression/stream_compression_identity.c',
'src/core/lib/debug/stats.c',
'src/core/lib/debug/stats_data.c',
'src/core/lib/http/format_request.c',
@ -736,6 +740,8 @@
'src/core/lib/compression/compression.c',
'src/core/lib/compression/message_compress.c',
'src/core/lib/compression/stream_compression.c',
'src/core/lib/compression/stream_compression_gzip.c',
'src/core/lib/compression/stream_compression_identity.c',
'src/core/lib/debug/stats.c',
'src/core/lib/debug/stats_data.c',
'src/core/lib/http/format_request.c',
@ -925,6 +931,8 @@
'src/core/lib/compression/compression.c',
'src/core/lib/compression/message_compress.c',
'src/core/lib/compression/stream_compression.c',
'src/core/lib/compression/stream_compression_gzip.c',
'src/core/lib/compression/stream_compression_identity.c',
'src/core/lib/debug/stats.c',
'src/core/lib/debug/stats_data.c',
'src/core/lib/http/format_request.c',

@ -20,6 +20,7 @@
#define GRPCXX_GENERIC_GENERIC_STUB_H
#include <grpc++/support/async_stream.h>
#include <grpc++/support/async_unary_call.h>
#include <grpc++/support/byte_buffer.h>
namespace grpc {
@ -27,6 +28,7 @@ namespace grpc {
class CompletionQueue;
typedef ClientAsyncReaderWriter<ByteBuffer, ByteBuffer>
GenericClientAsyncReaderWriter;
typedef ClientAsyncResponseReader<ByteBuffer> GenericClientAsyncResponseReader;
/// Generic stubs provide a type-unsafe interface to call gRPC methods
/// by name.
@ -51,6 +53,14 @@ class GenericStub final {
std::unique_ptr<GenericClientAsyncReaderWriter> PrepareCall(
ClientContext* context, const grpc::string& method, CompletionQueue* cq);
/// Setup a unary call to a named method \a method using \a context, and don't
/// start it. Let it be started explicitly with StartCall.
/// The return value only indicates whether or not registration of the call
/// succeeded (i.e. the call won't proceed if the return value is nullptr).
std::unique_ptr<GenericClientAsyncResponseReader> PrepareUnaryCall(
ClientContext* context, const grpc::string& method,
const ByteBuffer& request, CompletionQueue* cq);
private:
std::shared_ptr<ChannelInterface> channel_;
};
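
The new PrepareUnaryCall mirrors the existing PrepareCall: it only registers the call, and the caller starts it explicitly. A rough usage sketch follows; the channel, method path, payload, and tag value are assumptions, not part of this change:

#include <grpc++/generic/generic_stub.h>
#include <grpc++/grpc++.h>

void UnaryCallByName(const std::shared_ptr<grpc::Channel>& channel) {
  grpc::GenericStub stub(channel);
  grpc::CompletionQueue cq;
  grpc::ClientContext ctx;

  grpc::Slice payload(grpc::string("raw request bytes"));  // copied-string Slice ctor
  grpc::ByteBuffer request(&payload, 1);
  grpc::ByteBuffer response;
  grpc::Status status;

  auto rpc = stub.PrepareUnaryCall(&ctx, "/pkg.Service/Method", request, &cq);
  rpc->StartCall();                            // PrepareUnaryCall does not start the call
  rpc->Finish(&response, &status, (void*)1);

  void* tag;
  bool ok;
  cq.Next(&tag, &ok);                          // block until Finish's tag comes back
}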

@ -0,0 +1,156 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPCXX_IMPL_CODEGEN_BYTE_BUFFER_H
#define GRPCXX_IMPL_CODEGEN_BYTE_BUFFER_H
#include <grpc/impl/codegen/byte_buffer.h>
#include <grpc++/impl/codegen/config.h>
#include <grpc++/impl/codegen/core_codegen_interface.h>
#include <grpc++/impl/codegen/serialization_traits.h>
#include <grpc++/impl/codegen/slice.h>
#include <grpc++/impl/codegen/status.h>
#include <vector>
namespace grpc {
template <class R>
class CallOpRecvMessage;
class MethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
class ServerStreamingHandler;
namespace CallOpGenericRecvMessageHelper {
template <class R>
class DeserializeFuncType;
} // namespace CallOpGenericRecvMessageHelper
/// A sequence of bytes.
class ByteBuffer final {
public:
/// Construct an empty buffer.
ByteBuffer() : buffer_(nullptr) {}
/// Construct buffer from \a slices, of which there are \a nslices.
ByteBuffer(const Slice* slices, size_t nslices);
/// Construct a byte buffer by referencing elements of existing buffer
/// \a buf. Wrapper of core function grpc_byte_buffer_copy
ByteBuffer(const ByteBuffer& buf);
~ByteBuffer() {
if (buffer_) {
g_core_codegen_interface->grpc_byte_buffer_destroy(buffer_);
}
}
ByteBuffer& operator=(const ByteBuffer&);
/// Dump (read) the buffer contents into \a slices.
Status Dump(std::vector<Slice>* slices) const;
/// Remove all data.
void Clear() {
if (buffer_) {
g_core_codegen_interface->grpc_byte_buffer_destroy(buffer_);
buffer_ = nullptr;
}
}
/// Make a duplicate copy of the internals of this byte
/// buffer so that we have our own owned version of it.
/// bbuf.Duplicate(); is equivalent to bbuf=bbuf; but is actually readable
void Duplicate() {
buffer_ = g_core_codegen_interface->grpc_byte_buffer_copy(buffer_);
}
/// Forget underlying byte buffer without destroying
/// Use this only for un-owned byte buffers
void Release() { buffer_ = nullptr; }
/// Buffer size in bytes.
size_t Length() const;
/// Swap the state of *this and *other.
void Swap(ByteBuffer* other);
/// Is this ByteBuffer valid?
bool Valid() const { return (buffer_ != nullptr); }
private:
friend class SerializationTraits<ByteBuffer, void>;
friend class CallOpSendMessage;
template <class R>
friend class CallOpRecvMessage;
friend class CallOpGenericRecvMessage;
friend class MethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ServerStreamingHandler;
template <class R>
friend class CallOpGenericRecvMessageHelper::DeserializeFuncType;
grpc_byte_buffer* buffer_;
// takes ownership
void set_buffer(grpc_byte_buffer* buf) {
if (buffer_) {
Clear();
}
buffer_ = buf;
}
grpc_byte_buffer* c_buffer() { return buffer_; }
grpc_byte_buffer** c_buffer_ptr() { return &buffer_; }
class ByteBufferPointer {
public:
ByteBufferPointer(const ByteBuffer* b)
: bbuf_(const_cast<ByteBuffer*>(b)) {}
operator ByteBuffer*() { return bbuf_; }
operator grpc_byte_buffer*() { return bbuf_->buffer_; }
operator grpc_byte_buffer**() { return &bbuf_->buffer_; }
private:
ByteBuffer* bbuf_;
};
ByteBufferPointer bbuf_ptr() const { return ByteBufferPointer(this); }
};
template <>
class SerializationTraits<ByteBuffer, void> {
public:
static Status Deserialize(ByteBuffer* byte_buffer, ByteBuffer* dest) {
dest->set_buffer(byte_buffer->buffer_);
return Status::OK;
}
static Status Serialize(const ByteBuffer& source, ByteBuffer* buffer,
bool* own_buffer) {
*buffer = source;
*own_buffer = true;
return Status::OK;
}
};
} // namespace grpc
#endif // GRPCXX_IMPL_CODEGEN_BYTE_BUFFER_H
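
With ByteBuffer moved into impl/codegen it can be used by the call ops and generated code below, while <grpc++/support/byte_buffer.h> remains the public entry point. For orientation, a small sketch of the surface kept by the move; the payload string is an assumption:

#include <grpc++/support/byte_buffer.h>
#include <grpc++/support/slice.h>
#include <string>
#include <vector>

std::string RoundTrip() {
  grpc::Slice in(std::string("hello world"));  // copy a string into a refcounted slice
  grpc::ByteBuffer buffer(&in, 1);             // single-slice buffer; buffer.Valid() is true

  std::vector<grpc::Slice> out;
  if (!buffer.Dump(&out).ok()) return "";      // read the contents back out

  std::string result;
  for (const auto& s : out) {
    result.append(reinterpret_cast<const char*>(s.begin()), s.size());
  }
  return result;                               // "hello world", buffer.Length() bytes
}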

@ -25,6 +25,7 @@
#include <map>
#include <memory>
#include <grpc++/impl/codegen/byte_buffer.h>
#include <grpc++/impl/codegen/call_hook.h>
#include <grpc++/impl/codegen/client_context.h>
#include <grpc++/impl/codegen/completion_queue_tag.h>
@ -39,8 +40,6 @@
#include <grpc/impl/codegen/compression_types.h>
#include <grpc/impl/codegen/grpc_types.h>
struct grpc_byte_buffer;
namespace grpc {
class ByteBuffer;
@ -281,7 +280,7 @@ class CallOpSendInitialMetadata {
class CallOpSendMessage {
public:
CallOpSendMessage() : send_buf_(nullptr) {}
CallOpSendMessage() : send_buf_() {}
/// Send \a message using \a options for the write. The \a options are cleared
/// after use.
@ -294,33 +293,35 @@ class CallOpSendMessage {
protected:
void AddOp(grpc_op* ops, size_t* nops) {
if (send_buf_ == nullptr) return;
if (!send_buf_.Valid()) return;
grpc_op* op = &ops[(*nops)++];
op->op = GRPC_OP_SEND_MESSAGE;
op->flags = write_options_.flags();
op->reserved = NULL;
op->data.send_message.send_message = send_buf_;
op->data.send_message.send_message = send_buf_.c_buffer();
// Flags are per-message: clear them after use.
write_options_.Clear();
}
void FinishOp(bool* status) {
g_core_codegen_interface->grpc_byte_buffer_destroy(send_buf_);
send_buf_ = nullptr;
}
void FinishOp(bool* status) { send_buf_.Clear(); }
private:
grpc_byte_buffer* send_buf_;
ByteBuffer send_buf_;
WriteOptions write_options_;
};
namespace internal {
template <class T>
T Example();
} // namespace internal
template <class M>
Status CallOpSendMessage::SendMessage(const M& message, WriteOptions options) {
write_options_ = options;
bool own_buf;
Status result =
SerializationTraits<M>::Serialize(message, &send_buf_, &own_buf);
Status result = SerializationTraits<M>::Serialize(
message, send_buf_.bbuf_ptr(), &own_buf);
if (!own_buf) {
send_buf_ = g_core_codegen_interface->grpc_byte_buffer_copy(send_buf_);
send_buf_.Duplicate();
}
return result;
}
@ -352,18 +353,20 @@ class CallOpRecvMessage {
op->op = GRPC_OP_RECV_MESSAGE;
op->flags = 0;
op->reserved = NULL;
op->data.recv_message.recv_message = &recv_buf_;
op->data.recv_message.recv_message = recv_buf_.c_buffer_ptr();
}
void FinishOp(bool* status) {
if (message_ == nullptr) return;
if (recv_buf_) {
if (recv_buf_.Valid()) {
if (*status) {
got_message = *status =
SerializationTraits<R>::Deserialize(recv_buf_, message_).ok();
SerializationTraits<R>::Deserialize(recv_buf_.bbuf_ptr(), message_)
.ok();
recv_buf_.Release();
} else {
got_message = false;
g_core_codegen_interface->grpc_byte_buffer_destroy(recv_buf_);
recv_buf_.Clear();
}
} else {
got_message = false;
@ -376,14 +379,14 @@ class CallOpRecvMessage {
private:
R* message_;
grpc_byte_buffer* recv_buf_;
ByteBuffer recv_buf_;
bool allow_not_getting_message_;
};
namespace CallOpGenericRecvMessageHelper {
class DeserializeFunc {
public:
virtual Status Deserialize(grpc_byte_buffer* buf) = 0;
virtual Status Deserialize(ByteBuffer* buf) = 0;
virtual ~DeserializeFunc() {}
};
@ -391,8 +394,8 @@ template <class R>
class DeserializeFuncType final : public DeserializeFunc {
public:
DeserializeFuncType(R* message) : message_(message) {}
Status Deserialize(grpc_byte_buffer* buf) override {
return SerializationTraits<R>::Deserialize(buf, message_);
Status Deserialize(ByteBuffer* buf) override {
return SerializationTraits<R>::Deserialize(buf->bbuf_ptr(), message_);
}
~DeserializeFuncType() override {}
@ -428,18 +431,19 @@ class CallOpGenericRecvMessage {
op->op = GRPC_OP_RECV_MESSAGE;
op->flags = 0;
op->reserved = NULL;
op->data.recv_message.recv_message = &recv_buf_;
op->data.recv_message.recv_message = recv_buf_.c_buffer_ptr();
}
void FinishOp(bool* status) {
if (!deserialize_) return;
if (recv_buf_) {
if (recv_buf_.Valid()) {
if (*status) {
got_message = true;
*status = deserialize_->Deserialize(recv_buf_).ok();
*status = deserialize_->Deserialize(&recv_buf_).ok();
recv_buf_.Release();
} else {
got_message = false;
g_core_codegen_interface->grpc_byte_buffer_destroy(recv_buf_);
recv_buf_.Clear();
}
} else {
got_message = false;
@ -452,7 +456,7 @@ class CallOpGenericRecvMessage {
private:
std::unique_ptr<CallOpGenericRecvMessageHelper::DeserializeFunc> deserialize_;
grpc_byte_buffer* recv_buf_;
ByteBuffer recv_buf_;
bool allow_not_getting_message_;
};

@ -19,6 +19,7 @@
#ifndef GRPCXX_IMPL_CODEGEN_METHOD_HANDLER_IMPL_H
#define GRPCXX_IMPL_CODEGEN_METHOD_HANDLER_IMPL_H
#include <grpc++/impl/codegen/byte_buffer.h>
#include <grpc++/impl/codegen/core_codegen_interface.h>
#include <grpc++/impl/codegen/rpc_service_method.h>
#include <grpc++/impl/codegen/sync_stream.h>
@ -37,8 +38,8 @@ class RpcMethodHandler : public MethodHandler {
void RunHandler(const HandlerParameter& param) final {
RequestType req;
Status status =
SerializationTraits<RequestType>::Deserialize(param.request, &req);
Status status = SerializationTraits<RequestType>::Deserialize(
param.request.bbuf_ptr(), &req);
ResponseType rsp;
if (status.ok()) {
status = func_(service_, param.server_context, &req, &rsp);
@ -123,8 +124,8 @@ class ServerStreamingHandler : public MethodHandler {
void RunHandler(const HandlerParameter& param) final {
RequestType req;
Status status =
SerializationTraits<RequestType>::Deserialize(param.request, &req);
Status status = SerializationTraits<RequestType>::Deserialize(
param.request.bbuf_ptr(), &req);
if (status.ok()) {
ServerWriter<ResponseType> writer(param.call, param.server_context);

@ -25,14 +25,11 @@
#include <memory>
#include <vector>
#include <grpc++/impl/codegen/byte_buffer.h>
#include <grpc++/impl/codegen/config.h>
#include <grpc++/impl/codegen/rpc_method.h>
#include <grpc++/impl/codegen/status.h>
extern "C" {
struct grpc_byte_buffer;
}
namespace grpc {
class ServerContext;
class StreamContextInterface;
@ -43,11 +40,14 @@ class MethodHandler {
virtual ~MethodHandler() {}
struct HandlerParameter {
HandlerParameter(Call* c, ServerContext* context, grpc_byte_buffer* req)
: call(c), server_context(context), request(req) {}
: call(c), server_context(context) {
request.set_buffer(req);
}
~HandlerParameter() { request.Release(); }
Call* call;
ServerContext* server_context;
// Handler required to grpc_byte_buffer_destroy this
grpc_byte_buffer* request;
// Handler required to destroy these contents
ByteBuffer request;
};
virtual void RunHandler(const HandlerParameter& param) = 0;
};

@ -24,17 +24,26 @@ namespace grpc {
/// Defines how to serialize and deserialize some type.
///
/// Used for hooking different message serialization API's into GRPC.
/// Each SerializationTraits implementation must provide the following
/// functions:
/// static Status Serialize(const Message& msg,
/// grpc_byte_buffer** buffer,
/// bool* own_buffer);
/// static Status Deserialize(grpc_byte_buffer* buffer,
/// Message* msg,
/// int max_receive_message_size);
/// Each SerializationTraits<Message> implementation must provide the
/// following functions:
/// 1. static Status Serialize(const Message& msg,
/// ByteBuffer* buffer,
/// bool* own_buffer);
/// OR
/// static Status Serialize(const Message& msg,
/// grpc_byte_buffer** buffer,
/// bool* own_buffer);
/// The former is preferred; the latter is deprecated
///
/// Serialize is required to convert message to a grpc_byte_buffer, and
/// to store a pointer to that byte buffer at *buffer. *own_buffer should
/// 2. static Status Deserialize(ByteBuffer* buffer,
/// Message* msg);
/// OR
/// static Status Deserialize(grpc_byte_buffer* buffer,
/// Message* msg);
/// The former is preferred; the latter is deprecated
///
/// Serialize is required to convert message to a ByteBuffer, and
/// return that byte buffer through *buffer. *own_buffer should
/// be set to true if the caller owns said byte buffer, or false if
/// ownership is retained elsewhere.
///
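
Per the updated comment, traits can now be written directly against ByteBuffer rather than grpc_byte_buffer*. A hypothetical SerializationTraits for std::string, sketched on the assumption that copying the whole message is acceptable:

#include <grpc++/impl/codegen/byte_buffer.h>
#include <grpc++/impl/codegen/serialization_traits.h>
#include <string>
#include <vector>

namespace grpc {

template <>
class SerializationTraits<std::string, void> {
 public:
  static Status Serialize(const std::string& msg, ByteBuffer* buffer,
                          bool* own_buffer) {
    Slice slice(msg);                 // copies msg into a slice
    *buffer = ByteBuffer(&slice, 1);
    *own_buffer = true;               // caller owns the buffer we just filled
    return Status::OK;
  }

  static Status Deserialize(ByteBuffer* buffer, std::string* msg) {
    std::vector<Slice> slices;
    Status status = buffer->Dump(&slices);
    if (!status.ok()) return status;
    msg->clear();
    for (const auto& s : slices) {
      msg->append(reinterpret_cast<const char*>(s.begin()), s.size());
    }
    return Status::OK;
  }
};

}  // namespace grpc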

@ -19,11 +19,89 @@
#ifndef GRPCXX_IMPL_CODEGEN_SLICE_H
#define GRPCXX_IMPL_CODEGEN_SLICE_H
#include <grpc++/impl/codegen/config.h>
#include <grpc++/impl/codegen/core_codegen_interface.h>
#include <grpc++/impl/codegen/string_ref.h>
#include <grpc/impl/codegen/slice.h>
namespace grpc {
/// A wrapper around \a grpc_slice.
///
/// A slice represents a contiguous reference counted array of bytes.
/// It is cheap to take references to a slice, and it is cheap to create a
/// slice pointing to a subset of another slice.
class Slice final {
public:
/// Construct an empty slice.
Slice();
/// Destructor - drops one reference.
~Slice();
enum AddRef { ADD_REF };
/// Construct a slice from \a slice, adding a reference.
Slice(grpc_slice slice, AddRef);
enum StealRef { STEAL_REF };
/// Construct a slice from \a slice, stealing a reference.
Slice(grpc_slice slice, StealRef);
/// Allocate a slice of specified size
Slice(size_t len);
/// Construct a slice from a copied buffer
Slice(const void* buf, size_t len);
/// Construct a slice from a copied string
Slice(const grpc::string& str);
enum StaticSlice { STATIC_SLICE };
/// Construct a slice from a static buffer
Slice(const void* buf, size_t len, StaticSlice);
/// Copy constructor, adds a reference.
Slice(const Slice& other);
/// Assignment, reference count is unchanged.
Slice& operator=(Slice other) {
std::swap(slice_, other.slice_);
return *this;
}
/// Create a slice pointing at some data. Calls malloc to allocate a refcount
/// for the object, and arranges that destroy will be called with the
/// user data pointer passed in at destruction. Can be the same as buf or
/// different (e.g., if data is part of a larger structure that must be
/// destroyed when the data is no longer needed)
Slice(void* buf, size_t len, void (*destroy)(void*), void* user_data);
/// Specialization of above for common case where buf == user_data
Slice(void* buf, size_t len, void (*destroy)(void*))
: Slice(buf, len, destroy, buf) {}
/// Similar to the above but has a destroy that also takes slice length
Slice(void* buf, size_t len, void (*destroy)(void*, size_t));
/// Byte size.
size_t size() const { return GRPC_SLICE_LENGTH(slice_); }
/// Raw pointer to the beginning (first element) of the slice.
const uint8_t* begin() const { return GRPC_SLICE_START_PTR(slice_); }
/// Raw pointer to the end (one byte \em past the last element) of the slice.
const uint8_t* end() const { return GRPC_SLICE_END_PTR(slice_); }
/// Raw C slice. Caller needs to call grpc_slice_unref when done.
grpc_slice c_slice() const;
private:
friend class ByteBuffer;
grpc_slice slice_;
};
inline grpc::string_ref StringRefFromSlice(const grpc_slice* slice) {
return grpc::string_ref(
reinterpret_cast<const char*>(GRPC_SLICE_START_PTR(*slice)),
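
The Slice wrapper moves here essentially unchanged, except that c_slice() is now only declared rather than defined inline. A small sketch of adopting an existing core slice; the literal text is an assumption:

#include <grpc++/support/slice.h>
#include <grpc/slice.h>
#include <string>

std::string AdoptAndRead() {
  grpc_slice raw = grpc_slice_from_copied_string("abc");
  grpc::Slice s(raw, grpc::Slice::STEAL_REF);  // takes over raw's reference; no unref needed
  return std::string(reinterpret_cast<const char*>(s.begin()), s.size());
}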

@ -19,6 +19,7 @@
#ifndef GRPCXX_SUPPORT_BYTE_BUFFER_H
#define GRPCXX_SUPPORT_BYTE_BUFFER_H
#include <grpc++/impl/codegen/byte_buffer.h>
#include <grpc++/impl/serialization_traits.h>
#include <grpc++/support/config.h>
#include <grpc++/support/slice.h>
@ -27,71 +28,4 @@
#include <grpc/grpc.h>
#include <grpc/support/log.h>
#include <vector>
namespace grpc {
/// A sequence of bytes.
class ByteBuffer final {
public:
/// Construct an empty buffer.
ByteBuffer() : buffer_(nullptr) {}
/// Construct buffer from \a slices, of which there are \a nslices.
ByteBuffer(const Slice* slices, size_t nslices);
/// Construct a byte buffer by referencing elements of existing buffer
/// \a buf. Wrapper of core function grpc_byte_buffer_copy
ByteBuffer(const ByteBuffer& buf);
~ByteBuffer();
ByteBuffer& operator=(const ByteBuffer&);
/// Dump (read) the buffer contents into \a slices.
Status Dump(std::vector<Slice>* slices) const;
/// Remove all data.
void Clear();
/// Buffer size in bytes.
size_t Length() const;
/// Swap the state of *this and *other.
void Swap(ByteBuffer* other);
private:
friend class SerializationTraits<ByteBuffer, void>;
// takes ownership
void set_buffer(grpc_byte_buffer* buf) {
if (buffer_) {
Clear();
}
buffer_ = buf;
}
// For \a SerializationTraits's usage.
grpc_byte_buffer* buffer() const { return buffer_; }
grpc_byte_buffer* buffer_;
};
template <>
class SerializationTraits<ByteBuffer, void> {
public:
static Status Deserialize(grpc_byte_buffer* byte_buffer, ByteBuffer* dest) {
dest->set_buffer(byte_buffer);
return Status::OK;
}
static Status Serialize(const ByteBuffer& source, grpc_byte_buffer** buffer,
bool* own_buffer) {
*buffer = grpc_byte_buffer_copy(source.buffer());
*own_buffer = true;
return Status::OK;
}
};
} // namespace grpc
#endif // GRPCXX_SUPPORT_BYTE_BUFFER_H

@ -64,6 +64,12 @@ class ChannelArguments {
/// Set the compression algorithm for the channel.
void SetCompressionAlgorithm(grpc_compression_algorithm algorithm);
/// Set the grpclb fallback timeout (in ms) for the channel. If this amount
/// of time has passed but we have not gotten any non-empty \a serverlist from
/// the balancer, we will fall back to use the backend address(es) returned by
/// the resolver.
void SetGrpclbFallbackTimeout(int fallback_timeout);
/// Set the socket mutator for the channel.
void SetSocketMutator(grpc_socket_mutator* mutator);
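
A sketch of setting the new fallback timeout when building a channel; the target, credentials, and the 10-second value are assumptions. (At the core level the same knob is the GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS channel argument added in grpc_types.h below.)

#include <grpc++/create_channel.h>
#include <grpc++/security/credentials.h>
#include <grpc++/support/channel_arguments.h>

std::shared_ptr<grpc::Channel> MakeChannelWithFallback() {
  grpc::ChannelArguments args;
  args.SetGrpclbFallbackTimeout(10000);  // ms; use resolver-returned backends if no
                                         // serverlist arrives within 10 seconds
  return grpc::CreateCustomChannel("dns:///my-service:443",
                                   grpc::InsecureChannelCredentials(), args);
}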

@ -19,86 +19,8 @@
#ifndef GRPCXX_SUPPORT_SLICE_H
#define GRPCXX_SUPPORT_SLICE_H
#include <grpc++/impl/codegen/slice.h>
#include <grpc++/support/config.h>
#include <grpc/slice.h>
namespace grpc {
/// A wrapper around \a grpc_slice.
///
/// A slice represents a contiguous reference counted array of bytes.
/// It is cheap to take references to a slice, and it is cheap to create a
/// slice pointing to a subset of another slice.
class Slice final {
public:
/// Construct an empty slice.
Slice();
/// Destructor - drops one reference.
~Slice();
enum AddRef { ADD_REF };
/// Construct a slice from \a slice, adding a reference.
Slice(grpc_slice slice, AddRef);
enum StealRef { STEAL_REF };
/// Construct a slice from \a slice, stealing a reference.
Slice(grpc_slice slice, StealRef);
/// Allocate a slice of specified size
Slice(size_t len);
/// Construct a slice from a copied buffer
Slice(const void* buf, size_t len);
/// Construct a slice from a copied string
Slice(const grpc::string& str);
enum StaticSlice { STATIC_SLICE };
/// Construct a slice from a static buffer
Slice(const void* buf, size_t len, StaticSlice);
/// Copy constructor, adds a reference.
Slice(const Slice& other);
/// Assignment, reference count is unchanged.
Slice& operator=(Slice other) {
std::swap(slice_, other.slice_);
return *this;
}
/// Create a slice pointing at some data. Calls malloc to allocate a refcount
/// for the object, and arranges that destroy will be called with the
/// user data pointer passed in at destruction. Can be the same as buf or
/// different (e.g., if data is part of a larger structure that must be
/// destroyed when the data is no longer needed)
Slice(void* buf, size_t len, void (*destroy)(void*), void* user_data);
/// Specialization of above for common case where buf == user_data
Slice(void* buf, size_t len, void (*destroy)(void*))
: Slice(buf, len, destroy, buf) {}
/// Similar to the above but has a destroy that also takes slice length
Slice(void* buf, size_t len, void (*destroy)(void*, size_t));
/// Byte size.
size_t size() const { return GRPC_SLICE_LENGTH(slice_); }
/// Raw pointer to the beginning (first element) of the slice.
const uint8_t* begin() const { return GRPC_SLICE_START_PTR(slice_); }
/// Raw pointer to the end (one byte \em past the last element) of the slice.
const uint8_t* end() const { return GRPC_SLICE_END_PTR(slice_); }
/// Raw C slice. Caller needs to call grpc_slice_unref when done.
grpc_slice c_slice() const { return grpc_slice_ref(slice_); }
private:
friend class ByteBuffer;
grpc_slice slice_;
};
} // namespace grpc
#endif // GRPCXX_SUPPORT_SLICE_H

@ -19,69 +19,7 @@
#ifndef GRPC_BYTE_BUFFER_H
#define GRPC_BYTE_BUFFER_H
#include <grpc/impl/codegen/grpc_types.h>
#include <grpc/impl/codegen/byte_buffer.h>
#include <grpc/slice_buffer.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Returns a RAW byte buffer instance over the given slices (up to \a nslices).
*
* Increases the reference count for all \a slices processed. The user is
* responsible for invoking grpc_byte_buffer_destroy on the returned instance.*/
GRPCAPI grpc_byte_buffer *grpc_raw_byte_buffer_create(grpc_slice *slices,
size_t nslices);
/** Returns a *compressed* RAW byte buffer instance over the given slices (up to
* \a nslices). The \a compression argument defines the compression algorithm
* used to generate the data in \a slices.
*
* Increases the reference count for all \a slices processed. The user is
* responsible for invoking grpc_byte_buffer_destroy on the returned instance.*/
GRPCAPI grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create(
grpc_slice *slices, size_t nslices, grpc_compression_algorithm compression);
/** Copies input byte buffer \a bb.
*
* Increases the reference count of all the source slices. The user is
* responsible for calling grpc_byte_buffer_destroy over the returned copy. */
GRPCAPI grpc_byte_buffer *grpc_byte_buffer_copy(grpc_byte_buffer *bb);
/** Returns the size of the given byte buffer, in bytes. */
GRPCAPI size_t grpc_byte_buffer_length(grpc_byte_buffer *bb);
/** Destroys \a byte_buffer deallocating all its memory. */
GRPCAPI void grpc_byte_buffer_destroy(grpc_byte_buffer *byte_buffer);
/** Reader for byte buffers. Iterates over slices in the byte buffer */
struct grpc_byte_buffer_reader;
typedef struct grpc_byte_buffer_reader grpc_byte_buffer_reader;
/** Initialize \a reader to read over \a buffer.
* Returns 1 upon success, 0 otherwise. */
GRPCAPI int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
grpc_byte_buffer *buffer);
/** Cleanup and destroy \a reader */
GRPCAPI void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader);
/** Updates \a slice with the next piece of data from \a reader and returns
* 1. Returns 0 at the end of the stream. Caller is responsible for calling
* grpc_slice_unref on the result. */
GRPCAPI int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader,
grpc_slice *slice);
/** Merge all data from \a reader into single slice */
GRPCAPI grpc_slice
grpc_byte_buffer_reader_readall(grpc_byte_buffer_reader *reader);
/** Returns a RAW byte buffer instance from the output of \a reader. */
GRPCAPI grpc_byte_buffer *grpc_raw_byte_buffer_from_reader(
grpc_byte_buffer_reader *reader);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_BYTE_BUFFER_H */

@ -249,19 +249,40 @@ typedef struct {
void *reserved;
} grpc_auth_metadata_context;
/** Maximum number of metadata entries returnable by a credentials plugin via
a synchronous return. */
#define GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX 4
/** grpc_metadata_credentials plugin is an API user provided structure used to
create grpc_credentials objects that can be set on a channel (composed) or
a call. See grpc_credentials_metadata_create_from_plugin below.
The grpc client stack will call the get_metadata method of the plugin for
every call in scope for the credentials created from it. */
typedef struct {
/** The implementation of this method has to be non-blocking.
- context is the information that can be used by the plugin to create auth
metadata.
- cb is the callback that needs to be called when the metadata is ready.
- user_data needs to be passed as the first parameter of the callback. */
void (*get_metadata)(void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void *user_data);
/** The implementation of this method has to be non-blocking, but can
be performed synchronously or asynchronously.
If processing occurs synchronously, returns non-zero and populates
creds_md, num_creds_md, status, and error_details. In this case,
the caller takes ownership of the entries in creds_md and of
error_details. Note that if the plugin needs to return more than
GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX entries in creds_md, it must
return asynchronously.
If processing occurs asynchronously, returns zero and invokes \a cb
when processing is completed. \a user_data will be passed as the
first parameter of the callback. NOTE: \a cb MUST be invoked in a
different thread, not from the thread in which \a get_metadata() is
invoked.
\a context is the information that can be used by the plugin to create
auth metadata. */
int (*get_metadata)(
void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void *user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t *num_creds_md, grpc_status_code *status,
const char **error_details);
/** Destroys the plugin state. */
void (*destroy)(void *state);
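
A sketch of a plugin taking the new synchronous path; the header name and token are assumptions, and a real plugin would also fill in destroy and handle the asynchronous branch (return 0 and invoke cb from a different thread):

#include <grpc/grpc_security.h>
#include <grpc/slice.h>
#include <grpc/status.h>

extern "C" int my_plugin_get_metadata(
    void* state, grpc_auth_metadata_context context,
    grpc_credentials_plugin_metadata_cb cb, void* user_data,
    grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
    size_t* num_creds_md, grpc_status_code* status,
    const char** error_details) {
  (void)state;
  (void)context;
  (void)cb;         // unused here: we answer synchronously, so the callback is never invoked
  (void)user_data;
  creds_md[0].key = grpc_slice_from_static_string("authorization");
  creds_md[0].value = grpc_slice_from_static_string("Bearer my-static-token");
  creds_md[0].flags = 0;
  *num_creds_md = 1;
  *status = GRPC_STATUS_OK;
  *error_details = nullptr;
  return 1;  // non-zero: processed synchronously; caller takes ownership of creds_md
}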

@ -0,0 +1,86 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_IMPL_CODEGEN_BYTE_BUFFER_H
#define GRPC_IMPL_CODEGEN_BYTE_BUFFER_H
#include <grpc/impl/codegen/grpc_types.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Returns a RAW byte buffer instance over the given slices (up to \a nslices).
*
* Increases the reference count for all \a slices processed. The user is
* responsible for invoking grpc_byte_buffer_destroy on the returned instance.*/
GRPCAPI grpc_byte_buffer *grpc_raw_byte_buffer_create(grpc_slice *slices,
size_t nslices);
/** Returns a *compressed* RAW byte buffer instance over the given slices (up to
* \a nslices). The \a compression argument defines the compression algorithm
* used to generate the data in \a slices.
*
* Increases the reference count for all \a slices processed. The user is
* responsible for invoking grpc_byte_buffer_destroy on the returned instance.*/
GRPCAPI grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create(
grpc_slice *slices, size_t nslices, grpc_compression_algorithm compression);
/** Copies input byte buffer \a bb.
*
* Increases the reference count of all the source slices. The user is
* responsible for calling grpc_byte_buffer_destroy over the returned copy. */
GRPCAPI grpc_byte_buffer *grpc_byte_buffer_copy(grpc_byte_buffer *bb);
/** Returns the size of the given byte buffer, in bytes. */
GRPCAPI size_t grpc_byte_buffer_length(grpc_byte_buffer *bb);
/** Destroys \a byte_buffer deallocating all its memory. */
GRPCAPI void grpc_byte_buffer_destroy(grpc_byte_buffer *byte_buffer);
/** Reader for byte buffers. Iterates over slices in the byte buffer */
struct grpc_byte_buffer_reader;
typedef struct grpc_byte_buffer_reader grpc_byte_buffer_reader;
/** Initialize \a reader to read over \a buffer.
* Returns 1 upon success, 0 otherwise. */
GRPCAPI int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
grpc_byte_buffer *buffer);
/** Cleanup and destroy \a reader */
GRPCAPI void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader);
/** Updates \a slice with the next piece of data from \a reader and returns
* 1. Returns 0 at the end of the stream. Caller is responsible for calling
* grpc_slice_unref on the result. */
GRPCAPI int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader,
grpc_slice *slice);
/** Merge all data from \a reader into a single slice */
GRPCAPI grpc_slice
grpc_byte_buffer_reader_readall(grpc_byte_buffer_reader *reader);
/** Returns a RAW byte buffer instance from the output of \a reader. */
GRPCAPI grpc_byte_buffer *grpc_raw_byte_buffer_from_reader(
grpc_byte_buffer_reader *reader);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_IMPL_CODEGEN_BYTE_BUFFER_H */
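A short usage sketch of the reader API declared above (only the functions listed in this header plus grpc/slice.h are assumed): build a byte buffer from one slice, then iterate and unref each slice the reader hands back.

/* Sketch: count the payload bytes of a byte buffer via the reader API. */
#include <grpc/byte_buffer.h>
#include <grpc/byte_buffer_reader.h>
#include <grpc/slice.h>

static size_t count_payload_bytes(void) {
  grpc_slice payload = grpc_slice_from_static_string("hello");
  grpc_byte_buffer *bb = grpc_raw_byte_buffer_create(&payload, 1);
  grpc_slice_unref(payload); /* bb holds its own reference */

  grpc_byte_buffer_reader reader;
  size_t total = 0;
  if (grpc_byte_buffer_reader_init(&reader, bb)) {
    grpc_slice slice;
    while (grpc_byte_buffer_reader_next(&reader, &slice)) {
      total += GRPC_SLICE_LENGTH(slice);
      grpc_slice_unref(slice); /* caller owns each returned slice */
    }
    grpc_byte_buffer_reader_destroy(&reader);
  }
  grpc_byte_buffer_destroy(bb);
  return total;
}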

@ -288,7 +288,11 @@ typedef struct {
"grpc.experimental.tcp_max_read_chunk_size"
/* Timeout in milliseconds to use for calls to the grpclb load balancer.
If 0 or unset, the balancer calls will have no deadline. */
#define GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS "grpc.grpclb_timeout_ms"
#define GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS "grpc.grpclb_call_timeout_ms"
/* Timeout in milliseconds to wait for the serverlist from the grpclb load
balancer before using fallback backend addresses from the resolver.
If 0, fallback will never be used. */
#define GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS "grpc.grpclb_fallback_timeout_ms"
/** If non-zero, grpc server's cronet compression workaround will be enabled */
#define GRPC_ARG_WORKAROUND_CRONET_COMPRESSION \
"grpc.workaround.cronet_compression"

@ -183,7 +183,6 @@
#define _BSD_SOURCE
#endif
#if TARGET_OS_IPHONE
#define GPR_FORBID_UNREACHABLE_CODE 1
#define GPR_PLATFORM_STRING "ios"
#define GPR_CPU_IPHONE 1
#define GPR_PTHREAD_TLS 1
@ -292,10 +291,6 @@
#endif
#ifdef _MSC_VER
#ifdef _PYTHON_MSVC
// The Python 3.5 Windows runtime is missing InetNtop
#define GPR_WIN_INET_NTOP
#endif // _PYTHON_MSVC
#if _MSC_VER < 1700
typedef __int8 int8_t;
typedef __int16 int16_t;

@ -65,11 +65,7 @@ GPRAPI grpc_slice grpc_slice_new_with_len(void *p, size_t len,
GPRAPI grpc_slice grpc_slice_malloc(size_t length);
GPRAPI grpc_slice grpc_slice_malloc_large(size_t length);
#define GRPC_SLICE_MALLOC(len) \
((len) <= GRPC_SLICE_INLINED_SIZE \
? (grpc_slice){.refcount = NULL, \
.data.inlined = {.length = (uint8_t)(len)}} \
: grpc_slice_malloc_large((len)))
#define GRPC_SLICE_MALLOC(len) grpc_slice_malloc(len)
/** Intern a slice:

@ -158,6 +158,7 @@
<file baseinstalldir="/" name="src/core/lib/support/tmpfile_posix.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/tmpfile_windows.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/wrap_memcpy.c" role="src" />
<file baseinstalldir="/" name="include/grpc/impl/codegen/byte_buffer.h" role="src" />
<file baseinstalldir="/" name="include/grpc/impl/codegen/byte_buffer_reader.h" role="src" />
<file baseinstalldir="/" name="include/grpc/impl/codegen/compression_types.h" role="src" />
<file baseinstalldir="/" name="include/grpc/impl/codegen/connectivity_state.h" role="src" />
@ -274,6 +275,8 @@
<file baseinstalldir="/" name="src/core/lib/compression/algorithm_metadata.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/compression/message_compress.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/compression/stream_compression.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/compression/stream_compression_gzip.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/compression/stream_compression_identity.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/debug/stats.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/debug/stats_data.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/http/format_request.h" role="src" />
@ -427,6 +430,8 @@
<file baseinstalldir="/" name="src/core/lib/compression/compression.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/compression/message_compress.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/compression/stream_compression.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/compression/stream_compression_gzip.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/compression/stream_compression_identity.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/debug/stats.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/debug/stats_data.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/http/format_request.c" role="src" />

@ -110,8 +110,6 @@ if EXTRA_ENV_COMPILE_ARGS is None:
EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime32 -D_timeb=__timeb32 -D_ftime_s=_ftime32_s'
else:
EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime64 -D_timeb=__timeb64'
elif 'win32' in sys.platform:
EXTRA_ENV_COMPILE_ARGS += ' -D_PYTHON_MSVC'
elif "linux" in sys.platform:
EXTRA_ENV_COMPILE_ARGS += ' -std=c++11 -std=gnu99 -fvisibility=hidden -fno-wrapv -fno-exceptions'
elif "darwin" in sys.platform:
@ -163,7 +161,7 @@ if "win32" in sys.platform:
# TODO(zyc): Re-enable c-ares on x64 and x86 windows after fixing the
# ares_library_init compilation issue
DEFINE_MACROS += (('WIN32_LEAN_AND_MEAN', 1), ('CARES_STATICLIB', 1),
('GRPC_ARES', 0),)
('GRPC_ARES', 0), ('NTDDI_VERSION', 0x06000000),)
if '64bit' in platform.architecture()[0]:
DEFINE_MACROS += (('MS_WIN64', 1),)
elif sys.version_info >= (3, 5):

@ -1 +0,0 @@
@vjpai cpp_generator.cc

@ -33,7 +33,7 @@ using std::map;
namespace grpc_php_generator {
namespace {
grpc::string MessageIdentifierName(const grpc::string &name) {
grpc::string ConvertToPhpNamespace(const grpc::string &name) {
std::vector<grpc::string> tokens = grpc_generator::tokenize(name, ".");
std::ostringstream oss;
for (unsigned int i = 0; i < tokens.size(); i++) {
@ -43,14 +43,33 @@ grpc::string MessageIdentifierName(const grpc::string &name) {
return oss.str();
}
grpc::string PackageName(const FileDescriptor *file) {
if (file->options().has_php_namespace()) {
return file->options().php_namespace();
} else {
return ConvertToPhpNamespace(file->package());
}
}
grpc::string MessageIdentifierName(const grpc::string &name,
const FileDescriptor *file) {
std::vector<grpc::string> tokens = grpc_generator::tokenize(name, ".");
std::ostringstream oss;
oss << PackageName(file) << "\\"
<< grpc_generator::CapitalizeFirstLetter(tokens[tokens.size() - 1]);
return oss.str();
}
void PrintMethod(const MethodDescriptor *method, Printer *out) {
const Descriptor *input_type = method->input_type();
const Descriptor *output_type = method->output_type();
map<grpc::string, grpc::string> vars;
vars["service_name"] = method->service()->full_name();
vars["name"] = method->name();
vars["input_type_id"] = MessageIdentifierName(input_type->full_name());
vars["output_type_id"] = MessageIdentifierName(output_type->full_name());
vars["input_type_id"] =
MessageIdentifierName(input_type->full_name(), input_type->file());
vars["output_type_id"] =
MessageIdentifierName(output_type->full_name(), output_type->file());
out->Print("/**\n");
out->Print(GetPHPComments(method, " *").c_str());
@ -149,12 +168,7 @@ grpc::string GenerateFile(const FileDescriptor *file,
}
map<grpc::string, grpc::string> vars;
grpc::string php_namespace;
if (file->options().has_php_namespace()) {
php_namespace = file->options().php_namespace();
} else {
php_namespace = MessageIdentifierName(file->package());
}
grpc::string php_namespace = PackageName(file);
vars["package"] = php_namespace;
out.Print(vars, "namespace $package$;\n\n");

@ -44,6 +44,8 @@ static char* get_http_proxy_server(grpc_exec_ctx* exec_ctx, char** user_cred) {
GPR_ASSERT(user_cred != NULL);
char* proxy_name = NULL;
char* uri_str = gpr_getenv("http_proxy");
char** authority_strs = NULL;
size_t authority_nstrs;
if (uri_str == NULL) return NULL;
grpc_uri* uri =
grpc_uri_parse(exec_ctx, uri_str, false /* suppress_errors */);
@ -56,8 +58,6 @@ static char* get_http_proxy_server(grpc_exec_ctx* exec_ctx, char** user_cred) {
goto done;
}
/* Split on '@' to separate user credentials from host */
char** authority_strs = NULL;
size_t authority_nstrs;
gpr_string_split(uri->authority, "@", &authority_strs, &authority_nstrs);
GPR_ASSERT(authority_nstrs != 0); /* should have at least 1 string */
if (authority_nstrs == 1) {
@ -91,6 +91,7 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
char* user_cred = NULL;
*name_to_resolve = get_http_proxy_server(exec_ctx, &user_cred);
if (*name_to_resolve == NULL) return false;
char* no_proxy_str = NULL;
grpc_uri* uri =
grpc_uri_parse(exec_ctx, server_uri, false /* suppress_errors */);
if (uri == NULL || uri->path[0] == '\0') {
@ -98,20 +99,14 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
"'http_proxy' environment variable set, but cannot "
"parse server URI '%s' -- not using proxy",
server_uri);
if (uri != NULL) {
gpr_free(user_cred);
grpc_uri_destroy(uri);
}
return false;
goto no_use_proxy;
}
if (strcmp(uri->scheme, "unix") == 0) {
gpr_log(GPR_INFO, "not using proxy for Unix domain socket '%s'",
server_uri);
gpr_free(user_cred);
grpc_uri_destroy(uri);
return false;
goto no_use_proxy;
}
char* no_proxy_str = gpr_getenv("no_proxy");
no_proxy_str = gpr_getenv("no_proxy");
if (no_proxy_str != NULL) {
static const char* NO_PROXY_SEPARATOR = ",";
bool use_proxy = true;
@ -147,12 +142,7 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
gpr_free(no_proxy_hosts);
gpr_free(server_host);
gpr_free(server_port);
if (!use_proxy) {
grpc_uri_destroy(uri);
gpr_free(*name_to_resolve);
*name_to_resolve = NULL;
return false;
}
if (!use_proxy) goto no_use_proxy;
}
}
grpc_arg args_to_add[2];
@ -173,9 +163,15 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
} else {
*new_args = grpc_channel_args_copy_and_add(args, args_to_add, 1);
}
gpr_free(user_cred);
grpc_uri_destroy(uri);
gpr_free(user_cred);
return true;
no_use_proxy:
if (uri != NULL) grpc_uri_destroy(uri);
gpr_free(*name_to_resolve);
*name_to_resolve = NULL;
gpr_free(user_cred);
return false;
}
static bool proxy_mapper_map_address(grpc_exec_ctx* exec_ctx,

@ -123,6 +123,7 @@
#define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
#define GRPC_GRPCLB_RECONNECT_JITTER 0.2
#define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000
grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false, "glb");
@ -299,6 +300,10 @@ typedef struct glb_lb_policy {
/** timeout in milliseconds for the LB call. 0 means no deadline. */
int lb_call_timeout_ms;
/** timeout in milliseconds before using fallback backend addresses.
* 0 means not using fallback. */
int lb_fallback_timeout_ms;
/** for communicating with the LB server */
grpc_channel *lb_channel;
@ -325,6 +330,9 @@ typedef struct glb_lb_policy {
* Otherwise, we delegate to the RR policy. */
size_t serverlist_index;
/** stores the backend addresses from the resolver */
grpc_lb_addresses *fallback_backend_addresses;
/** list of picks that are waiting on RR's policy connectivity */
pending_pick *pending_picks;
@ -345,6 +353,9 @@ typedef struct glb_lb_policy {
/** is \a lb_call_retry_timer active? */
bool retry_timer_active;
/** is \a lb_fallback_timer active? */
bool fallback_timer_active;
/** called upon changes to the LB channel's connectivity. */
grpc_closure lb_channel_on_connectivity_changed;
@ -354,9 +365,6 @@ typedef struct glb_lb_policy {
/************************************************************/
/* client data associated with the LB server communication */
/************************************************************/
/* Finished sending initial request. */
grpc_closure lb_on_sent_initial_request;
/* Status from the LB server has been received. This signals the end of the LB
* call. */
grpc_closure lb_on_server_status_received;
@ -367,6 +375,9 @@ typedef struct glb_lb_policy {
/* LB call retry timer callback. */
grpc_closure lb_on_call_retry;
/* LB fallback timer callback. */
grpc_closure lb_on_fallback;
grpc_call *lb_call; /* streaming call to the LB server, */
grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
@ -390,7 +401,9 @@ typedef struct glb_lb_policy {
/** LB call retry timer */
grpc_timer lb_call_retry_timer;
bool initial_request_sent;
/** LB fallback timer */
grpc_timer lb_fallback_timer;
bool seen_initial_response;
/* Stats for client-side load reporting. Should be unreffed and
@ -536,6 +549,32 @@ static grpc_lb_addresses *process_serverlist_locked(
return lb_addresses;
}
/* Returns the backend addresses extracted from the given addresses */
static grpc_lb_addresses *extract_backend_addresses_locked(
grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses) {
/* first pass: count the number of backend addresses */
size_t num_backends = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (!addresses->addresses[i].is_balancer) {
++num_backends;
}
}
/* second pass: actually populate the addresses and (empty) LB tokens */
grpc_lb_addresses *backend_addresses =
grpc_lb_addresses_create(num_backends, &lb_token_vtable);
size_t num_copied = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) continue;
const grpc_resolved_address *addr = &addresses->addresses[i].address;
grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr,
addr->len, false /* is_balancer */,
NULL /* balancer_name */,
(void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
++num_copied;
}
return backend_addresses;
}
static void update_lb_connectivity_status_locked(
grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
grpc_connectivity_state rr_state, grpc_error *rr_state_error) {
@ -603,35 +642,38 @@ static bool pick_from_internal_rr_locked(
grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
const grpc_lb_policy_pick_args *pick_args, bool force_async,
grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
// Look at the index into the serverlist to see if we should drop this call.
grpc_grpclb_server *server =
glb_policy->serverlist->servers[glb_policy->serverlist_index++];
if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
glb_policy->serverlist_index = 0; // Wrap-around.
}
if (server->drop) {
// Not using the RR policy, so unref it.
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
(intptr_t)wc_arg->rr_policy);
// Check for drops if we are not using fallback backend addresses.
if (glb_policy->serverlist != NULL) {
// Look at the index into the serverlist to see if we should drop this call.
grpc_grpclb_server *server =
glb_policy->serverlist->servers[glb_policy->serverlist_index++];
if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
glb_policy->serverlist_index = 0; // Wrap-around.
}
GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
// Update client load reporting stats to indicate the number of
// dropped calls. Note that we have to do this here instead of in
// the client_load_reporting filter, because we do not create a
// subchannel call (and therefore no client_load_reporting filter)
// for dropped calls.
grpc_grpclb_client_stats_add_call_dropped_locked(server->load_balance_token,
wc_arg->client_stats);
grpc_grpclb_client_stats_unref(wc_arg->client_stats);
if (force_async) {
GPR_ASSERT(wc_arg->wrapped_closure != NULL);
GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
if (server->drop) {
// Not using the RR policy, so unref it.
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
(intptr_t)wc_arg->rr_policy);
}
GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
// Update client load reporting stats to indicate the number of
// dropped calls. Note that we have to do this here instead of in
// the client_load_reporting filter, because we do not create a
// subchannel call (and therefore no client_load_reporting filter)
// for dropped calls.
grpc_grpclb_client_stats_add_call_dropped_locked(
server->load_balance_token, wc_arg->client_stats);
grpc_grpclb_client_stats_unref(wc_arg->client_stats);
if (force_async) {
GPR_ASSERT(wc_arg->wrapped_closure != NULL);
GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
gpr_free(wc_arg->free_when_done);
return false;
}
gpr_free(wc_arg->free_when_done);
return false;
return true;
}
gpr_free(wc_arg->free_when_done);
return true;
}
// Pick via the RR policy.
const bool pick_done = grpc_lb_policy_pick_locked(
@ -669,8 +711,18 @@ static bool pick_from_internal_rr_locked(
static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
grpc_lb_addresses *addresses =
process_serverlist_locked(exec_ctx, glb_policy->serverlist);
grpc_lb_addresses *addresses;
if (glb_policy->serverlist != NULL) {
GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist);
} else {
// If rr_handover_locked() is invoked when we haven't received any
// serverlist from the balancer, we use the fallback backends returned by
// the resolver. Note that the fallback backend list may be empty, in which
// case the new round_robin policy will keep the requested picks pending.
GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
}
GPR_ASSERT(addresses != NULL);
grpc_lb_policy_args *args = (grpc_lb_policy_args *)gpr_zalloc(sizeof(*args));
args->client_channel_factory = glb_policy->cc_factory;
@ -776,8 +828,6 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
/* glb_policy->rr_policy may be NULL (initial handover) */
static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
GPR_ASSERT(glb_policy->serverlist != NULL &&
glb_policy->serverlist->num_servers > 0);
if (glb_policy->shutting_down) return;
grpc_lb_policy_args *args = lb_policy_args_create(exec_ctx, glb_policy);
GPR_ASSERT(args != NULL);
@ -926,6 +976,9 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
if (glb_policy->serverlist != NULL) {
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
if (glb_policy->fallback_backend_addresses != NULL) {
grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
}
grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
grpc_subchannel_index_unref();
if (glb_policy->pending_update_args != NULL) {
@ -1067,10 +1120,28 @@ static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_UNREF(error);
}
static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy);
static void start_picking_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
/* start a timer to fall back */
if (glb_policy->lb_fallback_timeout_ms > 0 &&
glb_policy->serverlist == NULL && !glb_policy->fallback_timer_active) {
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec deadline = gpr_time_add(
now,
gpr_time_from_millis(glb_policy->lb_fallback_timeout_ms, GPR_TIMESPAN));
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
glb_policy->fallback_timer_active = true;
grpc_timer_init(exec_ctx, &glb_policy->lb_fallback_timer, deadline,
&glb_policy->lb_on_fallback, now);
}
glb_policy->started_picking = true;
gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
query_for_backends_locked(exec_ctx, glb_policy);
@ -1173,6 +1244,58 @@ static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
exec_ctx, &glb_policy->state_tracker, current, notify);
}
static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
glb_policy->retry_timer_active = false;
if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
(void *)glb_policy);
}
GPR_ASSERT(glb_policy->lb_call == NULL);
query_for_backends_locked(exec_ctx, glb_policy);
}
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
}
static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
if (glb_policy->started_picking && glb_policy->updating_lb_call) {
if (glb_policy->retry_timer_active) {
grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
}
if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy);
glb_policy->updating_lb_call = false;
} else if (!glb_policy->shutting_down) {
/* if we aren't shutting down, restart the LB client call after some time */
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec next_try =
gpr_backoff_step(&glb_policy->lb_call_backoff_state, now);
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
(void *)glb_policy);
gpr_timespec timeout = gpr_time_sub(next_try, now);
if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
gpr_log(GPR_DEBUG,
"... retry_timer_active in %" PRId64 ".%09d seconds.",
timeout.tv_sec, timeout.tv_nsec);
} else {
gpr_log(GPR_DEBUG, "... retry_timer_active immediately.");
}
}
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
lb_call_on_retry_timer_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
glb_policy->retry_timer_active = true;
grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
&glb_policy->lb_on_call_retry, now);
}
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"lb_on_server_status_received_locked");
}
static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
@ -1203,21 +1326,6 @@ static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
schedule_next_client_load_report(exec_ctx, glb_policy);
}
static void do_send_client_load_report_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_SEND_MESSAGE;
op.data.send_message.send_message = glb_policy->client_load_report_payload;
GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
client_load_report_done_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
grpc_call_error call_error = grpc_call_start_batch_and_execute(
exec_ctx, glb_policy->lb_call, &op, 1,
&glb_policy->client_load_report_closure);
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
grpc_grpclb_dropped_call_counts *drop_entries =
(grpc_grpclb_dropped_call_counts *)
@ -1237,6 +1345,9 @@ static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
glb_policy->client_load_report_timer_pending = false;
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"client_load_report");
if (glb_policy->lb_call == NULL) {
maybe_restart_lb_call(exec_ctx, glb_policy);
}
return;
}
// Construct message payload.
@ -1260,17 +1371,23 @@ static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
grpc_slice_unref_internal(exec_ctx, request_payload_slice);
grpc_grpclb_request_destroy(request);
// If we've already sent the initial request, then we can go ahead and
// send the load report. Otherwise, we need to wait until the initial
// request has been sent to send this
// (see lb_on_sent_initial_request_locked() below).
if (glb_policy->initial_request_sent) {
do_send_client_load_report_locked(exec_ctx, glb_policy);
// Send load report message.
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_SEND_MESSAGE;
op.data.send_message.send_message = glb_policy->client_load_report_payload;
GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
client_load_report_done_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
grpc_call_error call_error = grpc_call_start_batch_and_execute(
exec_ctx, glb_policy->lb_call, &op, 1,
&glb_policy->client_load_report_closure);
if (call_error != GRPC_CALL_OK) {
gpr_log(GPR_ERROR, "call_error=%d", call_error);
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
}
static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error);
static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error);
static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
@ -1315,9 +1432,6 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
grpc_slice_unref_internal(exec_ctx, request_payload_slice);
grpc_grpclb_request_destroy(request);
GRPC_CLOSURE_INIT(&glb_policy->lb_on_sent_initial_request,
lb_on_sent_initial_request_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
GRPC_CLOSURE_INIT(&glb_policy->lb_on_server_status_received,
lb_on_server_status_received_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
@ -1332,7 +1446,6 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
glb_policy->initial_request_sent = false;
glb_policy->seen_initial_response = false;
glb_policy->last_client_load_report_counters_were_zero = false;
}
@ -1349,7 +1462,7 @@ static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
grpc_slice_unref_internal(exec_ctx, glb_policy->lb_call_status_details);
if (!glb_policy->client_load_report_timer_pending) {
if (glb_policy->client_load_report_timer_pending) {
grpc_timer_cancel(exec_ctx, &glb_policy->client_load_report_timer);
}
}
@ -1373,7 +1486,7 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(glb_policy->lb_call != NULL);
grpc_call_error call_error;
grpc_op ops[4];
grpc_op ops[3];
memset(ops, 0, sizeof(ops));
grpc_op *op = ops;
@ -1394,13 +1507,8 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
op->flags = 0;
op->reserved = NULL;
op++;
/* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
* count goes to zero) to be unref'd in lb_on_sent_initial_request_locked() */
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base,
"lb_on_sent_initial_request_locked");
call_error = grpc_call_start_batch_and_execute(
exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
&glb_policy->lb_on_sent_initial_request);
call_error = grpc_call_start_batch_and_execute(exec_ctx, glb_policy->lb_call,
ops, (size_t)(op - ops), NULL);
GPR_ASSERT(GRPC_CALL_OK == call_error);
op = ops;
@ -1437,19 +1545,6 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
glb_policy->initial_request_sent = true;
// If we attempted to send a client load report before the initial
// request was sent, send the load report now.
if (glb_policy->client_load_report_payload != NULL) {
do_send_client_load_report_locked(exec_ctx, glb_policy);
}
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"lb_on_sent_initial_request_locked");
}
static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
@ -1525,6 +1620,15 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (glb_policy->serverlist != NULL) {
/* dispose of the old serverlist */
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
} else {
/* or dispose of the fallback */
grpc_lb_addresses_destroy(exec_ctx,
glb_policy->fallback_backend_addresses);
glb_policy->fallback_backend_addresses = NULL;
if (glb_policy->fallback_timer_active) {
grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
glb_policy->fallback_timer_active = false;
}
}
/* and update the copy in the glb_lb_policy instance. This
* serverlist instance will be destroyed either upon the next
@ -1535,9 +1639,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
} else {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Received empty server list. Picks will stay pending until "
"a response with > 0 servers is received");
gpr_log(GPR_INFO, "Received empty server list, ignoring.");
}
grpc_grpclb_destroy_serverlist(serverlist);
}
@ -1572,19 +1674,25 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
}
static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
glb_policy->retry_timer_active = false;
if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
(void *)glb_policy);
glb_policy->fallback_timer_active = false;
/* If we receive a serverlist after the timer fires but before this callback
* actually runs, don't fall back. */
if (glb_policy->serverlist == NULL) {
if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Falling back to use backends from resolver (grpclb %p)",
(void *)glb_policy);
}
GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
rr_handover_locked(exec_ctx, glb_policy);
}
GPR_ASSERT(glb_policy->lb_call == NULL);
query_for_backends_locked(exec_ctx, glb_policy);
}
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"grpclb_fallback_timer");
}
static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
@ -1603,66 +1711,30 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
}
/* We need to perform cleanups no matter what. */
lb_call_destroy_locked(exec_ctx, glb_policy);
if (glb_policy->started_picking && glb_policy->updating_lb_call) {
if (glb_policy->retry_timer_active) {
grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
}
if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy);
glb_policy->updating_lb_call = false;
} else if (!glb_policy->shutting_down) {
/* if we aren't shutting down, restart the LB client call after some time */
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec next_try =
gpr_backoff_step(&glb_policy->lb_call_backoff_state, now);
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
(void *)glb_policy);
gpr_timespec timeout = gpr_time_sub(next_try, now);
if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
gpr_log(GPR_DEBUG,
"... retry_timer_active in %" PRId64 ".%09d seconds.",
timeout.tv_sec, timeout.tv_nsec);
} else {
gpr_log(GPR_DEBUG, "... retry_timer_active immediately.");
}
}
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
lb_call_on_retry_timer_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
glb_policy->retry_timer_active = true;
grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
&glb_policy->lb_on_call_retry, now);
// If the load report timer is still pending, we wait for it to be
// called before restarting the call. Otherwise, we restart the call
// here.
if (!glb_policy->client_load_report_timer_pending) {
maybe_restart_lb_call(exec_ctx, glb_policy);
}
}
static void fallback_update_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy,
const grpc_lb_addresses *addresses) {
GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
glb_policy->fallback_backend_addresses =
extract_backend_addresses_locked(exec_ctx, addresses);
if (glb_policy->lb_fallback_timeout_ms > 0 &&
!glb_policy->fallback_timer_active) {
rr_handover_locked(exec_ctx, glb_policy);
}
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"lb_on_server_status_received_locked");
}
static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_args *args) {
glb_lb_policy *glb_policy = (glb_lb_policy *)policy;
if (glb_policy->updating_lb_channel) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Update already in progress for grpclb %p. Deferring update.",
(void *)glb_policy);
}
if (glb_policy->pending_update_args != NULL) {
grpc_channel_args_destroy(exec_ctx,
glb_policy->pending_update_args->args);
gpr_free(glb_policy->pending_update_args);
}
glb_policy->pending_update_args = (grpc_lb_policy_args *)gpr_zalloc(
sizeof(*glb_policy->pending_update_args));
glb_policy->pending_update_args->client_channel_factory =
args->client_channel_factory;
glb_policy->pending_update_args->args = grpc_channel_args_copy(args->args);
glb_policy->pending_update_args->combiner = args->combiner;
return;
}
glb_policy->updating_lb_channel = true;
// Propagate update to lb_channel (pick first).
const grpc_arg *arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
@ -1680,13 +1752,43 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
"ignoring.",
(void *)glb_policy);
}
return;
}
const grpc_lb_addresses *addresses =
(const grpc_lb_addresses *)arg->value.pointer.p;
if (glb_policy->serverlist == NULL) {
// If a non-empty serverlist hasn't been received from the balancer,
// propagate the update to fallback_backend_addresses.
fallback_update_locked(exec_ctx, glb_policy, addresses);
} else if (glb_policy->updating_lb_channel) {
// If we have received a serverlist from the balancer, we need to defer the
// update when there is one already in progress.
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Update already in progress for grpclb %p. Deferring update.",
(void *)glb_policy);
}
if (glb_policy->pending_update_args != NULL) {
grpc_channel_args_destroy(exec_ctx,
glb_policy->pending_update_args->args);
gpr_free(glb_policy->pending_update_args);
}
glb_policy->pending_update_args = (grpc_lb_policy_args *)gpr_zalloc(
sizeof(*glb_policy->pending_update_args));
glb_policy->pending_update_args->client_channel_factory =
args->client_channel_factory;
glb_policy->pending_update_args->args = grpc_channel_args_copy(args->args);
glb_policy->pending_update_args->combiner = args->combiner;
return;
}
glb_policy->updating_lb_channel = true;
GPR_ASSERT(glb_policy->lb_channel != NULL);
grpc_channel_args *lb_channel_args = build_lb_channel_args(
exec_ctx, addresses, glb_policy->response_generator, args->args);
/* Propagate updates to the LB channel through the fake resolver */
/* Propagate updates to the LB channel (pick first) through the fake resolver
*/
grpc_fake_resolver_response_generator_set_response(
exec_ctx, glb_policy->response_generator, lb_channel_args);
grpc_channel_args_destroy(exec_ctx, lb_channel_args);
@ -1789,13 +1891,7 @@ static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args) {
/* Count the number of gRPC-LB addresses. There must be at least one.
* TODO(roth): For now, we ignore non-balancer addresses, but in the
* future, we may change the behavior such that we fall back to using
* the non-balancer addresses if we cannot reach any balancers. In the
* fallback case, we should use the LB policy indicated by
* GRPC_ARG_LB_POLICY_NAME (although if that specifies grpclb or is
* unset, we should default to pick_first). */
/* Count the number of gRPC-LB addresses. There must be at least one. */
const grpc_arg *arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
@ -1831,6 +1927,11 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
glb_policy->lb_call_timeout_ms =
grpc_channel_arg_get_integer(arg, (grpc_integer_options){0, 0, INT_MAX});
arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS);
glb_policy->lb_fallback_timeout_ms = grpc_channel_arg_get_integer(
arg, (grpc_integer_options){GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0,
INT_MAX});
// Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
// since we use this to trigger the client_load_reporting filter.
grpc_arg new_arg = grpc_channel_arg_string_create(
@ -1839,6 +1940,11 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
/* Extract the backend addresses (may be empty) from the resolver for
* fallback. */
glb_policy->fallback_backend_addresses =
extract_backend_addresses_locked(exec_ctx, addresses);
/* Create a client channel over them to communicate with a LB service */
glb_policy->response_generator =
grpc_fake_resolver_response_generator_create();

@ -56,7 +56,7 @@ grpc_lb_addresses* grpc_lb_addresses_copy(const grpc_lb_addresses* addresses) {
}
void grpc_lb_addresses_set_address(grpc_lb_addresses* addresses, size_t index,
void* address, size_t address_len,
const void* address, size_t address_len,
bool is_balancer, const char* balancer_name,
void* user_data) {
GPR_ASSERT(index < addresses->num_addresses);

@ -73,7 +73,7 @@ grpc_lb_addresses *grpc_lb_addresses_copy(const grpc_lb_addresses *addresses);
* \a address is a socket address of length \a address_len.
* Takes ownership of \a balancer_name. */
void grpc_lb_addresses_set_address(grpc_lb_addresses *addresses, size_t index,
void *address, size_t address_len,
const void *address, size_t address_len,
bool is_balancer, const char *balancer_name,
void *user_data);

@ -20,6 +20,7 @@
#if GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET)
#include <ares.h>
#include <sys/ioctl.h>
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h"
@ -37,8 +38,6 @@
typedef struct fd_node {
/** the owner of this fd node */
grpc_ares_ev_driver *ev_driver;
/** the grpc_fd owned by this fd node */
grpc_fd *fd;
/** a closure wrapping on_readable_cb, which should be invoked when the
grpc_fd in this node becomes readable. */
grpc_closure read_closure;
@ -50,10 +49,14 @@ typedef struct fd_node {
/** mutex guarding the rest of the state */
gpr_mu mu;
/** the grpc_fd owned by this fd node */
grpc_fd *fd;
/** if the readable closure has been registered */
bool readable_registered;
/** if the writable closure has been registered */
bool writable_registered;
/** if the fd is being shut down */
bool shutting_down;
} fd_node;
struct grpc_ares_ev_driver {
@ -100,7 +103,6 @@ static void fd_node_destroy(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
GPR_ASSERT(!fdn->readable_registered);
GPR_ASSERT(!fdn->writable_registered);
gpr_mu_destroy(&fdn->mu);
grpc_pollset_set_del_fd(exec_ctx, fdn->ev_driver->pollset_set, fdn->fd);
/* c-ares library has closed the fd inside grpc_fd. This fd may be picked up
immediately by another thread, and should not be closed by the following
grpc_fd_orphan. */
@ -109,6 +111,19 @@ static void fd_node_destroy(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
gpr_free(fdn);
}
static void fd_node_shutdown(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
gpr_mu_lock(&fdn->mu);
fdn->shutting_down = true;
if (!fdn->readable_registered && !fdn->writable_registered) {
gpr_mu_unlock(&fdn->mu);
fd_node_destroy(exec_ctx, fdn);
} else {
grpc_fd_shutdown(exec_ctx, fdn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"c-ares fd shutdown"));
gpr_mu_unlock(&fdn->mu);
}
}
grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver,
grpc_pollset_set *pollset_set) {
*ev_driver = (grpc_ares_ev_driver *)gpr_malloc(sizeof(grpc_ares_ev_driver));
@ -175,18 +190,33 @@ static fd_node *pop_fd_node(fd_node **head, int fd) {
return NULL;
}
/* Check if \a fd is still readable */
static bool grpc_ares_is_fd_still_readable(grpc_ares_ev_driver *ev_driver,
int fd) {
size_t bytes_available = 0;
return ioctl(fd, FIONREAD, &bytes_available) == 0 && bytes_available > 0;
}
static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
fd_node *fdn = (fd_node *)arg;
grpc_ares_ev_driver *ev_driver = fdn->ev_driver;
gpr_mu_lock(&fdn->mu);
const int fd = grpc_fd_wrapped_fd(fdn->fd);
fdn->readable_registered = false;
if (fdn->shutting_down && !fdn->writable_registered) {
gpr_mu_unlock(&fdn->mu);
fd_node_destroy(exec_ctx, fdn);
grpc_ares_ev_driver_unref(ev_driver);
return;
}
gpr_mu_unlock(&fdn->mu);
gpr_log(GPR_DEBUG, "readable on %d", grpc_fd_wrapped_fd(fdn->fd));
gpr_log(GPR_DEBUG, "readable on %d", fd);
if (error == GRPC_ERROR_NONE) {
ares_process_fd(ev_driver->channel, grpc_fd_wrapped_fd(fdn->fd),
ARES_SOCKET_BAD);
do {
ares_process_fd(ev_driver->channel, fd, ARES_SOCKET_BAD);
} while (grpc_ares_is_fd_still_readable(ev_driver, fd));
} else {
// If error is not GRPC_ERROR_NONE, it means the fd has been shutdown or
// timed out. The pending lookups made on this ev_driver will be cancelled
@ -207,13 +237,19 @@ static void on_writable_cb(grpc_exec_ctx *exec_ctx, void *arg,
fd_node *fdn = (fd_node *)arg;
grpc_ares_ev_driver *ev_driver = fdn->ev_driver;
gpr_mu_lock(&fdn->mu);
const int fd = grpc_fd_wrapped_fd(fdn->fd);
fdn->writable_registered = false;
if (fdn->shutting_down && !fdn->readable_registered) {
gpr_mu_unlock(&fdn->mu);
fd_node_destroy(exec_ctx, fdn);
grpc_ares_ev_driver_unref(ev_driver);
return;
}
gpr_mu_unlock(&fdn->mu);
gpr_log(GPR_DEBUG, "writable on %d", grpc_fd_wrapped_fd(fdn->fd));
gpr_log(GPR_DEBUG, "writable on %d", fd);
if (error == GRPC_ERROR_NONE) {
ares_process_fd(ev_driver->channel, ARES_SOCKET_BAD,
grpc_fd_wrapped_fd(fdn->fd));
ares_process_fd(ev_driver->channel, ARES_SOCKET_BAD, fd);
} else {
// If error is not GRPC_ERROR_NONE, it means the fd has been shutdown or
// timed out. The pending lookups made on this ev_driver will be cancelled
@ -256,6 +292,7 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
fdn->ev_driver = ev_driver;
fdn->readable_registered = false;
fdn->writable_registered = false;
fdn->shutting_down = false;
gpr_mu_init(&fdn->mu);
GRPC_CLOSURE_INIT(&fdn->read_closure, on_readable_cb, fdn,
grpc_schedule_on_exec_ctx);
@ -296,7 +333,7 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
while (ev_driver->fds != NULL) {
fd_node *cur = ev_driver->fds;
ev_driver->fds = ev_driver->fds->next;
fd_node_destroy(exec_ctx, cur);
fd_node_shutdown(exec_ctx, cur);
}
ev_driver->fds = new_list;
// If the ev driver has no working fd, all the tasks are done.

@ -275,14 +275,15 @@ static void on_txt_done_cb(void *arg, int status, int timeouts,
gpr_log(GPR_DEBUG, "on_txt_done_cb");
char *error_msg;
grpc_ares_request *r = (grpc_ares_request *)arg;
const size_t prefix_len = sizeof(g_service_config_attribute_prefix) - 1;
struct ares_txt_ext *result = NULL;
struct ares_txt_ext *reply = NULL;
grpc_error *error = GRPC_ERROR_NONE;
gpr_mu_lock(&r->mu);
if (status != ARES_SUCCESS) goto fail;
struct ares_txt_ext *reply = NULL;
status = ares_parse_txt_reply_ext(buf, len, &reply);
if (status != ARES_SUCCESS) goto fail;
// Find service config in TXT record.
const size_t prefix_len = sizeof(g_service_config_attribute_prefix) - 1;
struct ares_txt_ext *result;
for (result = reply; result != NULL; result = result->next) {
if (result->record_start &&
memcmp(result->txt, g_service_config_attribute_prefix, prefix_len) ==
@ -313,7 +314,7 @@ static void on_txt_done_cb(void *arg, int status, int timeouts,
fail:
gpr_asprintf(&error_msg, "C-ares TXT lookup status is not ARES_SUCCESS: %s",
ares_strerror(status));
grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
gpr_free(error_msg);
if (r->error == GRPC_ERROR_NONE) {
r->error = error;
@ -331,6 +332,9 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
grpc_closure *on_done, grpc_lb_addresses **addrs, bool check_grpclb,
char **service_config_json) {
grpc_error *error = GRPC_ERROR_NONE;
grpc_ares_hostbyname_request *hr = NULL;
grpc_ares_request *r = NULL;
ares_channel *channel = NULL;
/* TODO(zyc): Enable tracing after #9603 is checked in */
/* if (grpc_dns_trace) {
gpr_log(GPR_DEBUG, "resolve_address (blocking): name=%s, default_port=%s",
@ -360,8 +364,7 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
error = grpc_ares_ev_driver_create(&ev_driver, interested_parties);
if (error != GRPC_ERROR_NONE) goto error_cleanup;
grpc_ares_request *r =
(grpc_ares_request *)gpr_zalloc(sizeof(grpc_ares_request));
r = (grpc_ares_request *)gpr_zalloc(sizeof(grpc_ares_request));
gpr_mu_init(&r->mu);
r->ev_driver = ev_driver;
r->on_done = on_done;
@ -369,7 +372,7 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
r->service_config_json_out = service_config_json;
r->success = false;
r->error = GRPC_ERROR_NONE;
ares_channel *channel = grpc_ares_ev_driver_get_channel(r->ev_driver);
channel = grpc_ares_ev_driver_get_channel(r->ev_driver);
// If dns_server is specified, use it.
if (dns_server != NULL) {
@ -410,12 +413,12 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
}
gpr_ref_init(&r->pending_queries, 1);
if (grpc_ipv6_loopback_available()) {
grpc_ares_hostbyname_request *hr = create_hostbyname_request(
r, host, strhtons(port), false /* is_balancer */);
hr = create_hostbyname_request(r, host, strhtons(port),
false /* is_balancer */);
ares_gethostbyname(*channel, hr->host, AF_INET6, on_hostbyname_done_cb, hr);
}
grpc_ares_hostbyname_request *hr = create_hostbyname_request(
r, host, strhtons(port), false /* is_balancer */);
hr = create_hostbyname_request(r, host, strhtons(port),
false /* is_balancer */);
ares_gethostbyname(*channel, hr->host, AF_INET, on_hostbyname_done_cb, hr);
if (check_grpclb) {
/* Query the SRV record */

@ -201,6 +201,7 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
grpc_error *err = GRPC_ERROR_NONE;
server_state *state = NULL;
grpc_error **errors = NULL;
size_t naddrs = 0;
*port_num = -1;
@ -225,7 +226,7 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
state->shutdown = true;
gpr_mu_init(&state->mu);
const size_t naddrs = resolved->naddrs;
naddrs = resolved->naddrs;
errors = (grpc_error **)gpr_malloc(sizeof(*errors) * naddrs);
for (i = 0; i < naddrs; i++) {
errors[i] =

@ -369,14 +369,12 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
queue_setting_update(exec_ctx, t,
GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA, 1);
t->ping_policy = (grpc_chttp2_repeated_ping_policy){
.max_pings_without_data = g_default_max_pings_without_data,
.min_sent_ping_interval_without_data = gpr_time_from_millis(
g_default_min_sent_ping_interval_without_data_ms, GPR_TIMESPAN),
.max_ping_strikes = g_default_max_ping_strikes,
.min_recv_ping_interval_without_data = gpr_time_from_millis(
g_default_min_recv_ping_interval_without_data_ms, GPR_TIMESPAN),
};
t->ping_policy.max_pings_without_data = g_default_max_pings_without_data;
t->ping_policy.min_sent_ping_interval_without_data = gpr_time_from_millis(
g_default_min_sent_ping_interval_without_data_ms, GPR_TIMESPAN);
t->ping_policy.max_ping_strikes = g_default_max_ping_strikes;
t->ping_policy.min_recv_ping_interval_without_data = gpr_time_from_millis(
g_default_min_recv_ping_interval_without_data_ms, GPR_TIMESPAN);
/* Keepalive setting */
if (t->is_client) {
@ -708,7 +706,10 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&s->unprocessed_incoming_frames_buffer);
grpc_slice_buffer_init(&s->frame_storage);
grpc_slice_buffer_init(&s->compressed_data_buffer);
grpc_slice_buffer_init(&s->decompressed_data_buffer);
s->pending_byte_stream = false;
s->decompressed_header_bytes = 0;
GRPC_CLOSURE_INIT(&s->reset_byte_stream, reset_byte_stream, s,
grpc_combiner_scheduler(t->combiner));
@ -742,14 +743,8 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
grpc_slice_buffer_destroy_internal(exec_ctx,
&s->unprocessed_incoming_frames_buffer);
grpc_slice_buffer_destroy_internal(exec_ctx, &s->frame_storage);
if (s->compressed_data_buffer) {
grpc_slice_buffer_destroy_internal(exec_ctx, s->compressed_data_buffer);
gpr_free(s->compressed_data_buffer);
}
if (s->decompressed_data_buffer) {
grpc_slice_buffer_destroy_internal(exec_ctx, s->decompressed_data_buffer);
gpr_free(s->decompressed_data_buffer);
}
grpc_slice_buffer_destroy_internal(exec_ctx, &s->compressed_data_buffer);
grpc_slice_buffer_destroy_internal(exec_ctx, &s->decompressed_data_buffer);
grpc_chttp2_list_remove_stalled_by_transport(t, s);
grpc_chttp2_list_remove_stalled_by_stream(t, s);
@ -1450,12 +1445,14 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE;
/* Identify stream compression */
if ((s->stream_compression_send_enabled =
(op_payload->send_initial_metadata.send_initial_metadata->idx.named
.content_encoding != NULL)) == true) {
s->compressed_data_buffer =
(grpc_slice_buffer *)gpr_malloc(sizeof(grpc_slice_buffer));
grpc_slice_buffer_init(s->compressed_data_buffer);
if (op_payload->send_initial_metadata.send_initial_metadata->idx.named
.content_encoding == NULL ||
grpc_stream_compression_method_parse(
GRPC_MDVALUE(
op_payload->send_initial_metadata.send_initial_metadata->idx
.named.content_encoding->md),
true, &s->stream_compression_method) == 0) {
s->stream_compression_method = GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS;
}
s->send_initial_metadata_finished = add_closure_barrier(on_complete);
@ -1904,20 +1901,20 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx,
&s->frame_storage);
s->unprocessed_incoming_frames_decompressed = false;
}
if (s->stream_compression_recv_enabled &&
!s->unprocessed_incoming_frames_decompressed) {
GPR_ASSERT(s->decompressed_data_buffer->length == 0);
if (!s->unprocessed_incoming_frames_decompressed) {
GPR_ASSERT(s->decompressed_data_buffer.length == 0);
bool end_of_context;
if (!s->stream_decompression_ctx) {
s->stream_decompression_ctx =
grpc_stream_compression_context_create(
GRPC_STREAM_COMPRESSION_DECOMPRESS);
s->stream_decompression_method);
}
if (!grpc_stream_decompress(s->stream_decompression_ctx,
&s->unprocessed_incoming_frames_buffer,
s->decompressed_data_buffer, NULL,
GRPC_HEADER_SIZE_IN_BYTES,
&end_of_context)) {
if (!grpc_stream_decompress(
s->stream_decompression_ctx,
&s->unprocessed_incoming_frames_buffer,
&s->decompressed_data_buffer, NULL,
GRPC_HEADER_SIZE_IN_BYTES - s->decompressed_header_bytes,
&end_of_context)) {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
&s->frame_storage);
grpc_slice_buffer_reset_and_unref_internal(
@ -1925,9 +1922,13 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx,
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Stream decompression error.");
} else {
s->decompressed_header_bytes += s->decompressed_data_buffer.length;
if (s->decompressed_header_bytes == GRPC_HEADER_SIZE_IN_BYTES) {
s->decompressed_header_bytes = 0;
}
error = grpc_deframe_unprocessed_incoming_frames(
exec_ctx, &s->data_parser, s, s->decompressed_data_buffer, NULL,
s->recv_message);
exec_ctx, &s->data_parser, s, &s->decompressed_data_buffer,
NULL, s->recv_message);
if (end_of_context) {
grpc_stream_compression_context_destroy(
s->stream_decompression_ctx);
@ -1976,15 +1977,14 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
}
bool pending_data = s->pending_byte_stream ||
s->unprocessed_incoming_frames_buffer.length > 0;
if (s->stream_compression_recv_enabled && s->read_closed &&
s->frame_storage.length > 0 && !pending_data && !s->seen_error &&
s->recv_trailing_metadata_finished != NULL) {
if (s->read_closed && s->frame_storage.length > 0 && !pending_data &&
!s->seen_error && s->recv_trailing_metadata_finished != NULL) {
/* Maybe some SYNC_FLUSH data is left in frame_storage. Consume them and
* maybe decompress the next 5 bytes in the stream. */
bool end_of_context;
if (!s->stream_decompression_ctx) {
s->stream_decompression_ctx = grpc_stream_compression_context_create(
GRPC_STREAM_COMPRESSION_DECOMPRESS);
s->stream_decompression_method);
}
if (!grpc_stream_decompress(s->stream_decompression_ctx,
&s->frame_storage,
@ -1997,6 +1997,7 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
} else {
if (s->unprocessed_incoming_frames_buffer.length > 0) {
s->unprocessed_incoming_frames_decompressed = true;
pending_data = true;
}
if (end_of_context) {
grpc_stream_compression_context_destroy(s->stream_decompression_ctx);
@ -2815,7 +2816,7 @@ static void reset_byte_stream(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_UNREF(s->byte_stream_error);
s->byte_stream_error = GRPC_ERROR_NONE;
grpc_chttp2_cancel_stream(exec_ctx, s->t, s, GRPC_ERROR_REF(error));
s->byte_stream_error = error;
s->byte_stream_error = GRPC_ERROR_REF(error);
}
}
@ -2913,24 +2914,23 @@ static grpc_error *incoming_byte_stream_pull(grpc_exec_ctx *exec_ctx,
grpc_error *error;
if (s->unprocessed_incoming_frames_buffer.length > 0) {
if (s->stream_compression_recv_enabled &&
!s->unprocessed_incoming_frames_decompressed) {
if (!s->unprocessed_incoming_frames_decompressed) {
bool end_of_context;
if (!s->stream_decompression_ctx) {
s->stream_decompression_ctx = grpc_stream_compression_context_create(
GRPC_STREAM_COMPRESSION_DECOMPRESS);
s->stream_decompression_method);
}
if (!grpc_stream_decompress(s->stream_decompression_ctx,
&s->unprocessed_incoming_frames_buffer,
s->decompressed_data_buffer, NULL, MAX_SIZE_T,
&end_of_context)) {
&s->decompressed_data_buffer, NULL,
MAX_SIZE_T, &end_of_context)) {
error =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Stream decompression error.");
return error;
}
GPR_ASSERT(s->unprocessed_incoming_frames_buffer.length == 0);
grpc_slice_buffer_swap(&s->unprocessed_incoming_frames_buffer,
s->decompressed_data_buffer);
&s->decompressed_data_buffer);
s->unprocessed_incoming_frames_decompressed = true;
if (end_of_context) {
grpc_stream_compression_context_destroy(s->stream_decompression_ctx);

@ -210,7 +210,7 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames(
if (cur != end) {
grpc_slice_buffer_undo_take_first(
&s->unprocessed_incoming_frames_buffer,
slices,
grpc_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
}
grpc_slice_unref_internal(exec_ctx, slice);
@ -277,7 +277,7 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames(
p->state = GRPC_CHTTP2_DATA_FH_0;
cur += p->frame_size;
grpc_slice_buffer_undo_take_first(
&s->unprocessed_incoming_frames_buffer,
slices,
grpc_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
grpc_slice_unref_internal(exec_ctx, slice);
return GRPC_ERROR_NONE;

@ -33,6 +33,7 @@
#include "src/core/ext/transport/chttp2/transport/bin_encoder.h"
#include "src/core/ext/transport/chttp2/transport/hpack_table.h"
#include "src/core/ext/transport/chttp2/transport/varint.h"
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/transport/metadata.h"
@ -51,8 +52,10 @@
#define MAX_DECODER_SPACE_USAGE 512
static grpc_slice_refcount terminal_slice_refcount = {NULL, NULL};
static const grpc_slice terminal_slice = {&terminal_slice_refcount,
.data.refcounted = {0, 0}};
static const grpc_slice terminal_slice = {
&terminal_slice_refcount, /* refcount */
{{0, 0}} /* data.refcounted */
};
extern grpc_tracer_flag grpc_http_trace;
@ -269,8 +272,10 @@ static void add_elem(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
}
}
static void emit_indexed(grpc_chttp2_hpack_compressor *c, uint32_t elem_index,
static void emit_indexed(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_compressor *c, uint32_t elem_index,
framer_state *st) {
GRPC_STATS_INC_HPACK_SEND_INDEXED(exec_ctx);
uint32_t len = GRPC_CHTTP2_VARINT_LENGTH(elem_index, 1);
GRPC_CHTTP2_WRITE_VARINT(elem_index, 1, 0x80, add_tiny_header_data(st, len),
len);
@ -282,30 +287,31 @@ typedef struct {
bool insert_null_before_wire_value;
} wire_value;
static wire_value get_wire_value(grpc_mdelem elem, bool true_binary_enabled) {
static wire_value get_wire_value(grpc_exec_ctx *exec_ctx, grpc_mdelem elem,
bool true_binary_enabled) {
wire_value wire_val;
if (grpc_is_binary_header(GRPC_MDKEY(elem))) {
if (true_binary_enabled) {
return (wire_value){
.huffman_prefix = 0x00,
.insert_null_before_wire_value = true,
.data = grpc_slice_ref_internal(GRPC_MDVALUE(elem)),
};
GRPC_STATS_INC_HPACK_SEND_BINARY(exec_ctx);
wire_val.huffman_prefix = 0x00;
wire_val.insert_null_before_wire_value = true;
wire_val.data = grpc_slice_ref_internal(GRPC_MDVALUE(elem));
} else {
return (wire_value){
.huffman_prefix = 0x80,
.insert_null_before_wire_value = false,
.data = grpc_chttp2_base64_encode_and_huffman_compress(
GRPC_MDVALUE(elem)),
};
GRPC_STATS_INC_HPACK_SEND_BINARY_BASE64(exec_ctx);
wire_val.huffman_prefix = 0x80;
wire_val.insert_null_before_wire_value = false;
wire_val.data =
grpc_chttp2_base64_encode_and_huffman_compress(GRPC_MDVALUE(elem));
}
} else {
/* TODO(ctiller): opportunistically compress non-binary headers */
return (wire_value){
.huffman_prefix = 0x00,
.insert_null_before_wire_value = false,
.data = grpc_slice_ref_internal(GRPC_MDVALUE(elem)),
};
GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(exec_ctx);
wire_val.huffman_prefix = 0x00;
wire_val.insert_null_before_wire_value = false;
wire_val.data = grpc_slice_ref_internal(GRPC_MDVALUE(elem));
}
return wire_val;
}
static size_t wire_value_length(wire_value v) {
@ -317,11 +323,14 @@ static void add_wire_value(framer_state *st, wire_value v) {
add_header_data(st, v.data);
}
static void emit_lithdr_incidx(grpc_chttp2_hpack_compressor *c,
static void emit_lithdr_incidx(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_compressor *c,
uint32_t key_index, grpc_mdelem elem,
framer_state *st) {
GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX(exec_ctx);
uint32_t len_pfx = GRPC_CHTTP2_VARINT_LENGTH(key_index, 2);
wire_value value = get_wire_value(elem, st->use_true_binary_metadata);
wire_value value =
get_wire_value(exec_ctx, elem, st->use_true_binary_metadata);
size_t len_val = wire_value_length(value);
uint32_t len_val_len;
GPR_ASSERT(len_val <= UINT32_MAX);
@ -333,11 +342,14 @@ static void emit_lithdr_incidx(grpc_chttp2_hpack_compressor *c,
add_wire_value(st, value);
}
static void emit_lithdr_noidx(grpc_chttp2_hpack_compressor *c,
static void emit_lithdr_noidx(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_compressor *c,
uint32_t key_index, grpc_mdelem elem,
framer_state *st) {
GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX(exec_ctx);
uint32_t len_pfx = GRPC_CHTTP2_VARINT_LENGTH(key_index, 4);
wire_value value = get_wire_value(elem, st->use_true_binary_metadata);
wire_value value =
get_wire_value(exec_ctx, elem, st->use_true_binary_metadata);
size_t len_val = wire_value_length(value);
uint32_t len_val_len;
GPR_ASSERT(len_val <= UINT32_MAX);
@ -349,10 +361,14 @@ static void emit_lithdr_noidx(grpc_chttp2_hpack_compressor *c,
add_wire_value(st, value);
}
static void emit_lithdr_incidx_v(grpc_chttp2_hpack_compressor *c,
static void emit_lithdr_incidx_v(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_compressor *c,
grpc_mdelem elem, framer_state *st) {
GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX_V(exec_ctx);
GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(exec_ctx);
uint32_t len_key = (uint32_t)GRPC_SLICE_LENGTH(GRPC_MDKEY(elem));
wire_value value = get_wire_value(elem, st->use_true_binary_metadata);
wire_value value =
get_wire_value(exec_ctx, elem, st->use_true_binary_metadata);
uint32_t len_val = (uint32_t)wire_value_length(value);
uint32_t len_key_len = GRPC_CHTTP2_VARINT_LENGTH(len_key, 1);
uint32_t len_val_len = GRPC_CHTTP2_VARINT_LENGTH(len_val, 1);
@ -367,10 +383,14 @@ static void emit_lithdr_incidx_v(grpc_chttp2_hpack_compressor *c,
add_wire_value(st, value);
}
static void emit_lithdr_noidx_v(grpc_chttp2_hpack_compressor *c,
static void emit_lithdr_noidx_v(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_compressor *c,
grpc_mdelem elem, framer_state *st) {
GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX_V(exec_ctx);
GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(exec_ctx);
uint32_t len_key = (uint32_t)GRPC_SLICE_LENGTH(GRPC_MDKEY(elem));
wire_value value = get_wire_value(elem, st->use_true_binary_metadata);
wire_value value =
get_wire_value(exec_ctx, elem, st->use_true_binary_metadata);
uint32_t len_val = (uint32_t)wire_value_length(value);
uint32_t len_key_len = GRPC_CHTTP2_VARINT_LENGTH(len_key, 1);
uint32_t len_val_len = GRPC_CHTTP2_VARINT_LENGTH(len_val, 1);
@ -423,7 +443,7 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
gpr_free(v);
}
if (!GRPC_MDELEM_IS_INTERNED(elem)) {
emit_lithdr_noidx_v(c, elem, st);
emit_lithdr_noidx_v(exec_ctx, c, elem, st);
return;
}
@ -445,16 +465,16 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_2(elem_hash)], elem) &&
c->indices_elems[HASH_FRAGMENT_2(elem_hash)] > c->tail_remote_index) {
/* HIT: complete element (first cuckoo hash) */
emit_indexed(c, dynidx(c, c->indices_elems[HASH_FRAGMENT_2(elem_hash)]),
st);
emit_indexed(exec_ctx, c,
dynidx(c, c->indices_elems[HASH_FRAGMENT_2(elem_hash)]), st);
return;
}
if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_3(elem_hash)], elem) &&
c->indices_elems[HASH_FRAGMENT_3(elem_hash)] > c->tail_remote_index) {
/* HIT: complete element (second cuckoo hash) */
emit_indexed(c, dynidx(c, c->indices_elems[HASH_FRAGMENT_3(elem_hash)]),
st);
emit_indexed(exec_ctx, c,
dynidx(c, c->indices_elems[HASH_FRAGMENT_3(elem_hash)]), st);
return;
}
@ -472,11 +492,11 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
indices_key > c->tail_remote_index) {
/* HIT: key (first cuckoo hash) */
if (should_add_elem) {
emit_lithdr_incidx(c, dynidx(c, indices_key), elem, st);
emit_lithdr_incidx(exec_ctx, c, dynidx(c, indices_key), elem, st);
add_elem(exec_ctx, c, elem);
return;
} else {
emit_lithdr_noidx(c, dynidx(c, indices_key), elem, st);
emit_lithdr_noidx(exec_ctx, c, dynidx(c, indices_key), elem, st);
return;
}
GPR_UNREACHABLE_CODE(return );
@ -488,11 +508,11 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
indices_key > c->tail_remote_index) {
/* HIT: key (first cuckoo hash) */
if (should_add_elem) {
emit_lithdr_incidx(c, dynidx(c, indices_key), elem, st);
emit_lithdr_incidx(exec_ctx, c, dynidx(c, indices_key), elem, st);
add_elem(exec_ctx, c, elem);
return;
} else {
emit_lithdr_noidx(c, dynidx(c, indices_key), elem, st);
emit_lithdr_noidx(exec_ctx, c, dynidx(c, indices_key), elem, st);
return;
}
GPR_UNREACHABLE_CODE(return );
@ -501,11 +521,11 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
/* no elem, key in the table... fall back to literal emission */
if (should_add_elem) {
emit_lithdr_incidx_v(c, elem, st);
emit_lithdr_incidx_v(exec_ctx, c, elem, st);
add_elem(exec_ctx, c, elem);
return;
} else {
emit_lithdr_noidx_v(c, elem, st);
emit_lithdr_noidx_v(exec_ctx, c, elem, st);
return;
}
GPR_UNREACHABLE_CODE(return );
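A note on the hpack_enc changes above: every header now lands in one of a few emit paths (complete-element hit, key-only hit with or without adding to the dynamic table, or fully literal), and the counters threaded through exec_ctx record which path fired. A self-contained toy sketch of just that branch structure follows; the names below are illustrative stand-ins, not gRPC APIs.

#include <stdbool.h>
#include <stdio.h>

typedef enum {
  EMIT_INDEXED,       /* complete element found in the table */
  EMIT_LITHDR_INCIDX, /* key hit, element added to the dynamic table */
  EMIT_LITHDR_NOIDX,  /* key hit, element not added */
  EMIT_LITHDR_V       /* fully literal key and value */
} emit_kind;

/* Mirrors the branch order in hpack_enc: a full-element hit wins, then a
 * key-only hit, with should_add selecting incidx vs noidx. */
static emit_kind choose_emit(bool elem_hit, bool key_hit, bool should_add) {
  if (elem_hit) return EMIT_INDEXED;
  if (key_hit) return should_add ? EMIT_LITHDR_INCIDX : EMIT_LITHDR_NOIDX;
  return EMIT_LITHDR_V;
}

int main(void) {
  printf("%d %d %d\n", choose_emit(true, false, false),
         choose_emit(false, true, true), choose_emit(false, false, false));
  return 0;
}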

@ -30,6 +30,7 @@
#include <grpc/support/useful.h>
#include "src/core/ext/transport/chttp2/transport/bin_encoder.h"
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/support/string.h"
@ -777,8 +778,7 @@ static grpc_error *parse_stream_dep0(grpc_exec_ctx *exec_ctx,
return parse_stream_dep1(exec_ctx, p, cur + 1, end);
}
/* emit an indexed field; for now just logs it to console; jumps to
begin the next field on completion */
/* emit an indexed field; jumps to begin the next field on completion */
static grpc_error *finish_indexed_field(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
@ -792,6 +792,7 @@ static grpc_error *finish_indexed_field(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_INT_SIZE, (intptr_t)p->table.num_ents);
}
GRPC_MDELEM_REF(md);
GRPC_STATS_INC_HPACK_RECV_INDEXED(exec_ctx);
grpc_error *err = on_hdr(exec_ctx, p, md, 0);
if (err != GRPC_ERROR_NONE) return err;
return parse_begin(exec_ctx, p, cur, end);
@ -820,14 +821,14 @@ static grpc_error *parse_indexed_field_x(grpc_exec_ctx *exec_ctx,
return parse_value0(exec_ctx, p, cur + 1, end);
}
/* finish a literal header with incremental indexing: just log, and jump to '
begin */
/* finish a literal header with incremental indexing */
static grpc_error *finish_lithdr_incidx(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
GPR_ASSERT(!GRPC_MDISNULL(md)); /* handled in string parsing */
GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX(exec_ctx);
grpc_error *err = on_hdr(
exec_ctx, p,
grpc_mdelem_from_slices(exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(md)),
@ -842,6 +843,7 @@ static grpc_error *finish_lithdr_incidx_v(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX_V(exec_ctx);
grpc_error *err = on_hdr(
exec_ctx, p,
grpc_mdelem_from_slices(exec_ctx, take_string(exec_ctx, p, &p->key, true),
@ -898,6 +900,7 @@ static grpc_error *finish_lithdr_notidx(grpc_exec_ctx *exec_ctx,
const uint8_t *end) {
grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
GPR_ASSERT(!GRPC_MDISNULL(md)); /* handled in string parsing */
GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX(exec_ctx);
grpc_error *err = on_hdr(
exec_ctx, p,
grpc_mdelem_from_slices(exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(md)),
@ -912,6 +915,7 @@ static grpc_error *finish_lithdr_notidx_v(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX_V(exec_ctx);
grpc_error *err = on_hdr(
exec_ctx, p,
grpc_mdelem_from_slices(exec_ctx, take_string(exec_ctx, p, &p->key, true),
@ -968,6 +972,7 @@ static grpc_error *finish_lithdr_nvridx(grpc_exec_ctx *exec_ctx,
const uint8_t *end) {
grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
GPR_ASSERT(!GRPC_MDISNULL(md)); /* handled in string parsing */
GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX(exec_ctx);
grpc_error *err = on_hdr(
exec_ctx, p,
grpc_mdelem_from_slices(exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(md)),
@ -982,6 +987,7 @@ static grpc_error *finish_lithdr_nvridx_v(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX_V(exec_ctx);
grpc_error *err = on_hdr(
exec_ctx, p,
grpc_mdelem_from_slices(exec_ctx, take_string(exec_ctx, p, &p->key, true),
@ -1310,9 +1316,11 @@ static grpc_error *append_string(grpc_exec_ctx *exec_ctx,
/* 'true-binary' case */
++cur;
p->binary = NOT_BINARY;
GRPC_STATS_INC_HPACK_RECV_BINARY(exec_ctx);
append_bytes(str, cur, (size_t)(end - cur));
return GRPC_ERROR_NONE;
}
GRPC_STATS_INC_HPACK_RECV_BINARY_BASE64(exec_ctx);
/* fallthrough */
b64_byte0:
case B64_BYTE0:
@ -1510,6 +1518,7 @@ static grpc_error *begin_parse_string(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser_string *str) {
if (!p->huff && binary == NOT_BINARY && (end - cur) >= (intptr_t)p->strlen &&
p->current_slice_refcount != NULL) {
GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED(exec_ctx);
str->copied = false;
str->data.referenced.refcount = p->current_slice_refcount;
str->data.referenced.data.refcounted.bytes = (uint8_t *)cur;
@ -1523,6 +1532,20 @@ static grpc_error *begin_parse_string(grpc_exec_ctx *exec_ctx,
p->parsing.str = str;
p->huff_state = 0;
p->binary = binary;
switch (p->binary) {
case NOT_BINARY:
if (p->huff) {
GRPC_STATS_INC_HPACK_RECV_HUFFMAN(exec_ctx);
} else {
GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED(exec_ctx);
}
break;
case BINARY_BEGIN:
/* stats incremented later: don't know true binary or not */
break;
default:
abort();
}
return parse_string(exec_ctx, p, cur, end);
}
@ -1660,17 +1683,12 @@ static void parse_stream_compression_md(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
grpc_metadata_batch *initial_metadata) {
if (initial_metadata->idx.named.content_encoding != NULL) {
grpc_slice content_encoding =
GRPC_MDVALUE(initial_metadata->idx.named.content_encoding->md);
if (!grpc_slice_eq(content_encoding, GRPC_MDSTR_IDENTITY)) {
if (grpc_slice_eq(content_encoding, GRPC_MDSTR_GZIP)) {
s->stream_compression_recv_enabled = true;
s->decompressed_data_buffer =
(grpc_slice_buffer *)gpr_malloc(sizeof(grpc_slice_buffer));
grpc_slice_buffer_init(s->decompressed_data_buffer);
}
}
if (initial_metadata->idx.named.content_encoding == NULL ||
grpc_stream_compression_method_parse(
GRPC_MDVALUE(initial_metadata->idx.named.content_encoding->md), false,
&s->stream_decompression_method) == 0) {
s->stream_decompression_method =
GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS;
}
}
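A minimal sketch of the new lookup path in parse_stream_compression_md, assuming the gRPC core tree; it uses only grpc_stream_compression_method_parse as declared in the hunks below, with the same identity fallback when content-encoding is absent or unrecognized.

#include "src/core/lib/compression/stream_compression.h"

/* Map a content-encoding slice to a decompression method, falling back to
 * identity exactly as parse_stream_compression_md does above. */
static grpc_stream_compression_method pick_decompression_method(
    grpc_slice content_encoding) {
  grpc_stream_compression_method method;
  if (grpc_stream_compression_method_parse(content_encoding,
                                           false /* is_compress */,
                                           &method) == 0) {
    method = GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS;
  }
  return method;
}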

@ -592,25 +592,27 @@ struct grpc_chttp2_stream {
grpc_chttp2_write_cb *finish_after_write;
size_t sending_bytes;
/** Whether stream compression send is enabled */
bool stream_compression_recv_enabled;
/** Whether stream compression recv is enabled */
bool stream_compression_send_enabled;
/** Whether bytes stored in unprocessed_incoming_byte_stream is decompressed
*/
bool unprocessed_incoming_frames_decompressed;
/* Stream compression method to be used. */
grpc_stream_compression_method stream_compression_method;
/* Stream decompression method to be used. */
grpc_stream_compression_method stream_decompression_method;
/** Stream compression decompress context */
grpc_stream_compression_context *stream_decompression_ctx;
/** Stream compression compress context */
grpc_stream_compression_context *stream_compression_ctx;
/** Buffer storing data that is compressed but not sent */
grpc_slice_buffer *compressed_data_buffer;
grpc_slice_buffer compressed_data_buffer;
/** Amount of uncompressed bytes sent out when compressed_data_buffer is
* emptied */
size_t uncompressed_data_size;
/** Temporary buffer storing decompressed data */
grpc_slice_buffer *decompressed_data_buffer;
grpc_slice_buffer decompressed_data_buffer;
/** Whether bytes stored in unprocessed_incoming_byte_stream is decompressed
*/
bool unprocessed_incoming_frames_decompressed;
/** gRPC header bytes that are already decompressed */
size_t decompressed_header_bytes;
};
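The struct hunk above also turns compressed_data_buffer and decompressed_data_buffer from heap-allocated pointers into embedded grpc_slice_buffer members, so their lifetime now matches the stream instead of being created lazily on first use. A minimal sketch of that lifetime, assuming only the public slice-buffer API; toy_stream is an illustrative stand-in for grpc_chttp2_stream.

#include <grpc/slice_buffer.h>

typedef struct {
  grpc_slice_buffer compressed_data_buffer;   /* embedded, not a pointer */
  grpc_slice_buffer decompressed_data_buffer;
} toy_stream;

static void toy_stream_init(toy_stream *s) {
  grpc_slice_buffer_init(&s->compressed_data_buffer);
  grpc_slice_buffer_init(&s->decompressed_data_buffer);
}

static void toy_stream_destroy(toy_stream *s) {
  grpc_slice_buffer_destroy(&s->compressed_data_buffer);
  grpc_slice_buffer_destroy(&s->decompressed_data_buffer);
}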
/** Transport writing call flow:

@ -317,8 +317,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
if (sent_initial_metadata) {
/* send any body bytes, if allowed by flow control */
if (s->flow_controlled_buffer.length > 0 ||
(s->stream_compression_send_enabled &&
s->compressed_data_buffer->length > 0)) {
s->compressed_data_buffer.length > 0) {
uint32_t stream_remote_window = (uint32_t)GPR_MAX(
0,
s->flow_control.remote_window_delta +
@ -332,56 +331,59 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
bool is_last_data_frame = false;
bool is_last_frame = false;
size_t sending_bytes_before = s->sending_bytes;
if (s->stream_compression_send_enabled) {
while ((s->flow_controlled_buffer.length > 0 ||
s->compressed_data_buffer->length > 0) &&
max_outgoing > 0) {
if (s->compressed_data_buffer->length > 0) {
uint32_t send_bytes = (uint32_t)GPR_MIN(
max_outgoing, s->compressed_data_buffer->length);
is_last_data_frame =
(send_bytes == s->compressed_data_buffer->length &&
s->flow_controlled_buffer.length == 0 &&
s->fetching_send_message == NULL);
is_last_frame =
is_last_data_frame && s->send_trailing_metadata != NULL &&
grpc_metadata_batch_is_empty(s->send_trailing_metadata);
grpc_chttp2_encode_data(s->id, s->compressed_data_buffer,
send_bytes, is_last_frame,
&s->stats.outgoing, &t->outbuf);
grpc_chttp2_flowctl_sent_data(&t->flow_control,
&s->flow_control, send_bytes);
max_outgoing -= send_bytes;
if (s->compressed_data_buffer->length == 0) {
s->sending_bytes += s->uncompressed_data_size;
while ((s->flow_controlled_buffer.length > 0 ||
s->compressed_data_buffer.length > 0) &&
max_outgoing > 0) {
if (s->compressed_data_buffer.length > 0) {
uint32_t send_bytes = (uint32_t)GPR_MIN(
max_outgoing, s->compressed_data_buffer.length);
is_last_data_frame =
(send_bytes == s->compressed_data_buffer.length &&
s->flow_controlled_buffer.length == 0 &&
s->fetching_send_message == NULL);
if (is_last_data_frame && s->send_trailing_metadata != NULL &&
s->stream_compression_ctx != NULL) {
if (!grpc_stream_compress(
s->stream_compression_ctx, &s->flow_controlled_buffer,
&s->compressed_data_buffer, NULL, MAX_SIZE_T,
GRPC_STREAM_COMPRESSION_FLUSH_FINISH)) {
gpr_log(GPR_ERROR, "Stream compression failed.");
}
} else {
if (s->stream_compression_ctx == NULL) {
s->stream_compression_ctx =
grpc_stream_compression_context_create(
GRPC_STREAM_COMPRESSION_COMPRESS);
}
s->uncompressed_data_size = s->flow_controlled_buffer.length;
GPR_ASSERT(grpc_stream_compress(
s->stream_compression_ctx, &s->flow_controlled_buffer,
s->compressed_data_buffer, NULL, MAX_SIZE_T,
GRPC_STREAM_COMPRESSION_FLUSH_SYNC));
grpc_stream_compression_context_destroy(
s->stream_compression_ctx);
s->stream_compression_ctx = NULL;
/* After finish, bytes in s->compressed_data_buffer may be
* more than max_outgoing. Start another round of the current
* while loop so that send_bytes and is_last_data_frame are
* recalculated. */
continue;
}
is_last_frame =
is_last_data_frame && s->send_trailing_metadata != NULL &&
grpc_metadata_batch_is_empty(s->send_trailing_metadata);
grpc_chttp2_encode_data(s->id, &s->compressed_data_buffer,
send_bytes, is_last_frame,
&s->stats.outgoing, &t->outbuf);
grpc_chttp2_flowctl_sent_data(&t->flow_control, &s->flow_control,
send_bytes);
max_outgoing -= send_bytes;
if (s->compressed_data_buffer.length == 0) {
s->sending_bytes += s->uncompressed_data_size;
}
} else {
if (s->stream_compression_ctx == NULL) {
s->stream_compression_ctx =
grpc_stream_compression_context_create(
s->stream_compression_method);
}
s->uncompressed_data_size = s->flow_controlled_buffer.length;
if (!grpc_stream_compress(
s->stream_compression_ctx, &s->flow_controlled_buffer,
&s->compressed_data_buffer, NULL, MAX_SIZE_T,
GRPC_STREAM_COMPRESSION_FLUSH_SYNC)) {
gpr_log(GPR_ERROR, "Stream compression failed.");
}
}
} else {
uint32_t send_bytes = (uint32_t)GPR_MIN(
max_outgoing, s->flow_controlled_buffer.length);
is_last_data_frame = s->fetching_send_message == NULL &&
send_bytes == s->flow_controlled_buffer.length;
is_last_frame =
is_last_data_frame && s->send_trailing_metadata != NULL &&
grpc_metadata_batch_is_empty(s->send_trailing_metadata);
grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer,
send_bytes, is_last_frame,
&s->stats.outgoing, &t->outbuf);
grpc_chttp2_flowctl_sent_data(&t->flow_control, &s->flow_control,
send_bytes);
s->sending_bytes += send_bytes;
}
if (!t->is_client) {
t->ping_recv_state.last_ping_recv_time =
@ -406,8 +408,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
&s->flow_controlled_bytes_flowed, GRPC_ERROR_NONE);
now_writing = true;
if (s->flow_controlled_buffer.length > 0 ||
(s->stream_compression_send_enabled &&
s->compressed_data_buffer->length > 0)) {
s->compressed_data_buffer.length > 0) {
GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:fork");
grpc_chttp2_list_add_writable_stream(t, s);
}
@ -423,8 +424,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
if (s->send_trailing_metadata != NULL &&
s->fetching_send_message == NULL &&
s->flow_controlled_buffer.length == 0 &&
(!s->stream_compression_send_enabled ||
s->compressed_data_buffer->length == 0)) {
s->compressed_data_buffer.length == 0) {
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "sending trailing_metadata"));
if (grpc_metadata_batch_is_empty(s->send_trailing_metadata)) {
grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer, 0, true,

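The rewritten send path above boils down to: lazily create a compression context for the stream's negotiated method, keep draining flow_controlled_buffer into compressed_data_buffer with a SYNC flush, and issue a FINISH flush only when the stream's final data frame is being produced. A simplified sketch of just that flush selection; this helper is illustrative and does not reproduce the transport's exact bookkeeping.

#include <stdbool.h>
#include <stddef.h>
#include <grpc/slice_buffer.h>
#include "src/core/lib/compression/stream_compression.h"

static bool compress_for_write(grpc_stream_compression_context **ctx,
                               grpc_stream_compression_method method,
                               grpc_slice_buffer *in, grpc_slice_buffer *out,
                               bool last_data_frame) {
  if (*ctx == NULL) {
    *ctx = grpc_stream_compression_context_create(method);
  }
  /* ~(size_t)0 stands in for the MAX_SIZE_T cap used in the diff above. */
  return grpc_stream_compress(*ctx, in, out, NULL, ~(size_t)0,
                              last_data_frame
                                  ? GRPC_STREAM_COMPRESSION_FLUSH_FINISH
                                  : GRPC_STREAM_COMPRESSION_FLUSH_SYNC);
}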
@ -16,177 +16,62 @@
*
*/
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/lib/compression/stream_compression.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/compression/stream_compression_gzip.h"
#define OUTPUT_BLOCK_SIZE (1024)
static bool gzip_flate(grpc_stream_compression_context *ctx,
grpc_slice_buffer *in, grpc_slice_buffer *out,
size_t *output_size, size_t max_output_size, int flush,
bool *end_of_context) {
GPR_ASSERT(flush == 0 || flush == Z_SYNC_FLUSH || flush == Z_FINISH);
/* Full flush is not allowed when inflating. */
GPR_ASSERT(!(ctx->flate == inflate && (flush == Z_FINISH)));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
int r;
bool eoc = false;
size_t original_max_output_size = max_output_size;
while (max_output_size > 0 && (in->length > 0 || flush) && !eoc) {
size_t slice_size = max_output_size < OUTPUT_BLOCK_SIZE ? max_output_size
: OUTPUT_BLOCK_SIZE;
grpc_slice slice_out = GRPC_SLICE_MALLOC(slice_size);
ctx->zs.avail_out = (uInt)slice_size;
ctx->zs.next_out = GRPC_SLICE_START_PTR(slice_out);
while (ctx->zs.avail_out > 0 && in->length > 0 && !eoc) {
grpc_slice slice = grpc_slice_buffer_take_first(in);
ctx->zs.avail_in = (uInt)GRPC_SLICE_LENGTH(slice);
ctx->zs.next_in = GRPC_SLICE_START_PTR(slice);
r = ctx->flate(&ctx->zs, Z_NO_FLUSH);
if (r < 0 && r != Z_BUF_ERROR) {
gpr_log(GPR_ERROR, "zlib error (%d)", r);
grpc_slice_unref_internal(&exec_ctx, slice_out);
grpc_exec_ctx_finish(&exec_ctx);
return false;
} else if (r == Z_STREAM_END && ctx->flate == inflate) {
eoc = true;
}
if (ctx->zs.avail_in > 0) {
grpc_slice_buffer_undo_take_first(
in,
grpc_slice_sub(slice, GRPC_SLICE_LENGTH(slice) - ctx->zs.avail_in,
GRPC_SLICE_LENGTH(slice)));
}
grpc_slice_unref_internal(&exec_ctx, slice);
}
if (flush != 0 && ctx->zs.avail_out > 0 && !eoc) {
GPR_ASSERT(in->length == 0);
r = ctx->flate(&ctx->zs, flush);
if (flush == Z_SYNC_FLUSH) {
switch (r) {
case Z_OK:
/* Maybe flush is not complete; just made some partial progress. */
if (ctx->zs.avail_out > 0) {
flush = 0;
}
break;
case Z_BUF_ERROR:
case Z_STREAM_END:
flush = 0;
break;
default:
gpr_log(GPR_ERROR, "zlib error (%d)", r);
grpc_slice_unref_internal(&exec_ctx, slice_out);
grpc_exec_ctx_finish(&exec_ctx);
return false;
}
} else if (flush == Z_FINISH) {
switch (r) {
case Z_OK:
case Z_BUF_ERROR:
/* Wait for the next loop to assign additional output space. */
GPR_ASSERT(ctx->zs.avail_out == 0);
break;
case Z_STREAM_END:
flush = 0;
break;
default:
gpr_log(GPR_ERROR, "zlib error (%d)", r);
grpc_slice_unref_internal(&exec_ctx, slice_out);
grpc_exec_ctx_finish(&exec_ctx);
return false;
}
}
}
if (ctx->zs.avail_out == 0) {
grpc_slice_buffer_add(out, slice_out);
} else if (ctx->zs.avail_out < slice_size) {
slice_out.data.refcounted.length -= ctx->zs.avail_out;
grpc_slice_buffer_add(out, slice_out);
} else {
grpc_slice_unref_internal(&exec_ctx, slice_out);
}
max_output_size -= (slice_size - ctx->zs.avail_out);
}
grpc_exec_ctx_finish(&exec_ctx);
if (end_of_context) {
*end_of_context = eoc;
}
if (output_size) {
*output_size = original_max_output_size - max_output_size;
}
return true;
}
extern const grpc_stream_compression_vtable
grpc_stream_compression_identity_vtable;
bool grpc_stream_compress(grpc_stream_compression_context *ctx,
grpc_slice_buffer *in, grpc_slice_buffer *out,
size_t *output_size, size_t max_output_size,
grpc_stream_compression_flush flush) {
GPR_ASSERT(ctx->flate == deflate);
int gzip_flush;
switch (flush) {
case GRPC_STREAM_COMPRESSION_FLUSH_NONE:
gzip_flush = 0;
break;
case GRPC_STREAM_COMPRESSION_FLUSH_SYNC:
gzip_flush = Z_SYNC_FLUSH;
break;
case GRPC_STREAM_COMPRESSION_FLUSH_FINISH:
gzip_flush = Z_FINISH;
break;
default:
gzip_flush = 0;
}
return gzip_flate(ctx, in, out, output_size, max_output_size, gzip_flush,
NULL);
return ctx->vtable->compress(ctx, in, out, output_size, max_output_size,
flush);
}
bool grpc_stream_decompress(grpc_stream_compression_context *ctx,
grpc_slice_buffer *in, grpc_slice_buffer *out,
size_t *output_size, size_t max_output_size,
bool *end_of_context) {
GPR_ASSERT(ctx->flate == inflate);
return gzip_flate(ctx, in, out, output_size, max_output_size, Z_SYNC_FLUSH,
end_of_context);
return ctx->vtable->decompress(ctx, in, out, output_size, max_output_size,
end_of_context);
}
grpc_stream_compression_context *grpc_stream_compression_context_create(
grpc_stream_compression_method method) {
grpc_stream_compression_context *ctx =
(grpc_stream_compression_context *)gpr_zalloc(
sizeof(grpc_stream_compression_context));
int r;
if (ctx == NULL) {
return NULL;
}
if (method == GRPC_STREAM_COMPRESSION_DECOMPRESS) {
r = inflateInit2(&ctx->zs, 0x1F);
ctx->flate = inflate;
} else {
r = deflateInit2(&ctx->zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 0x1F, 8,
Z_DEFAULT_STRATEGY);
ctx->flate = deflate;
}
if (r != Z_OK) {
gpr_free(ctx);
return NULL;
switch (method) {
case GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS:
case GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS:
return grpc_stream_compression_identity_vtable.context_create(method);
case GRPC_STREAM_COMPRESSION_GZIP_COMPRESS:
case GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS:
return grpc_stream_compression_gzip_vtable.context_create(method);
default:
gpr_log(GPR_ERROR, "Unknown stream compression method: %d", method);
return NULL;
}
return ctx;
}
void grpc_stream_compression_context_destroy(
grpc_stream_compression_context *ctx) {
if (ctx->flate == inflate) {
inflateEnd(&ctx->zs);
ctx->vtable->context_destroy(ctx);
}
int grpc_stream_compression_method_parse(
grpc_slice value, bool is_compress,
grpc_stream_compression_method *method) {
if (grpc_slice_eq(value, GRPC_MDSTR_IDENTITY)) {
*method = is_compress ? GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS
: GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS;
return 1;
} else if (grpc_slice_eq(value, GRPC_MDSTR_GZIP)) {
*method = is_compress ? GRPC_STREAM_COMPRESSION_GZIP_COMPRESS
: GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS;
return 1;
} else {
deflateEnd(&ctx->zs);
return 0;
}
gpr_free(ctx);
}

@ -24,15 +24,20 @@
#include <grpc/slice_buffer.h>
#include <zlib.h>
#include "src/core/lib/transport/static_metadata.h"
typedef struct grpc_stream_compression_vtable grpc_stream_compression_vtable;
/* Stream compression/decompression context */
typedef struct grpc_stream_compression_context {
z_stream zs;
int (*flate)(z_stream *zs, int flush);
const grpc_stream_compression_vtable *vtable;
} grpc_stream_compression_context;
typedef enum grpc_stream_compression_method {
GRPC_STREAM_COMPRESSION_COMPRESS = 0,
GRPC_STREAM_COMPRESSION_DECOMPRESS,
GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS = 0,
GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS,
GRPC_STREAM_COMPRESSION_GZIP_COMPRESS,
GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS,
GRPC_STREAM_COMPRESSION_METHOD_COUNT
} grpc_stream_compression_method;
@ -43,6 +48,19 @@ typedef enum grpc_stream_compression_flush {
GRPC_STREAM_COMPRESSION_FLUSH_COUNT
} grpc_stream_compression_flush;
struct grpc_stream_compression_vtable {
bool (*compress)(grpc_stream_compression_context *ctx, grpc_slice_buffer *in,
grpc_slice_buffer *out, size_t *output_size,
size_t max_output_size, grpc_stream_compression_flush flush);
bool (*decompress)(grpc_stream_compression_context *ctx,
grpc_slice_buffer *in, grpc_slice_buffer *out,
size_t *output_size, size_t max_output_size,
bool *end_of_context);
grpc_stream_compression_context *(*context_create)(
grpc_stream_compression_method method);
void (*context_destroy)(grpc_stream_compression_context *ctx);
};
/**
* Compress bytes provided in \a in with a given context, with an optional flush
* at the end of compression. Emits at most \a max_output_size compressed bytes
@ -87,4 +105,10 @@ grpc_stream_compression_context *grpc_stream_compression_context_create(
void grpc_stream_compression_context_destroy(
grpc_stream_compression_context *ctx);
/**
* Parse stream compression method based on algorithm name
*/
int grpc_stream_compression_method_parse(
grpc_slice value, bool is_compress, grpc_stream_compression_method *method);
#endif
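The vtable introduced above is the extension point for additional stream compression algorithms. A hypothetical skeleton of a new backend is sketched below; every my_* name is invented for illustration and does not exist in the tree.

#include <stdbool.h>
#include "src/core/lib/compression/stream_compression.h"

static bool my_compress(grpc_stream_compression_context *ctx,
                        grpc_slice_buffer *in, grpc_slice_buffer *out,
                        size_t *output_size, size_t max_output_size,
                        grpc_stream_compression_flush flush) {
  /* ... algorithm-specific compression ... */
  return true;
}

static bool my_decompress(grpc_stream_compression_context *ctx,
                          grpc_slice_buffer *in, grpc_slice_buffer *out,
                          size_t *output_size, size_t max_output_size,
                          bool *end_of_context) {
  /* ... algorithm-specific decompression ... */
  return true;
}

static grpc_stream_compression_context *my_context_create(
    grpc_stream_compression_method method) {
  /* Allocate a context whose first member is the base struct and set
   * base.vtable, as the gzip backend below does. */
  return NULL;
}

static void my_context_destroy(grpc_stream_compression_context *ctx) {}

const grpc_stream_compression_vtable my_stream_compression_vtable = {
    .compress = my_compress,
    .decompress = my_decompress,
    .context_create = my_context_create,
    .context_destroy = my_context_destroy};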

@ -0,0 +1,228 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/lib/compression/stream_compression_gzip.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/slice/slice_internal.h"
#define OUTPUT_BLOCK_SIZE (1024)
typedef struct grpc_stream_compression_context_gzip {
grpc_stream_compression_context base;
z_stream zs;
int (*flate)(z_stream *zs, int flush);
} grpc_stream_compression_context_gzip;
static bool gzip_flate(grpc_stream_compression_context_gzip *ctx,
grpc_slice_buffer *in, grpc_slice_buffer *out,
size_t *output_size, size_t max_output_size, int flush,
bool *end_of_context) {
GPR_ASSERT(flush == 0 || flush == Z_SYNC_FLUSH || flush == Z_FINISH);
/* Full flush is not allowed when inflating. */
GPR_ASSERT(!(ctx->flate == inflate && (flush == Z_FINISH)));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
int r;
bool eoc = false;
size_t original_max_output_size = max_output_size;
while (max_output_size > 0 && (in->length > 0 || flush) && !eoc) {
size_t slice_size = max_output_size < OUTPUT_BLOCK_SIZE ? max_output_size
: OUTPUT_BLOCK_SIZE;
grpc_slice slice_out = GRPC_SLICE_MALLOC(slice_size);
ctx->zs.avail_out = (uInt)slice_size;
ctx->zs.next_out = GRPC_SLICE_START_PTR(slice_out);
while (ctx->zs.avail_out > 0 && in->length > 0 && !eoc) {
grpc_slice slice = grpc_slice_buffer_take_first(in);
ctx->zs.avail_in = (uInt)GRPC_SLICE_LENGTH(slice);
ctx->zs.next_in = GRPC_SLICE_START_PTR(slice);
r = ctx->flate(&ctx->zs, Z_NO_FLUSH);
if (r < 0 && r != Z_BUF_ERROR) {
gpr_log(GPR_ERROR, "zlib error (%d)", r);
grpc_slice_unref_internal(&exec_ctx, slice_out);
grpc_exec_ctx_finish(&exec_ctx);
return false;
} else if (r == Z_STREAM_END && ctx->flate == inflate) {
eoc = true;
}
if (ctx->zs.avail_in > 0) {
grpc_slice_buffer_undo_take_first(
in,
grpc_slice_sub(slice, GRPC_SLICE_LENGTH(slice) - ctx->zs.avail_in,
GRPC_SLICE_LENGTH(slice)));
}
grpc_slice_unref_internal(&exec_ctx, slice);
}
if (flush != 0 && ctx->zs.avail_out > 0 && !eoc) {
GPR_ASSERT(in->length == 0);
r = ctx->flate(&ctx->zs, flush);
if (flush == Z_SYNC_FLUSH) {
switch (r) {
case Z_OK:
/* Maybe flush is not complete; just made some partial progress. */
if (ctx->zs.avail_out > 0) {
flush = 0;
}
break;
case Z_BUF_ERROR:
case Z_STREAM_END:
flush = 0;
break;
default:
gpr_log(GPR_ERROR, "zlib error (%d)", r);
grpc_slice_unref_internal(&exec_ctx, slice_out);
grpc_exec_ctx_finish(&exec_ctx);
return false;
}
} else if (flush == Z_FINISH) {
switch (r) {
case Z_OK:
case Z_BUF_ERROR:
/* Wait for the next loop to assign additional output space. */
GPR_ASSERT(ctx->zs.avail_out == 0);
break;
case Z_STREAM_END:
flush = 0;
break;
default:
gpr_log(GPR_ERROR, "zlib error (%d)", r);
grpc_slice_unref_internal(&exec_ctx, slice_out);
grpc_exec_ctx_finish(&exec_ctx);
return false;
}
}
}
if (ctx->zs.avail_out == 0) {
grpc_slice_buffer_add(out, slice_out);
} else if (ctx->zs.avail_out < slice_size) {
slice_out.data.refcounted.length -= ctx->zs.avail_out;
grpc_slice_buffer_add(out, slice_out);
} else {
grpc_slice_unref_internal(&exec_ctx, slice_out);
}
max_output_size -= (slice_size - ctx->zs.avail_out);
}
grpc_exec_ctx_finish(&exec_ctx);
if (end_of_context) {
*end_of_context = eoc;
}
if (output_size) {
*output_size = original_max_output_size - max_output_size;
}
return true;
}
static bool grpc_stream_compress_gzip(grpc_stream_compression_context *ctx,
grpc_slice_buffer *in,
grpc_slice_buffer *out,
size_t *output_size,
size_t max_output_size,
grpc_stream_compression_flush flush) {
if (ctx == NULL) {
return false;
}
grpc_stream_compression_context_gzip *gzip_ctx =
(grpc_stream_compression_context_gzip *)ctx;
GPR_ASSERT(gzip_ctx->flate == deflate);
int gzip_flush;
switch (flush) {
case GRPC_STREAM_COMPRESSION_FLUSH_NONE:
gzip_flush = 0;
break;
case GRPC_STREAM_COMPRESSION_FLUSH_SYNC:
gzip_flush = Z_SYNC_FLUSH;
break;
case GRPC_STREAM_COMPRESSION_FLUSH_FINISH:
gzip_flush = Z_FINISH;
break;
default:
gzip_flush = 0;
}
return gzip_flate(gzip_ctx, in, out, output_size, max_output_size, gzip_flush,
NULL);
}
static bool grpc_stream_decompress_gzip(grpc_stream_compression_context *ctx,
grpc_slice_buffer *in,
grpc_slice_buffer *out,
size_t *output_size,
size_t max_output_size,
bool *end_of_context) {
if (ctx == NULL) {
return false;
}
grpc_stream_compression_context_gzip *gzip_ctx =
(grpc_stream_compression_context_gzip *)ctx;
GPR_ASSERT(gzip_ctx->flate == inflate);
return gzip_flate(gzip_ctx, in, out, output_size, max_output_size,
Z_SYNC_FLUSH, end_of_context);
}
static grpc_stream_compression_context *
grpc_stream_compression_context_create_gzip(
grpc_stream_compression_method method) {
GPR_ASSERT(method == GRPC_STREAM_COMPRESSION_GZIP_COMPRESS ||
method == GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS);
grpc_stream_compression_context_gzip *gzip_ctx =
(grpc_stream_compression_context_gzip *)gpr_zalloc(
sizeof(grpc_stream_compression_context_gzip));
int r;
if (gzip_ctx == NULL) {
return NULL;
}
if (method == GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS) {
r = inflateInit2(&gzip_ctx->zs, 0x1F);
gzip_ctx->flate = inflate;
} else {
r = deflateInit2(&gzip_ctx->zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 0x1F, 8,
Z_DEFAULT_STRATEGY);
gzip_ctx->flate = deflate;
}
if (r != Z_OK) {
gpr_free(gzip_ctx);
return NULL;
}
gzip_ctx->base.vtable = &grpc_stream_compression_gzip_vtable;
return (grpc_stream_compression_context *)gzip_ctx;
}
static void grpc_stream_compression_context_destroy_gzip(
grpc_stream_compression_context *ctx) {
if (ctx == NULL) {
return;
}
grpc_stream_compression_context_gzip *gzip_ctx =
(grpc_stream_compression_context_gzip *)ctx;
if (gzip_ctx->flate == inflate) {
inflateEnd(&gzip_ctx->zs);
} else {
deflateEnd(&gzip_ctx->zs);
}
gpr_free(ctx);
}
const grpc_stream_compression_vtable grpc_stream_compression_gzip_vtable = {
.compress = grpc_stream_compress_gzip,
.decompress = grpc_stream_decompress_gzip,
.context_create = grpc_stream_compression_context_create_gzip,
.context_destroy = grpc_stream_compression_context_destroy_gzip};

@ -0,0 +1,26 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_GZIP_H
#define GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_GZIP_H
#include "src/core/lib/compression/stream_compression.h"
extern const grpc_stream_compression_vtable grpc_stream_compression_gzip_vtable;
#endif

@ -0,0 +1,94 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/lib/compression/stream_compression_identity.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/slice/slice_internal.h"
#define OUTPUT_BLOCK_SIZE (1024)
/* Singleton context used for all identity streams. */
static grpc_stream_compression_context identity_ctx = {
.vtable = &grpc_stream_compression_identity_vtable};
static void grpc_stream_compression_pass_through(grpc_slice_buffer *in,
grpc_slice_buffer *out,
size_t *output_size,
size_t max_output_size) {
if (max_output_size >= in->length) {
if (output_size) {
*output_size = in->length;
}
grpc_slice_buffer_move_into(in, out);
} else {
if (output_size) {
*output_size = max_output_size;
}
grpc_slice_buffer_move_first(in, max_output_size, out);
}
}
static bool grpc_stream_compress_identity(grpc_stream_compression_context *ctx,
grpc_slice_buffer *in,
grpc_slice_buffer *out,
size_t *output_size,
size_t max_output_size,
grpc_stream_compression_flush flush) {
if (ctx == NULL) {
return false;
}
grpc_stream_compression_pass_through(in, out, output_size, max_output_size);
return true;
}
static bool grpc_stream_decompress_identity(
grpc_stream_compression_context *ctx, grpc_slice_buffer *in,
grpc_slice_buffer *out, size_t *output_size, size_t max_output_size,
bool *end_of_context) {
if (ctx == NULL) {
return false;
}
grpc_stream_compression_pass_through(in, out, output_size, max_output_size);
if (end_of_context) {
*end_of_context = false;
}
return true;
}
static grpc_stream_compression_context *
grpc_stream_compression_context_create_identity(
grpc_stream_compression_method method) {
GPR_ASSERT(method == GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS ||
method == GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS);
/* No context needed in this case. Use fake context instead. */
return (grpc_stream_compression_context *)&identity_ctx;
}
static void grpc_stream_compression_context_destroy_identity(
grpc_stream_compression_context *ctx) {
return;
}
const grpc_stream_compression_vtable grpc_stream_compression_identity_vtable = {
.compress = grpc_stream_compress_identity,
.decompress = grpc_stream_decompress_identity,
.context_create = grpc_stream_compression_context_create_identity,
.context_destroy = grpc_stream_compression_context_destroy_identity};
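Because the identity path keeps no per-stream state, context_create can hand every caller the same static identity_ctx and context_destroy can be a no-op. The hedged sketch below shows that pass-through behaviour through the public entry points; it assumes the gRPC core tree, and ~(size_t)0 again stands in for MAX_SIZE_T.

#include <grpc/slice_buffer.h>
#include "src/core/lib/compression/stream_compression.h"

static void identity_pass_through(grpc_slice_buffer *in,
                                  grpc_slice_buffer *out) {
  grpc_stream_compression_context *ctx =
      grpc_stream_compression_context_create(
          GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS);
  /* Moves the slices across unchanged (up to the size cap); no zlib involved. */
  grpc_stream_compress(ctx, in, out, NULL, ~(size_t)0,
                       GRPC_STREAM_COMPRESSION_FLUSH_SYNC);
  grpc_stream_compression_context_destroy(ctx);
}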

@ -0,0 +1,27 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_IDENTITY_H
#define GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_IDENTITY_H
#include "src/core/lib/compression/stream_compression.h"
extern const grpc_stream_compression_vtable
grpc_stream_compression_identity_vtable;
#endif

@ -77,6 +77,28 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
"http2_initiate_write_due_to_transport_flow_control_unstalled",
"http2_initiate_write_due_to_ping_response",
"http2_initiate_write_due_to_force_rst_stream",
"hpack_recv_indexed",
"hpack_recv_lithdr_incidx",
"hpack_recv_lithdr_incidx_v",
"hpack_recv_lithdr_notidx",
"hpack_recv_lithdr_notidx_v",
"hpack_recv_lithdr_nvridx",
"hpack_recv_lithdr_nvridx_v",
"hpack_recv_uncompressed",
"hpack_recv_huffman",
"hpack_recv_binary",
"hpack_recv_binary_base64",
"hpack_send_indexed",
"hpack_send_lithdr_incidx",
"hpack_send_lithdr_incidx_v",
"hpack_send_lithdr_notidx",
"hpack_send_lithdr_notidx_v",
"hpack_send_lithdr_nvridx",
"hpack_send_lithdr_nvridx_v",
"hpack_send_uncompressed",
"hpack_send_huffman",
"hpack_send_binary",
"hpack_send_binary_base64",
"combiner_locks_initiated",
"combiner_locks_scheduled_items",
"combiner_locks_scheduled_final_items",
@ -87,8 +109,6 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
"executor_wakeup_initiated",
"executor_queue_drained",
"executor_push_retries",
"executor_threads_created",
"executor_threads_used",
"server_requested_calls",
"server_slowpath_requests_queued",
};
@ -157,6 +177,32 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"'transport_flow_control_unstalled'",
"Number of HTTP2 writes initiated due to 'ping_response'",
"Number of HTTP2 writes initiated due to 'force_rst_stream'",
"Number of HPACK indexed fields received",
"Number of HPACK literal headers received with incremental indexing",
"Number of HPACK literal headers received with incremental indexing and "
"literal keys",
"Number of HPACK literal headers received with no indexing",
"Number of HPACK literal headers received with no indexing and literal "
"keys",
"Number of HPACK literal headers received with never-indexing",
"Number of HPACK literal headers received with never-indexing and literal "
"keys",
"Number of uncompressed strings received in metadata",
"Number of huffman encoded strings received in metadata",
"Number of binary strings received in metadata",
"Number of binary strings received encoded in base64 in metadata",
"Number of HPACK indexed fields sent",
"Number of HPACK literal headers sent with incremental indexing",
"Number of HPACK literal headers sent with incremental indexing and "
"literal keys",
"Number of HPACK literal headers sent with no indexing",
"Number of HPACK literal headers sent with no indexing and literal keys",
"Number of HPACK literal headers sent with never-indexing",
"Number of HPACK literal headers sent with never-indexing and literal keys",
"Number of uncompressed strings sent in metadata",
"Number of huffman encoded strings sent in metadata",
"Number of binary strings received in metadata",
"Number of binary strings received encoded in base64 in metadata",
"Number of combiner lock entries by process (first items queued to a "
"combiner)",
"Number of items scheduled against combiner locks",
@ -171,8 +217,6 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of times an executor queue was drained",
"Number of times we raced and were forced to retry pushing a closure to "
"the executor",
"Size of the backing thread pool for overflow gRPC Core work",
"How many executor threads actually got used",
"How many calls were requested (not necessarily received) by the server",
"How many times was the server slow path taken (indicates too few "
"outstanding requests)",
@ -190,7 +234,6 @@ const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = {
"http2_send_message_per_write",
"http2_send_trailing_metadata_per_write",
"http2_send_flowctl_per_write",
"executor_closures_per_wakeup",
"server_cqs_checked",
};
const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
@ -206,7 +249,6 @@ const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
"Number of streams whose payload was written per TCP write",
"Number of streams terminated per TCP write",
"Number of flow control updates written per TCP write",
"Number of closures executed each time an executor wakes up",
"How many completion queues were checked looking for a CQ that had "
"requested the incoming call",
};
@ -278,7 +320,6 @@ const uint8_t grpc_stats_table_7[102] = {
const int grpc_stats_table_8[9] = {0, 1, 2, 4, 7, 13, 23, 39, 64};
const uint8_t grpc_stats_table_9[9] = {0, 0, 1, 2, 2, 3, 4, 4, 5};
void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 262144);
if (value < 6) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
@ -304,7 +345,6 @@ void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int value) {
(exec_ctx), value, grpc_stats_table_0, 64));
}
void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 29) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
@ -331,7 +371,6 @@ void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int value) {
(exec_ctx), value, grpc_stats_table_2, 128));
}
void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 16777216);
if (value < 5) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
@ -357,7 +396,6 @@ void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
(exec_ctx), value, grpc_stats_table_4, 64));
}
void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
@ -383,7 +421,6 @@ void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) {
(exec_ctx), value, grpc_stats_table_6, 64));
}
void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 16777216);
if (value < 5) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
@ -409,7 +446,6 @@ void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) {
(exec_ctx), value, grpc_stats_table_4, 64));
}
void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 16777216);
if (value < 5) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
@ -436,7 +472,6 @@ void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) {
}
void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx,
int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
@ -464,7 +499,6 @@ void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx,
}
void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx,
int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 16777216);
if (value < 5) {
GRPC_STATS_INC_HISTOGRAM(
@ -492,7 +526,6 @@ void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx,
}
void grpc_stats_inc_http2_send_initial_metadata_per_write(
grpc_exec_ctx *exec_ctx, int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
@ -522,7 +555,6 @@ void grpc_stats_inc_http2_send_initial_metadata_per_write(
}
void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
@ -550,7 +582,6 @@ void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
}
void grpc_stats_inc_http2_send_trailing_metadata_per_write(
grpc_exec_ctx *exec_ctx, int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
@ -580,7 +611,6 @@ void grpc_stats_inc_http2_send_trailing_metadata_per_write(
}
void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
@ -606,36 +636,7 @@ void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_6, 64));
}
void grpc_stats_inc_executor_closures_per_wakeup(grpc_exec_ctx *exec_ctx,
int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP, value);
return;
}
union {
double dbl;
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_6[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP, bucket);
return;
}
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_6, 64));
}
void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) {
/* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 64);
if (value < 3) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
@ -660,17 +661,17 @@ void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) {
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_8, 8));
}
const int grpc_stats_histo_buckets[14] = {64, 128, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 8};
const int grpc_stats_histo_start[14] = {0, 64, 192, 256, 320, 384, 448,
512, 576, 640, 704, 768, 832, 896};
const int *const grpc_stats_histo_bucket_boundaries[14] = {
const int grpc_stats_histo_buckets[13] = {64, 128, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 8};
const int grpc_stats_histo_start[13] = {0, 64, 192, 256, 320, 384, 448,
512, 576, 640, 704, 768, 832};
const int *const grpc_stats_histo_bucket_boundaries[13] = {
grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_4,
grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_4,
grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_6,
grpc_stats_table_6, grpc_stats_table_6, grpc_stats_table_6,
grpc_stats_table_6, grpc_stats_table_8};
void (*const grpc_stats_inc_histogram[14])(grpc_exec_ctx *exec_ctx, int x) = {
grpc_stats_table_8};
void (*const grpc_stats_inc_histogram[13])(grpc_exec_ctx *exec_ctx, int x) = {
grpc_stats_inc_call_initial_size,
grpc_stats_inc_poll_events_returned,
grpc_stats_inc_tcp_write_size,
@ -683,5 +684,4 @@ void (*const grpc_stats_inc_histogram[14])(grpc_exec_ctx *exec_ctx, int x) = {
grpc_stats_inc_http2_send_message_per_write,
grpc_stats_inc_http2_send_trailing_metadata_per_write,
grpc_stats_inc_http2_send_flowctl_per_write,
grpc_stats_inc_executor_closures_per_wakeup,
grpc_stats_inc_server_cqs_checked};
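A side note on the generated histogram functions retained above: the fast path classifies a value into a log-spaced bucket by reinterpreting its double representation as a 64-bit integer and using the exponent bits as a table index, which is what the union and the >> 48 shift do. A self-contained toy demonstrating just that bit trick (not the generated code itself):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Larger values map to larger high-bit patterns, so the exponent bits of a
 * double give a cheap logarithmic bucket index. */
static unsigned toy_bucket_bits(double v) {
  uint64_t bits;
  memcpy(&bits, &v, sizeof(bits)); /* same idea as the union in the diff */
  return (unsigned)(bits >> 48);   /* sign + exponent + top mantissa bits */
}

int main(void) {
  printf("%u %u %u\n", toy_bucket_bits(2.0), toy_bucket_bits(1024.0),
         toy_bucket_bits(1048576.0));
  return 0;
}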

@ -79,6 +79,28 @@ typedef enum {
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM,
GRPC_STATS_COUNTER_HPACK_RECV_INDEXED,
GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX,
GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX_V,
GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX,
GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX_V,
GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX,
GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX_V,
GRPC_STATS_COUNTER_HPACK_RECV_UNCOMPRESSED,
GRPC_STATS_COUNTER_HPACK_RECV_HUFFMAN,
GRPC_STATS_COUNTER_HPACK_RECV_BINARY,
GRPC_STATS_COUNTER_HPACK_RECV_BINARY_BASE64,
GRPC_STATS_COUNTER_HPACK_SEND_INDEXED,
GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX,
GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX_V,
GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX,
GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX_V,
GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX,
GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX_V,
GRPC_STATS_COUNTER_HPACK_SEND_UNCOMPRESSED,
GRPC_STATS_COUNTER_HPACK_SEND_HUFFMAN,
GRPC_STATS_COUNTER_HPACK_SEND_BINARY,
GRPC_STATS_COUNTER_HPACK_SEND_BINARY_BASE64,
GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED,
GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_ITEMS,
GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS,
@ -89,8 +111,6 @@ typedef enum {
GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED,
GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED,
GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES,
GRPC_STATS_COUNTER_EXECUTOR_THREADS_CREATED,
GRPC_STATS_COUNTER_EXECUTOR_THREADS_USED,
GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS,
GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED,
GRPC_STATS_COUNTER_COUNT
@ -110,7 +130,6 @@ typedef enum {
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP,
GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED,
GRPC_STATS_HISTOGRAM_COUNT
} grpc_stats_histograms;
@ -141,11 +160,9 @@ typedef enum {
GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_FIRST_SLOT = 768,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP_FIRST_SLOT = 832,
GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_FIRST_SLOT = 896,
GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_FIRST_SLOT = 832,
GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_BUCKETS = 8,
GRPC_STATS_HISTOGRAM_BUCKETS = 904
GRPC_STATS_HISTOGRAM_BUCKETS = 840
} grpc_stats_histogram_constants;
#define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED)
@ -309,6 +326,64 @@ typedef enum {
GRPC_STATS_INC_COUNTER( \
(exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM)
#define GRPC_STATS_INC_HPACK_RECV_INDEXED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_INDEXED)
#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX)
#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX_V(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX_V)
#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX)
#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX_V(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX_V)
#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX)
#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX_V(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX_V)
#define GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_UNCOMPRESSED)
#define GRPC_STATS_INC_HPACK_RECV_HUFFMAN(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_HUFFMAN)
#define GRPC_STATS_INC_HPACK_RECV_BINARY(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_BINARY)
#define GRPC_STATS_INC_HPACK_RECV_BINARY_BASE64(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_HPACK_RECV_BINARY_BASE64)
#define GRPC_STATS_INC_HPACK_SEND_INDEXED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_INDEXED)
#define GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX)
#define GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX_V(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX_V)
#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX)
#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX_V(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX_V)
#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NVRIDX(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX)
#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NVRIDX_V(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX_V)
#define GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_UNCOMPRESSED)
#define GRPC_STATS_INC_HPACK_SEND_HUFFMAN(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_HUFFMAN)
#define GRPC_STATS_INC_HPACK_SEND_BINARY(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_BINARY)
#define GRPC_STATS_INC_HPACK_SEND_BINARY_BASE64(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_HPACK_SEND_BINARY_BASE64)
#define GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED)
@ -337,11 +412,6 @@ typedef enum {
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED)
#define GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES)
#define GRPC_STATS_INC_EXECUTOR_THREADS_CREATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_EXECUTOR_THREADS_CREATED)
#define GRPC_STATS_INC_EXECUTOR_THREADS_USED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_THREADS_USED)
#define GRPC_STATS_INC_SERVER_REQUESTED_CALLS(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS)
#define GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx) \
@ -388,17 +458,13 @@ void grpc_stats_inc_http2_send_trailing_metadata_per_write(
grpc_stats_inc_http2_send_flowctl_per_write((exec_ctx), (int)(value))
void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
int x);
#define GRPC_STATS_INC_EXECUTOR_CLOSURES_PER_WAKEUP(exec_ctx, value) \
grpc_stats_inc_executor_closures_per_wakeup((exec_ctx), (int)(value))
void grpc_stats_inc_executor_closures_per_wakeup(grpc_exec_ctx *exec_ctx,
int x);
#define GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, value) \
grpc_stats_inc_server_cqs_checked((exec_ctx), (int)(value))
void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int x);
extern const int grpc_stats_histo_buckets[14];
extern const int grpc_stats_histo_start[14];
extern const int *const grpc_stats_histo_bucket_boundaries[14];
extern void (*const grpc_stats_inc_histogram[14])(grpc_exec_ctx *exec_ctx,
extern const int grpc_stats_histo_buckets[13];
extern const int grpc_stats_histo_start[13];
extern const int *const grpc_stats_histo_bucket_boundaries[13];
extern void (*const grpc_stats_inc_histogram[13])(grpc_exec_ctx *exec_ctx,
int x);
#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */
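The generated macros above are meant to be dropped directly at instrumentation sites: counter macros take only the exec_ctx, while histogram macros also take the sampled value and cast it to int before dispatching to the per-histogram inc function. A minimal sketch of such call sites, assuming the umbrella header src/core/lib/debug/stats.h (which provides GRPC_STATS_INC_COUNTER); the two wrapper functions are purely illustrative:

#include "src/core/lib/debug/stats.h"

/* Illustrative wrapper: bump a plain counter (exec_ctx only). */
static void note_indexed_field_received(grpc_exec_ctx *exec_ctx) {
  GRPC_STATS_INC_HPACK_RECV_INDEXED(exec_ctx);
}

/* Illustrative wrapper: record a histogram sample; the macro casts the
   value to int, matching grpc_stats_inc_server_cqs_checked above. */
static void note_cqs_checked(grpc_exec_ctx *exec_ctx, size_t cqs_checked) {
  GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, cqs_checked);
}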

@ -189,6 +189,50 @@
doc: Number of HTTP2 writes initiated due to 'ping_response'
- counter: http2_initiate_write_due_to_force_rst_stream
doc: Number of HTTP2 writes initiated due to 'force_rst_stream'
- counter: hpack_recv_indexed
doc: Number of HPACK indexed fields received
- counter: hpack_recv_lithdr_incidx
doc: Number of HPACK literal headers received with incremental indexing
- counter: hpack_recv_lithdr_incidx_v
doc: Number of HPACK literal headers received with incremental indexing and literal keys
- counter: hpack_recv_lithdr_notidx
doc: Number of HPACK literal headers received with no indexing
- counter: hpack_recv_lithdr_notidx_v
doc: Number of HPACK literal headers received with no indexing and literal keys
- counter: hpack_recv_lithdr_nvridx
doc: Number of HPACK literal headers received with never-indexing
- counter: hpack_recv_lithdr_nvridx_v
doc: Number of HPACK literal headers received with never-indexing and literal keys
- counter: hpack_recv_uncompressed
doc: Number of uncompressed strings received in metadata
- counter: hpack_recv_huffman
doc: Number of huffman encoded strings received in metadata
- counter: hpack_recv_binary
doc: Number of binary strings received in metadata
- counter: hpack_recv_binary_base64
doc: Number of binary strings received encoded in base64 in metadata
- counter: hpack_send_indexed
doc: Number of HPACK indexed fields sent
- counter: hpack_send_lithdr_incidx
doc: Number of HPACK literal headers sent with incremental indexing
- counter: hpack_send_lithdr_incidx_v
doc: Number of HPACK literal headers sent with incremental indexing and literal keys
- counter: hpack_send_lithdr_notidx
doc: Number of HPACK literal headers sent with no indexing
- counter: hpack_send_lithdr_notidx_v
doc: Number of HPACK literal headers sent with no indexing and literal keys
- counter: hpack_send_lithdr_nvridx
doc: Number of HPACK literal headers sent with never-indexing
- counter: hpack_send_lithdr_nvridx_v
doc: Number of HPACK literal headers sent with never-indexing and literal keys
- counter: hpack_send_uncompressed
doc: Number of uncompressed strings sent in metadata
- counter: hpack_send_huffman
doc: Number of huffman encoded strings sent in metadata
- counter: hpack_send_binary
doc: Number of binary strings sent in metadata
- counter: hpack_send_binary_base64
doc: Number of binary strings sent encoded in base64 in metadata
# combiner locks
- counter: combiner_locks_initiated
doc: Number of combiner lock entries by process
@ -215,14 +259,6 @@
- counter: executor_push_retries
doc: Number of times we raced and were forced to retry pushing a closure to
the executor
- counter: executor_threads_created
doc: Size of the backing thread pool for overflow gRPC Core work
- counter: executor_threads_used
doc: How many executor threads actually got used
- histogram: executor_closures_per_wakeup
max: 1024
buckets: 64
doc: Number of closures executed each time an executor wakes up
# server
- counter: server_requested_calls
doc: How many calls were requested (not necessarily received) by the server

@ -52,6 +52,28 @@ http2_initiate_write_due_to_keepalive_ping_per_iteration:FLOAT,
http2_initiate_write_due_to_transport_flow_control_unstalled_per_iteration:FLOAT,
http2_initiate_write_due_to_ping_response_per_iteration:FLOAT,
http2_initiate_write_due_to_force_rst_stream_per_iteration:FLOAT,
hpack_recv_indexed_per_iteration:FLOAT,
hpack_recv_lithdr_incidx_per_iteration:FLOAT,
hpack_recv_lithdr_incidx_v_per_iteration:FLOAT,
hpack_recv_lithdr_notidx_per_iteration:FLOAT,
hpack_recv_lithdr_notidx_v_per_iteration:FLOAT,
hpack_recv_lithdr_nvridx_per_iteration:FLOAT,
hpack_recv_lithdr_nvridx_v_per_iteration:FLOAT,
hpack_recv_uncompressed_per_iteration:FLOAT,
hpack_recv_huffman_per_iteration:FLOAT,
hpack_recv_binary_per_iteration:FLOAT,
hpack_recv_binary_base64_per_iteration:FLOAT,
hpack_send_indexed_per_iteration:FLOAT,
hpack_send_lithdr_incidx_per_iteration:FLOAT,
hpack_send_lithdr_incidx_v_per_iteration:FLOAT,
hpack_send_lithdr_notidx_per_iteration:FLOAT,
hpack_send_lithdr_notidx_v_per_iteration:FLOAT,
hpack_send_lithdr_nvridx_per_iteration:FLOAT,
hpack_send_lithdr_nvridx_v_per_iteration:FLOAT,
hpack_send_uncompressed_per_iteration:FLOAT,
hpack_send_huffman_per_iteration:FLOAT,
hpack_send_binary_per_iteration:FLOAT,
hpack_send_binary_base64_per_iteration:FLOAT,
combiner_locks_initiated_per_iteration:FLOAT,
combiner_locks_scheduled_items_per_iteration:FLOAT,
combiner_locks_scheduled_final_items_per_iteration:FLOAT,
@ -62,7 +84,5 @@ executor_scheduled_to_self_per_iteration:FLOAT,
executor_wakeup_initiated_per_iteration:FLOAT,
executor_queue_drained_per_iteration:FLOAT,
executor_push_retries_per_iteration:FLOAT,
executor_threads_created_per_iteration:FLOAT,
executor_threads_used_per_iteration:FLOAT,
server_requested_calls_per_iteration:FLOAT,
server_slowpath_requests_queued_per_iteration:FLOAT

@ -217,7 +217,7 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
GRPC_CLOSURE_INIT(&req->connected, on_connected, req,
grpc_schedule_on_exec_ctx);
grpc_arg arg = grpc_channel_arg_pointer_create(
GRPC_ARG_RESOURCE_QUOTA, req->resource_quota,
(char *)GRPC_ARG_RESOURCE_QUOTA, req->resource_quota,
grpc_resource_quota_arg_vtable());
grpc_channel_args args = {1, &arg};
grpc_tcp_client_connect(exec_ctx, &req->connected, &req->ep,

@ -280,8 +280,9 @@ static grpc_fd *fd_create(int fd, const char *name) {
#endif
gpr_free(fd_name);
struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET),
.data.ptr = new_fd};
struct epoll_event ev;
ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
ev.data.ptr = new_fd;
if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, fd, &ev) != 0) {
gpr_log(GPR_ERROR, "epoll_ctl failed: %s", strerror(errno));
}
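The epoll poller hunks in this change replace C99 designated initializers (including the nested .data.ptr member) with plain field assignments so the files also build under stricter or older C compilers. The same registration pattern in isolation, as a sketch only; epfd, fd and payload are placeholders for whatever object the poller tracks:

#include <stdint.h>
#include <string.h>
#include <sys/epoll.h>

/* Register fd with edge-triggered read/write interest, field by field. */
static int register_fd(int epfd, int fd, void *payload) {
  struct epoll_event ev;
  memset(&ev, 0, sizeof(ev)); /* keep the unused union bytes deterministic */
  ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
  ev.data.ptr = payload;
  return epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
}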
@ -435,8 +436,9 @@ static grpc_error *pollset_global_init(void) {
global_wakeup_fd.read_fd = -1;
grpc_error *err = grpc_wakeup_fd_init(&global_wakeup_fd);
if (err != GRPC_ERROR_NONE) return err;
struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLET),
.data.ptr = &global_wakeup_fd};
struct epoll_event ev;
ev.events = (uint32_t)(EPOLLIN | EPOLLET);
ev.data.ptr = &global_wakeup_fd;
if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, global_wakeup_fd.read_fd,
&ev) != 0) {
return GRPC_OS_ERROR(errno, "epoll_ctl");
@ -572,7 +574,10 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
}
static const gpr_timespec round_up = {
.clock_type = GPR_TIMESPAN, .tv_sec = 0, .tv_nsec = GPR_NS_PER_MS - 1};
0, /* tv_sec */
GPR_NS_PER_MS - 1, /* tv_nsec */
GPR_TIMESPAN /* clock_type */
};
timeout = gpr_time_sub(deadline, now);
int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up));
return millis >= 1 ? millis : 1;
@ -1197,34 +1202,34 @@ static void shutdown_engine(void) {
}
static const grpc_event_engine_vtable vtable = {
.pollset_size = sizeof(grpc_pollset),
.fd_create = fd_create,
.fd_wrapped_fd = fd_wrapped_fd,
.fd_orphan = fd_orphan,
.fd_shutdown = fd_shutdown,
.fd_is_shutdown = fd_is_shutdown,
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
.fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
.pollset_destroy = pollset_destroy,
.pollset_work = pollset_work,
.pollset_kick = pollset_kick,
.pollset_add_fd = pollset_add_fd,
.pollset_set_create = pollset_set_create,
.pollset_set_destroy = pollset_set_destroy,
.pollset_set_add_pollset = pollset_set_add_pollset,
.pollset_set_del_pollset = pollset_set_del_pollset,
.pollset_set_add_pollset_set = pollset_set_add_pollset_set,
.pollset_set_del_pollset_set = pollset_set_del_pollset_set,
.pollset_set_add_fd = pollset_set_add_fd,
.pollset_set_del_fd = pollset_set_del_fd,
.shutdown_engine = shutdown_engine,
sizeof(grpc_pollset),
fd_create,
fd_wrapped_fd,
fd_orphan,
fd_shutdown,
fd_notify_on_read,
fd_notify_on_write,
fd_is_shutdown,
fd_get_read_notifier_pollset,
pollset_init,
pollset_shutdown,
pollset_destroy,
pollset_work,
pollset_kick,
pollset_add_fd,
pollset_set_create,
pollset_set_destroy,
pollset_set_add_pollset,
pollset_set_del_pollset,
pollset_set_add_pollset_set,
pollset_set_del_pollset_set,
pollset_set_add_fd,
pollset_set_del_fd,
shutdown_engine,
};
/* It is possible that GLIBC has epoll but the underlying kernel doesn't.

@ -477,8 +477,9 @@ static grpc_error *pollable_materialize(pollable *p) {
close(new_epfd);
return err;
}
struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLET),
.data.ptr = (void *)(1 | (intptr_t)&p->wakeup)};
struct epoll_event ev;
ev.events = (uint32_t)(EPOLLIN | EPOLLET);
ev.data.ptr = (void *)(1 | (intptr_t)&p->wakeup);
if (epoll_ctl(new_epfd, EPOLL_CTL_ADD, p->wakeup.read_fd, &ev) != 0) {
err = GRPC_OS_ERROR(errno, "epoll_ctl");
close(new_epfd);
@ -507,9 +508,9 @@ static grpc_error *pollable_add_fd(pollable *p, grpc_fd *fd) {
gpr_mu_unlock(&fd->orphaned_mu);
return GRPC_ERROR_NONE;
}
struct epoll_event ev_fd = {
.events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLOUT | EPOLLEXCLUSIVE),
.data.ptr = fd};
struct epoll_event ev_fd;
ev_fd.events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLOUT | EPOLLEXCLUSIVE);
ev_fd.data.ptr = fd;
if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd->fd, &ev_fd) != 0) {
switch (errno) {
case EEXIST:
@ -708,7 +709,10 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
}
static const gpr_timespec round_up = {
.clock_type = GPR_TIMESPAN, .tv_sec = 0, .tv_nsec = GPR_NS_PER_MS - 1};
0, /* tv_sec */
GPR_NS_PER_MS - 1, /* tv_nsec */
GPR_TIMESPAN /* clock_type */
};
timeout = gpr_time_sub(deadline, now);
int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up));
return millis >= 1 ? millis : 1;
@ -1392,34 +1396,34 @@ static void shutdown_engine(void) {
}
static const grpc_event_engine_vtable vtable = {
.pollset_size = sizeof(grpc_pollset),
.fd_create = fd_create,
.fd_wrapped_fd = fd_wrapped_fd,
.fd_orphan = fd_orphan,
.fd_shutdown = fd_shutdown,
.fd_is_shutdown = fd_is_shutdown,
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
.fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
.pollset_destroy = pollset_destroy,
.pollset_work = pollset_work,
.pollset_kick = pollset_kick,
.pollset_add_fd = pollset_add_fd,
.pollset_set_create = pollset_set_create,
.pollset_set_destroy = pollset_set_destroy,
.pollset_set_add_pollset = pollset_set_add_pollset,
.pollset_set_del_pollset = pollset_set_del_pollset,
.pollset_set_add_pollset_set = pollset_set_add_pollset_set,
.pollset_set_del_pollset_set = pollset_set_del_pollset_set,
.pollset_set_add_fd = pollset_set_add_fd,
.pollset_set_del_fd = pollset_set_del_fd,
.shutdown_engine = shutdown_engine,
sizeof(grpc_pollset),
fd_create,
fd_wrapped_fd,
fd_orphan,
fd_shutdown,
fd_notify_on_read,
fd_notify_on_write,
fd_is_shutdown,
fd_get_read_notifier_pollset,
pollset_init,
pollset_shutdown,
pollset_destroy,
pollset_work,
pollset_kick,
pollset_add_fd,
pollset_set_create,
pollset_set_destroy,
pollset_set_add_pollset,
pollset_set_del_pollset,
pollset_set_add_pollset_set,
pollset_set_del_pollset_set,
pollset_set_add_fd,
pollset_set_del_fd,
shutdown_engine,
};
const grpc_event_engine_vtable *grpc_init_epollex_linux(

@ -1133,7 +1133,8 @@ static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
}
static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx,
grpc_pollset *ps, char *reason) {
grpc_pollset *ps,
const char *reason) {
if (ps->po.pi != NULL) {
PI_UNREF(exec_ctx, ps->po.pi, reason);
}
@ -1671,34 +1672,34 @@ static void shutdown_engine(void) {
}
static const grpc_event_engine_vtable vtable = {
.pollset_size = sizeof(grpc_pollset),
.fd_create = fd_create,
.fd_wrapped_fd = fd_wrapped_fd,
.fd_orphan = fd_orphan,
.fd_shutdown = fd_shutdown,
.fd_is_shutdown = fd_is_shutdown,
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
.fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
.pollset_destroy = pollset_destroy,
.pollset_work = pollset_work,
.pollset_kick = pollset_kick,
.pollset_add_fd = pollset_add_fd,
.pollset_set_create = pollset_set_create,
.pollset_set_destroy = pollset_set_destroy,
.pollset_set_add_pollset = pollset_set_add_pollset,
.pollset_set_del_pollset = pollset_set_del_pollset,
.pollset_set_add_pollset_set = pollset_set_add_pollset_set,
.pollset_set_del_pollset_set = pollset_set_del_pollset_set,
.pollset_set_add_fd = pollset_set_add_fd,
.pollset_set_del_fd = pollset_set_del_fd,
.shutdown_engine = shutdown_engine,
sizeof(grpc_pollset),
fd_create,
fd_wrapped_fd,
fd_orphan,
fd_shutdown,
fd_notify_on_read,
fd_notify_on_write,
fd_is_shutdown,
fd_get_read_notifier_pollset,
pollset_init,
pollset_shutdown,
pollset_destroy,
pollset_work,
pollset_kick,
pollset_add_fd,
pollset_set_create,
pollset_set_destroy,
pollset_set_add_pollset,
pollset_set_del_pollset,
pollset_set_add_pollset_set,
pollset_set_del_pollset_set,
pollset_set_add_fd,
pollset_set_del_fd,
shutdown_engine,
};
/* It is possible that GLIBC has epoll but the underlying kernel doesn't.

@ -1692,34 +1692,34 @@ static void shutdown_engine(void) {
}
static const grpc_event_engine_vtable vtable = {
.pollset_size = sizeof(grpc_pollset),
.fd_create = fd_create,
.fd_wrapped_fd = fd_wrapped_fd,
.fd_orphan = fd_orphan,
.fd_shutdown = fd_shutdown,
.fd_is_shutdown = fd_is_shutdown,
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
.fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
.pollset_destroy = pollset_destroy,
.pollset_work = pollset_work,
.pollset_kick = pollset_kick,
.pollset_add_fd = pollset_add_fd,
.pollset_set_create = pollset_set_create,
.pollset_set_destroy = pollset_set_destroy,
.pollset_set_add_pollset = pollset_set_add_pollset,
.pollset_set_del_pollset = pollset_set_del_pollset,
.pollset_set_add_pollset_set = pollset_set_add_pollset_set,
.pollset_set_del_pollset_set = pollset_set_del_pollset_set,
.pollset_set_add_fd = pollset_set_add_fd,
.pollset_set_del_fd = pollset_set_del_fd,
.shutdown_engine = shutdown_engine,
sizeof(grpc_pollset),
fd_create,
fd_wrapped_fd,
fd_orphan,
fd_shutdown,
fd_notify_on_read,
fd_notify_on_write,
fd_is_shutdown,
fd_get_read_notifier_pollset,
pollset_init,
pollset_shutdown,
pollset_destroy,
pollset_work,
pollset_kick,
pollset_add_fd,
pollset_set_create,
pollset_set_destroy,
pollset_set_add_pollset,
pollset_set_del_pollset,
pollset_set_add_pollset_set,
pollset_set_del_pollset_set,
pollset_set_add_fd,
pollset_set_del_fd,
shutdown_engine,
};
const grpc_event_engine_vtable *grpc_init_poll_posix(bool explicit_request) {

@ -32,14 +32,16 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/support/spinlock.h"
#define MAX_DEPTH 2
typedef struct {
gpr_mu mu;
gpr_cv cv;
grpc_closure_list elems;
size_t depth;
bool shutdown;
bool queued_long_job;
gpr_thd_id id;
grpc_closure_list local_elems;
} thread_state;
static thread_state *g_thread_state;
@ -54,35 +56,32 @@ static grpc_tracer_flag executor_trace =
static void executor_thread(void *arg);
static void run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
int n = 0; // number of closures executed
static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
size_t n = 0;
while (!grpc_closure_list_empty(*list)) {
grpc_closure *c = list->head;
grpc_closure_list_init(list);
while (c != NULL) {
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error_data.error;
if (GRPC_TRACER_ON(executor_trace)) {
grpc_closure *c = list.head;
while (c != NULL) {
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error_data.error;
if (GRPC_TRACER_ON(executor_trace)) {
#ifndef NDEBUG
gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c,
c->file_created, c->line_created);
gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c,
c->file_created, c->line_created);
#else
gpr_log(GPR_DEBUG, "EXECUTOR: run %p", c);
gpr_log(GPR_DEBUG, "EXECUTOR: run %p", c);
#endif
}
}
#ifndef NDEBUG
c->scheduled = false;
c->scheduled = false;
#endif
n++;
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
c = next;
grpc_exec_ctx_flush(exec_ctx);
}
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
c = next;
n++;
grpc_exec_ctx_flush(exec_ctx);
}
GRPC_STATS_INC_EXECUTOR_CLOSURES_PER_WAKEUP(exec_ctx, n);
return n;
}
bool grpc_executor_is_threaded() {
@ -127,7 +126,7 @@ void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) {
for (size_t i = 0; i < g_max_threads; i++) {
gpr_mu_destroy(&g_thread_state[i].mu);
gpr_cv_destroy(&g_thread_state[i].cv);
run_closures(exec_ctx, &g_thread_state[i].elems);
run_closures(exec_ctx, g_thread_state[i].elems);
}
gpr_free(g_thread_state);
gpr_tls_destroy(&g_this_thread_state);
@ -151,14 +150,14 @@ static void executor_thread(void *arg) {
grpc_exec_ctx exec_ctx =
GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
GRPC_STATS_INC_EXECUTOR_THREADS_CREATED(&exec_ctx);
bool used = false;
size_t subtract_depth = 0;
for (;;) {
if (GRPC_TRACER_ON(executor_trace)) {
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step", (int)(ts - g_thread_state));
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step (sub_depth=%" PRIdPTR ")",
(int)(ts - g_thread_state), subtract_depth);
}
gpr_mu_lock(&ts->mu);
ts->depth -= subtract_depth;
while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
ts->queued_long_job = false;
gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
@ -171,20 +170,15 @@ static void executor_thread(void *arg) {
gpr_mu_unlock(&ts->mu);
break;
}
if (!used) {
GRPC_STATS_INC_EXECUTOR_THREADS_USED(&exec_ctx);
used = true;
}
GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(&exec_ctx);
GPR_ASSERT(grpc_closure_list_empty(ts->local_elems));
ts->local_elems = ts->elems;
grpc_closure_list exec = ts->elems;
ts->elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
gpr_mu_unlock(&ts->mu);
if (GRPC_TRACER_ON(executor_trace)) {
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: execute", (int)(ts - g_thread_state));
}
run_closures(&exec_ctx, &ts->local_elems);
subtract_depth = run_closures(&exec_ctx, exec);
}
grpc_exec_ctx_finish(&exec_ctx);
}
@ -217,10 +211,6 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)];
} else {
GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx);
if (is_short) {
grpc_closure_list_append(&ts->local_elems, closure, error);
return;
}
}
thread_state *orig_ts = ts;
@ -260,7 +250,8 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
gpr_cv_signal(&ts->cv);
}
grpc_closure_list_append(&ts->elems, closure, error);
try_new_thread = ts->elems.head != closure &&
ts->depth++;
try_new_thread = ts->depth > MAX_DEPTH &&
cur_thread_count < g_max_threads && !ts->shutdown;
if (!is_short) ts->queued_long_job = true;
gpr_mu_unlock(&ts->mu);

@ -50,7 +50,7 @@ void grpc_iomgr_init(grpc_exec_ctx *exec_ctx) {
grpc_executor_init(exec_ctx);
grpc_timer_list_init(gpr_now(GPR_CLOCK_MONOTONIC));
g_root_object.next = g_root_object.prev = &g_root_object;
g_root_object.name = "root";
g_root_object.name = (char *)"root";
grpc_network_status_init();
grpc_iomgr_platform_init();
}

@ -57,12 +57,12 @@ bool grpc_is_epollexclusive_available(void) {
close(fd);
return false;
}
struct epoll_event ev = {
/* choose events that should cause an error on
EPOLLEXCLUSIVE enabled kernels - specifically the combination of
EPOLLONESHOT and EPOLLEXCLUSIVE */
.events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLEXCLUSIVE | EPOLLONESHOT),
.data.ptr = NULL};
struct epoll_event ev;
/* choose events that should cause an error on
EPOLLEXCLUSIVE enabled kernels - specifically the combination of
EPOLLONESHOT and EPOLLEXCLUSIVE */
ev.events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLEXCLUSIVE | EPOLLONESHOT);
ev.data.ptr = NULL;
if (epoll_ctl(fd, EPOLL_CTL_ADD, evfd, &ev) != 0) {
if (errno != EINVAL) {
if (!logged_why_not) {

@ -145,7 +145,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
return GRPC_ERROR_NONE;
}
grpc_error *grpc_pollset_kick(grpc_pollset *pollset,
grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) {
GRPC_UV_ASSERT_SAME_THREAD();
uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0);

@ -22,6 +22,7 @@
#include <stdint.h>
#include <string.h>
#include <grpc/slice_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>

@ -85,8 +85,8 @@ static const grpc_arg_pointer_vtable socket_factory_arg_vtable = {
socket_factory_arg_copy, socket_factory_arg_destroy, socket_factory_cmp};
grpc_arg grpc_socket_factory_to_arg(grpc_socket_factory *factory) {
return grpc_channel_arg_pointer_create(GRPC_ARG_SOCKET_FACTORY, factory,
&socket_factory_arg_vtable);
return grpc_channel_arg_pointer_create((char *)GRPC_ARG_SOCKET_FACTORY,
factory, &socket_factory_arg_vtable);
}
#endif

@ -76,6 +76,6 @@ static const grpc_arg_pointer_vtable socket_mutator_arg_vtable = {
socket_mutator_arg_copy, socket_mutator_arg_destroy, socket_mutator_cmp};
grpc_arg grpc_socket_mutator_to_arg(grpc_socket_mutator *mutator) {
return grpc_channel_arg_pointer_create(GRPC_ARG_SOCKET_MUTATOR, mutator,
&socket_mutator_arg_vtable);
return grpc_channel_arg_pointer_create((char *)GRPC_ARG_SOCKET_MUTATOR,
mutator, &socket_mutator_arg_vtable);
}

@ -26,12 +26,8 @@
#include <grpc/support/log.h>
const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size) {
#ifdef GPR_WIN_INET_NTOP
return inet_ntop(af, src, dst, size);
#else
/* Windows InetNtopA wants a mutable ip pointer */
return InetNtopA(af, (void *)src, dst, size);
#endif /* GPR_WIN_INET_NTOP */
}
#endif /* GRPC_WINDOWS_SOCKETUTILS */

@ -198,12 +198,12 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
grpc_tcp_listener *sp = (grpc_tcp_listener *)arg;
grpc_pollset *read_notifier_pollset;
if (err != GRPC_ERROR_NONE) {
goto error;
}
grpc_pollset *read_notifier_pollset =
read_notifier_pollset =
sp->server->pollsets[(size_t)gpr_atm_no_barrier_fetch_add(
&sp->server->next_pollset_to_assign, 1) %
sp->server->pollset_count];

@ -79,6 +79,125 @@ static timer_shard g_shards[NUM_SHARDS];
* Access to this is protected by g_shared_mutables.mu */
static timer_shard *g_shard_queue[NUM_SHARDS];
#ifndef NDEBUG
/* == Hash table for duplicate timer detection == */
#define NUM_HASH_BUCKETS 1009 /* Prime number close to 1000 */
static gpr_mu g_hash_mu[NUM_HASH_BUCKETS]; /* One mutex per bucket */
static grpc_timer *g_timer_ht[NUM_HASH_BUCKETS] = {NULL};
static void init_timer_ht() {
for (int i = 0; i < NUM_HASH_BUCKETS; i++) {
gpr_mu_init(&g_hash_mu[i]);
}
}
static bool is_in_ht(grpc_timer *t) {
size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS);
gpr_mu_lock(&g_hash_mu[i]);
grpc_timer *p = g_timer_ht[i];
while (p != NULL && p != t) {
p = p->hash_table_next;
}
gpr_mu_unlock(&g_hash_mu[i]);
return (p == t);
}
static void add_to_ht(grpc_timer *t) {
GPR_ASSERT(!t->hash_table_next);
size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS);
gpr_mu_lock(&g_hash_mu[i]);
grpc_timer *p = g_timer_ht[i];
while (p != NULL && p != t) {
p = p->hash_table_next;
}
if (p == t) {
grpc_closure *c = t->closure;
gpr_log(GPR_ERROR,
"** Duplicate timer (%p) being added. Closure: (%p), created at: "
"(%s:%d), scheduled at: (%s:%d) **",
t, c, c->file_created, c->line_created, c->file_initiated,
c->line_initiated);
abort();
}
/* Timer not present in the bucket. Insert at head of the list */
t->hash_table_next = g_timer_ht[i];
g_timer_ht[i] = t;
gpr_mu_unlock(&g_hash_mu[i]);
}
static void remove_from_ht(grpc_timer *t) {
size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS);
bool removed = false;
gpr_mu_lock(&g_hash_mu[i]);
if (g_timer_ht[i] == t) {
g_timer_ht[i] = g_timer_ht[i]->hash_table_next;
removed = true;
} else if (g_timer_ht[i] != NULL) {
grpc_timer *p = g_timer_ht[i];
while (p->hash_table_next != NULL && p->hash_table_next != t) {
p = p->hash_table_next;
}
if (p->hash_table_next == t) {
p->hash_table_next = t->hash_table_next;
removed = true;
}
}
gpr_mu_unlock(&g_hash_mu[i]);
if (!removed) {
grpc_closure *c = t->closure;
gpr_log(GPR_ERROR,
"** Removing timer (%p) that is not added to hash table. Closure "
"(%p), created at: (%s:%d), scheduled at: (%s:%d) **",
t, c, c->file_created, c->line_created, c->file_initiated,
c->line_initiated);
abort();
}
t->hash_table_next = NULL;
}
/* If a timer is added to a timer shard (either heap or a list), it must
* be pending. A timer is added to the hash table only if it is added to the
* timer shard.
* Therefore, if timer->pending is false, it cannot be in the hash table */
static void validate_non_pending_timer(grpc_timer *t) {
if (!t->pending && is_in_ht(t)) {
grpc_closure *c = t->closure;
gpr_log(GPR_ERROR,
"** gpr_timer_cancel() called on a non-pending timer (%p) which "
"is in the hash table. Closure: (%p), created at: (%s:%d), "
"scheduled at: (%s:%d) **",
t, c, c->file_created, c->line_created, c->file_initiated,
c->line_initiated);
abort();
}
}
#define INIT_TIMER_HASH_TABLE() init_timer_ht()
#define ADD_TO_HASH_TABLE(t) add_to_ht((t))
#define REMOVE_FROM_HASH_TABLE(t) remove_from_ht((t))
#define VALIDATE_NON_PENDING_TIMER(t) validate_non_pending_timer((t))
#else
#define INIT_TIMER_HASH_TABLE()
#define ADD_TO_HASH_TABLE(t)
#define REMOVE_FROM_HASH_TABLE(t)
#define VALIDATE_NON_PENDING_TIMER(t)
#endif
/* Thread local variable that stores the deadline of the next timer the thread
* has last-seen. This is an optimization to prevent the thread from checking
* shared_mutables.min_timer (which requires acquiring shared_mutables.mu lock,
@ -95,9 +214,7 @@ struct shared_mutables {
gpr_mu mu;
} GPR_ALIGN_STRUCT(GPR_CACHELINE_SIZE);
static struct shared_mutables g_shared_mutables = {
.checker_mu = GPR_SPINLOCK_STATIC_INITIALIZER, .initialized = false,
};
static struct shared_mutables g_shared_mutables;
static gpr_clock_type g_clock_type;
static gpr_timespec g_start_time;
@ -155,6 +272,7 @@ void grpc_timer_list_init(gpr_timespec now) {
uint32_t i;
g_shared_mutables.initialized = true;
g_shared_mutables.checker_mu = GPR_SPINLOCK_INITIALIZER;
gpr_mu_init(&g_shared_mutables.mu);
g_clock_type = now.clock_type;
g_start_time = now;
@ -176,6 +294,8 @@ void grpc_timer_list_init(gpr_timespec now) {
shard->min_deadline = compute_min_deadline(shard);
g_shard_queue[i] = shard;
}
INIT_TIMER_HASH_TABLE();
}
void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {
@ -246,6 +366,10 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
timer->closure = closure;
gpr_atm deadline_atm = timer->deadline = timespec_to_atm_round_up(deadline);
#ifndef NDEBUG
timer->hash_table_next = NULL;
#endif
if (GRPC_TRACER_ON(grpc_timer_trace)) {
gpr_log(GPR_DEBUG, "TIMER %p: SET %" PRId64 ".%09d [%" PRIdPTR
"] now %" PRId64 ".%09d [%" PRIdPTR "] call %p[%p]",
@ -273,6 +397,9 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
grpc_time_averaged_stats_add_sample(&shard->stats,
ts_to_dbl(gpr_time_sub(deadline, now)));
ADD_TO_HASH_TABLE(timer);
if (deadline_atm < shard->queue_deadline_cap) {
is_first_timer = grpc_timer_heap_add(&shard->heap, timer);
} else {
@ -334,7 +461,10 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
gpr_log(GPR_DEBUG, "TIMER %p: CANCEL pending=%s", timer,
timer->pending ? "true" : "false");
}
if (timer->pending) {
REMOVE_FROM_HASH_TABLE(timer);
GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
timer->pending = false;
if (timer->heap_index == INVALID_HEAP_INDEX) {
@ -342,6 +472,8 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
} else {
grpc_timer_heap_remove(&shard->heap, timer);
}
} else {
VALIDATE_NON_PENDING_TIMER(timer);
}
gpr_mu_unlock(&shard->mu);
}
@ -425,6 +557,7 @@ static size_t pop_timers(grpc_exec_ctx *exec_ctx, timer_shard *shard,
grpc_timer *timer;
gpr_mu_lock(&shard->mu);
while ((timer = pop_one(shard, now))) {
REMOVE_FROM_HASH_TABLE(timer);
GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_REF(error));
n++;
}

@ -29,6 +29,9 @@ struct grpc_timer {
struct grpc_timer *next;
struct grpc_timer *prev;
grpc_closure *closure;
#ifndef NDEBUG
struct grpc_timer *hash_table_next;
#endif
};
#endif /* GRPC_CORE_LIB_IOMGR_TIMER_GENERIC_H */

@ -87,6 +87,7 @@ static bool composite_call_get_request_metadata(
ctx->on_request_metadata = on_request_metadata;
GRPC_CLOSURE_INIT(&ctx->internal_on_request_metadata,
composite_call_metadata_cb, ctx, grpc_schedule_on_exec_ctx);
bool synchronous = true;
while (ctx->creds_index < ctx->composite_creds->inner.num_creds) {
grpc_call_credentials *inner_creds =
ctx->composite_creds->inner.creds_array[ctx->creds_index++];
@ -95,19 +96,12 @@ static bool composite_call_get_request_metadata(
ctx->md_array, &ctx->internal_on_request_metadata, error)) {
if (*error != GRPC_ERROR_NONE) break;
} else {
synchronous = false; // Async return.
break;
}
}
// If we got through all creds synchronously or we got a synchronous
// error on one of them, return synchronously.
if (ctx->creds_index == ctx->composite_creds->inner.num_creds ||
*error != GRPC_ERROR_NONE) {
gpr_free(ctx);
return true;
}
// At least one inner cred is returning asynchronously, so we'll
// return asynchronously as well.
return false;
if (synchronous) gpr_free(ctx);
return synchronous;
}
static void composite_call_cancel_get_request_metadata(

@ -31,6 +31,9 @@
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/validate_metadata.h"
grpc_tracer_flag grpc_plugin_credentials_trace =
GRPC_TRACER_INITIALIZER(false, "plugin_credentials");
static void plugin_destruct(grpc_exec_ctx *exec_ctx,
grpc_call_credentials *creds) {
grpc_plugin_credentials *c = (grpc_plugin_credentials *)creds;
@ -53,6 +56,62 @@ static void pending_request_remove_locked(
}
}
// Checks if the request has been cancelled.
// If not, removes it from the pending list, so that it cannot be
// cancelled out from under us.
// When this returns, r->cancelled indicates whether the request was
// cancelled before completion.
static void pending_request_complete(
grpc_exec_ctx *exec_ctx, grpc_plugin_credentials_pending_request *r) {
gpr_mu_lock(&r->creds->mu);
if (!r->cancelled) pending_request_remove_locked(r->creds, r);
gpr_mu_unlock(&r->creds->mu);
// Ref to credentials not needed anymore.
grpc_call_credentials_unref(exec_ctx, &r->creds->base);
}
static grpc_error *process_plugin_result(
grpc_exec_ctx *exec_ctx, grpc_plugin_credentials_pending_request *r,
const grpc_metadata *md, size_t num_md, grpc_status_code status,
const char *error_details) {
grpc_error *error = GRPC_ERROR_NONE;
if (status != GRPC_STATUS_OK) {
char *msg;
gpr_asprintf(&msg, "Getting metadata from plugin failed with error: %s",
error_details);
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
} else {
bool seen_illegal_header = false;
for (size_t i = 0; i < num_md; ++i) {
if (!GRPC_LOG_IF_ERROR("validate_metadata_from_plugin",
grpc_validate_header_key_is_legal(md[i].key))) {
seen_illegal_header = true;
break;
} else if (!grpc_is_binary_header(md[i].key) &&
!GRPC_LOG_IF_ERROR(
"validate_metadata_from_plugin",
grpc_validate_header_nonbin_value_is_legal(md[i].value))) {
gpr_log(GPR_ERROR, "Plugin added invalid metadata value.");
seen_illegal_header = true;
break;
}
}
if (seen_illegal_header) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal metadata");
} else {
for (size_t i = 0; i < num_md; ++i) {
grpc_mdelem mdelem = grpc_mdelem_from_slices(
exec_ctx, grpc_slice_ref_internal(md[i].key),
grpc_slice_ref_internal(md[i].value));
grpc_credentials_mdelem_array_add(r->md_array, mdelem);
GRPC_MDELEM_UNREF(exec_ctx, mdelem);
}
}
}
return error;
}
static void plugin_md_request_metadata_ready(void *request,
const grpc_metadata *md,
size_t num_md,
@ -64,54 +123,24 @@ static void plugin_md_request_metadata_ready(void *request,
NULL, NULL);
grpc_plugin_credentials_pending_request *r =
(grpc_plugin_credentials_pending_request *)request;
// Check if the request has been cancelled.
// If not, remove it from the pending list, so that it cannot be
// cancelled out from under us.
gpr_mu_lock(&r->creds->mu);
if (!r->cancelled) pending_request_remove_locked(r->creds, r);
gpr_mu_unlock(&r->creds->mu);
grpc_call_credentials_unref(&exec_ctx, &r->creds->base);
if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) {
gpr_log(GPR_INFO,
"plugin_credentials[%p]: request %p: plugin returned "
"asynchronously",
r->creds, r);
}
// Remove request from pending list if not previously cancelled.
pending_request_complete(&exec_ctx, r);
// If it has not been cancelled, process it.
if (!r->cancelled) {
if (status != GRPC_STATUS_OK) {
char *msg;
gpr_asprintf(&msg, "Getting metadata from plugin failed with error: %s",
error_details);
GRPC_CLOSURE_SCHED(&exec_ctx, r->on_request_metadata,
GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg));
gpr_free(msg);
} else {
bool seen_illegal_header = false;
for (size_t i = 0; i < num_md; ++i) {
if (!GRPC_LOG_IF_ERROR("validate_metadata_from_plugin",
grpc_validate_header_key_is_legal(md[i].key))) {
seen_illegal_header = true;
break;
} else if (!grpc_is_binary_header(md[i].key) &&
!GRPC_LOG_IF_ERROR(
"validate_metadata_from_plugin",
grpc_validate_header_nonbin_value_is_legal(
md[i].value))) {
gpr_log(GPR_ERROR, "Plugin added invalid metadata value.");
seen_illegal_header = true;
break;
}
}
if (seen_illegal_header) {
GRPC_CLOSURE_SCHED(
&exec_ctx, r->on_request_metadata,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal metadata"));
} else {
for (size_t i = 0; i < num_md; ++i) {
grpc_mdelem mdelem = grpc_mdelem_from_slices(
&exec_ctx, grpc_slice_ref_internal(md[i].key),
grpc_slice_ref_internal(md[i].value));
grpc_credentials_mdelem_array_add(r->md_array, mdelem);
GRPC_MDELEM_UNREF(&exec_ctx, mdelem);
}
GRPC_CLOSURE_SCHED(&exec_ctx, r->on_request_metadata, GRPC_ERROR_NONE);
}
}
grpc_error *error =
process_plugin_result(&exec_ctx, r, md, num_md, status, error_details);
GRPC_CLOSURE_SCHED(&exec_ctx, r->on_request_metadata, error);
} else if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) {
gpr_log(GPR_INFO,
"plugin_credentials[%p]: request %p: plugin was previously "
"cancelled",
r->creds, r);
}
gpr_free(r);
grpc_exec_ctx_finish(&exec_ctx);
@ -125,6 +154,7 @@ static bool plugin_get_request_metadata(grpc_exec_ctx *exec_ctx,
grpc_closure *on_request_metadata,
grpc_error **error) {
grpc_plugin_credentials *c = (grpc_plugin_credentials *)creds;
bool retval = true; // Synchronous return.
if (c->plugin.get_metadata != NULL) {
// Create pending_request object.
grpc_plugin_credentials_pending_request *pending_request =
@ -142,12 +172,60 @@ static bool plugin_get_request_metadata(grpc_exec_ctx *exec_ctx,
c->pending_requests = pending_request;
gpr_mu_unlock(&c->mu);
// Invoke the plugin. The callback holds a ref to us.
if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) {
gpr_log(GPR_INFO, "plugin_credentials[%p]: request %p: invoking plugin",
c, pending_request);
}
grpc_call_credentials_ref(creds);
c->plugin.get_metadata(c->plugin.state, context,
plugin_md_request_metadata_ready, pending_request);
return false;
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX];
size_t num_creds_md = 0;
grpc_status_code status = GRPC_STATUS_OK;
const char *error_details = NULL;
if (!c->plugin.get_metadata(c->plugin.state, context,
plugin_md_request_metadata_ready,
pending_request, creds_md, &num_creds_md,
&status, &error_details)) {
if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) {
gpr_log(GPR_INFO,
"plugin_credentials[%p]: request %p: plugin will return "
"asynchronously",
c, pending_request);
}
return false; // Asynchronous return.
}
// Returned synchronously.
// Remove request from pending list if not previously cancelled.
pending_request_complete(exec_ctx, pending_request);
// If the request was cancelled, the error will have been returned
// asynchronously by plugin_cancel_get_request_metadata(), so return
// false. Otherwise, process the result.
if (pending_request->cancelled) {
if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) {
gpr_log(GPR_INFO,
"plugin_credentials[%p]: request %p was cancelled, error "
"will be returned asynchronously",
c, pending_request);
}
retval = false;
} else {
if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) {
gpr_log(GPR_INFO,
"plugin_credentials[%p]: request %p: plugin returned "
"synchronously",
c, pending_request);
}
*error = process_plugin_result(exec_ctx, pending_request, creds_md,
num_creds_md, status, error_details);
}
// Clean up.
for (size_t i = 0; i < num_creds_md; ++i) {
grpc_slice_unref_internal(exec_ctx, creds_md[i].key);
grpc_slice_unref_internal(exec_ctx, creds_md[i].value);
}
gpr_free((void *)error_details);
gpr_free(pending_request);
}
return true;
return retval;
}
static void plugin_cancel_get_request_metadata(
@ -159,6 +237,10 @@ static void plugin_cancel_get_request_metadata(
c->pending_requests;
pending_request != NULL; pending_request = pending_request->next) {
if (pending_request->md_array == md_array) {
if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) {
gpr_log(GPR_INFO, "plugin_credentials[%p]: cancelling request %p", c,
pending_request);
}
pending_request->cancelled = true;
GRPC_CLOSURE_SCHED(exec_ctx, pending_request->on_request_metadata,
GRPC_ERROR_REF(error));
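With the new synchronous path above, the core hands the plugin a caller-owned creds_md array and treats a non-zero return from get_metadata as "handled inline", skipping the asynchronous callback. A sketch of a plugin using that path, assuming the updated get_metadata signature in <grpc/grpc_security.h> that this commit also touches; the header key, token value and unused state are purely illustrative:

#include <grpc/grpc_security.h>
#include <grpc/slice.h>

static int my_get_metadata(
    void *state, grpc_auth_metadata_context context,
    grpc_credentials_plugin_metadata_cb cb, void *user_data,
    grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
    size_t *num_creds_md, grpc_status_code *status,
    const char **error_details) {
  (void)state;
  (void)context;
  (void)cb;        /* only used on the asynchronous path */
  (void)user_data;
  /* Fill the caller-provided array and report success synchronously. */
  creds_md[0].key = grpc_slice_from_static_string("authorization");
  creds_md[0].value = grpc_slice_from_static_string("Bearer example-token");
  *num_creds_md = 1;
  *status = GRPC_STATUS_OK;
  *error_details = NULL;
  return 1; /* non-zero: metadata produced synchronously */
}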

@ -21,6 +21,8 @@
#include "src/core/lib/security/credentials/credentials.h"
extern grpc_tracer_flag grpc_plugin_credentials_trace;
struct grpc_plugin_credentials;
typedef struct grpc_plugin_credentials_pending_request {

@ -1674,6 +1674,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
batch_control *bctl;
int num_completion_callbacks_needed = 1;
grpc_call_error error = GRPC_CALL_OK;
grpc_transport_stream_op_batch *stream_op;
grpc_transport_stream_op_batch_payload *stream_op_payload;
GPR_TIMER_BEGIN("grpc_call_start_batch", 0);
GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, notify_tag);
@ -1700,9 +1702,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
bctl->completion_data.notify_tag.is_closure =
(uint8_t)(is_notify_tag_closure != 0);
grpc_transport_stream_op_batch *stream_op = &bctl->op;
grpc_transport_stream_op_batch_payload *stream_op_payload =
&call->stream_op_payload;
stream_op = &bctl->op;
stream_op_payload = &call->stream_op_payload;
/* rewrite batch ops into a transport op */
for (i = 0; i < nops; i++) {
@ -1712,7 +1713,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
goto done_with_error;
}
switch (op->op) {
case GRPC_OP_SEND_INITIAL_METADATA:
case GRPC_OP_SEND_INITIAL_METADATA: {
/* Flag validation: currently allow no flags */
if (!are_initial_metadata_flags_valid(op->flags, call->is_client)) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@ -1806,7 +1807,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
&call->peer_string;
}
break;
case GRPC_OP_SEND_MESSAGE:
}
case GRPC_OP_SEND_MESSAGE: {
if (!are_write_flags_valid(op->flags)) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
goto done_with_error;
@ -1835,7 +1837,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->send_message.send_message =
&call->sending_stream.base;
break;
case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
}
case GRPC_OP_SEND_CLOSE_FROM_CLIENT: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@ -1854,7 +1857,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->send_trailing_metadata.send_trailing_metadata =
&call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */];
break;
case GRPC_OP_SEND_STATUS_FROM_SERVER:
}
case GRPC_OP_SEND_STATUS_FROM_SERVER: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@ -1916,7 +1920,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->send_trailing_metadata.send_trailing_metadata =
&call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */];
break;
case GRPC_OP_RECV_INITIAL_METADATA:
}
case GRPC_OP_RECV_INITIAL_METADATA: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@ -1943,7 +1948,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
}
num_completion_callbacks_needed++;
break;
case GRPC_OP_RECV_MESSAGE:
}
case GRPC_OP_RECV_MESSAGE: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@ -1964,7 +1970,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
&call->receiving_stream_ready;
num_completion_callbacks_needed++;
break;
case GRPC_OP_RECV_STATUS_ON_CLIENT:
}
case GRPC_OP_RECV_STATUS_ON_CLIENT: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@ -1991,7 +1998,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->collect_stats.collect_stats =
&call->final_info.stats.transport_stream_stats;
break;
case GRPC_OP_RECV_CLOSE_ON_SERVER:
}
case GRPC_OP_RECV_CLOSE_ON_SERVER: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@ -2015,6 +2023,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->collect_stats.collect_stats =
&call->final_info.stats.transport_stream_stats;
break;
}
}
}

@ -329,25 +329,12 @@ static void cq_destroy_pluck(void *data);
/* Completion queue vtables based on the completion-type */
static const cq_vtable g_cq_vtable[] = {
/* GRPC_CQ_NEXT */
{.data_size = sizeof(cq_next_data),
.cq_completion_type = GRPC_CQ_NEXT,
.init = cq_init_next,
.shutdown = cq_shutdown_next,
.destroy = cq_destroy_next,
.begin_op = cq_begin_op_for_next,
.end_op = cq_end_op_for_next,
.next = cq_next,
.pluck = NULL},
{GRPC_CQ_NEXT, sizeof(cq_next_data), cq_init_next, cq_shutdown_next,
cq_destroy_next, cq_begin_op_for_next, cq_end_op_for_next, cq_next, NULL},
/* GRPC_CQ_PLUCK */
{.data_size = sizeof(cq_pluck_data),
.cq_completion_type = GRPC_CQ_PLUCK,
.init = cq_init_pluck,
.shutdown = cq_shutdown_pluck,
.destroy = cq_destroy_pluck,
.begin_op = cq_begin_op_for_pluck,
.end_op = cq_end_op_for_pluck,
.next = NULL,
.pluck = cq_pluck},
{GRPC_CQ_PLUCK, sizeof(cq_pluck_data), cq_init_pluck, cq_shutdown_pluck,
cq_destroy_pluck, cq_begin_op_for_pluck, cq_end_op_for_pluck, NULL,
cq_pluck},
};
#define DATA_FROM_CQ(cq) ((void *)(cq + 1))

@ -25,6 +25,7 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/credentials/plugin/plugin_credentials.h"
#include "src/core/lib/security/transport/auth_filters.h"
#include "src/core/lib/security/transport/secure_endpoint.h"
#include "src/core/lib/security/transport/security_connector.h"
@ -84,4 +85,7 @@ void grpc_register_security_filters(void) {
maybe_prepend_server_auth_filter, NULL);
}
void grpc_security_init() { grpc_security_register_handshaker_factories(); }
void grpc_security_init() {
grpc_security_register_handshaker_factories();
grpc_register_tracer(&grpc_plugin_credentials_trace);
}

@ -216,206 +216,106 @@ grpc_slice_refcount grpc_static_metadata_refcounts[GRPC_STATIC_MDSTR_COUNT] = {
};
const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT] = {
{.refcount = &grpc_static_metadata_refcounts[0],
.data.refcounted = {g_bytes + 0, 5}},
{.refcount = &grpc_static_metadata_refcounts[1],
.data.refcounted = {g_bytes + 5, 7}},
{.refcount = &grpc_static_metadata_refcounts[2],
.data.refcounted = {g_bytes + 12, 7}},
{.refcount = &grpc_static_metadata_refcounts[3],
.data.refcounted = {g_bytes + 19, 10}},
{.refcount = &grpc_static_metadata_refcounts[4],
.data.refcounted = {g_bytes + 29, 7}},
{.refcount = &grpc_static_metadata_refcounts[5],
.data.refcounted = {g_bytes + 36, 2}},
{.refcount = &grpc_static_metadata_refcounts[6],
.data.refcounted = {g_bytes + 38, 12}},
{.refcount = &grpc_static_metadata_refcounts[7],
.data.refcounted = {g_bytes + 50, 11}},
{.refcount = &grpc_static_metadata_refcounts[8],
.data.refcounted = {g_bytes + 61, 16}},
{.refcount = &grpc_static_metadata_refcounts[9],
.data.refcounted = {g_bytes + 77, 13}},
{.refcount = &grpc_static_metadata_refcounts[10],
.data.refcounted = {g_bytes + 90, 20}},
{.refcount = &grpc_static_metadata_refcounts[11],
.data.refcounted = {g_bytes + 110, 21}},
{.refcount = &grpc_static_metadata_refcounts[12],
.data.refcounted = {g_bytes + 131, 13}},
{.refcount = &grpc_static_metadata_refcounts[13],
.data.refcounted = {g_bytes + 144, 14}},
{.refcount = &grpc_static_metadata_refcounts[14],
.data.refcounted = {g_bytes + 158, 12}},
{.refcount = &grpc_static_metadata_refcounts[15],
.data.refcounted = {g_bytes + 170, 16}},
{.refcount = &grpc_static_metadata_refcounts[16],
.data.refcounted = {g_bytes + 186, 15}},
{.refcount = &grpc_static_metadata_refcounts[17],
.data.refcounted = {g_bytes + 201, 30}},
{.refcount = &grpc_static_metadata_refcounts[18],
.data.refcounted = {g_bytes + 231, 37}},
{.refcount = &grpc_static_metadata_refcounts[19],
.data.refcounted = {g_bytes + 268, 10}},
{.refcount = &grpc_static_metadata_refcounts[20],
.data.refcounted = {g_bytes + 278, 4}},
{.refcount = &grpc_static_metadata_refcounts[21],
.data.refcounted = {g_bytes + 282, 8}},
{.refcount = &grpc_static_metadata_refcounts[22],
.data.refcounted = {g_bytes + 290, 12}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}},
{.refcount = &grpc_static_metadata_refcounts[24],
.data.refcounted = {g_bytes + 302, 19}},
{.refcount = &grpc_static_metadata_refcounts[25],
.data.refcounted = {g_bytes + 321, 12}},
{.refcount = &grpc_static_metadata_refcounts[26],
.data.refcounted = {g_bytes + 333, 30}},
{.refcount = &grpc_static_metadata_refcounts[27],
.data.refcounted = {g_bytes + 363, 31}},
{.refcount = &grpc_static_metadata_refcounts[28],
.data.refcounted = {g_bytes + 394, 36}},
{.refcount = &grpc_static_metadata_refcounts[29],
.data.refcounted = {g_bytes + 430, 1}},
{.refcount = &grpc_static_metadata_refcounts[30],
.data.refcounted = {g_bytes + 431, 1}},
{.refcount = &grpc_static_metadata_refcounts[31],
.data.refcounted = {g_bytes + 432, 1}},
{.refcount = &grpc_static_metadata_refcounts[32],
.data.refcounted = {g_bytes + 433, 8}},
{.refcount = &grpc_static_metadata_refcounts[33],
.data.refcounted = {g_bytes + 441, 4}},
{.refcount = &grpc_static_metadata_refcounts[34],
.data.refcounted = {g_bytes + 445, 7}},
{.refcount = &grpc_static_metadata_refcounts[35],
.data.refcounted = {g_bytes + 452, 8}},
{.refcount = &grpc_static_metadata_refcounts[36],
.data.refcounted = {g_bytes + 460, 16}},
{.refcount = &grpc_static_metadata_refcounts[37],
.data.refcounted = {g_bytes + 476, 4}},
{.refcount = &grpc_static_metadata_refcounts[38],
.data.refcounted = {g_bytes + 480, 3}},
{.refcount = &grpc_static_metadata_refcounts[39],
.data.refcounted = {g_bytes + 483, 3}},
{.refcount = &grpc_static_metadata_refcounts[40],
.data.refcounted = {g_bytes + 486, 4}},
{.refcount = &grpc_static_metadata_refcounts[41],
.data.refcounted = {g_bytes + 490, 5}},
{.refcount = &grpc_static_metadata_refcounts[42],
.data.refcounted = {g_bytes + 495, 4}},
{.refcount = &grpc_static_metadata_refcounts[43],
.data.refcounted = {g_bytes + 499, 3}},
{.refcount = &grpc_static_metadata_refcounts[44],
.data.refcounted = {g_bytes + 502, 3}},
{.refcount = &grpc_static_metadata_refcounts[45],
.data.refcounted = {g_bytes + 505, 1}},
{.refcount = &grpc_static_metadata_refcounts[46],
.data.refcounted = {g_bytes + 506, 11}},
{.refcount = &grpc_static_metadata_refcounts[47],
.data.refcounted = {g_bytes + 517, 3}},
{.refcount = &grpc_static_metadata_refcounts[48],
.data.refcounted = {g_bytes + 520, 3}},
{.refcount = &grpc_static_metadata_refcounts[49],
.data.refcounted = {g_bytes + 523, 3}},
{.refcount = &grpc_static_metadata_refcounts[50],
.data.refcounted = {g_bytes + 526, 3}},
{.refcount = &grpc_static_metadata_refcounts[51],
.data.refcounted = {g_bytes + 529, 3}},
{.refcount = &grpc_static_metadata_refcounts[52],
.data.refcounted = {g_bytes + 532, 14}},
{.refcount = &grpc_static_metadata_refcounts[53],
.data.refcounted = {g_bytes + 546, 13}},
{.refcount = &grpc_static_metadata_refcounts[54],
.data.refcounted = {g_bytes + 559, 15}},
{.refcount = &grpc_static_metadata_refcounts[55],
.data.refcounted = {g_bytes + 574, 13}},
{.refcount = &grpc_static_metadata_refcounts[56],
.data.refcounted = {g_bytes + 587, 6}},
{.refcount = &grpc_static_metadata_refcounts[57],
.data.refcounted = {g_bytes + 593, 27}},
{.refcount = &grpc_static_metadata_refcounts[58],
.data.refcounted = {g_bytes + 620, 3}},
{.refcount = &grpc_static_metadata_refcounts[59],
.data.refcounted = {g_bytes + 623, 5}},
{.refcount = &grpc_static_metadata_refcounts[60],
.data.refcounted = {g_bytes + 628, 13}},
{.refcount = &grpc_static_metadata_refcounts[61],
.data.refcounted = {g_bytes + 641, 13}},
{.refcount = &grpc_static_metadata_refcounts[62],
.data.refcounted = {g_bytes + 654, 19}},
{.refcount = &grpc_static_metadata_refcounts[63],
.data.refcounted = {g_bytes + 673, 16}},
{.refcount = &grpc_static_metadata_refcounts[64],
.data.refcounted = {g_bytes + 689, 14}},
{.refcount = &grpc_static_metadata_refcounts[65],
.data.refcounted = {g_bytes + 703, 16}},
{.refcount = &grpc_static_metadata_refcounts[66],
.data.refcounted = {g_bytes + 719, 13}},
{.refcount = &grpc_static_metadata_refcounts[67],
.data.refcounted = {g_bytes + 732, 6}},
{.refcount = &grpc_static_metadata_refcounts[68],
.data.refcounted = {g_bytes + 738, 4}},
{.refcount = &grpc_static_metadata_refcounts[69],
.data.refcounted = {g_bytes + 742, 4}},
{.refcount = &grpc_static_metadata_refcounts[70],
.data.refcounted = {g_bytes + 746, 6}},
{.refcount = &grpc_static_metadata_refcounts[71],
.data.refcounted = {g_bytes + 752, 7}},
{.refcount = &grpc_static_metadata_refcounts[72],
.data.refcounted = {g_bytes + 759, 4}},
{.refcount = &grpc_static_metadata_refcounts[73],
.data.refcounted = {g_bytes + 763, 8}},
{.refcount = &grpc_static_metadata_refcounts[74],
.data.refcounted = {g_bytes + 771, 17}},
{.refcount = &grpc_static_metadata_refcounts[75],
.data.refcounted = {g_bytes + 788, 13}},
{.refcount = &grpc_static_metadata_refcounts[76],
.data.refcounted = {g_bytes + 801, 8}},
{.refcount = &grpc_static_metadata_refcounts[77],
.data.refcounted = {g_bytes + 809, 19}},
{.refcount = &grpc_static_metadata_refcounts[78],
.data.refcounted = {g_bytes + 828, 13}},
{.refcount = &grpc_static_metadata_refcounts[79],
.data.refcounted = {g_bytes + 841, 11}},
{.refcount = &grpc_static_metadata_refcounts[80],
.data.refcounted = {g_bytes + 852, 4}},
{.refcount = &grpc_static_metadata_refcounts[81],
.data.refcounted = {g_bytes + 856, 8}},
{.refcount = &grpc_static_metadata_refcounts[82],
.data.refcounted = {g_bytes + 864, 12}},
{.refcount = &grpc_static_metadata_refcounts[83],
.data.refcounted = {g_bytes + 876, 18}},
{.refcount = &grpc_static_metadata_refcounts[84],
.data.refcounted = {g_bytes + 894, 19}},
{.refcount = &grpc_static_metadata_refcounts[85],
.data.refcounted = {g_bytes + 913, 5}},
{.refcount = &grpc_static_metadata_refcounts[86],
.data.refcounted = {g_bytes + 918, 7}},
{.refcount = &grpc_static_metadata_refcounts[87],
.data.refcounted = {g_bytes + 925, 7}},
{.refcount = &grpc_static_metadata_refcounts[88],
.data.refcounted = {g_bytes + 932, 11}},
{.refcount = &grpc_static_metadata_refcounts[89],
.data.refcounted = {g_bytes + 943, 6}},
{.refcount = &grpc_static_metadata_refcounts[90],
.data.refcounted = {g_bytes + 949, 10}},
{.refcount = &grpc_static_metadata_refcounts[91],
.data.refcounted = {g_bytes + 959, 25}},
{.refcount = &grpc_static_metadata_refcounts[92],
.data.refcounted = {g_bytes + 984, 17}},
{.refcount = &grpc_static_metadata_refcounts[93],
.data.refcounted = {g_bytes + 1001, 4}},
{.refcount = &grpc_static_metadata_refcounts[94],
.data.refcounted = {g_bytes + 1005, 3}},
{.refcount = &grpc_static_metadata_refcounts[95],
.data.refcounted = {g_bytes + 1008, 16}},
{.refcount = &grpc_static_metadata_refcounts[96],
.data.refcounted = {g_bytes + 1024, 16}},
{.refcount = &grpc_static_metadata_refcounts[97],
.data.refcounted = {g_bytes + 1040, 13}},
{.refcount = &grpc_static_metadata_refcounts[98],
.data.refcounted = {g_bytes + 1053, 12}},
{.refcount = &grpc_static_metadata_refcounts[99],
.data.refcounted = {g_bytes + 1065, 21}},
{&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}},
{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
{&grpc_static_metadata_refcounts[3], {{g_bytes + 19, 10}}},
{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
{&grpc_static_metadata_refcounts[5], {{g_bytes + 36, 2}}},
{&grpc_static_metadata_refcounts[6], {{g_bytes + 38, 12}}},
{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
{&grpc_static_metadata_refcounts[8], {{g_bytes + 61, 16}}},
{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
{&grpc_static_metadata_refcounts[11], {{g_bytes + 110, 21}}},
{&grpc_static_metadata_refcounts[12], {{g_bytes + 131, 13}}},
{&grpc_static_metadata_refcounts[13], {{g_bytes + 144, 14}}},
{&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}},
{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
{&grpc_static_metadata_refcounts[17], {{g_bytes + 201, 30}}},
{&grpc_static_metadata_refcounts[18], {{g_bytes + 231, 37}}},
{&grpc_static_metadata_refcounts[19], {{g_bytes + 268, 10}}},
{&grpc_static_metadata_refcounts[20], {{g_bytes + 278, 4}}},
{&grpc_static_metadata_refcounts[21], {{g_bytes + 282, 8}}},
{&grpc_static_metadata_refcounts[22], {{g_bytes + 290, 12}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}},
{&grpc_static_metadata_refcounts[24], {{g_bytes + 302, 19}}},
{&grpc_static_metadata_refcounts[25], {{g_bytes + 321, 12}}},
{&grpc_static_metadata_refcounts[26], {{g_bytes + 333, 30}}},
{&grpc_static_metadata_refcounts[27], {{g_bytes + 363, 31}}},
{&grpc_static_metadata_refcounts[28], {{g_bytes + 394, 36}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 430, 1}}},
{&grpc_static_metadata_refcounts[30], {{g_bytes + 431, 1}}},
{&grpc_static_metadata_refcounts[31], {{g_bytes + 432, 1}}},
{&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}},
{&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}},
{&grpc_static_metadata_refcounts[34], {{g_bytes + 445, 7}}},
{&grpc_static_metadata_refcounts[35], {{g_bytes + 452, 8}}},
{&grpc_static_metadata_refcounts[36], {{g_bytes + 460, 16}}},
{&grpc_static_metadata_refcounts[37], {{g_bytes + 476, 4}}},
{&grpc_static_metadata_refcounts[38], {{g_bytes + 480, 3}}},
{&grpc_static_metadata_refcounts[39], {{g_bytes + 483, 3}}},
{&grpc_static_metadata_refcounts[40], {{g_bytes + 486, 4}}},
{&grpc_static_metadata_refcounts[41], {{g_bytes + 490, 5}}},
{&grpc_static_metadata_refcounts[42], {{g_bytes + 495, 4}}},
{&grpc_static_metadata_refcounts[43], {{g_bytes + 499, 3}}},
{&grpc_static_metadata_refcounts[44], {{g_bytes + 502, 3}}},
{&grpc_static_metadata_refcounts[45], {{g_bytes + 505, 1}}},
{&grpc_static_metadata_refcounts[46], {{g_bytes + 506, 11}}},
{&grpc_static_metadata_refcounts[47], {{g_bytes + 517, 3}}},
{&grpc_static_metadata_refcounts[48], {{g_bytes + 520, 3}}},
{&grpc_static_metadata_refcounts[49], {{g_bytes + 523, 3}}},
{&grpc_static_metadata_refcounts[50], {{g_bytes + 526, 3}}},
{&grpc_static_metadata_refcounts[51], {{g_bytes + 529, 3}}},
{&grpc_static_metadata_refcounts[52], {{g_bytes + 532, 14}}},
{&grpc_static_metadata_refcounts[53], {{g_bytes + 546, 13}}},
{&grpc_static_metadata_refcounts[54], {{g_bytes + 559, 15}}},
{&grpc_static_metadata_refcounts[55], {{g_bytes + 574, 13}}},
{&grpc_static_metadata_refcounts[56], {{g_bytes + 587, 6}}},
{&grpc_static_metadata_refcounts[57], {{g_bytes + 593, 27}}},
{&grpc_static_metadata_refcounts[58], {{g_bytes + 620, 3}}},
{&grpc_static_metadata_refcounts[59], {{g_bytes + 623, 5}}},
{&grpc_static_metadata_refcounts[60], {{g_bytes + 628, 13}}},
{&grpc_static_metadata_refcounts[61], {{g_bytes + 641, 13}}},
{&grpc_static_metadata_refcounts[62], {{g_bytes + 654, 19}}},
{&grpc_static_metadata_refcounts[63], {{g_bytes + 673, 16}}},
{&grpc_static_metadata_refcounts[64], {{g_bytes + 689, 14}}},
{&grpc_static_metadata_refcounts[65], {{g_bytes + 703, 16}}},
{&grpc_static_metadata_refcounts[66], {{g_bytes + 719, 13}}},
{&grpc_static_metadata_refcounts[67], {{g_bytes + 732, 6}}},
{&grpc_static_metadata_refcounts[68], {{g_bytes + 738, 4}}},
{&grpc_static_metadata_refcounts[69], {{g_bytes + 742, 4}}},
{&grpc_static_metadata_refcounts[70], {{g_bytes + 746, 6}}},
{&grpc_static_metadata_refcounts[71], {{g_bytes + 752, 7}}},
{&grpc_static_metadata_refcounts[72], {{g_bytes + 759, 4}}},
{&grpc_static_metadata_refcounts[73], {{g_bytes + 763, 8}}},
{&grpc_static_metadata_refcounts[74], {{g_bytes + 771, 17}}},
{&grpc_static_metadata_refcounts[75], {{g_bytes + 788, 13}}},
{&grpc_static_metadata_refcounts[76], {{g_bytes + 801, 8}}},
{&grpc_static_metadata_refcounts[77], {{g_bytes + 809, 19}}},
{&grpc_static_metadata_refcounts[78], {{g_bytes + 828, 13}}},
{&grpc_static_metadata_refcounts[79], {{g_bytes + 841, 11}}},
{&grpc_static_metadata_refcounts[80], {{g_bytes + 852, 4}}},
{&grpc_static_metadata_refcounts[81], {{g_bytes + 856, 8}}},
{&grpc_static_metadata_refcounts[82], {{g_bytes + 864, 12}}},
{&grpc_static_metadata_refcounts[83], {{g_bytes + 876, 18}}},
{&grpc_static_metadata_refcounts[84], {{g_bytes + 894, 19}}},
{&grpc_static_metadata_refcounts[85], {{g_bytes + 913, 5}}},
{&grpc_static_metadata_refcounts[86], {{g_bytes + 918, 7}}},
{&grpc_static_metadata_refcounts[87], {{g_bytes + 925, 7}}},
{&grpc_static_metadata_refcounts[88], {{g_bytes + 932, 11}}},
{&grpc_static_metadata_refcounts[89], {{g_bytes + 943, 6}}},
{&grpc_static_metadata_refcounts[90], {{g_bytes + 949, 10}}},
{&grpc_static_metadata_refcounts[91], {{g_bytes + 959, 25}}},
{&grpc_static_metadata_refcounts[92], {{g_bytes + 984, 17}}},
{&grpc_static_metadata_refcounts[93], {{g_bytes + 1001, 4}}},
{&grpc_static_metadata_refcounts[94], {{g_bytes + 1005, 3}}},
{&grpc_static_metadata_refcounts[95], {{g_bytes + 1008, 16}}},
{&grpc_static_metadata_refcounts[96], {{g_bytes + 1024, 16}}},
{&grpc_static_metadata_refcounts[97], {{g_bytes + 1040, 13}}},
{&grpc_static_metadata_refcounts[98], {{g_bytes + 1053, 12}}},
{&grpc_static_metadata_refcounts[99], {{g_bytes + 1065, 21}}},
};
uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {
@@ -478,350 +378,178 @@ grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b) {
}
grpc_mdelem_data grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = {
{{.refcount = &grpc_static_metadata_refcounts[7],
.data.refcounted = {g_bytes + 50, 11}},
{.refcount = &grpc_static_metadata_refcounts[29],
.data.refcounted = {g_bytes + 430, 1}}},
{{.refcount = &grpc_static_metadata_refcounts[7],
.data.refcounted = {g_bytes + 50, 11}},
{.refcount = &grpc_static_metadata_refcounts[30],
.data.refcounted = {g_bytes + 431, 1}}},
{{.refcount = &grpc_static_metadata_refcounts[7],
.data.refcounted = {g_bytes + 50, 11}},
{.refcount = &grpc_static_metadata_refcounts[31],
.data.refcounted = {g_bytes + 432, 1}}},
{{.refcount = &grpc_static_metadata_refcounts[9],
.data.refcounted = {g_bytes + 77, 13}},
{.refcount = &grpc_static_metadata_refcounts[32],
.data.refcounted = {g_bytes + 433, 8}}},
{{.refcount = &grpc_static_metadata_refcounts[9],
.data.refcounted = {g_bytes + 77, 13}},
{.refcount = &grpc_static_metadata_refcounts[33],
.data.refcounted = {g_bytes + 441, 4}}},
{{.refcount = &grpc_static_metadata_refcounts[9],
.data.refcounted = {g_bytes + 77, 13}},
{.refcount = &grpc_static_metadata_refcounts[34],
.data.refcounted = {g_bytes + 445, 7}}},
{{.refcount = &grpc_static_metadata_refcounts[5],
.data.refcounted = {g_bytes + 36, 2}},
{.refcount = &grpc_static_metadata_refcounts[35],
.data.refcounted = {g_bytes + 452, 8}}},
{{.refcount = &grpc_static_metadata_refcounts[14],
.data.refcounted = {g_bytes + 158, 12}},
{.refcount = &grpc_static_metadata_refcounts[36],
.data.refcounted = {g_bytes + 460, 16}}},
{{.refcount = &grpc_static_metadata_refcounts[1],
.data.refcounted = {g_bytes + 5, 7}},
{.refcount = &grpc_static_metadata_refcounts[37],
.data.refcounted = {g_bytes + 476, 4}}},
{{.refcount = &grpc_static_metadata_refcounts[2],
.data.refcounted = {g_bytes + 12, 7}},
{.refcount = &grpc_static_metadata_refcounts[38],
.data.refcounted = {g_bytes + 480, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[2],
.data.refcounted = {g_bytes + 12, 7}},
{.refcount = &grpc_static_metadata_refcounts[39],
.data.refcounted = {g_bytes + 483, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[4],
.data.refcounted = {g_bytes + 29, 7}},
{.refcount = &grpc_static_metadata_refcounts[40],
.data.refcounted = {g_bytes + 486, 4}}},
{{.refcount = &grpc_static_metadata_refcounts[4],
.data.refcounted = {g_bytes + 29, 7}},
{.refcount = &grpc_static_metadata_refcounts[41],
.data.refcounted = {g_bytes + 490, 5}}},
{{.refcount = &grpc_static_metadata_refcounts[4],
.data.refcounted = {g_bytes + 29, 7}},
{.refcount = &grpc_static_metadata_refcounts[42],
.data.refcounted = {g_bytes + 495, 4}}},
{{.refcount = &grpc_static_metadata_refcounts[3],
.data.refcounted = {g_bytes + 19, 10}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[1],
.data.refcounted = {g_bytes + 5, 7}},
{.refcount = &grpc_static_metadata_refcounts[43],
.data.refcounted = {g_bytes + 499, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[1],
.data.refcounted = {g_bytes + 5, 7}},
{.refcount = &grpc_static_metadata_refcounts[44],
.data.refcounted = {g_bytes + 502, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[0],
.data.refcounted = {g_bytes + 0, 5}},
{.refcount = &grpc_static_metadata_refcounts[45],
.data.refcounted = {g_bytes + 505, 1}}},
{{.refcount = &grpc_static_metadata_refcounts[0],
.data.refcounted = {g_bytes + 0, 5}},
{.refcount = &grpc_static_metadata_refcounts[46],
.data.refcounted = {g_bytes + 506, 11}}},
{{.refcount = &grpc_static_metadata_refcounts[2],
.data.refcounted = {g_bytes + 12, 7}},
{.refcount = &grpc_static_metadata_refcounts[47],
.data.refcounted = {g_bytes + 517, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[2],
.data.refcounted = {g_bytes + 12, 7}},
{.refcount = &grpc_static_metadata_refcounts[48],
.data.refcounted = {g_bytes + 520, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[2],
.data.refcounted = {g_bytes + 12, 7}},
{.refcount = &grpc_static_metadata_refcounts[49],
.data.refcounted = {g_bytes + 523, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[2],
.data.refcounted = {g_bytes + 12, 7}},
{.refcount = &grpc_static_metadata_refcounts[50],
.data.refcounted = {g_bytes + 526, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[2],
.data.refcounted = {g_bytes + 12, 7}},
{.refcount = &grpc_static_metadata_refcounts[51],
.data.refcounted = {g_bytes + 529, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[52],
.data.refcounted = {g_bytes + 532, 14}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[16],
.data.refcounted = {g_bytes + 186, 15}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[16],
.data.refcounted = {g_bytes + 186, 15}},
{.refcount = &grpc_static_metadata_refcounts[53],
.data.refcounted = {g_bytes + 546, 13}}},
{{.refcount = &grpc_static_metadata_refcounts[54],
.data.refcounted = {g_bytes + 559, 15}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[55],
.data.refcounted = {g_bytes + 574, 13}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[56],
.data.refcounted = {g_bytes + 587, 6}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[57],
.data.refcounted = {g_bytes + 593, 27}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[58],
.data.refcounted = {g_bytes + 620, 3}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[59],
.data.refcounted = {g_bytes + 623, 5}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[60],
.data.refcounted = {g_bytes + 628, 13}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[61],
.data.refcounted = {g_bytes + 641, 13}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[62],
.data.refcounted = {g_bytes + 654, 19}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[15],
.data.refcounted = {g_bytes + 170, 16}},
{.refcount = &grpc_static_metadata_refcounts[32],
.data.refcounted = {g_bytes + 433, 8}}},
{{.refcount = &grpc_static_metadata_refcounts[15],
.data.refcounted = {g_bytes + 170, 16}},
{.refcount = &grpc_static_metadata_refcounts[33],
.data.refcounted = {g_bytes + 441, 4}}},
{{.refcount = &grpc_static_metadata_refcounts[15],
.data.refcounted = {g_bytes + 170, 16}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[63],
.data.refcounted = {g_bytes + 673, 16}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[64],
.data.refcounted = {g_bytes + 689, 14}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[65],
.data.refcounted = {g_bytes + 703, 16}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[66],
.data.refcounted = {g_bytes + 719, 13}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[14],
.data.refcounted = {g_bytes + 158, 12}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[67],
.data.refcounted = {g_bytes + 732, 6}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[68],
.data.refcounted = {g_bytes + 738, 4}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[69],
.data.refcounted = {g_bytes + 742, 4}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[70],
.data.refcounted = {g_bytes + 746, 6}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[71],
.data.refcounted = {g_bytes + 752, 7}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[72],
.data.refcounted = {g_bytes + 759, 4}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[20],
.data.refcounted = {g_bytes + 278, 4}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[73],
.data.refcounted = {g_bytes + 763, 8}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[74],
.data.refcounted = {g_bytes + 771, 17}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[75],
.data.refcounted = {g_bytes + 788, 13}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[76],
.data.refcounted = {g_bytes + 801, 8}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[77],
.data.refcounted = {g_bytes + 809, 19}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[78],
.data.refcounted = {g_bytes + 828, 13}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[21],
.data.refcounted = {g_bytes + 282, 8}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[79],
.data.refcounted = {g_bytes + 841, 11}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[80],
.data.refcounted = {g_bytes + 852, 4}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[81],
.data.refcounted = {g_bytes + 856, 8}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[82],
.data.refcounted = {g_bytes + 864, 12}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[83],
.data.refcounted = {g_bytes + 876, 18}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[84],
.data.refcounted = {g_bytes + 894, 19}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[85],
.data.refcounted = {g_bytes + 913, 5}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[86],
.data.refcounted = {g_bytes + 918, 7}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[87],
.data.refcounted = {g_bytes + 925, 7}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[88],
.data.refcounted = {g_bytes + 932, 11}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[89],
.data.refcounted = {g_bytes + 943, 6}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[90],
.data.refcounted = {g_bytes + 949, 10}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[91],
.data.refcounted = {g_bytes + 959, 25}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[92],
.data.refcounted = {g_bytes + 984, 17}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[19],
.data.refcounted = {g_bytes + 268, 10}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[93],
.data.refcounted = {g_bytes + 1001, 4}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[94],
.data.refcounted = {g_bytes + 1005, 3}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[95],
.data.refcounted = {g_bytes + 1008, 16}},
{.refcount = &grpc_static_metadata_refcounts[23],
.data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[10],
.data.refcounted = {g_bytes + 90, 20}},
{.refcount = &grpc_static_metadata_refcounts[32],
.data.refcounted = {g_bytes + 433, 8}}},
{{.refcount = &grpc_static_metadata_refcounts[10],
.data.refcounted = {g_bytes + 90, 20}},
{.refcount = &grpc_static_metadata_refcounts[34],
.data.refcounted = {g_bytes + 445, 7}}},
{{.refcount = &grpc_static_metadata_refcounts[10],
.data.refcounted = {g_bytes + 90, 20}},
{.refcount = &grpc_static_metadata_refcounts[96],
.data.refcounted = {g_bytes + 1024, 16}}},
{{.refcount = &grpc_static_metadata_refcounts[10],
.data.refcounted = {g_bytes + 90, 20}},
{.refcount = &grpc_static_metadata_refcounts[33],
.data.refcounted = {g_bytes + 441, 4}}},
{{.refcount = &grpc_static_metadata_refcounts[10],
.data.refcounted = {g_bytes + 90, 20}},
{.refcount = &grpc_static_metadata_refcounts[97],
.data.refcounted = {g_bytes + 1040, 13}}},
{{.refcount = &grpc_static_metadata_refcounts[10],
.data.refcounted = {g_bytes + 90, 20}},
{.refcount = &grpc_static_metadata_refcounts[98],
.data.refcounted = {g_bytes + 1053, 12}}},
{{.refcount = &grpc_static_metadata_refcounts[10],
.data.refcounted = {g_bytes + 90, 20}},
{.refcount = &grpc_static_metadata_refcounts[99],
.data.refcounted = {g_bytes + 1065, 21}}},
{{.refcount = &grpc_static_metadata_refcounts[16],
.data.refcounted = {g_bytes + 186, 15}},
{.refcount = &grpc_static_metadata_refcounts[32],
.data.refcounted = {g_bytes + 433, 8}}},
{{.refcount = &grpc_static_metadata_refcounts[16],
.data.refcounted = {g_bytes + 186, 15}},
{.refcount = &grpc_static_metadata_refcounts[33],
.data.refcounted = {g_bytes + 441, 4}}},
{{.refcount = &grpc_static_metadata_refcounts[16],
.data.refcounted = {g_bytes + 186, 15}},
{.refcount = &grpc_static_metadata_refcounts[97],
.data.refcounted = {g_bytes + 1040, 13}}},
{{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 430, 1}}}},
{{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
{&grpc_static_metadata_refcounts[30], {{g_bytes + 431, 1}}}},
{{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
{&grpc_static_metadata_refcounts[31], {{g_bytes + 432, 1}}}},
{{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
{&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}}},
{{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
{&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}}},
{{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
{&grpc_static_metadata_refcounts[34], {{g_bytes + 445, 7}}}},
{{&grpc_static_metadata_refcounts[5], {{g_bytes + 36, 2}}},
{&grpc_static_metadata_refcounts[35], {{g_bytes + 452, 8}}}},
{{&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}},
{&grpc_static_metadata_refcounts[36], {{g_bytes + 460, 16}}}},
{{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
{&grpc_static_metadata_refcounts[37], {{g_bytes + 476, 4}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
{&grpc_static_metadata_refcounts[38], {{g_bytes + 480, 3}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
{&grpc_static_metadata_refcounts[39], {{g_bytes + 483, 3}}}},
{{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
{&grpc_static_metadata_refcounts[40], {{g_bytes + 486, 4}}}},
{{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
{&grpc_static_metadata_refcounts[41], {{g_bytes + 490, 5}}}},
{{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
{&grpc_static_metadata_refcounts[42], {{g_bytes + 495, 4}}}},
{{&grpc_static_metadata_refcounts[3], {{g_bytes + 19, 10}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
{&grpc_static_metadata_refcounts[43], {{g_bytes + 499, 3}}}},
{{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
{&grpc_static_metadata_refcounts[44], {{g_bytes + 502, 3}}}},
{{&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}},
{&grpc_static_metadata_refcounts[45], {{g_bytes + 505, 1}}}},
{{&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}},
{&grpc_static_metadata_refcounts[46], {{g_bytes + 506, 11}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
{&grpc_static_metadata_refcounts[47], {{g_bytes + 517, 3}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
{&grpc_static_metadata_refcounts[48], {{g_bytes + 520, 3}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
{&grpc_static_metadata_refcounts[49], {{g_bytes + 523, 3}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
{&grpc_static_metadata_refcounts[50], {{g_bytes + 526, 3}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
{&grpc_static_metadata_refcounts[51], {{g_bytes + 529, 3}}}},
{{&grpc_static_metadata_refcounts[52], {{g_bytes + 532, 14}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
{&grpc_static_metadata_refcounts[53], {{g_bytes + 546, 13}}}},
{{&grpc_static_metadata_refcounts[54], {{g_bytes + 559, 15}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[55], {{g_bytes + 574, 13}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[56], {{g_bytes + 587, 6}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[57], {{g_bytes + 593, 27}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[58], {{g_bytes + 620, 3}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[59], {{g_bytes + 623, 5}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[60], {{g_bytes + 628, 13}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[61], {{g_bytes + 641, 13}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[62], {{g_bytes + 654, 19}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
{&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}}},
{{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
{&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}}},
{{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[63], {{g_bytes + 673, 16}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[64], {{g_bytes + 689, 14}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[65], {{g_bytes + 703, 16}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[66], {{g_bytes + 719, 13}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[67], {{g_bytes + 732, 6}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[68], {{g_bytes + 738, 4}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[69], {{g_bytes + 742, 4}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[70], {{g_bytes + 746, 6}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[71], {{g_bytes + 752, 7}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[72], {{g_bytes + 759, 4}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[20], {{g_bytes + 278, 4}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[73], {{g_bytes + 763, 8}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[74], {{g_bytes + 771, 17}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[75], {{g_bytes + 788, 13}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[76], {{g_bytes + 801, 8}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[77], {{g_bytes + 809, 19}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[78], {{g_bytes + 828, 13}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[21], {{g_bytes + 282, 8}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[79], {{g_bytes + 841, 11}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[80], {{g_bytes + 852, 4}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[81], {{g_bytes + 856, 8}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[82], {{g_bytes + 864, 12}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[83], {{g_bytes + 876, 18}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[84], {{g_bytes + 894, 19}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[85], {{g_bytes + 913, 5}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[86], {{g_bytes + 918, 7}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[87], {{g_bytes + 925, 7}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[88], {{g_bytes + 932, 11}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[89], {{g_bytes + 943, 6}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[90], {{g_bytes + 949, 10}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[91], {{g_bytes + 959, 25}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[92], {{g_bytes + 984, 17}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[19], {{g_bytes + 268, 10}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[93], {{g_bytes + 1001, 4}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[94], {{g_bytes + 1005, 3}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[95], {{g_bytes + 1008, 16}}},
{&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
{&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
{&grpc_static_metadata_refcounts[34], {{g_bytes + 445, 7}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
{&grpc_static_metadata_refcounts[96], {{g_bytes + 1024, 16}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
{&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
{&grpc_static_metadata_refcounts[97], {{g_bytes + 1040, 13}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
{&grpc_static_metadata_refcounts[98], {{g_bytes + 1053, 12}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
{&grpc_static_metadata_refcounts[99], {{g_bytes + 1065, 21}}}},
{{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
{&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}}},
{{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
{&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}}},
{{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
{&grpc_static_metadata_refcounts[97], {{g_bytes + 1040, 13}}}},
};
bool grpc_static_callout_is_default[GRPC_BATCH_CALLOUTS_COUNT] = {
true, // :path
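The generated tables above encode each static metadata string as an (offset, length) view into the shared g_bytes buffer plus a per-entry refcount slot. A minimal sketch of the same idea through the public slice API; the string ":path" is taken from the table, everything else is illustrative:
#include <grpc/slice.h>
#include <cstdio>

int main() {
  // A static slice points at existing bytes; no copy, no heap refcount.
  grpc_slice path = grpc_slice_from_static_string(":path");
  std::printf("len=%zu data=%.*s\n", GRPC_SLICE_LENGTH(path),
              (int)GRPC_SLICE_LENGTH(path),
              (const char *)GRPC_SLICE_START_PTR(path));
  grpc_slice_unref(path);  // no-op for a static slice, kept for symmetry
  return 0;
}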

@@ -18,7 +18,7 @@
#include "src/core/lib/transport/status_conversion.h"
int grpc_status_to_http2_error(grpc_status_code status) {
grpc_http2_error_code grpc_status_to_http2_error(grpc_status_code status) {
switch (status) {
case GRPC_STATUS_OK:
return GRPC_HTTP2_NO_ERROR;

@@ -102,9 +102,11 @@ static void slice_stream_unref(grpc_exec_ctx *exec_ctx, void *p) {
grpc_slice grpc_slice_from_stream_owned_buffer(grpc_stream_refcount *refcount,
void *buffer, size_t length) {
slice_stream_ref(&refcount->slice_refcount);
return (grpc_slice){
.refcount = &refcount->slice_refcount,
.data.refcounted = {.bytes = (uint8_t *)buffer, .length = length}};
grpc_slice res;
res.refcount = &refcount->slice_refcount;
res.data.refcounted.bytes = (uint8_t *)buffer;
res.data.refcounted.length = length;
return res;
}
static const grpc_slice_refcount_vtable stream_ref_slice_vtable = {

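The slice hunk above swaps a C99 designated-initializer compound literal for plain member assignments so the file also compiles as C++. A standalone illustration of the same rewrite on a hypothetical struct (not gRPC code):
#include <cstddef>
#include <cstdint>

struct buf_view {
  uint8_t *bytes;
  size_t length;
};

// Was: return (buf_view){.bytes = (uint8_t *)buffer, .length = length};
static buf_view make_view(void *buffer, size_t length) {
  buf_view res;
  res.bytes = static_cast<uint8_t *>(buffer);
  res.length = length;
  return res;
}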
@@ -47,4 +47,14 @@ std::unique_ptr<GenericClientAsyncReaderWriter> GenericStub::PrepareCall(
return CallInternal(channel_.get(), context, method, cq, false, nullptr);
}
// setup a unary call to a named method
std::unique_ptr<GenericClientAsyncResponseReader> GenericStub::PrepareUnaryCall(
ClientContext* context, const grpc::string& method,
const ByteBuffer& request, CompletionQueue* cq) {
return std::unique_ptr<GenericClientAsyncResponseReader>(
GenericClientAsyncResponseReader::Create(
channel_.get(), cq, RpcMethod(method.c_str(), RpcMethod::NORMAL_RPC),
context, request, false));
}
} // namespace grpc
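A sketch of how the new PrepareUnaryCall might be driven; the method path, tag value, and queue plumbing are assumptions for illustration, not part of this diff:
#include <grpc++/generic/generic_stub.h>
#include <grpc++/grpc++.h>

void generic_unary(const std::shared_ptr<grpc::Channel>& channel,
                   const grpc::ByteBuffer& request) {
  grpc::GenericStub stub(channel);
  grpc::CompletionQueue cq;
  grpc::ClientContext ctx;
  // Prepared but not started: nothing goes on the wire until StartCall().
  auto call = stub.PrepareUnaryCall(&ctx, "/example.Echo/UnaryEcho", request, &cq);
  call->StartCall();
  grpc::ByteBuffer response;
  grpc::Status status;
  call->Finish(&response, &status, reinterpret_cast<void*>(1));
  void* tag;
  bool ok;
  cq.Next(&tag, &ok);  // blocks until the Finish tag comes back
}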

@@ -21,6 +21,7 @@
#include <grpc++/impl/grpc_library.h>
#include <grpc++/support/channel_arguments.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "src/cpp/client/create_channel_internal.h"
#include "src/cpp/common/secure_auth_context.h"
@@ -150,6 +151,18 @@ std::shared_ptr<ChannelCredentials> CompositeChannelCredentials(
return nullptr;
}
std::shared_ptr<CallCredentials> CompositeCallCredentials(
const std::shared_ptr<CallCredentials>& creds1,
const std::shared_ptr<CallCredentials>& creds2) {
SecureCallCredentials* s_creds1 = creds1->AsSecureCredentials();
SecureCallCredentials* s_creds2 = creds2->AsSecureCredentials();
if (s_creds1 != nullptr && s_creds2 != nullptr) {
return WrapCallCredentials(grpc_composite_call_credentials_create(
s_creds1->GetRawCreds(), s_creds2->GetRawCreds(), nullptr));
}
return nullptr;
}
void MetadataCredentialsPluginWrapper::Destroy(void* wrapper) {
if (wrapper == nullptr) return;
MetadataCredentialsPluginWrapper* w =
@@ -157,28 +170,50 @@ void MetadataCredentialsPluginWrapper::Destroy(void* wrapper) {
delete w;
}
void MetadataCredentialsPluginWrapper::GetMetadata(
int MetadataCredentialsPluginWrapper::GetMetadata(
void* wrapper, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void* user_data) {
grpc_credentials_plugin_metadata_cb cb, void* user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t* num_creds_md, grpc_status_code* status,
const char** error_details) {
GPR_ASSERT(wrapper);
MetadataCredentialsPluginWrapper* w =
reinterpret_cast<MetadataCredentialsPluginWrapper*>(wrapper);
if (!w->plugin_) {
cb(user_data, NULL, 0, GRPC_STATUS_OK, NULL);
return;
*num_creds_md = 0;
*status = GRPC_STATUS_OK;
*error_details = nullptr;
return true;
}
if (w->plugin_->IsBlocking()) {
// Asynchronous return.
w->thread_pool_->Add(
std::bind(&MetadataCredentialsPluginWrapper::InvokePlugin, w, context,
cb, user_data));
cb, user_data, nullptr, nullptr, nullptr, nullptr));
return 0;
} else {
w->InvokePlugin(context, cb, user_data);
// Synchronous return.
w->InvokePlugin(context, cb, user_data, creds_md, num_creds_md, status,
error_details);
return 1;
}
}
namespace {
void UnrefMetadata(const std::vector<grpc_metadata>& md) {
for (auto it = md.begin(); it != md.end(); ++it) {
grpc_slice_unref(it->key);
grpc_slice_unref(it->value);
}
}
} // namespace
void MetadataCredentialsPluginWrapper::InvokePlugin(
grpc_auth_metadata_context context, grpc_credentials_plugin_metadata_cb cb,
void* user_data) {
void* user_data, grpc_metadata creds_md[4], size_t* num_creds_md,
grpc_status_code* status_code, const char** error_details) {
std::multimap<grpc::string, grpc::string> metadata;
// const_cast is safe since the SecureAuthContext does not take ownership and
@@ -196,12 +231,31 @@ void MetadataCredentialsPluginWrapper::InvokePlugin(
md_entry.flags = 0;
md.push_back(md_entry);
}
cb(user_data, md.empty() ? nullptr : &md[0], md.size(),
static_cast<grpc_status_code>(status.error_code()),
status.error_message().c_str());
for (auto it = md.begin(); it != md.end(); ++it) {
grpc_slice_unref(it->key);
grpc_slice_unref(it->value);
if (creds_md != nullptr) {
// Synchronous return.
if (md.size() > GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX) {
*num_creds_md = 0;
*status_code = GRPC_STATUS_INTERNAL;
*error_details = gpr_strdup(
"blocking plugin credentials returned too many metadata keys");
UnrefMetadata(md);
} else {
for (const auto& elem : md) {
creds_md[*num_creds_md].key = elem.key;
creds_md[*num_creds_md].value = elem.value;
creds_md[*num_creds_md].flags = elem.flags;
++(*num_creds_md);
}
*status_code = static_cast<grpc_status_code>(status.error_code());
*error_details =
status.ok() ? nullptr : gpr_strdup(status.error_message().c_str());
}
} else {
// Asynchronous return.
cb(user_data, md.empty() ? nullptr : &md[0], md.size(),
static_cast<grpc_status_code>(status.error_code()),
status.error_message().c_str());
UnrefMetadata(md);
}
}
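The new CompositeCallCredentials helper above mirrors grpc_composite_call_credentials_create for the C++ API. A sketch of composing two per-call credential objects; the token values are placeholders, and the matching declaration in <grpc++/security/credentials.h> is assumed to be part of this change:
#include <grpc++/security/credentials.h>
#include <memory>

std::shared_ptr<grpc::CallCredentials> composed_call_creds() {
  auto first = grpc::AccessTokenCredentials("first-token");
  auto second = grpc::AccessTokenCredentials("second-token");
  // Both sets of metadata are attached to every call that uses the result.
  return grpc::CompositeCallCredentials(first, second);
}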

@@ -58,16 +58,23 @@ class SecureCallCredentials final : public CallCredentials {
class MetadataCredentialsPluginWrapper final : private GrpcLibraryCodegen {
public:
static void Destroy(void* wrapper);
static void GetMetadata(void* wrapper, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb,
void* user_data);
static int GetMetadata(
void* wrapper, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void* user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t* num_creds_md, grpc_status_code* status,
const char** error_details);
explicit MetadataCredentialsPluginWrapper(
std::unique_ptr<MetadataCredentialsPlugin> plugin);
private:
void InvokePlugin(grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void* user_data);
void InvokePlugin(
grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void* user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t* num_creds_md, grpc_status_code* status,
const char** error_details);
std::unique_ptr<ThreadPoolInterface> thread_pool_;
std::unique_ptr<MetadataCredentialsPlugin> plugin_;
};
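With this change a metadata plugin's get_metadata can finish synchronously: it fills creds_md/num_creds_md/status/error_details and returns nonzero, or returns 0 and reports later through cb. A hedged sketch of the synchronous path at the C API; the plugin name and token are made up:
#include <grpc/grpc_security.h>
#include <grpc/slice.h>

static int sync_get_metadata(
    void* state, grpc_auth_metadata_context context,
    grpc_credentials_plugin_metadata_cb cb, void* user_data,
    grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
    size_t* num_creds_md, grpc_status_code* status, const char** error_details) {
  (void)state; (void)context; (void)cb; (void)user_data;
  creds_md[0].key = grpc_slice_from_static_string("authorization");
  creds_md[0].value = grpc_slice_from_static_string("Bearer fake-token");
  creds_md[0].flags = 0;
  *num_creds_md = 1;  // must not exceed GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX
  *status = GRPC_STATUS_OK;
  *error_details = nullptr;
  return 1;  // nonzero: metadata was produced synchronously, cb is unused
}

static void sync_destroy(void* state) { (void)state; }

grpc_call_credentials* sync_plugin_creds() {
  grpc_metadata_credentials_plugin plugin = {sync_get_metadata, sync_destroy,
                                             nullptr, "sync-token-plugin"};
  return grpc_metadata_credentials_create_from_plugin(plugin, nullptr);
}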

@@ -86,6 +86,10 @@ void ChannelArguments::SetCompressionAlgorithm(
SetInt(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM, algorithm);
}
void ChannelArguments::SetGrpclbFallbackTimeout(int fallback_timeout) {
SetInt(GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS, fallback_timeout);
}
void ChannelArguments::SetSocketMutator(grpc_socket_mutator* mutator) {
if (!mutator) {
return;

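The new ChannelArguments::SetGrpclbFallbackTimeout wraps GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS. A sketch of setting it when creating a channel; the target, credentials, and the 10-second value are arbitrary examples:
#include <grpc++/grpc++.h>

std::shared_ptr<grpc::Channel> channel_with_grpclb_fallback() {
  grpc::ChannelArguments args;
  // Milliseconds to wait for a grpclb serverlist before using fallback addresses.
  args.SetGrpclbFallbackTimeout(10000);
  return grpc::CreateCustomChannel("lb.example.com:443",
                                   grpc::InsecureChannelCredentials(), args);
}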
@@ -20,6 +20,7 @@
#include <mutex>
#include <grpc++/impl/codegen/method_handler_impl.h>
#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>

@@ -16,11 +16,15 @@
*
*/
#include <grpc++/impl/grpc_library.h>
#include <grpc++/support/byte_buffer.h>
#include <grpc/byte_buffer.h>
#include <grpc/byte_buffer_reader.h>
namespace grpc {
static internal::GrpcLibraryInitializer g_gli_initializer;
ByteBuffer::ByteBuffer(const Slice* slices, size_t nslices) {
// The following assertions check that the representation of a grpc::Slice is
// identical to that of a grpc_slice: it has a grpc_slice field, and nothing
@@ -29,6 +33,16 @@ ByteBuffer::ByteBuffer(const Slice* slices, size_t nslices) {
"Slice must have same representation as grpc_slice");
static_assert(sizeof(Slice) == sizeof(grpc_slice),
"Slice must have same representation as grpc_slice");
// The following assertions check that the representation of a ByteBuffer is
// identical to grpc_byte_buffer*: it has a grpc_byte_buffer* field,
// and nothing else.
static_assert(std::is_same<decltype(buffer_), grpc_byte_buffer*>::value,
"ByteBuffer must have same representation as "
"grpc_byte_buffer*");
static_assert(sizeof(ByteBuffer) == sizeof(grpc_byte_buffer*),
"ByteBuffer must have same representation as "
"grpc_byte_buffer*");
g_gli_initializer.summon(); // Make sure that the initializer is linked in
// The const_cast is legal if grpc_raw_byte_buffer_create() does no more
// than its advertised side effect of increasing the reference count of the
// slices it processes, and such an increase does not affect the semantics
@@ -37,19 +51,6 @@ ByteBuffer::ByteBuffer(const Slice* slices, size_t nslices) {
reinterpret_cast<grpc_slice*>(const_cast<Slice*>(slices)), nslices);
}
ByteBuffer::~ByteBuffer() {
if (buffer_) {
grpc_byte_buffer_destroy(buffer_);
}
}
void ByteBuffer::Clear() {
if (buffer_) {
grpc_byte_buffer_destroy(buffer_);
buffer_ = nullptr;
}
}
Status ByteBuffer::Dump(std::vector<Slice>* slices) const {
slices->clear();
if (!buffer_) {
@@ -80,7 +81,9 @@ ByteBuffer::ByteBuffer(const ByteBuffer& buf)
: buffer_(grpc_byte_buffer_copy(buf.buffer_)) {}
ByteBuffer& ByteBuffer::operator=(const ByteBuffer& buf) {
Clear(); // first remove existing data
if (this != &buf) {
Clear(); // first remove existing data
}
if (buf.buffer_) {
buffer_ = grpc_byte_buffer_copy(buf.buffer_); // then copy
}
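The assignment-operator hunk above makes self-assignment safe by only clearing when source and destination differ. A small sketch of the copy path; the payload string is arbitrary:
#include <grpc++/support/byte_buffer.h>
#include <grpc++/support/slice.h>
#include <grpc/slice.h>
#include <vector>

void byte_buffer_copy_roundtrip() {
  grpc::Slice s(grpc_slice_from_copied_string("payload"), grpc::Slice::STEAL_REF);
  grpc::ByteBuffer original(&s, 1);
  grpc::ByteBuffer copy;
  copy = original;   // deep copy via grpc_byte_buffer_copy
  copy = copy;       // self-assignment no longer clears the data first
  std::vector<grpc::Slice> out;
  copy.Dump(&out);   // read the slices back out
}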

@@ -50,4 +50,6 @@ Slice::Slice(void* buf, size_t len, void (*destroy)(void*), void* user_data)
Slice::Slice(void* buf, size_t len, void (*destroy)(void*, size_t))
: slice_(grpc_slice_new_with_len(buf, len, destroy)) {}
grpc_slice Slice::c_slice() const { return grpc_slice_ref(slice_); }
} // namespace grpc

@@ -61,12 +61,9 @@ namespace Grpc.Core.Internal
try
{
var context = new AuthInterceptorContext(Marshal.PtrToStringAnsi(serviceUrlPtr),
Marshal.PtrToStringAnsi(methodNamePtr));
// Don't await, we are in a native callback and need to return.
#pragma warning disable 4014
GetMetadataAsync(context, callbackPtr, userDataPtr);
#pragma warning restore 4014
var context = new AuthInterceptorContext(Marshal.PtrToStringAnsi(serviceUrlPtr), Marshal.PtrToStringAnsi(methodNamePtr));
// Make a guarantee that credentials_notify_from_plugin is invoked async to be compliant with c-core API.
ThreadPool.QueueUserWorkItem(async (stateInfo) => await GetMetadataAsync(context, callbackPtr, userDataPtr));
}
catch (Exception e)
{

@@ -89,6 +89,54 @@ namespace Grpc.IntegrationTesting
client.UnaryCall(new SimpleRequest { }, new CallOptions(credentials: callCredentials));
}
[Test]
public async Task MetadataCredentials_Composed()
{
var first = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => {
// Attempt to exercise the case where async callback is inlineable/synchronously-runnable.
metadata.Add("first_authorization", "FIRST_SECRET_TOKEN");
return TaskUtils.CompletedTask;
}));
var second = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => {
metadata.Add("second_authorization", "SECOND_SECRET_TOKEN");
return TaskUtils.CompletedTask;
}));
var third = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => {
metadata.Add("third_authorization", "THIRD_SECRET_TOKEN");
return TaskUtils.CompletedTask;
}));
var channelCredentials = ChannelCredentials.Create(TestCredentials.CreateSslCredentials(),
CallCredentials.Compose(first, second, third));
channel = new Channel(Host, server.Ports.Single().BoundPort, channelCredentials, options);
var client = new TestService.TestServiceClient(channel);
var call = client.StreamingOutputCall(new StreamingOutputCallRequest { });
Assert.IsTrue(await call.ResponseStream.MoveNext());
Assert.IsFalse(await call.ResponseStream.MoveNext());
}
[Test]
public async Task MetadataCredentials_ComposedPerCall()
{
channel = new Channel(Host, server.Ports.Single().BoundPort, TestCredentials.CreateSslCredentials(), options);
var client = new TestService.TestServiceClient(channel);
var first = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => {
metadata.Add("first_authorization", "FIRST_SECRET_TOKEN");
return TaskUtils.CompletedTask;
}));
var second = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => {
metadata.Add("second_authorization", "SECOND_SECRET_TOKEN");
return TaskUtils.CompletedTask;
}));
var third = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => {
metadata.Add("third_authorization", "THIRD_SECRET_TOKEN");
return TaskUtils.CompletedTask;
}));
var call = client.StreamingOutputCall(new StreamingOutputCallRequest{ },
new CallOptions(credentials: CallCredentials.Compose(first, second, third)));
Assert.IsTrue(await call.ResponseStream.MoveNext());
Assert.IsFalse(await call.ResponseStream.MoveNext());
}
[Test]
public void MetadataCredentials_InterceptorLeavesMetadataEmpty()
{
@@ -125,6 +173,17 @@ namespace Grpc.IntegrationTesting
Assert.AreEqual("SECRET_TOKEN", authToken);
return Task.FromResult(new SimpleResponse());
}
public override async Task StreamingOutputCall(StreamingOutputCallRequest request, IServerStreamWriter<StreamingOutputCallResponse> responseStream, ServerCallContext context)
{
var first = context.RequestHeaders.First((entry) => entry.Key == "first_authorization").Value;
Assert.AreEqual("FIRST_SECRET_TOKEN", first);
var second = context.RequestHeaders.First((entry) => entry.Key == "second_authorization").Value;
Assert.AreEqual("SECOND_SECRET_TOKEN", second);
var third = context.RequestHeaders.First((entry) => entry.Key == "third_authorization").Value;
Assert.AreEqual("THIRD_SECRET_TOKEN", third);
await responseStream.WriteAsync(new StreamingOutputCallResponse());
}
}
}
}

@@ -1023,13 +1023,17 @@ typedef void(GPR_CALLTYPE *grpcsharp_metadata_interceptor_func)(
grpc_credentials_plugin_metadata_cb cb, void *user_data,
int32_t is_destroy);
static void grpcsharp_get_metadata_handler(
static int grpcsharp_get_metadata_handler(
void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void *user_data) {
grpc_credentials_plugin_metadata_cb cb, void *user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t *num_creds_md, grpc_status_code *status,
const char **error_details) {
grpcsharp_metadata_interceptor_func interceptor =
(grpcsharp_metadata_interceptor_func)(intptr_t)state;
interceptor(state, context.service_url, context.method_name, cb, user_data,
0);
return 0; /* Asynchronous return. */
}
static void grpcsharp_metadata_credentials_destroy_handler(void *state) {

@@ -238,9 +238,12 @@ NAUV_WORK_CB(SendPluginCallback) {
}
}
void plugin_get_metadata(void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb,
void *user_data) {
int plugin_get_metadata(
void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void *user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t *num_creds_md, grpc_status_code *status,
const char **error_details) {
plugin_state *p_state = reinterpret_cast<plugin_state *>(state);
plugin_callback_data *data = new plugin_callback_data;
data->service_url = context.service_url;
@@ -252,6 +255,7 @@ void plugin_get_metadata(void *state, grpc_auth_metadata_context context,
uv_mutex_unlock(&p_state->plugin_mutex);
uv_async_send(&p_state->plugin_async);
return 0; // Async processing.
}
void plugin_uv_close_cb(uv_handle_t *handle) {

@@ -75,9 +75,11 @@ typedef struct plugin_state {
uv_async_t plugin_async;
} plugin_state;
void plugin_get_metadata(void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb,
void *user_data);
int plugin_get_metadata(
void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void *user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t *num_creds_md, grpc_status_code *status, const char **error_details);
void plugin_destroy_state(void *state);
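The Node wrapper above (like the Python and PHP ones later in this diff) takes the other branch of the new contract: it returns 0 from get_metadata and later delivers the result through cb. A hedged C-API sketch of that asynchronous path; the detached thread is only for illustration, the real wrappers hand off to their own event loops:
#include <grpc/grpc_security.h>
#include <grpc/slice.h>
#include <thread>

static int async_get_metadata(
    void* state, grpc_auth_metadata_context context,
    grpc_credentials_plugin_metadata_cb cb, void* user_data,
    grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
    size_t* num_creds_md, grpc_status_code* status, const char** error_details) {
  (void)state; (void)context;
  (void)creds_md; (void)num_creds_md; (void)status; (void)error_details;
  std::thread([cb, user_data]() {
    grpc_metadata md;
    md.key = grpc_slice_from_static_string("authorization");
    md.value = grpc_slice_from_static_string("Bearer fake-token");
    md.flags = 0;
    // Static slices, so there is nothing to unref after the callback returns.
    cb(user_data, &md, 1, GRPC_STATUS_OK, nullptr);
  }).detach();
  return 0;  // zero: completion happens later through cb
}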

@@ -35,6 +35,7 @@
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include <grpc/support/string_util.h>
zend_class_entry *grpc_ce_call_credentials;
#if PHP_MAJOR_VERSION >= 7
@@ -143,9 +144,12 @@ PHP_METHOD(CallCredentials, createFromPlugin) {
}
/* Callback function for plugin creds API */
void plugin_get_metadata(void *ptr, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb,
void *user_data) {
int plugin_get_metadata(
void *ptr, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void *user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t *num_creds_md, grpc_status_code *status,
const char **error_details) {
TSRMLS_FETCH();
plugin_state *state = (plugin_state *)ptr;
@@ -175,15 +179,19 @@ void plugin_get_metadata(void *ptr, grpc_auth_metadata_context context,
/* call the user callback function */
zend_call_function(state->fci, state->fci_cache TSRMLS_CC);
grpc_status_code code = GRPC_STATUS_OK;
*num_creds_md = 0;
*status = GRPC_STATUS_OK;
*error_details = NULL;
grpc_metadata_array metadata;
bool cleanup = true;
if (retval == NULL || Z_TYPE_P(retval) != IS_ARRAY) {
cleanup = false;
code = GRPC_STATUS_INVALID_ARGUMENT;
} else if (!create_metadata_array(retval, &metadata)) {
code = GRPC_STATUS_INVALID_ARGUMENT;
*status = GRPC_STATUS_INVALID_ARGUMENT;
return true; // Synchronous return.
}
if (!create_metadata_array(retval, &metadata)) {
*status = GRPC_STATUS_INVALID_ARGUMENT;
return true; // Synchronous return.
}
if (retval != NULL) {
@@ -197,14 +205,24 @@ void plugin_get_metadata(void *ptr, grpc_auth_metadata_context context,
#endif
}
/* Pass control back to core */
cb(user_data, metadata.metadata, metadata.count, code, NULL);
if (cleanup) {
for (int i = 0; i < metadata.count; i++) {
if (metadata.count > GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX) {
*status = GRPC_STATUS_INTERNAL;
*error_details = gpr_strdup(
"PHP plugin credentials returned too many metadata entries");
for (size_t i = 0; i < metadata.count; i++) {
// TODO(stanleycheung): Why don't we need to unref the key here?
grpc_slice_unref(metadata.metadata[i].value);
}
grpc_metadata_array_destroy(&metadata);
} else {
// Return data to core.
*num_creds_md = metadata.count;
for (size_t i = 0; i < metadata.count; ++i) {
creds_md[i] = metadata.metadata[i];
}
}
grpc_metadata_array_destroy(&metadata);
return true; // Synchronous return.
}
/* Cleanup function for plugin creds API */

@@ -65,9 +65,12 @@ typedef struct plugin_state {
} plugin_state;
/* Callback function for plugin creds API */
void plugin_get_metadata(void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb,
void *user_data);
int plugin_get_metadata(
void *ptr, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void *user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t *num_creds_md, grpc_status_code *status,
const char **error_details);
/* Cleanup function for plugin creds API */
void plugin_destroy_state(void *ptr);

@@ -1,8 +1,7 @@
{
"minimum-stability": "dev",
"require": {
"grpc/grpc": "dev-master",
"google/protobuf": "^v3.3.0"
"google/protobuf": "v3.4.1"
},
"autoload": {
"psr-4": {

@@ -41,7 +41,8 @@ cdef class CredentialsMetadataPlugin:
cdef object plugin_callback
cdef bytes plugin_name
cdef grpc_metadata_credentials_plugin make_c_plugin(self)
cdef grpc_metadata_credentials_plugin _c_plugin(CredentialsMetadataPlugin plugin)
cdef class AuthMetadataContext:
@@ -49,8 +50,11 @@ cdef class AuthMetadataContext:
cdef grpc_auth_metadata_context context
cdef void plugin_get_metadata(
cdef int plugin_get_metadata(
void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void *user_data) with gil
grpc_credentials_plugin_metadata_cb cb, void *user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t *num_creds_md, grpc_status_code *status,
const char **error_details) with gil
cdef void plugin_destroy_c_plugin_state(void *state) with gil

@@ -14,6 +14,7 @@
cimport cpython
import threading
import traceback
@@ -89,20 +90,20 @@ cdef class CredentialsMetadataPlugin:
self.plugin_callback = plugin_callback
self.plugin_name = name
@staticmethod
cdef grpc_metadata_credentials_plugin make_c_plugin(self):
cdef grpc_metadata_credentials_plugin result
result.get_metadata = plugin_get_metadata
result.destroy = plugin_destroy_c_plugin_state
result.state = <void *>self
result.type = self.plugin_name
cpython.Py_INCREF(self)
return result
def __dealloc__(self):
grpc_shutdown()
cdef grpc_metadata_credentials_plugin _c_plugin(CredentialsMetadataPlugin plugin):
cdef grpc_metadata_credentials_plugin c_plugin
c_plugin.get_metadata = plugin_get_metadata
c_plugin.destroy = plugin_destroy_c_plugin_state
c_plugin.state = <void *>plugin
c_plugin.type = plugin.plugin_name
cpython.Py_INCREF(plugin)
return c_plugin
cdef class AuthMetadataContext:
def __cinit__(self):
@@ -122,9 +123,12 @@ cdef class AuthMetadataContext:
grpc_shutdown()
cdef void plugin_get_metadata(
cdef int plugin_get_metadata(
void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void *user_data) with gil:
grpc_credentials_plugin_metadata_cb cb, void *user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t *num_creds_md, grpc_status_code *status,
const char **error_details) with gil:
called_flag = [False]
def python_callback(
Metadata metadata, grpc_status_code status,
@@ -134,12 +138,15 @@ cdef void plugin_get_metadata(
cdef CredentialsMetadataPlugin self = <CredentialsMetadataPlugin>state
cdef AuthMetadataContext cy_context = AuthMetadataContext()
cy_context.context = context
try:
self.plugin_callback(cy_context, python_callback)
except Exception as error:
if not called_flag[0]:
cb(user_data, NULL, 0, StatusCode.unknown,
traceback.format_exc().encode())
def async_callback():
try:
self.plugin_callback(cy_context, python_callback)
except Exception as error:
if not called_flag[0]:
cb(user_data, NULL, 0, StatusCode.unknown,
traceback.format_exc().encode())
threading.Thread(group=None, target=async_callback).start()
return 0 # Asynchronous return
cdef void plugin_destroy_c_plugin_state(void *state) with gil:
cpython.Py_DECREF(<CredentialsMetadataPlugin>state)
@@ -239,7 +246,7 @@ def call_credentials_google_iam(authorization_token, authority_selector):
def call_credentials_metadata_plugin(CredentialsMetadataPlugin plugin):
cdef CallCredentials credentials = CallCredentials()
cdef grpc_metadata_credentials_plugin c_plugin = plugin.make_c_plugin()
cdef grpc_metadata_credentials_plugin c_plugin = _c_plugin(plugin)
with nogil:
credentials.c_credentials = (
grpc_metadata_credentials_create_from_plugin(c_plugin, NULL))

@@ -375,6 +375,10 @@ cdef extern from "grpc/grpc.h":
cdef extern from "grpc/grpc_security.h":
# Declare this as an enum, this is the only way to make it a const in
# cython
enum: GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX
ctypedef enum grpc_ssl_roots_override_result:
GRPC_SSL_ROOTS_OVERRIDE_OK
GRPC_SSL_ROOTS_OVERRIDE_FAILED_PERMANENTLY
@@ -462,9 +466,12 @@ cdef extern from "grpc/grpc_security.h":
grpc_status_code status, const char *error_details)
ctypedef struct grpc_metadata_credentials_plugin:
void (*get_metadata)(
int (*get_metadata)(
void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void *user_data)
grpc_credentials_plugin_metadata_cb cb, void *user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t *num_creds_md, grpc_status_code *status,
const char **error_details)
void (*destroy)(void *state)
void *state
const char *type
@@ -523,7 +530,7 @@ cdef extern from "grpc/compression.h":
int grpc_compression_algorithm_parse(
grpc_slice value, grpc_compression_algorithm *algorithm) nogil
int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
char **name) nogil
const char **name) nogil
grpc_compression_algorithm grpc_compression_algorithm_for_level(
grpc_compression_level level, uint32_t accepted_encodings) nogil
void grpc_compression_options_init(grpc_compression_options *opts) nogil

@@ -171,14 +171,6 @@ cdef class Timespec:
gpr_convert_clock_type(self.c_time, GPR_CLOCK_REALTIME))
return <double>real_time.seconds + <double>real_time.nanoseconds / 1e9
@staticmethod
def infinite_future():
return Timespec(float("+inf"))
@staticmethod
def infinite_past():
return Timespec(float("-inf"))
def __richcmp__(Timespec self not None, Timespec other not None, int op):
cdef gpr_timespec self_c_time = self.c_time
cdef gpr_timespec other_c_time = other.c_time
@@ -454,7 +446,7 @@ cdef class _MetadataIterator:
self.i = self.i + 1
return result
else:
raise StopIteration
raise StopIteration()
# TODO(https://github.com/grpc/grpc/issues/7950): Eliminate this; just use an
@@ -518,7 +510,7 @@ cdef class MetadataArray:
def __getitem__(self, size_t i):
if i >= self.c_metadata_array.count:
raise IndexError
raise IndexError()
key = _slice_bytes(self.c_metadata_array.metadata[i].key)
value = _slice_bytes(self.c_metadata_array.metadata[i].value)
return Metadatum(key=key, value=value)
@@ -720,7 +712,7 @@ cdef class _OperationsIterator:
self.i = self.i + 1
return result
else:
raise StopIteration
raise StopIteration()
cdef class Operations:
@@ -782,7 +774,7 @@ cdef class CompressionOptions:
def compression_algorithm_name(grpc_compression_algorithm algorithm):
cdef char* name
cdef const char* name
with nogil:
grpc_compression_algorithm_name(algorithm, &name)
# Let Cython do the right thing with string casting
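The Cython declarations above track the C API change that made grpc_compression_algorithm_name take a const char**. Equivalent usage at the C API:
#include <grpc/compression.h>
#include <cstdio>

void print_algorithm_name(grpc_compression_algorithm algorithm) {
  const char* name = nullptr;  // was a plain char* before this change
  if (grpc_compression_algorithm_name(algorithm, &name)) {
    std::printf("%s\n", name);
  }
}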
