Merge github.com:grpc/grpc into tfix2

pull/13039/head
Craig Tiller 7 years ago
commit 48d2696664
  1. BUILD (54)
  2. CMakeLists.txt (318)
  3. Makefile (339)
  4. binding.gyp (16)
  5. build.yaml (146)
  6. config.m4 (18)
  7. config.w32 (17)
  8. doc/environment_variables.md (20)
  9. gRPC-Core.podspec (61)
  10. grpc.def (36)
  11. grpc.gemspec (39)
  12. grpc.gyp (35)
  13. include/grpc++/alarm.h (2)
  14. include/grpc++/channel.h (16)
  15. include/grpc++/impl/codegen/async_stream.h (239)
  16. include/grpc++/impl/codegen/async_unary_call.h (64)
  17. include/grpc++/impl/codegen/byte_buffer.h (21)
  18. include/grpc++/impl/codegen/call.h (17)
  19. include/grpc++/impl/codegen/call_hook.h (2)
  20. include/grpc++/impl/codegen/channel_interface.h (45)
  21. include/grpc++/impl/codegen/client_context.h (23)
  22. include/grpc++/impl/codegen/client_unary_call.h (73)
  23. include/grpc++/impl/codegen/completion_queue.h (90)
  24. include/grpc++/impl/codegen/completion_queue_tag.h (2)
  25. include/grpc++/impl/codegen/metadata_map.h (2)
  26. include/grpc++/impl/codegen/method_handler_impl.h (2)
  27. include/grpc++/impl/codegen/rpc_method.h (3)
  28. include/grpc++/impl/codegen/rpc_service_method.h (3)
  29. include/grpc++/impl/codegen/server_context.h (27)
  30. include/grpc++/impl/codegen/server_interface.h (42)
  31. include/grpc++/impl/codegen/service_type.h (46)
  32. include/grpc++/impl/codegen/sync_stream.h (327)
  33. include/grpc++/impl/codegen/time.h (6)
  34. include/grpc++/server.h (3)
  35. include/grpc++/server_builder.h (1)
  36. include/grpc/census.h (433)
  37. include/grpc/grpc.h (17)
  38. include/grpc/grpc_security.h (74)
  39. include/grpc/grpc_security_constants.h (7)
  40. include/grpc/impl/codegen/connectivity_state.h (2)
  41. include/grpc/support/sync.h (18)
  42. package.xml (37)
  43. src/compiler/cpp_generator.cc (151)
  44. src/core/ext/census/README.md (61)
  45. src/core/ext/census/aggregation.h (51)
  46. src/core/ext/census/base_resources.cc (56)
  47. src/core/ext/census/base_resources.h (32)
  48. src/core/ext/census/census_init.cc (33)
  49. src/core/ext/census/census_interface.h (69)
  50. src/core/ext/census/census_log.cc (588)
  51. src/core/ext/census/census_log.h (84)
  52. src/core/ext/census/census_rpc_stats.cc (238)
  53. src/core/ext/census/census_rpc_stats.h (86)
  54. src/core/ext/census/census_tracing.cc (226)
  55. src/core/ext/census/census_tracing.h (81)
  56. src/core/ext/census/context.cc (496)
  57. src/core/ext/census/gen/README.md (10)
  58. src/core/ext/census/gen/census.pb.c (161)
  59. src/core/ext/census/gen/census.pb.h (280)
  60. src/core/ext/census/gen/trace_context.pb.c (39)
  61. src/core/ext/census/gen/trace_context.pb.h (78)
  62. src/core/ext/census/grpc_context.cc (3)
  63. src/core/ext/census/grpc_filter.cc (196)
  64. src/core/ext/census/grpc_plugin.cc (70)
  65. src/core/ext/census/hash_table.cc (288)
  66. src/core/ext/census/hash_table.h (124)
  67. src/core/ext/census/initialize.cc (51)
  68. src/core/ext/census/intrusive_hash_map.cc (305)
  69. src/core/ext/census/intrusive_hash_map.h (160)
  70. src/core/ext/census/intrusive_hash_map_internal.h (48)
  71. src/core/ext/census/mlog.cc (586)
  72. src/core/ext/census/mlog.h (88)
  73. src/core/ext/census/operation.cc (48)
  74. src/core/ext/census/placeholders.cc (49)
  75. src/core/ext/census/resource.cc (303)
  76. src/core/ext/census/resource.h (56)
  77. src/core/ext/census/rpc_metric_id.h (36)
  78. src/core/ext/census/trace_context.cc (71)
  79. src/core/ext/census/trace_context.h (64)
  80. src/core/ext/census/trace_label.h (46)
  81. src/core/ext/census/trace_propagation.h (56)
  82. src/core/ext/census/trace_status.h (30)
  83. src/core/ext/census/trace_string.h (35)
  84. src/core/ext/census/tracing.cc (55)
  85. src/core/ext/census/tracing.h (117)
  86. src/core/ext/census/window_stats.cc (301)
  87. src/core/ext/census/window_stats.h (166)
  88. src/core/ext/filters/client_channel/backup_poller.cc (158)
  89. src/core/ext/filters/client_channel/backup_poller.h (25)
  90. src/core/ext/filters/client_channel/client_channel.cc (44)
  91. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (19)
  92. src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc (715)
  93. src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc (467)
  94. src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc (265)
  95. src/core/ext/filters/client_channel/lb_policy/subchannel_list.h (153)
  96. src/core/ext/filters/client_channel/subchannel.h (4)
  97. src/core/ext/transport/chttp2/client/chttp2_connector.cc (4)
  98. src/core/ext/transport/chttp2/transport/chttp2_transport.cc (153)
  99. src/core/ext/transport/chttp2/transport/flow_control.cc (573)
  100. src/core/ext/transport/chttp2/transport/flow_control.h (336)
  Some files were not shown because too many files have changed in this diff.

BUILD (54)

@ -409,41 +409,7 @@ grpc_cc_library(
grpc_cc_library(
name = "census",
srcs = [
"src/core/ext/census/base_resources.cc",
"src/core/ext/census/context.cc",
"src/core/ext/census/gen/census.pb.c",
"src/core/ext/census/gen/trace_context.pb.c",
"src/core/ext/census/grpc_context.cc",
"src/core/ext/census/grpc_filter.cc",
"src/core/ext/census/grpc_plugin.cc",
"src/core/ext/census/initialize.cc",
"src/core/ext/census/intrusive_hash_map.cc",
"src/core/ext/census/mlog.cc",
"src/core/ext/census/operation.cc",
"src/core/ext/census/placeholders.cc",
"src/core/ext/census/resource.cc",
"src/core/ext/census/trace_context.cc",
"src/core/ext/census/tracing.cc",
],
hdrs = [
"src/core/ext/census/aggregation.h",
"src/core/ext/census/base_resources.h",
"src/core/ext/census/census_interface.h",
"src/core/ext/census/census_rpc_stats.h",
"src/core/ext/census/gen/census.pb.h",
"src/core/ext/census/gen/trace_context.pb.h",
"src/core/ext/census/grpc_filter.h",
"src/core/ext/census/intrusive_hash_map.h",
"src/core/ext/census/intrusive_hash_map_internal.h",
"src/core/ext/census/mlog.h",
"src/core/ext/census/resource.h",
"src/core/ext/census/rpc_metric_id.h",
"src/core/ext/census/trace_context.h",
"src/core/ext/census/trace_label.h",
"src/core/ext/census/trace_propagation.h",
"src/core/ext/census/trace_status.h",
"src/core/ext/census/trace_string.h",
"src/core/ext/census/tracing.h",
],
external_deps = [
"nanopb",
@ -872,6 +838,7 @@ grpc_cc_library(
grpc_cc_library(
name = "grpc_client_channel",
srcs = [
"src/core/ext/filters/client_channel/backup_poller.cc",
"src/core/ext/filters/client_channel/channel_connectivity.cc",
"src/core/ext/filters/client_channel/client_channel.cc",
"src/core/ext/filters/client_channel/client_channel_factory.cc",
@ -894,6 +861,7 @@ grpc_cc_library(
"src/core/ext/filters/client_channel/uri_parser.cc",
],
hdrs = [
"src/core/ext/filters/client_channel/backup_poller.h",
"src/core/ext/filters/client_channel/client_channel.h",
"src/core/ext/filters/client_channel/client_channel_factory.h",
"src/core/ext/filters/client_channel/connector.h",
@ -1074,6 +1042,21 @@ grpc_cc_library(
],
)
grpc_cc_library(
name = "grpc_lb_subchannel_list",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc",
],
hdrs = [
"src/core/ext/filters/client_channel/lb_policy/subchannel_list.h",
],
language = "c++",
deps = [
"grpc_base",
"grpc_client_channel",
],
)
grpc_cc_library(
name = "grpc_lb_policy_pick_first",
srcs = [
@ -1083,6 +1066,7 @@ grpc_cc_library(
deps = [
"grpc_base",
"grpc_client_channel",
"grpc_lb_subchannel_list",
],
)
@ -1095,6 +1079,7 @@ grpc_cc_library(
deps = [
"grpc_base",
"grpc_client_channel",
"grpc_lb_subchannel_list",
],
)
@ -1261,6 +1246,7 @@ grpc_cc_library(
"src/core/ext/transport/chttp2/transport/bin_encoder.h",
"src/core/ext/transport/chttp2/transport/chttp2_transport.h",
"src/core/ext/transport/chttp2/transport/frame.h",
"src/core/ext/transport/chttp2/transport/flow_control.h",
"src/core/ext/transport/chttp2/transport/frame_data.h",
"src/core/ext/transport/chttp2/transport/frame_goaway.h",
"src/core/ext/transport/chttp2/transport/frame_ping.h",

CMakeLists.txt (318)

@ -384,10 +384,6 @@ add_dependencies(buildtests_c bad_server_response_test)
add_dependencies(buildtests_c bin_decoder_test)
add_dependencies(buildtests_c bin_encoder_test)
add_dependencies(buildtests_c byte_stream_test)
add_dependencies(buildtests_c census_context_test)
add_dependencies(buildtests_c census_intrusive_hash_map_test)
add_dependencies(buildtests_c census_resource_test)
add_dependencies(buildtests_c census_trace_context_test)
add_dependencies(buildtests_c channel_create_test)
add_dependencies(buildtests_c chttp2_hpack_encoder_test)
add_dependencies(buildtests_c chttp2_stream_map_test)
@ -459,6 +455,7 @@ add_dependencies(buildtests_c grpc_json_token_test)
endif()
add_dependencies(buildtests_c grpc_jwt_verifier_test)
add_dependencies(buildtests_c grpc_security_connector_test)
add_dependencies(buildtests_c grpc_ssl_credentials_test)
if(_gRPC_PLATFORM_LINUX)
add_dependencies(buildtests_c handshake_client)
endif()
@ -491,7 +488,6 @@ add_dependencies(buildtests_c memory_profile_test)
endif()
add_dependencies(buildtests_c message_compress_test)
add_dependencies(buildtests_c minimal_stack_is_minimal_test)
add_dependencies(buildtests_c mlog_test)
add_dependencies(buildtests_c multiple_server_queues_test)
add_dependencies(buildtests_c murmur_hash_test)
add_dependencies(buildtests_c no_server_test)
@ -559,7 +555,6 @@ add_dependencies(buildtests_c connection_prefix_bad_client_test)
add_dependencies(buildtests_c head_of_line_blocking_bad_client_test)
add_dependencies(buildtests_c headers_bad_client_test)
add_dependencies(buildtests_c initial_settings_frame_bad_client_test)
add_dependencies(buildtests_c large_metadata_bad_client_test)
add_dependencies(buildtests_c server_registered_method_bad_client_test)
add_dependencies(buildtests_c simple_request_bad_client_test)
add_dependencies(buildtests_c unknown_frame_bad_client_test)
@ -709,6 +704,9 @@ add_dependencies(buildtests_cxx http2_client)
endif()
add_dependencies(buildtests_cxx hybrid_end2end_test)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx inproc_sync_unary_ping_pong_test)
endif()
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx interop_client)
endif()
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
@ -1150,6 +1148,7 @@ add_library(grpc
src/core/tsi/transport_security_adapter.cc
src/core/ext/transport/chttp2/server/chttp2_server.cc
src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc
src/core/ext/filters/client_channel/backup_poller.cc
src/core/ext/filters/client_channel/channel_connectivity.cc
src/core/ext/filters/client_channel/client_channel.cc
src/core/ext/filters/client_channel/client_channel_factory.cc
@ -1189,6 +1188,7 @@ add_library(grpc
third_party/nanopb/pb_encode.c
src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
@ -1198,21 +1198,7 @@ add_library(grpc
src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc
src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc
src/core/ext/census/base_resources.cc
src/core/ext/census/context.cc
src/core/ext/census/gen/census.pb.c
src/core/ext/census/gen/trace_context.pb.c
src/core/ext/census/grpc_context.cc
src/core/ext/census/grpc_filter.cc
src/core/ext/census/grpc_plugin.cc
src/core/ext/census/initialize.cc
src/core/ext/census/intrusive_hash_map.cc
src/core/ext/census/mlog.cc
src/core/ext/census/operation.cc
src/core/ext/census/placeholders.cc
src/core/ext/census/resource.cc
src/core/ext/census/trace_context.cc
src/core/ext/census/tracing.cc
src/core/ext/filters/max_age/max_age_filter.cc
src/core/ext/filters/message_size/message_size_filter.cc
src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc
@ -1474,6 +1460,7 @@ add_library(grpc_cronet
src/core/ext/filters/http/http_filters_plugin.cc
src/core/ext/filters/http/message_compress/message_compress_filter.cc
src/core/ext/filters/http/server/http_server_filter.cc
src/core/ext/filters/client_channel/backup_poller.cc
src/core/ext/filters/client_channel/channel_connectivity.cc
src/core/ext/filters/client_channel/client_channel.cc
src/core/ext/filters/client_channel/client_channel_factory.cc
@ -1764,6 +1751,7 @@ add_library(grpc_test_util
src/core/lib/transport/transport.cc
src/core/lib/transport/transport_op_string.cc
src/core/lib/debug/trace.cc
src/core/ext/filters/client_channel/backup_poller.cc
src/core/ext/filters/client_channel/channel_connectivity.cc
src/core/ext/filters/client_channel/client_channel.cc
src/core/ext/filters/client_channel/client_channel_factory.cc
@ -2029,6 +2017,7 @@ add_library(grpc_test_util_unsecure
src/core/lib/transport/transport.cc
src/core/lib/transport/transport_op_string.cc
src/core/lib/debug/trace.cc
src/core/ext/filters/client_channel/backup_poller.cc
src/core/ext/filters/client_channel/channel_connectivity.cc
src/core/ext/filters/client_channel/client_channel.cc
src/core/ext/filters/client_channel/client_channel_factory.cc
@ -2313,6 +2302,7 @@ add_library(grpc_unsecure
src/core/ext/transport/chttp2/client/insecure/channel_create.cc
src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc
src/core/ext/transport/chttp2/client/chttp2_connector.cc
src/core/ext/filters/client_channel/backup_poller.cc
src/core/ext/filters/client_channel/channel_connectivity.cc
src/core/ext/filters/client_channel/client_channel.cc
src/core/ext/filters/client_channel/client_channel_factory.cc
@ -2355,22 +2345,9 @@ add_library(grpc_unsecure
third_party/nanopb/pb_decode.c
third_party/nanopb/pb_encode.c
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
src/core/ext/census/base_resources.cc
src/core/ext/census/context.cc
src/core/ext/census/gen/census.pb.c
src/core/ext/census/gen/trace_context.pb.c
src/core/ext/census/grpc_context.cc
src/core/ext/census/grpc_filter.cc
src/core/ext/census/grpc_plugin.cc
src/core/ext/census/initialize.cc
src/core/ext/census/intrusive_hash_map.cc
src/core/ext/census/mlog.cc
src/core/ext/census/operation.cc
src/core/ext/census/placeholders.cc
src/core/ext/census/resource.cc
src/core/ext/census/trace_context.cc
src/core/ext/census/tracing.cc
src/core/ext/filters/max_age/max_age_filter.cc
src/core/ext/filters/message_size/message_size_filter.cc
src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc
@ -3048,6 +3025,7 @@ add_library(grpc++_cronet
src/core/ext/filters/http/http_filters_plugin.cc
src/core/ext/filters/http/message_compress/message_compress_filter.cc
src/core/ext/filters/http/server/http_server_filter.cc
src/core/ext/filters/client_channel/backup_poller.cc
src/core/ext/filters/client_channel/channel_connectivity.cc
src/core/ext/filters/client_channel/client_channel.cc
src/core/ext/filters/client_channel/client_channel_factory.cc
@ -3072,21 +3050,7 @@ add_library(grpc++_cronet
src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc
src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc
src/core/ext/transport/chttp2/server/chttp2_server.cc
src/core/ext/census/base_resources.cc
src/core/ext/census/context.cc
src/core/ext/census/gen/census.pb.c
src/core/ext/census/gen/trace_context.pb.c
src/core/ext/census/grpc_context.cc
src/core/ext/census/grpc_filter.cc
src/core/ext/census/grpc_plugin.cc
src/core/ext/census/initialize.cc
src/core/ext/census/intrusive_hash_map.cc
src/core/ext/census/mlog.cc
src/core/ext/census/operation.cc
src/core/ext/census/placeholders.cc
src/core/ext/census/resource.cc
src/core/ext/census/trace_context.cc
src/core/ext/census/tracing.cc
third_party/nanopb/pb_common.c
third_party/nanopb/pb_decode.c
third_party/nanopb/pb_encode.c
@ -5388,126 +5352,6 @@ target_link_libraries(byte_stream_test
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(census_context_test
test/core/census/context_test.c
)
target_include_directories(census_context_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CARES_INCLUDE_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/third_party/abseil-cpp
)
target_link_libraries(census_context_test
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_test_util
grpc
gpr_test_util
gpr
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(census_intrusive_hash_map_test
test/core/census/intrusive_hash_map_test.c
)
target_include_directories(census_intrusive_hash_map_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CARES_INCLUDE_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/third_party/abseil-cpp
)
target_link_libraries(census_intrusive_hash_map_test
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_test_util
grpc
gpr_test_util
gpr
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(census_resource_test
test/core/census/resource_test.c
)
target_include_directories(census_resource_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CARES_INCLUDE_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/third_party/abseil-cpp
)
target_link_libraries(census_resource_test
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_test_util
grpc
gpr_test_util
gpr
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(census_trace_context_test
test/core/census/trace_context_test.c
)
target_include_directories(census_trace_context_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CARES_INCLUDE_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/third_party/abseil-cpp
)
target_link_libraries(census_trace_context_test
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_test_util
grpc
gpr_test_util
gpr
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(channel_create_test
test/core/surface/channel_create_test.c
)
@ -7288,6 +7132,36 @@ target_link_libraries(grpc_security_connector_test
gpr
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(grpc_ssl_credentials_test
test/core/security/ssl_credentials_test.c
)
target_include_directories(grpc_ssl_credentials_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CARES_INCLUDE_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/third_party/abseil-cpp
)
target_link_libraries(grpc_ssl_credentials_test
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_test_util
grpc
gpr_test_util
gpr
)
endif (gRPC_BUILD_TESTS)
add_executable(grpc_verify_jwt
@ -7996,36 +7870,6 @@ target_link_libraries(minimal_stack_is_minimal_test
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(mlog_test
test/core/census/mlog_test.c
)
target_include_directories(mlog_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CARES_INCLUDE_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/third_party/abseil-cpp
)
target_link_libraries(mlog_test
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_test_util
grpc
gpr_test_util
gpr
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(multiple_server_queues_test
test/core/end2end/multiple_server_queues_test.c
)
@ -11532,6 +11376,52 @@ endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_executable(inproc_sync_unary_ping_pong_test
test/cpp/qps/inproc_sync_unary_ping_pong_test.cc
third_party/googletest/googletest/src/gtest-all.cc
third_party/googletest/googlemock/src/gmock-all.cc
)
target_include_directories(inproc_sync_unary_ping_pong_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CARES_INCLUDE_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/third_party/abseil-cpp
PRIVATE third_party/googletest/googletest/include
PRIVATE third_party/googletest/googletest
PRIVATE third_party/googletest/googlemock/include
PRIVATE third_party/googletest/googlemock
PRIVATE ${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(inproc_sync_unary_ping_pong_test
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
qps
grpc++_core_stats
grpc++_test_util
grpc_test_util
grpc++
grpc
gpr_test_util
gpr
grpc++_test_config
${_gRPC_GFLAGS_LIBRARIES}
)
endif()
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_executable(interop_client
third_party/googletest/googletest/src/gtest-all.cc
third_party/googletest/googlemock/src/gmock-all.cc
@ -13249,38 +13139,6 @@ target_link_libraries(initial_settings_frame_bad_client_test
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(large_metadata_bad_client_test
test/core/bad_client/tests/large_metadata.c
)
target_include_directories(large_metadata_bad_client_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CARES_INCLUDE_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/third_party/abseil-cpp
)
target_link_libraries(large_metadata_bad_client_test
${_gRPC_SSL_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
bad_client_test
grpc_test_util_unsecure
grpc_unsecure
gpr_test_util
gpr
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(server_registered_method_bad_client_test
test/core/bad_client/tests/server_registered_method.c
)
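
The CMake changes above add a grpc_ssl_credentials_test binary built from test/core/security/ssl_credentials_test.c. For orientation only, here is a minimal, hedged C sketch of the public SSL-credentials API that target exercises; it uses calls from grpc/grpc_security.h as they existed at this point in the tree, and the "example.com:443" address is a placeholder rather than anything taken from this diff.

```c
/* Minimal sketch of the SSL credentials API covered by the new
 * grpc_ssl_credentials_test target. Uses only public grpc_security.h calls;
 * the target address is a placeholder. */
#include <stddef.h>
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>

int main(void) {
  grpc_init();
  /* NULL root certs => use the default trust store; no client key/cert pair. */
  grpc_channel_credentials* creds = grpc_ssl_credentials_create(NULL, NULL, NULL);
  grpc_channel* channel =
      grpc_secure_channel_create(creds, "example.com:443", NULL, NULL);
  grpc_channel_credentials_release(creds);
  /* ... use the channel ... */
  grpc_channel_destroy(channel);
  grpc_shutdown();
  return 0;
}
```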

Makefile (339)

@ -955,10 +955,6 @@ bad_server_response_test: $(BINDIR)/$(CONFIG)/bad_server_response_test
bin_decoder_test: $(BINDIR)/$(CONFIG)/bin_decoder_test
bin_encoder_test: $(BINDIR)/$(CONFIG)/bin_encoder_test
byte_stream_test: $(BINDIR)/$(CONFIG)/byte_stream_test
census_context_test: $(BINDIR)/$(CONFIG)/census_context_test
census_intrusive_hash_map_test: $(BINDIR)/$(CONFIG)/census_intrusive_hash_map_test
census_resource_test: $(BINDIR)/$(CONFIG)/census_resource_test
census_trace_context_test: $(BINDIR)/$(CONFIG)/census_trace_context_test
channel_create_test: $(BINDIR)/$(CONFIG)/channel_create_test
check_epollexclusive: $(BINDIR)/$(CONFIG)/check_epollexclusive
chttp2_hpack_encoder_test: $(BINDIR)/$(CONFIG)/chttp2_hpack_encoder_test
@ -1019,6 +1015,7 @@ grpc_json_token_test: $(BINDIR)/$(CONFIG)/grpc_json_token_test
grpc_jwt_verifier_test: $(BINDIR)/$(CONFIG)/grpc_jwt_verifier_test
grpc_print_google_default_creds_token: $(BINDIR)/$(CONFIG)/grpc_print_google_default_creds_token
grpc_security_connector_test: $(BINDIR)/$(CONFIG)/grpc_security_connector_test
grpc_ssl_credentials_test: $(BINDIR)/$(CONFIG)/grpc_ssl_credentials_test
grpc_verify_jwt: $(BINDIR)/$(CONFIG)/grpc_verify_jwt
handshake_client: $(BINDIR)/$(CONFIG)/handshake_client
handshake_server: $(BINDIR)/$(CONFIG)/handshake_server
@ -1047,7 +1044,6 @@ memory_profile_server: $(BINDIR)/$(CONFIG)/memory_profile_server
memory_profile_test: $(BINDIR)/$(CONFIG)/memory_profile_test
message_compress_test: $(BINDIR)/$(CONFIG)/message_compress_test
minimal_stack_is_minimal_test: $(BINDIR)/$(CONFIG)/minimal_stack_is_minimal_test
mlog_test: $(BINDIR)/$(CONFIG)/mlog_test
multiple_server_queues_test: $(BINDIR)/$(CONFIG)/multiple_server_queues_test
murmur_hash_test: $(BINDIR)/$(CONFIG)/murmur_hash_test
nanopb_fuzzer_response_test: $(BINDIR)/$(CONFIG)/nanopb_fuzzer_response_test
@ -1148,6 +1144,7 @@ h2_ssl_cert_test: $(BINDIR)/$(CONFIG)/h2_ssl_cert_test
health_service_end2end_test: $(BINDIR)/$(CONFIG)/health_service_end2end_test
http2_client: $(BINDIR)/$(CONFIG)/http2_client
hybrid_end2end_test: $(BINDIR)/$(CONFIG)/hybrid_end2end_test
inproc_sync_unary_ping_pong_test: $(BINDIR)/$(CONFIG)/inproc_sync_unary_ping_pong_test
interop_client: $(BINDIR)/$(CONFIG)/interop_client
interop_server: $(BINDIR)/$(CONFIG)/interop_server
interop_test: $(BINDIR)/$(CONFIG)/interop_test
@ -1226,7 +1223,6 @@ connection_prefix_bad_client_test: $(BINDIR)/$(CONFIG)/connection_prefix_bad_cli
head_of_line_blocking_bad_client_test: $(BINDIR)/$(CONFIG)/head_of_line_blocking_bad_client_test
headers_bad_client_test: $(BINDIR)/$(CONFIG)/headers_bad_client_test
initial_settings_frame_bad_client_test: $(BINDIR)/$(CONFIG)/initial_settings_frame_bad_client_test
large_metadata_bad_client_test: $(BINDIR)/$(CONFIG)/large_metadata_bad_client_test
server_registered_method_bad_client_test: $(BINDIR)/$(CONFIG)/server_registered_method_bad_client_test
simple_request_bad_client_test: $(BINDIR)/$(CONFIG)/simple_request_bad_client_test
unknown_frame_bad_client_test: $(BINDIR)/$(CONFIG)/unknown_frame_bad_client_test
@ -1356,10 +1352,6 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/bin_decoder_test \
$(BINDIR)/$(CONFIG)/bin_encoder_test \
$(BINDIR)/$(CONFIG)/byte_stream_test \
$(BINDIR)/$(CONFIG)/census_context_test \
$(BINDIR)/$(CONFIG)/census_intrusive_hash_map_test \
$(BINDIR)/$(CONFIG)/census_resource_test \
$(BINDIR)/$(CONFIG)/census_trace_context_test \
$(BINDIR)/$(CONFIG)/channel_create_test \
$(BINDIR)/$(CONFIG)/chttp2_hpack_encoder_test \
$(BINDIR)/$(CONFIG)/chttp2_stream_map_test \
@ -1413,6 +1405,7 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/grpc_json_token_test \
$(BINDIR)/$(CONFIG)/grpc_jwt_verifier_test \
$(BINDIR)/$(CONFIG)/grpc_security_connector_test \
$(BINDIR)/$(CONFIG)/grpc_ssl_credentials_test \
$(BINDIR)/$(CONFIG)/handshake_client \
$(BINDIR)/$(CONFIG)/handshake_server \
$(BINDIR)/$(CONFIG)/hpack_parser_test \
@ -1435,7 +1428,6 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/memory_profile_test \
$(BINDIR)/$(CONFIG)/message_compress_test \
$(BINDIR)/$(CONFIG)/minimal_stack_is_minimal_test \
$(BINDIR)/$(CONFIG)/mlog_test \
$(BINDIR)/$(CONFIG)/multiple_server_queues_test \
$(BINDIR)/$(CONFIG)/murmur_hash_test \
$(BINDIR)/$(CONFIG)/no_server_test \
@ -1483,7 +1475,6 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/head_of_line_blocking_bad_client_test \
$(BINDIR)/$(CONFIG)/headers_bad_client_test \
$(BINDIR)/$(CONFIG)/initial_settings_frame_bad_client_test \
$(BINDIR)/$(CONFIG)/large_metadata_bad_client_test \
$(BINDIR)/$(CONFIG)/server_registered_method_bad_client_test \
$(BINDIR)/$(CONFIG)/simple_request_bad_client_test \
$(BINDIR)/$(CONFIG)/unknown_frame_bad_client_test \
@ -1586,6 +1577,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/health_service_end2end_test \
$(BINDIR)/$(CONFIG)/http2_client \
$(BINDIR)/$(CONFIG)/hybrid_end2end_test \
$(BINDIR)/$(CONFIG)/inproc_sync_unary_ping_pong_test \
$(BINDIR)/$(CONFIG)/interop_client \
$(BINDIR)/$(CONFIG)/interop_server \
$(BINDIR)/$(CONFIG)/interop_test \
@ -1710,6 +1702,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/health_service_end2end_test \
$(BINDIR)/$(CONFIG)/http2_client \
$(BINDIR)/$(CONFIG)/hybrid_end2end_test \
$(BINDIR)/$(CONFIG)/inproc_sync_unary_ping_pong_test \
$(BINDIR)/$(CONFIG)/interop_client \
$(BINDIR)/$(CONFIG)/interop_server \
$(BINDIR)/$(CONFIG)/interop_test \
@ -1777,14 +1770,6 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/bin_encoder_test || ( echo test bin_encoder_test failed ; exit 1 )
$(E) "[RUN] Testing byte_stream_test"
$(Q) $(BINDIR)/$(CONFIG)/byte_stream_test || ( echo test byte_stream_test failed ; exit 1 )
$(E) "[RUN] Testing census_context_test"
$(Q) $(BINDIR)/$(CONFIG)/census_context_test || ( echo test census_context_test failed ; exit 1 )
$(E) "[RUN] Testing census_intrusive_hash_map_test"
$(Q) $(BINDIR)/$(CONFIG)/census_intrusive_hash_map_test || ( echo test census_intrusive_hash_map_test failed ; exit 1 )
$(E) "[RUN] Testing census_resource_test"
$(Q) $(BINDIR)/$(CONFIG)/census_resource_test || ( echo test census_resource_test failed ; exit 1 )
$(E) "[RUN] Testing census_trace_context_test"
$(Q) $(BINDIR)/$(CONFIG)/census_trace_context_test || ( echo test census_trace_context_test failed ; exit 1 )
$(E) "[RUN] Testing channel_create_test"
$(Q) $(BINDIR)/$(CONFIG)/channel_create_test || ( echo test channel_create_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_hpack_encoder_test"
@ -1885,6 +1870,8 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/grpc_jwt_verifier_test || ( echo test grpc_jwt_verifier_test failed ; exit 1 )
$(E) "[RUN] Testing grpc_security_connector_test"
$(Q) $(BINDIR)/$(CONFIG)/grpc_security_connector_test || ( echo test grpc_security_connector_test failed ; exit 1 )
$(E) "[RUN] Testing grpc_ssl_credentials_test"
$(Q) $(BINDIR)/$(CONFIG)/grpc_ssl_credentials_test || ( echo test grpc_ssl_credentials_test failed ; exit 1 )
$(E) "[RUN] Testing handshake_client"
$(Q) $(BINDIR)/$(CONFIG)/handshake_client || ( echo test handshake_client failed ; exit 1 )
$(E) "[RUN] Testing handshake_server"
@ -2015,8 +2002,6 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/headers_bad_client_test || ( echo test headers_bad_client_test failed ; exit 1 )
$(E) "[RUN] Testing initial_settings_frame_bad_client_test"
$(Q) $(BINDIR)/$(CONFIG)/initial_settings_frame_bad_client_test || ( echo test initial_settings_frame_bad_client_test failed ; exit 1 )
$(E) "[RUN] Testing large_metadata_bad_client_test"
$(Q) $(BINDIR)/$(CONFIG)/large_metadata_bad_client_test || ( echo test large_metadata_bad_client_test failed ; exit 1 )
$(E) "[RUN] Testing server_registered_method_bad_client_test"
$(Q) $(BINDIR)/$(CONFIG)/server_registered_method_bad_client_test || ( echo test server_registered_method_bad_client_test failed ; exit 1 )
$(E) "[RUN] Testing simple_request_bad_client_test"
@ -2030,8 +2015,6 @@ test_c: buildtests_c
flaky_test_c: buildtests_c
$(E) "[RUN] Testing mlog_test"
$(Q) $(BINDIR)/$(CONFIG)/mlog_test || ( echo test mlog_test failed ; exit 1 )
test_cxx: buildtests_cxx
@ -2117,6 +2100,8 @@ test_cxx: buildtests_cxx
$(Q) $(BINDIR)/$(CONFIG)/h2_ssl_cert_test || ( echo test h2_ssl_cert_test failed ; exit 1 )
$(E) "[RUN] Testing health_service_end2end_test"
$(Q) $(BINDIR)/$(CONFIG)/health_service_end2end_test || ( echo test health_service_end2end_test failed ; exit 1 )
$(E) "[RUN] Testing inproc_sync_unary_ping_pong_test"
$(Q) $(BINDIR)/$(CONFIG)/inproc_sync_unary_ping_pong_test || ( echo test inproc_sync_unary_ping_pong_test failed ; exit 1 )
$(E) "[RUN] Testing interop_test"
$(Q) $(BINDIR)/$(CONFIG)/interop_test || ( echo test interop_test failed ; exit 1 )
$(E) "[RUN] Testing memory_test"
@ -3148,6 +3133,7 @@ LIBGRPC_SRC = \
src/core/tsi/transport_security_adapter.cc \
src/core/ext/transport/chttp2/server/chttp2_server.cc \
src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc \
src/core/ext/filters/client_channel/backup_poller.cc \
src/core/ext/filters/client_channel/channel_connectivity.cc \
src/core/ext/filters/client_channel/client_channel.cc \
src/core/ext/filters/client_channel/client_channel_factory.cc \
@ -3187,6 +3173,7 @@ LIBGRPC_SRC = \
third_party/nanopb/pb_encode.c \
src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc \
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \
src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc \
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \
src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc \
src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc \
@ -3196,21 +3183,7 @@ LIBGRPC_SRC = \
src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc \
src/core/ext/filters/load_reporting/server_load_reporting_filter.cc \
src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc \
src/core/ext/census/base_resources.cc \
src/core/ext/census/context.cc \
src/core/ext/census/gen/census.pb.c \
src/core/ext/census/gen/trace_context.pb.c \
src/core/ext/census/grpc_context.cc \
src/core/ext/census/grpc_filter.cc \
src/core/ext/census/grpc_plugin.cc \
src/core/ext/census/initialize.cc \
src/core/ext/census/intrusive_hash_map.cc \
src/core/ext/census/mlog.cc \
src/core/ext/census/operation.cc \
src/core/ext/census/placeholders.cc \
src/core/ext/census/resource.cc \
src/core/ext/census/trace_context.cc \
src/core/ext/census/tracing.cc \
src/core/ext/filters/max_age/max_age_filter.cc \
src/core/ext/filters/message_size/message_size_filter.cc \
src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc \
@ -3471,6 +3444,7 @@ LIBGRPC_CRONET_SRC = \
src/core/ext/filters/http/http_filters_plugin.cc \
src/core/ext/filters/http/message_compress/message_compress_filter.cc \
src/core/ext/filters/http/server/http_server_filter.cc \
src/core/ext/filters/client_channel/backup_poller.cc \
src/core/ext/filters/client_channel/channel_connectivity.cc \
src/core/ext/filters/client_channel/client_channel.cc \
src/core/ext/filters/client_channel/client_channel_factory.cc \
@ -3759,6 +3733,7 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/lib/transport/transport.cc \
src/core/lib/transport/transport_op_string.cc \
src/core/lib/debug/trace.cc \
src/core/ext/filters/client_channel/backup_poller.cc \
src/core/ext/filters/client_channel/channel_connectivity.cc \
src/core/ext/filters/client_channel/client_channel.cc \
src/core/ext/filters/client_channel/client_channel_factory.cc \
@ -4014,6 +3989,7 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \
src/core/lib/transport/transport.cc \
src/core/lib/transport/transport_op_string.cc \
src/core/lib/debug/trace.cc \
src/core/ext/filters/client_channel/backup_poller.cc \
src/core/ext/filters/client_channel/channel_connectivity.cc \
src/core/ext/filters/client_channel/client_channel.cc \
src/core/ext/filters/client_channel/client_channel_factory.cc \
@ -4275,6 +4251,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/ext/transport/chttp2/client/insecure/channel_create.cc \
src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc \
src/core/ext/transport/chttp2/client/chttp2_connector.cc \
src/core/ext/filters/client_channel/backup_poller.cc \
src/core/ext/filters/client_channel/channel_connectivity.cc \
src/core/ext/filters/client_channel/client_channel.cc \
src/core/ext/filters/client_channel/client_channel_factory.cc \
@ -4317,22 +4294,9 @@ LIBGRPC_UNSECURE_SRC = \
third_party/nanopb/pb_decode.c \
third_party/nanopb/pb_encode.c \
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \
src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc \
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \
src/core/ext/census/base_resources.cc \
src/core/ext/census/context.cc \
src/core/ext/census/gen/census.pb.c \
src/core/ext/census/gen/trace_context.pb.c \
src/core/ext/census/grpc_context.cc \
src/core/ext/census/grpc_filter.cc \
src/core/ext/census/grpc_plugin.cc \
src/core/ext/census/initialize.cc \
src/core/ext/census/intrusive_hash_map.cc \
src/core/ext/census/mlog.cc \
src/core/ext/census/operation.cc \
src/core/ext/census/placeholders.cc \
src/core/ext/census/resource.cc \
src/core/ext/census/trace_context.cc \
src/core/ext/census/tracing.cc \
src/core/ext/filters/max_age/max_age_filter.cc \
src/core/ext/filters/message_size/message_size_filter.cc \
src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc \
@ -4988,6 +4952,7 @@ LIBGRPC++_CRONET_SRC = \
src/core/ext/filters/http/http_filters_plugin.cc \
src/core/ext/filters/http/message_compress/message_compress_filter.cc \
src/core/ext/filters/http/server/http_server_filter.cc \
src/core/ext/filters/client_channel/backup_poller.cc \
src/core/ext/filters/client_channel/channel_connectivity.cc \
src/core/ext/filters/client_channel/client_channel.cc \
src/core/ext/filters/client_channel/client_channel_factory.cc \
@ -5012,21 +4977,7 @@ LIBGRPC++_CRONET_SRC = \
src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc \
src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc \
src/core/ext/transport/chttp2/server/chttp2_server.cc \
src/core/ext/census/base_resources.cc \
src/core/ext/census/context.cc \
src/core/ext/census/gen/census.pb.c \
src/core/ext/census/gen/trace_context.pb.c \
src/core/ext/census/grpc_context.cc \
src/core/ext/census/grpc_filter.cc \
src/core/ext/census/grpc_plugin.cc \
src/core/ext/census/initialize.cc \
src/core/ext/census/intrusive_hash_map.cc \
src/core/ext/census/mlog.cc \
src/core/ext/census/operation.cc \
src/core/ext/census/placeholders.cc \
src/core/ext/census/resource.cc \
src/core/ext/census/trace_context.cc \
src/core/ext/census/tracing.cc \
third_party/nanopb/pb_common.c \
third_party/nanopb/pb_decode.c \
third_party/nanopb/pb_encode.c \
@ -8293,6 +8244,7 @@ LIBBENCHMARK_SRC = \
third_party/benchmark/src/commandlineflags.cc \
third_party/benchmark/src/complexity.cc \
third_party/benchmark/src/console_reporter.cc \
third_party/benchmark/src/counter.cc \
third_party/benchmark/src/csv_reporter.cc \
third_party/benchmark/src/json_reporter.cc \
third_party/benchmark/src/reporter.cc \
@ -9067,134 +9019,6 @@ endif
endif
CENSUS_CONTEXT_TEST_SRC = \
test/core/census/context_test.c \
CENSUS_CONTEXT_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(CENSUS_CONTEXT_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/census_context_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/census_context_test: $(CENSUS_CONTEXT_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(CENSUS_CONTEXT_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/census_context_test
endif
$(OBJDIR)/$(CONFIG)/test/core/census/context_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_census_context_test: $(CENSUS_CONTEXT_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(CENSUS_CONTEXT_TEST_OBJS:.o=.dep)
endif
endif
CENSUS_INTRUSIVE_HASH_MAP_TEST_SRC = \
test/core/census/intrusive_hash_map_test.c \
CENSUS_INTRUSIVE_HASH_MAP_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(CENSUS_INTRUSIVE_HASH_MAP_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/census_intrusive_hash_map_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/census_intrusive_hash_map_test: $(CENSUS_INTRUSIVE_HASH_MAP_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(CENSUS_INTRUSIVE_HASH_MAP_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/census_intrusive_hash_map_test
endif
$(OBJDIR)/$(CONFIG)/test/core/census/intrusive_hash_map_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_census_intrusive_hash_map_test: $(CENSUS_INTRUSIVE_HASH_MAP_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(CENSUS_INTRUSIVE_HASH_MAP_TEST_OBJS:.o=.dep)
endif
endif
CENSUS_RESOURCE_TEST_SRC = \
test/core/census/resource_test.c \
CENSUS_RESOURCE_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(CENSUS_RESOURCE_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/census_resource_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/census_resource_test: $(CENSUS_RESOURCE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(CENSUS_RESOURCE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/census_resource_test
endif
$(OBJDIR)/$(CONFIG)/test/core/census/resource_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_census_resource_test: $(CENSUS_RESOURCE_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(CENSUS_RESOURCE_TEST_OBJS:.o=.dep)
endif
endif
CENSUS_TRACE_CONTEXT_TEST_SRC = \
test/core/census/trace_context_test.c \
CENSUS_TRACE_CONTEXT_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(CENSUS_TRACE_CONTEXT_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/census_trace_context_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/census_trace_context_test: $(CENSUS_TRACE_CONTEXT_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(CENSUS_TRACE_CONTEXT_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/census_trace_context_test
endif
$(OBJDIR)/$(CONFIG)/test/core/census/trace_context_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_census_trace_context_test: $(CENSUS_TRACE_CONTEXT_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(CENSUS_TRACE_CONTEXT_TEST_OBJS:.o=.dep)
endif
endif
CHANNEL_CREATE_TEST_SRC = \
test/core/surface/channel_create_test.c \
@ -11118,6 +10942,38 @@ endif
endif
GRPC_SSL_CREDENTIALS_TEST_SRC = \
test/core/security/ssl_credentials_test.c \
GRPC_SSL_CREDENTIALS_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GRPC_SSL_CREDENTIALS_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/grpc_ssl_credentials_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/grpc_ssl_credentials_test: $(GRPC_SSL_CREDENTIALS_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(GRPC_SSL_CREDENTIALS_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/grpc_ssl_credentials_test
endif
$(OBJDIR)/$(CONFIG)/test/core/security/ssl_credentials_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_grpc_ssl_credentials_test: $(GRPC_SSL_CREDENTIALS_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(GRPC_SSL_CREDENTIALS_TEST_OBJS:.o=.dep)
endif
endif
GRPC_VERIFY_JWT_SRC = \
test/core/security/verify_jwt.c \
@ -12014,38 +11870,6 @@ endif
endif
MLOG_TEST_SRC = \
test/core/census/mlog_test.c \
MLOG_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(MLOG_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/mlog_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/mlog_test: $(MLOG_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(MLOG_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/mlog_test
endif
$(OBJDIR)/$(CONFIG)/test/core/census/mlog_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_mlog_test: $(MLOG_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(MLOG_TEST_OBJS:.o=.dep)
endif
endif
MULTIPLE_SERVER_QUEUES_TEST_SRC = \
test/core/end2end/multiple_server_queues_test.c \
@ -15802,6 +15626,49 @@ endif
endif
INPROC_SYNC_UNARY_PING_PONG_TEST_SRC = \
test/cpp/qps/inproc_sync_unary_ping_pong_test.cc \
INPROC_SYNC_UNARY_PING_PONG_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(INPROC_SYNC_UNARY_PING_PONG_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/inproc_sync_unary_ping_pong_test: openssl_dep_error
else
ifeq ($(NO_PROTOBUF),true)
# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
$(BINDIR)/$(CONFIG)/inproc_sync_unary_ping_pong_test: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/inproc_sync_unary_ping_pong_test: $(PROTOBUF_DEP) $(INPROC_SYNC_UNARY_PING_PONG_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(INPROC_SYNC_UNARY_PING_PONG_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/inproc_sync_unary_ping_pong_test
endif
endif
$(OBJDIR)/$(CONFIG)/test/cpp/qps/inproc_sync_unary_ping_pong_test.o: $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
deps_inproc_sync_unary_ping_pong_test: $(INPROC_SYNC_UNARY_PING_PONG_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(INPROC_SYNC_UNARY_PING_PONG_TEST_OBJS:.o=.dep)
endif
endif
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
@ -18542,26 +18409,6 @@ ifneq ($(NO_DEPS),true)
endif
LARGE_METADATA_BAD_CLIENT_TEST_SRC = \
test/core/bad_client/tests/large_metadata.c \
LARGE_METADATA_BAD_CLIENT_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LARGE_METADATA_BAD_CLIENT_TEST_SRC))))
$(BINDIR)/$(CONFIG)/large_metadata_bad_client_test: $(LARGE_METADATA_BAD_CLIENT_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libbad_client_test.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(LARGE_METADATA_BAD_CLIENT_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libbad_client_test.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) -o $(BINDIR)/$(CONFIG)/large_metadata_bad_client_test
$(OBJDIR)/$(CONFIG)/test/core/bad_client/tests/large_metadata.o: $(LIBDIR)/$(CONFIG)/libbad_client_test.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_large_metadata_bad_client_test: $(LARGE_METADATA_BAD_CLIENT_TEST_OBJS:.o=.dep)
ifneq ($(NO_DEPS),true)
-include $(LARGE_METADATA_BAD_CLIENT_TEST_OBJS:.o=.dep)
endif
SERVER_REGISTERED_METHOD_BAD_CLIENT_TEST_SRC = \
test/core/bad_client/tests/server_registered_method.c \
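
The Makefile (like the other build files in this commit) now compiles src/core/ext/filters/client_channel/backup_poller.cc into libgrpc, and doc/environment_variables.md is updated in the same change. Assuming the documented knob is the GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS environment variable (an assumption based on that doc change; the entry itself is not shown in this excerpt), a minimal C sketch of tuning it from an application would look like:

```c
/* Hedged sketch: tuning the client-channel backup poller added in this commit.
 * The variable name GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS is assumed
 * from the doc/environment_variables.md change; it is not shown in this diff. */
#include <stdlib.h>
#include <grpc/grpc.h>

int main(void) {
  /* Set before grpc_init() so the poller can pick the value up when the
   * first client channel starts polling. */
  setenv("GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS", "1000", 1 /* overwrite */);
  grpc_init();
  /* ... create channels and issue RPCs as usual ... */
  grpc_shutdown();
  return 0;
}
```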

binding.gyp (16)

@ -853,6 +853,7 @@
'src/core/tsi/transport_security_adapter.cc',
'src/core/ext/transport/chttp2/server/chttp2_server.cc',
'src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
@ -892,6 +893,7 @@
'third_party/nanopb/pb_encode.c',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
@ -901,21 +903,7 @@
'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc',
'src/core/ext/filters/load_reporting/server_load_reporting_filter.cc',
'src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc',
'src/core/ext/census/base_resources.cc',
'src/core/ext/census/context.cc',
'src/core/ext/census/gen/census.pb.c',
'src/core/ext/census/gen/trace_context.pb.c',
'src/core/ext/census/grpc_context.cc',
'src/core/ext/census/grpc_filter.cc',
'src/core/ext/census/grpc_plugin.cc',
'src/core/ext/census/initialize.cc',
'src/core/ext/census/intrusive_hash_map.cc',
'src/core/ext/census/mlog.cc',
'src/core/ext/census/operation.cc',
'src/core/ext/census/placeholders.cc',
'src/core/ext/census/resource.cc',
'src/core/ext/census/trace_context.cc',
'src/core/ext/census/tracing.cc',
'src/core/ext/filters/max_age/max_age_filter.cc',
'src/core/ext/filters/message_size/message_size_filter.cc',
'src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc',

build.yaml (146)

@ -19,42 +19,8 @@ filegroups:
- name: census
public_headers:
- include/grpc/census.h
headers:
- src/core/ext/census/aggregation.h
- src/core/ext/census/base_resources.h
- src/core/ext/census/census_interface.h
- src/core/ext/census/census_rpc_stats.h
- src/core/ext/census/gen/census.pb.h
- src/core/ext/census/gen/trace_context.pb.h
- src/core/ext/census/grpc_filter.h
- src/core/ext/census/intrusive_hash_map.h
- src/core/ext/census/intrusive_hash_map_internal.h
- src/core/ext/census/mlog.h
- src/core/ext/census/resource.h
- src/core/ext/census/rpc_metric_id.h
- src/core/ext/census/trace_context.h
- src/core/ext/census/trace_label.h
- src/core/ext/census/trace_propagation.h
- src/core/ext/census/trace_status.h
- src/core/ext/census/trace_string.h
- src/core/ext/census/tracing.h
src:
- src/core/ext/census/base_resources.cc
- src/core/ext/census/context.cc
- src/core/ext/census/gen/census.pb.c
- src/core/ext/census/gen/trace_context.pb.c
src:
- src/core/ext/census/grpc_context.cc
- src/core/ext/census/grpc_filter.cc
- src/core/ext/census/grpc_plugin.cc
- src/core/ext/census/initialize.cc
- src/core/ext/census/intrusive_hash_map.cc
- src/core/ext/census/mlog.cc
- src/core/ext/census/operation.cc
- src/core/ext/census/placeholders.cc
- src/core/ext/census/resource.cc
- src/core/ext/census/trace_context.cc
- src/core/ext/census/tracing.cc
plugin: census_grpc_plugin
uses:
- grpc_base
- nanopb
@ -463,6 +429,7 @@ filegroups:
- grpc_trace_headers
- name: grpc_client_channel
headers:
- src/core/ext/filters/client_channel/backup_poller.h
- src/core/ext/filters/client_channel/client_channel.h
- src/core/ext/filters/client_channel/client_channel_factory.h
- src/core/ext/filters/client_channel/connector.h
@ -482,6 +449,7 @@ filegroups:
- src/core/ext/filters/client_channel/subchannel_index.h
- src/core/ext/filters/client_channel/uri_parser.h
src:
- src/core/ext/filters/client_channel/backup_poller.cc
- src/core/ext/filters/client_channel/channel_connectivity.cc
- src/core/ext/filters/client_channel/client_channel.cc
- src/core/ext/filters/client_channel/client_channel_factory.cc
@ -590,6 +558,7 @@ filegroups:
uses:
- grpc_base
- grpc_client_channel
- grpc_lb_subchannel_list
- name: grpc_lb_policy_round_robin
src:
- src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@ -597,6 +566,15 @@ filegroups:
uses:
- grpc_base
- grpc_client_channel
- grpc_lb_subchannel_list
- name: grpc_lb_subchannel_list
headers:
- src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
src:
- src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
uses:
- grpc_base
- grpc_client_channel
- name: grpc_max_age_filter
headers:
- src/core/ext/filters/max_age/max_age_filter.h
@ -778,6 +756,7 @@ filegroups:
- src/core/ext/transport/chttp2/transport/bin_decoder.h
- src/core/ext/transport/chttp2/transport/bin_encoder.h
- src/core/ext/transport/chttp2/transport/chttp2_transport.h
- src/core/ext/transport/chttp2/transport/flow_control.h
- src/core/ext/transport/chttp2/transport/frame.h
- src/core/ext/transport/chttp2/transport/frame_data.h
- src/core/ext/transport/chttp2/transport/frame_goaway.h
@ -1833,50 +1812,6 @@ targets:
- gpr_test_util
- gpr
uses_polling: false
- name: census_context_test
build: test
language: c
src:
- test/core/census/context_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
uses_polling: false
- name: census_intrusive_hash_map_test
build: test
language: c
src:
- test/core/census/intrusive_hash_map_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
uses_polling: false
- name: census_resource_test
build: test
language: c
src:
- test/core/census/resource_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
uses_polling: false
- name: census_trace_context_test
build: test
language: c
src:
- test/core/census/trace_context_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
uses_polling: false
- name: channel_create_test
build: test
language: c
@ -2539,6 +2474,16 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: grpc_ssl_credentials_test
build: test
language: c
src:
- test/core/security/ssl_credentials_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: grpc_verify_jwt
build: tool
language: c
@ -2875,18 +2820,6 @@ targets:
- gpr_test_util
- gpr
uses_polling: false
- name: mlog_test
flaky: true
build: test
language: c
src:
- test/core/census/mlog_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
uses_polling: false
- name: multiple_server_queues_test
build: test
language: c
@ -3761,6 +3694,8 @@ targets:
- grpc++_test_config
benchmark: true
defaults: benchmark
exclude_configs:
- tsan
excluded_poll_engines:
- poll
- poll-cv
@ -3916,9 +3851,6 @@ targets:
- grpc
- gpr_test_util
- gpr
excluded_poll_engines:
- poll
- poll-cv
- name: codegen_test_full
gtest: true
build: test
@ -4207,9 +4139,6 @@ targets:
- grpc
- gpr_test_util
- gpr
excluded_poll_engines:
- poll
- poll-cv
- name: grpclb_test
gtest: false
build: test
@ -4224,9 +4153,6 @@ targets:
- grpc
- gpr_test_util
- gpr
excluded_poll_engines:
- poll
- poll-cv
- name: h2_ssl_cert_test
gtest: true
build: test
@ -4285,6 +4211,25 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: inproc_sync_unary_ping_pong_test
build: test
language: c++
src:
- test/cpp/qps/inproc_sync_unary_ping_pong_test.cc
deps:
- qps
- grpc++_core_stats
- grpc++_test_util
- grpc_test_util
- grpc++
- grpc
- gpr_test_util
- gpr
- grpc++_test_config
platforms:
- mac
- linux
- posix
- name: interop_client
build: test
run: false
@ -4778,7 +4723,6 @@ targets:
- grpc_unsecure
- gpr_test_util
- gpr
timeout_seconds: 1200
- name: transport_pid_controller_test
build: test
language: c++

@ -278,6 +278,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/tsi/transport_security_adapter.cc \
src/core/ext/transport/chttp2/server/chttp2_server.cc \
src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc \
src/core/ext/filters/client_channel/backup_poller.cc \
src/core/ext/filters/client_channel/channel_connectivity.cc \
src/core/ext/filters/client_channel/client_channel.cc \
src/core/ext/filters/client_channel/client_channel_factory.cc \
@ -317,6 +318,7 @@ if test "$PHP_GRPC" != "no"; then
third_party/nanopb/pb_encode.c \
src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc \
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \
src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc \
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \
src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc \
src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc \
@ -326,21 +328,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc \
src/core/ext/filters/load_reporting/server_load_reporting_filter.cc \
src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc \
src/core/ext/census/base_resources.cc \
src/core/ext/census/context.cc \
src/core/ext/census/gen/census.pb.c \
src/core/ext/census/gen/trace_context.pb.c \
src/core/ext/census/grpc_context.cc \
src/core/ext/census/grpc_filter.cc \
src/core/ext/census/grpc_plugin.cc \
src/core/ext/census/initialize.cc \
src/core/ext/census/intrusive_hash_map.cc \
src/core/ext/census/mlog.cc \
src/core/ext/census/operation.cc \
src/core/ext/census/placeholders.cc \
src/core/ext/census/resource.cc \
src/core/ext/census/trace_context.cc \
src/core/ext/census/tracing.cc \
src/core/ext/filters/max_age/max_age_filter.cc \
src/core/ext/filters/message_size/message_size_filter.cc \
src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc \
@ -658,8 +646,8 @@ if test "$PHP_GRPC" != "no"; then
PHP_ADD_BUILD_DIR($ext_builddir/src/boringssl)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/census)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/census/gen)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/grpclb)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/pick_first)

@ -255,6 +255,7 @@ if (PHP_GRPC != "no") {
"src\\core\\tsi\\transport_security_adapter.cc " +
"src\\core\\ext\\transport\\chttp2\\server\\chttp2_server.cc " +
"src\\core\\ext\\transport\\chttp2\\client\\secure\\secure_channel_create.cc " +
"src\\core\\ext\\filters\\client_channel\\backup_poller.cc " +
"src\\core\\ext\\filters\\client_channel\\channel_connectivity.cc " +
"src\\core\\ext\\filters\\client_channel\\client_channel.cc " +
"src\\core\\ext\\filters\\client_channel\\client_channel_factory.cc " +
@ -294,6 +295,7 @@ if (PHP_GRPC != "no") {
"third_party\\nanopb\\pb_encode.c " +
"src\\core\\ext\\filters\\client_channel\\resolver\\fake\\fake_resolver.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\pick_first\\pick_first.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\subchannel_list.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\round_robin\\round_robin.cc " +
"src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\dns_resolver_ares.cc " +
"src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_ev_driver_posix.cc " +
@ -303,21 +305,7 @@ if (PHP_GRPC != "no") {
"src\\core\\ext\\filters\\client_channel\\resolver\\sockaddr\\sockaddr_resolver.cc " +
"src\\core\\ext\\filters\\load_reporting\\server_load_reporting_filter.cc " +
"src\\core\\ext\\filters\\load_reporting\\server_load_reporting_plugin.cc " +
"src\\core\\ext\\census\\base_resources.cc " +
"src\\core\\ext\\census\\context.cc " +
"src\\core\\ext\\census\\gen\\census.pb.c " +
"src\\core\\ext\\census\\gen\\trace_context.pb.c " +
"src\\core\\ext\\census\\grpc_context.cc " +
"src\\core\\ext\\census\\grpc_filter.cc " +
"src\\core\\ext\\census\\grpc_plugin.cc " +
"src\\core\\ext\\census\\initialize.cc " +
"src\\core\\ext\\census\\intrusive_hash_map.cc " +
"src\\core\\ext\\census\\mlog.cc " +
"src\\core\\ext\\census\\operation.cc " +
"src\\core\\ext\\census\\placeholders.cc " +
"src\\core\\ext\\census\\resource.cc " +
"src\\core\\ext\\census\\trace_context.cc " +
"src\\core\\ext\\census\\tracing.cc " +
"src\\core\\ext\\filters\\max_age\\max_age_filter.cc " +
"src\\core\\ext\\filters\\message_size\\message_size_filter.cc " +
"src\\core\\ext\\filters\\workarounds\\workaround_cronet_compression_filter.cc " +
@ -661,7 +649,6 @@ if (PHP_GRPC != "no") {
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\census");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\census\\gen");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\lb_policy");

@ -49,6 +49,7 @@ some configuration as environment variables that can be set.
- connectivity_state - traces connectivity state changes to channels
- channel_stack_builder - traces information about channel stacks being built
- executor - traces grpc's internal thread pool ('the executor')
- glb - traces the grpclb load balancer
- http - traces state in the http2 transport engine
- http2_stream_state - traces all http2 stream state mutations.
- http1 - traces HTTP/1.x operations performed by gRPC
@ -56,11 +57,12 @@ some configuration as environment variables that can be set.
- flowctl - traces http2 flow control
- op_failure - traces error information when failure is pushed onto a
completion queue
- round_robin - traces the round_robin load balancing policy
- pick_first - traces the pick first load balancing policy
- plugin_credentials - traces plugin credentials
- pollable_refcount - traces reference counting of 'pollable' objects (only
in DEBUG)
- resource_quota - traces resource quota object internals
- glb - traces the grpclb load balancer
- round_robin - traces the round_robin load balancing policy
- queue_pluck
- queue_timeout
- server_channel - lightweight trace of significant server channel events
@ -118,10 +120,10 @@ some configuration as environment variables that can be set.
perform name resolution
- ares - a DNS resolver based around the c-ares library
* GRPC_DISABLE_CHANNEL_CONNECTIVITY_WATCHER
The channel connectivity watcher uses one extra thread to check the channel
state every 500 ms on the client side. It can help reconnect disconnected
client channels (mostly due to idleness), so that the next RPC on this channel
won't fail. Set to 1 to turn off this watcher and save a thread. Please note
that this is a temporary work-around; it will be removed in the future once we
have support for automatically reestablishing failed connections.
* GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS
Default: 5000
Declares the interval, in milliseconds, between consecutive backup polls on
client channels. These polls are run in the timer thread so that gRPC can
process connection failures while there is no active polling thread. They help
reconnect disconnected client channels (mostly due to idleness), so that the
next RPC on such a channel won't fail. Set to 0 to turn off the backup polls.
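
Editor's note: the knobs documented above are ordinary process environment variables, so they can be exported from the shell (e.g. GRPC_TRACE=glb,pick_first GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS=0 ./server) or set programmatically. A minimal C sketch, under the assumption that the values are read while grpc_init() runs and therefore must be set before it; tracer names are taken from the list above:

#include <stdlib.h>
#include <grpc/grpc.h>

int main(void) {
  /* Sketch only: enable the grpclb / pick_first / round_robin tracers
     documented above and widen the backup poll interval to 10 seconds.
     Assumption: gRPC reads these variables during initialization, so they
     must be set before grpc_init(). POSIX setenv(); use _putenv_s on
     Windows, or simply export the variables from the shell. */
  setenv("GRPC_TRACE", "glb,pick_first,round_robin", 1 /* overwrite */);
  setenv("GRPC_VERBOSITY", "DEBUG", 1);
  setenv("GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS", "10000", 1);
  grpc_init();
  /* ... create channels / servers and issue RPCs ... */
  grpc_shutdown();
  return 0;
}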

@ -89,6 +89,7 @@ Pod::Spec.new do |s|
s.default_subspecs = 'Interface', 'Implementation'
s.compiler_flags = '-DGRPC_ARES=0'
s.libraries = 'c++'
# Like many other C libraries, gRPC-Core has its public headers under `include/<libname>/` and its
# sources and private headers in other directories outside `include/`. Cocoapods' linter doesn't
@ -249,6 +250,7 @@ Pod::Spec.new do |s|
'src/core/ext/transport/chttp2/transport/bin_decoder.h',
'src/core/ext/transport/chttp2/transport/bin_encoder.h',
'src/core/ext/transport/chttp2/transport/chttp2_transport.h',
'src/core/ext/transport/chttp2/transport/flow_control.h',
'src/core/ext/transport/chttp2/transport/frame.h',
'src/core/ext/transport/chttp2/transport/frame_data.h',
'src/core/ext/transport/chttp2/transport/frame_goaway.h',
@ -297,6 +299,7 @@ Pod::Spec.new do |s|
'src/core/tsi/transport_security_adapter.h',
'src/core/tsi/transport_security_interface.h',
'src/core/ext/transport/chttp2/server/chttp2_server.h',
'src/core/ext/filters/client_channel/backup_poller.h',
'src/core/ext/filters/client_channel/client_channel.h',
'src/core/ext/filters/client_channel/client_channel_factory.h',
'src/core/ext/filters/client_channel/connector.h',
@ -446,28 +449,11 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h',
'src/core/ext/filters/client_channel/lb_policy/subchannel_list.h',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h',
'src/core/ext/filters/load_reporting/server_load_reporting_filter.h',
'src/core/ext/filters/load_reporting/server_load_reporting_plugin.h',
'src/core/ext/census/aggregation.h',
'src/core/ext/census/base_resources.h',
'src/core/ext/census/census_interface.h',
'src/core/ext/census/census_rpc_stats.h',
'src/core/ext/census/gen/census.pb.h',
'src/core/ext/census/gen/trace_context.pb.h',
'src/core/ext/census/grpc_filter.h',
'src/core/ext/census/intrusive_hash_map.h',
'src/core/ext/census/intrusive_hash_map_internal.h',
'src/core/ext/census/mlog.h',
'src/core/ext/census/resource.h',
'src/core/ext/census/rpc_metric_id.h',
'src/core/ext/census/trace_context.h',
'src/core/ext/census/trace_label.h',
'src/core/ext/census/trace_propagation.h',
'src/core/ext/census/trace_status.h',
'src/core/ext/census/trace_string.h',
'src/core/ext/census/tracing.h',
'src/core/ext/filters/max_age/max_age_filter.h',
'src/core/ext/filters/message_size/message_size_filter.h',
'src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h',
@ -666,6 +652,7 @@ Pod::Spec.new do |s|
'src/core/tsi/transport_security_adapter.cc',
'src/core/ext/transport/chttp2/server/chttp2_server.cc',
'src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
@ -702,6 +689,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
@ -711,21 +699,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc',
'src/core/ext/filters/load_reporting/server_load_reporting_filter.cc',
'src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc',
'src/core/ext/census/base_resources.cc',
'src/core/ext/census/context.cc',
'src/core/ext/census/gen/census.pb.c',
'src/core/ext/census/gen/trace_context.pb.c',
'src/core/ext/census/grpc_context.cc',
'src/core/ext/census/grpc_filter.cc',
'src/core/ext/census/grpc_plugin.cc',
'src/core/ext/census/initialize.cc',
'src/core/ext/census/intrusive_hash_map.cc',
'src/core/ext/census/mlog.cc',
'src/core/ext/census/operation.cc',
'src/core/ext/census/placeholders.cc',
'src/core/ext/census/resource.cc',
'src/core/ext/census/trace_context.cc',
'src/core/ext/census/tracing.cc',
'src/core/ext/filters/max_age/max_age_filter.cc',
'src/core/ext/filters/message_size/message_size_filter.cc',
'src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc',
@ -751,6 +725,7 @@ Pod::Spec.new do |s|
'src/core/ext/transport/chttp2/transport/bin_decoder.h',
'src/core/ext/transport/chttp2/transport/bin_encoder.h',
'src/core/ext/transport/chttp2/transport/chttp2_transport.h',
'src/core/ext/transport/chttp2/transport/flow_control.h',
'src/core/ext/transport/chttp2/transport/frame.h',
'src/core/ext/transport/chttp2/transport/frame_data.h',
'src/core/ext/transport/chttp2/transport/frame_goaway.h',
@ -799,6 +774,7 @@ Pod::Spec.new do |s|
'src/core/tsi/transport_security_adapter.h',
'src/core/tsi/transport_security_interface.h',
'src/core/ext/transport/chttp2/server/chttp2_server.h',
'src/core/ext/filters/client_channel/backup_poller.h',
'src/core/ext/filters/client_channel/client_channel.h',
'src/core/ext/filters/client_channel/client_channel_factory.h',
'src/core/ext/filters/client_channel/connector.h',
@ -948,28 +924,11 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h',
'src/core/ext/filters/client_channel/lb_policy/subchannel_list.h',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h',
'src/core/ext/filters/load_reporting/server_load_reporting_filter.h',
'src/core/ext/filters/load_reporting/server_load_reporting_plugin.h',
'src/core/ext/census/aggregation.h',
'src/core/ext/census/base_resources.h',
'src/core/ext/census/census_interface.h',
'src/core/ext/census/census_rpc_stats.h',
'src/core/ext/census/gen/census.pb.h',
'src/core/ext/census/gen/trace_context.pb.h',
'src/core/ext/census/grpc_filter.h',
'src/core/ext/census/intrusive_hash_map.h',
'src/core/ext/census/intrusive_hash_map_internal.h',
'src/core/ext/census/mlog.h',
'src/core/ext/census/resource.h',
'src/core/ext/census/rpc_metric_id.h',
'src/core/ext/census/trace_context.h',
'src/core/ext/census/trace_label.h',
'src/core/ext/census/trace_propagation.h',
'src/core/ext/census/trace_status.h',
'src/core/ext/census/trace_string.h',
'src/core/ext/census/tracing.h',
'src/core/ext/filters/max_age/max_age_filter.h',
'src/core/ext/filters/message_size/message_size_filter.h',
'src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h',
@ -1005,7 +964,7 @@ Pod::Spec.new do |s|
'test/core/end2end/tests/*.{c,h}',
'test/core/end2end/fixtures/*.h',
'test/core/end2end/data/*.{c,h}',
'test/core/util/debugger_macros.{c,h}',
'test/core/util/debugger_macros.{cc,h}',
'test/core/util/test_config.{c,h}',
'test/core/util/port.h',
'test/core/util/port.c',

@ -1,32 +1,4 @@
EXPORTS
census_initialize
census_shutdown
census_supported
census_enabled
census_context_create
census_context_destroy
census_context_get_status
census_context_initialize_iterator
census_context_next_tag
census_context_get_tag
census_context_encode
census_context_decode
census_trace_mask
census_set_trace_mask
census_start_rpc_op_timestamp
census_start_client_rpc_op
census_set_rpc_client_peer
census_start_server_rpc_op
census_start_op
census_end_op
census_trace_print
census_trace_scan_start
census_get_trace_record
census_trace_scan_end
census_define_resource
census_delete_resource
census_resource_id
census_record_values
grpc_compression_algorithm_parse
grpc_compression_algorithm_name
grpc_stream_compression_algorithm_name
@ -54,6 +26,8 @@ EXPORTS
grpc_completion_queue_pluck
grpc_completion_queue_shutdown
grpc_completion_queue_destroy
grpc_completion_queue_thread_local_cache_init
grpc_completion_queue_thread_local_cache_flush
grpc_alarm_create
grpc_alarm_set
grpc_alarm_cancel
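
Editor's note: the two new completion-queue exports above (grpc_completion_queue_thread_local_cache_init / _flush) back a per-thread fast path for pluck-style queues. A rough C sketch of how a wrapper might use them, assuming (from the export names and the prototypes in include/grpc/grpc.h) that _init arms a cache on the calling thread and _flush returns nonzero when a completion was delivered through it; the helper name run_batch_and_wait is made up for illustration:

#include <grpc/grpc.h>
#include <grpc/support/time.h>

/* Hypothetical helper: start a batch and wait for its tag, preferring the
   thread-local cache over a full pluck when the completion is already
   available on this thread. */
static void run_batch_and_wait(grpc_call* call, const grpc_op* ops,
                               size_t nops, grpc_completion_queue* cq,
                               void* tag) {
  grpc_completion_queue_thread_local_cache_init(cq); /* arm cache first */
  if (grpc_call_start_batch(call, ops, nops, tag, NULL) != GRPC_CALL_OK) {
    return; /* real code would surface the error */
  }
  void* cached_tag = NULL;
  int ok = 0;
  if (grpc_completion_queue_thread_local_cache_flush(cq, &cached_tag, &ok) &&
      cached_tag == tag) {
    return; /* completion arrived via the thread-local cache */
  }
  /* Fall back to a regular pluck with an unbounded deadline. */
  grpc_completion_queue_pluck(cq, tag, gpr_inf_future(GPR_CLOCK_REALTIME),
                              NULL);
}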
@ -130,8 +104,14 @@ EXPORTS
grpc_metadata_credentials_create_from_plugin
grpc_secure_channel_create
grpc_server_credentials_release
grpc_ssl_server_certificate_config_create
grpc_ssl_server_certificate_config_destroy
grpc_ssl_server_credentials_create
grpc_ssl_server_credentials_create_ex
grpc_ssl_server_credentials_create_options_using_config
grpc_ssl_server_credentials_create_options_using_config_fetcher
grpc_ssl_server_credentials_options_destroy
grpc_ssl_server_credentials_create_with_options
grpc_server_add_secure_http2_port
grpc_call_set_credentials
grpc_server_credentials_set_auth_metadata_processor
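
Editor's note: the grpc_ssl_server_certificate_config_* and *_create_with_options exports added above come from the new server-side SSL credential API declared in include/grpc/grpc_security.h. A hedged C sketch of the non-fetcher path, assuming ownership of the config and options passes to the created credentials (the exact contract is documented in grpc_security.h); the PEM arguments are placeholders:

#include <grpc/grpc.h>
#include <grpc/grpc_security.h>

/* root_pem / key_pem / cert_pem are placeholder PEM strings; real code
   would load them from disk or a secret store. */
static grpc_server_credentials* make_server_ssl_creds(const char* root_pem,
                                                      const char* key_pem,
                                                      const char* cert_pem) {
  grpc_ssl_pem_key_cert_pair pair = {key_pem, cert_pem};
  grpc_ssl_server_certificate_config* config =
      grpc_ssl_server_certificate_config_create(root_pem, &pair, 1);
  grpc_ssl_server_credentials_options* options =
      grpc_ssl_server_credentials_create_options_using_config(
          GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE, config);
  /* Assumption: the options (and, through them, the config) are consumed
     by the credentials object created here. */
  return grpc_ssl_server_credentials_create_with_options(options);
}

The *_using_config_fetcher variant takes a callback instead of a fixed config, which is intended to let a server pick up rotated certificates without restarting.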

@ -28,7 +28,7 @@ Gem::Specification.new do |s|
s.platform = Gem::Platform::RUBY
s.add_dependency 'google-protobuf', '~> 3.1'
s.add_dependency 'googleauth', '~> 0.5.1'
s.add_dependency 'googleauth', '>= 0.5.1', '< 0.7'
s.add_dependency 'googleapis-common-protos-types', '~> 1.0.0'
s.add_development_dependency 'bundler', '~> 1.9'
@ -181,6 +181,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/transport/chttp2/transport/bin_decoder.h )
s.files += %w( src/core/ext/transport/chttp2/transport/bin_encoder.h )
s.files += %w( src/core/ext/transport/chttp2/transport/chttp2_transport.h )
s.files += %w( src/core/ext/transport/chttp2/transport/flow_control.h )
s.files += %w( src/core/ext/transport/chttp2/transport/frame.h )
s.files += %w( src/core/ext/transport/chttp2/transport/frame_data.h )
s.files += %w( src/core/ext/transport/chttp2/transport/frame_goaway.h )
@ -229,6 +230,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/tsi/transport_security_adapter.h )
s.files += %w( src/core/tsi/transport_security_interface.h )
s.files += %w( src/core/ext/transport/chttp2/server/chttp2_server.h )
s.files += %w( src/core/ext/filters/client_channel/backup_poller.h )
s.files += %w( src/core/ext/filters/client_channel/client_channel.h )
s.files += %w( src/core/ext/filters/client_channel/client_channel_factory.h )
s.files += %w( src/core/ext/filters/client_channel/connector.h )
@ -382,28 +384,11 @@ Gem::Specification.new do |s|
s.files += %w( third_party/nanopb/pb_decode.h )
s.files += %w( third_party/nanopb/pb_encode.h )
s.files += %w( src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/subchannel_list.h )
s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h )
s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h )
s.files += %w( src/core/ext/filters/load_reporting/server_load_reporting_filter.h )
s.files += %w( src/core/ext/filters/load_reporting/server_load_reporting_plugin.h )
s.files += %w( src/core/ext/census/aggregation.h )
s.files += %w( src/core/ext/census/base_resources.h )
s.files += %w( src/core/ext/census/census_interface.h )
s.files += %w( src/core/ext/census/census_rpc_stats.h )
s.files += %w( src/core/ext/census/gen/census.pb.h )
s.files += %w( src/core/ext/census/gen/trace_context.pb.h )
s.files += %w( src/core/ext/census/grpc_filter.h )
s.files += %w( src/core/ext/census/intrusive_hash_map.h )
s.files += %w( src/core/ext/census/intrusive_hash_map_internal.h )
s.files += %w( src/core/ext/census/mlog.h )
s.files += %w( src/core/ext/census/resource.h )
s.files += %w( src/core/ext/census/rpc_metric_id.h )
s.files += %w( src/core/ext/census/trace_context.h )
s.files += %w( src/core/ext/census/trace_label.h )
s.files += %w( src/core/ext/census/trace_propagation.h )
s.files += %w( src/core/ext/census/trace_status.h )
s.files += %w( src/core/ext/census/trace_string.h )
s.files += %w( src/core/ext/census/tracing.h )
s.files += %w( src/core/ext/filters/max_age/max_age_filter.h )
s.files += %w( src/core/ext/filters/message_size/message_size_filter.h )
s.files += %w( src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h )
@ -602,6 +587,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/tsi/transport_security_adapter.cc )
s.files += %w( src/core/ext/transport/chttp2/server/chttp2_server.cc )
s.files += %w( src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc )
s.files += %w( src/core/ext/filters/client_channel/backup_poller.cc )
s.files += %w( src/core/ext/filters/client_channel/channel_connectivity.cc )
s.files += %w( src/core/ext/filters/client_channel/client_channel.cc )
s.files += %w( src/core/ext/filters/client_channel/client_channel_factory.cc )
@ -641,6 +627,7 @@ Gem::Specification.new do |s|
s.files += %w( third_party/nanopb/pb_encode.c )
s.files += %w( src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc )
s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc )
s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc )
@ -650,21 +637,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc )
s.files += %w( src/core/ext/filters/load_reporting/server_load_reporting_filter.cc )
s.files += %w( src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc )
s.files += %w( src/core/ext/census/base_resources.cc )
s.files += %w( src/core/ext/census/context.cc )
s.files += %w( src/core/ext/census/gen/census.pb.c )
s.files += %w( src/core/ext/census/gen/trace_context.pb.c )
s.files += %w( src/core/ext/census/grpc_context.cc )
s.files += %w( src/core/ext/census/grpc_filter.cc )
s.files += %w( src/core/ext/census/grpc_plugin.cc )
s.files += %w( src/core/ext/census/initialize.cc )
s.files += %w( src/core/ext/census/intrusive_hash_map.cc )
s.files += %w( src/core/ext/census/mlog.cc )
s.files += %w( src/core/ext/census/operation.cc )
s.files += %w( src/core/ext/census/placeholders.cc )
s.files += %w( src/core/ext/census/resource.cc )
s.files += %w( src/core/ext/census/trace_context.cc )
s.files += %w( src/core/ext/census/tracing.cc )
s.files += %w( src/core/ext/filters/max_age/max_age_filter.cc )
s.files += %w( src/core/ext/filters/message_size/message_size_filter.cc )
s.files += %w( src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc )

@ -419,6 +419,7 @@
'src/core/tsi/transport_security_adapter.cc',
'src/core/ext/transport/chttp2/server/chttp2_server.cc',
'src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
@ -458,6 +459,7 @@
'third_party/nanopb/pb_encode.c',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
@ -467,21 +469,7 @@
'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc',
'src/core/ext/filters/load_reporting/server_load_reporting_filter.cc',
'src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc',
'src/core/ext/census/base_resources.cc',
'src/core/ext/census/context.cc',
'src/core/ext/census/gen/census.pb.c',
'src/core/ext/census/gen/trace_context.pb.c',
'src/core/ext/census/grpc_context.cc',
'src/core/ext/census/grpc_filter.cc',
'src/core/ext/census/grpc_plugin.cc',
'src/core/ext/census/initialize.cc',
'src/core/ext/census/intrusive_hash_map.cc',
'src/core/ext/census/mlog.cc',
'src/core/ext/census/operation.cc',
'src/core/ext/census/placeholders.cc',
'src/core/ext/census/resource.cc',
'src/core/ext/census/trace_context.cc',
'src/core/ext/census/tracing.cc',
'src/core/ext/filters/max_age/max_age_filter.cc',
'src/core/ext/filters/message_size/message_size_filter.cc',
'src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc',
@ -661,6 +649,7 @@
'src/core/lib/transport/transport.cc',
'src/core/lib/transport/transport_op_string.cc',
'src/core/lib/debug/trace.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
@ -868,6 +857,7 @@
'src/core/lib/transport/transport.cc',
'src/core/lib/transport/transport_op_string.cc',
'src/core/lib/debug/trace.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
@ -1093,6 +1083,7 @@
'src/core/ext/transport/chttp2/client/insecure/channel_create.cc',
'src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc',
'src/core/ext/transport/chttp2/client/chttp2_connector.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
@ -1135,22 +1126,9 @@
'third_party/nanopb/pb_decode.c',
'third_party/nanopb/pb_encode.c',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/census/base_resources.cc',
'src/core/ext/census/context.cc',
'src/core/ext/census/gen/census.pb.c',
'src/core/ext/census/gen/trace_context.pb.c',
'src/core/ext/census/grpc_context.cc',
'src/core/ext/census/grpc_filter.cc',
'src/core/ext/census/grpc_plugin.cc',
'src/core/ext/census/initialize.cc',
'src/core/ext/census/intrusive_hash_map.cc',
'src/core/ext/census/mlog.cc',
'src/core/ext/census/operation.cc',
'src/core/ext/census/placeholders.cc',
'src/core/ext/census/resource.cc',
'src/core/ext/census/trace_context.cc',
'src/core/ext/census/tracing.cc',
'src/core/ext/filters/max_age/max_age_filter.cc',
'src/core/ext/filters/message_size/message_size_filter.cc',
'src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc',
@ -2316,6 +2294,7 @@
'third_party/benchmark/src/commandlineflags.cc',
'third_party/benchmark/src/complexity.cc',
'third_party/benchmark/src/console_reporter.cc',
'third_party/benchmark/src/counter.cc',
'third_party/benchmark/src/csv_reporter.cc',
'third_party/benchmark/src/json_reporter.cc',
'third_party/benchmark/src/reporter.cc',

@ -92,7 +92,7 @@ class Alarm : private GrpcLibraryCodegen {
}
private:
class AlarmEntry : public CompletionQueueTag {
class AlarmEntry : public internal::CompletionQueueTag {
public:
AlarmEntry(void* tag) : tag_(tag) {}
void Set(void* tag) { tag_ = tag; }

@ -32,7 +32,7 @@ struct grpc_channel;
namespace grpc {
/// Channels represent a connection to an endpoint. Created by \a CreateChannel.
class Channel final : public ChannelInterface,
public CallHook,
public internal::CallHook,
public std::enable_shared_from_this<Channel>,
private GrpcLibraryCodegen {
public:
@ -51,18 +51,16 @@ class Channel final : public ChannelInterface,
private:
template <class InputMessage, class OutputMessage>
friend Status BlockingUnaryCall(ChannelInterface* channel,
const RpcMethod& method,
ClientContext* context,
const InputMessage& request,
OutputMessage* result);
friend class internal::BlockingUnaryCallImpl;
friend std::shared_ptr<Channel> CreateChannelInternal(
const grpc::string& host, grpc_channel* c_channel);
Channel(const grpc::string& host, grpc_channel* c_channel);
Call CreateCall(const RpcMethod& method, ClientContext* context,
CompletionQueue* cq) override;
void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) override;
internal::Call CreateCall(const internal::RpcMethod& method,
ClientContext* context,
CompletionQueue* cq) override;
void PerformOpsOnCall(internal::CallOpSetInterface* ops,
internal::Call* call) override;
void* RegisterMethod(const char* method) override;
void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,

@ -30,6 +30,7 @@ namespace grpc {
class CompletionQueue;
namespace internal {
/// Common interface for all client side asynchronous streaming.
class ClientAsyncStreamingInterface {
public:
@ -151,15 +152,16 @@ class AsyncWriterInterface {
}
};
} // namespace internal
template <class R>
class ClientAsyncReaderInterface : public ClientAsyncStreamingInterface,
public AsyncReaderInterface<R> {};
class ClientAsyncReaderInterface
: public internal::ClientAsyncStreamingInterface,
public internal::AsyncReaderInterface<R> {};
/// Async client-side API for doing server-streaming RPCs,
/// where the incoming message stream coming from the server has
/// messages of type \a R.
namespace internal {
template <class R>
class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
class ClientAsyncReaderFactory {
public:
/// Create a stream object.
/// Write the first request out if \a start is set.
@ -169,16 +171,25 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
/// Note that \a context will be used to fill in custom initial metadata
/// used to send to the server when starting the call.
template <class W>
static ClientAsyncReader* Create(ChannelInterface* channel,
CompletionQueue* cq, const RpcMethod& method,
ClientContext* context, const W& request,
bool start, void* tag) {
Call call = channel->CreateCall(method, context, cq);
static ClientAsyncReader<R>* Create(ChannelInterface* channel,
CompletionQueue* cq,
const ::grpc::internal::RpcMethod& method,
ClientContext* context, const W& request,
bool start, void* tag) {
::grpc::internal::Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncReader)))
ClientAsyncReader(call, context, request, start, tag);
call.call(), sizeof(ClientAsyncReader<R>)))
ClientAsyncReader<R>(call, context, request, start, tag);
}
};
} // namespace internal
/// Async client-side API for doing server-streaming RPCs,
/// where the incoming message stream coming from the server has
/// messages of type \a R.
template <class R>
class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
public:
// always allocated against a call arena, no memory free required
static void operator delete(void* ptr, std::size_t size) {
assert(size == sizeof(ClientAsyncReader));
@ -233,9 +244,10 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
}
private:
friend class internal::ClientAsyncReaderFactory<R>;
template <class W>
ClientAsyncReader(Call call, ClientContext* context, const W& request,
bool start, void* tag)
ClientAsyncReader(::grpc::internal::Call call, ClientContext* context,
const W& request, bool start, void* tag)
: context_(context), call_(call), started_(start) {
// TODO(ctiller): don't assert
GPR_CODEGEN_ASSERT(init_ops_.SendMessage(request).ok());
@ -255,19 +267,27 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
}
ClientContext* context_;
Call call_;
::grpc::internal::Call call_;
bool started_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, CallOpClientSendClose>
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpClientSendClose>
init_ops_;
CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> read_ops_;
CallOpSet<CallOpRecvInitialMetadata, CallOpClientRecvStatus> finish_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
meta_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpRecvMessage<R>>
read_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpClientRecvStatus>
finish_ops_;
};
/// Common interface for client side asynchronous writing.
template <class W>
class ClientAsyncWriterInterface : public ClientAsyncStreamingInterface,
public AsyncWriterInterface<W> {
class ClientAsyncWriterInterface
: public internal::ClientAsyncStreamingInterface,
public internal::AsyncWriterInterface<W> {
public:
/// Signal the client is done with the writes (half-close the client stream).
/// Thread-safe with respect to \a AsyncReaderInterface::Read
@ -276,11 +296,9 @@ class ClientAsyncWriterInterface : public ClientAsyncStreamingInterface,
virtual void WritesDone(void* tag) = 0;
};
/// Async API on the client side for doing client-streaming RPCs,
/// where the outgoing message stream going to the server contains
/// messages of type \a W.
namespace internal {
template <class W>
class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
class ClientAsyncWriterFactory {
public:
/// Create a stream object.
/// Start the RPC if \a start is set
@ -294,16 +312,25 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
/// message from the server upon a successful call to the \a Finish
/// method of this instance.
template <class R>
static ClientAsyncWriter* Create(ChannelInterface* channel,
CompletionQueue* cq, const RpcMethod& method,
ClientContext* context, R* response,
bool start, void* tag) {
Call call = channel->CreateCall(method, context, cq);
static ClientAsyncWriter<W>* Create(ChannelInterface* channel,
CompletionQueue* cq,
const ::grpc::internal::RpcMethod& method,
ClientContext* context, R* response,
bool start, void* tag) {
::grpc::internal::Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncWriter)))
ClientAsyncWriter(call, context, response, start, tag);
call.call(), sizeof(ClientAsyncWriter<W>)))
ClientAsyncWriter<W>(call, context, response, start, tag);
}
};
} // namespace internal
/// Async API on the client side for doing client-streaming RPCs,
/// where the outgoing message stream going to the server contains
/// messages of type \a W.
template <class W>
class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
public:
// always allocated against a call arena, no memory free required
static void operator delete(void* ptr, std::size_t size) {
assert(size == sizeof(ClientAsyncWriter));
@ -376,9 +403,10 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
}
private:
friend class internal::ClientAsyncWriterFactory<W>;
template <class R>
ClientAsyncWriter(Call call, ClientContext* context, R* response, bool start,
void* tag)
ClientAsyncWriter(::grpc::internal::Call call, ClientContext* context,
R* response, bool start, void* tag)
: context_(context), call_(call), started_(start) {
finish_ops_.RecvMessage(response);
finish_ops_.AllowNoMessage();
@ -401,13 +429,17 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
}
ClientContext* context_;
Call call_;
::grpc::internal::Call call_;
bool started_;
CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, CallOpClientSendClose>
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
meta_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpClientSendClose>
write_ops_;
CallOpSet<CallOpRecvInitialMetadata, CallOpGenericRecvMessage,
CallOpClientRecvStatus>
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpGenericRecvMessage,
::grpc::internal::CallOpClientRecvStatus>
finish_ops_;
};
@ -415,9 +447,10 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
/// where the client-to-server message stream has messages of type \a W,
/// and the server-to-client message stream has messages of type \a R.
template <class W, class R>
class ClientAsyncReaderWriterInterface : public ClientAsyncStreamingInterface,
public AsyncWriterInterface<W>,
public AsyncReaderInterface<R> {
class ClientAsyncReaderWriterInterface
: public internal::ClientAsyncStreamingInterface,
public internal::AsyncWriterInterface<W>,
public internal::AsyncReaderInterface<R> {
public:
/// Signal the client is done with the writes (half-close the client stream).
/// Thread-safe with respect to \a AsyncReaderInterface::Read
@ -426,13 +459,9 @@ class ClientAsyncReaderWriterInterface : public ClientAsyncStreamingInterface,
virtual void WritesDone(void* tag) = 0;
};
/// Async client-side interface for bi-directional streaming,
/// where the outgoing message stream going to the server
/// has messages of type \a W, and the incoming message stream coming
/// from the server has messages of type \a R.
namespace internal {
template <class W, class R>
class ClientAsyncReaderWriter final
: public ClientAsyncReaderWriterInterface<W, R> {
class ClientAsyncReaderWriterFactory {
public:
/// Create a stream object.
/// Start the RPC request if \a start is set.
@ -441,18 +470,27 @@ class ClientAsyncReaderWriter final
/// nullptr and the actual call must be initiated by StartCall
/// Note that \a context will be used to fill in custom initial metadata
/// used to send to the server when starting the call.
static ClientAsyncReaderWriter* Create(ChannelInterface* channel,
CompletionQueue* cq,
const RpcMethod& method,
ClientContext* context, bool start,
void* tag) {
Call call = channel->CreateCall(method, context, cq);
static ClientAsyncReaderWriter<W, R>* Create(
ChannelInterface* channel, CompletionQueue* cq,
const ::grpc::internal::RpcMethod& method, ClientContext* context,
bool start, void* tag) {
::grpc::internal::Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncReaderWriter)))
ClientAsyncReaderWriter(call, context, start, tag);
call.call(), sizeof(ClientAsyncReaderWriter<W, R>)))
ClientAsyncReaderWriter<W, R>(call, context, start, tag);
}
};
} // namespace internal
/// Async client-side interface for bi-directional streaming,
/// where the outgoing message stream going to the server
/// has messages of type \a W, and the incoming message stream coming
/// from the server has messages of type \a R.
template <class W, class R>
class ClientAsyncReaderWriter final
: public ClientAsyncReaderWriterInterface<W, R> {
public:
// always allocated against a call arena, no memory free required
static void operator delete(void* ptr, std::size_t size) {
assert(size == sizeof(ClientAsyncReaderWriter));
@ -532,8 +570,9 @@ class ClientAsyncReaderWriter final
}
private:
ClientAsyncReaderWriter(Call call, ClientContext* context, bool start,
void* tag)
friend class internal::ClientAsyncReaderWriterFactory<W, R>;
ClientAsyncReaderWriter(::grpc::internal::Call call, ClientContext* context,
bool start, void* tag)
: context_(context), call_(call), started_(start) {
if (start) {
StartCallInternal(tag);
@ -554,18 +593,26 @@ class ClientAsyncReaderWriter final
}
ClientContext* context_;
Call call_;
::grpc::internal::Call call_;
bool started_;
CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> read_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, CallOpClientSendClose>
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
meta_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpRecvMessage<R>>
read_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpClientSendClose>
write_ops_;
CallOpSet<CallOpRecvInitialMetadata, CallOpClientRecvStatus> finish_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpClientRecvStatus>
finish_ops_;
};
template <class W, class R>
class ServerAsyncReaderInterface : public ServerAsyncStreamingInterface,
public AsyncReaderInterface<R> {
class ServerAsyncReaderInterface
: public internal::ServerAsyncStreamingInterface,
public internal::AsyncReaderInterface<R> {
public:
/// Indicate that the stream is to be finished with a certain status code
/// and also send out \a msg response to the client.
@ -692,20 +739,23 @@ class ServerAsyncReader final : public ServerAsyncReaderInterface<W, R> {
}
private:
void BindCall(Call* call) override { call_ = *call; }
void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
Call call_;
::grpc::internal::Call call_;
ServerContext* ctx_;
CallOpSet<CallOpSendInitialMetadata> meta_ops_;
CallOpSet<CallOpRecvMessage<R>> read_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpServerSendStatus>
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
meta_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> read_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpServerSendStatus>
finish_ops_;
};
template <class W>
class ServerAsyncWriterInterface : public ServerAsyncStreamingInterface,
public AsyncWriterInterface<W> {
class ServerAsyncWriterInterface
: public internal::ServerAsyncStreamingInterface,
public internal::AsyncWriterInterface<W> {
public:
/// Indicate that the stream is to be finished with a certain status code.
/// Request notification for when the server has sent the appropriate
@ -823,7 +873,7 @@ class ServerAsyncWriter final : public ServerAsyncWriterInterface<W> {
}
private:
void BindCall(Call* call) override { call_ = *call; }
void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
template <class T>
void EnsureInitialMetadataSent(T* ops) {
@ -837,20 +887,25 @@ class ServerAsyncWriter final : public ServerAsyncWriterInterface<W> {
}
}
Call call_;
::grpc::internal::Call call_;
ServerContext* ctx_;
CallOpSet<CallOpSendInitialMetadata> meta_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpServerSendStatus>
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
meta_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpServerSendStatus>
write_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> finish_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpServerSendStatus>
finish_ops_;
};
/// Server-side interface for asynchronous bi-directional streaming.
template <class W, class R>
class ServerAsyncReaderWriterInterface : public ServerAsyncStreamingInterface,
public AsyncWriterInterface<W>,
public AsyncReaderInterface<R> {
class ServerAsyncReaderWriterInterface
: public internal::ServerAsyncStreamingInterface,
public internal::AsyncWriterInterface<W>,
public internal::AsyncReaderInterface<R> {
public:
/// Indicate that the stream is to be finished with a certain status code.
/// Request notification for when the server has sent the appropriate
@ -980,7 +1035,7 @@ class ServerAsyncReaderWriter final
private:
friend class ::grpc::Server;
void BindCall(Call* call) override { call_ = *call; }
void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
template <class T>
void EnsureInitialMetadataSent(T* ops) {
@ -994,14 +1049,18 @@ class ServerAsyncReaderWriter final
}
}
Call call_;
::grpc::internal::Call call_;
ServerContext* ctx_;
CallOpSet<CallOpSendInitialMetadata> meta_ops_;
CallOpSet<CallOpRecvMessage<R>> read_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpServerSendStatus>
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
meta_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> read_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpServerSendStatus>
write_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> finish_ops_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpServerSendStatus>
finish_ops_;
};
} // namespace grpc

@ -69,11 +69,9 @@ class ClientAsyncResponseReaderInterface {
virtual void Finish(R* msg, Status* status, void* tag) = 0;
};
/// Async API for client-side unary RPCs, where the message response
/// received from the server is of type \a R.
namespace internal {
template <class R>
class ClientAsyncResponseReader final
: public ClientAsyncResponseReaderInterface<R> {
class ClientAsyncResponseReaderFactory {
public:
/// Start a call and write the request out if \a start is set.
/// \a tag will be notified on \a cq when the call has been started (i.e.
@ -82,17 +80,24 @@ class ClientAsyncResponseReader final
/// Note that \a context will be used to fill in custom initial metadata
/// used to send to the server when starting the call.
template <class W>
static ClientAsyncResponseReader* Create(ChannelInterface* channel,
CompletionQueue* cq,
const RpcMethod& method,
ClientContext* context,
const W& request, bool start) {
Call call = channel->CreateCall(method, context, cq);
static ClientAsyncResponseReader<R>* Create(
ChannelInterface* channel, CompletionQueue* cq,
const ::grpc::internal::RpcMethod& method, ClientContext* context,
const W& request, bool start) {
::grpc::internal::Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncResponseReader)))
ClientAsyncResponseReader(call, context, request, start);
call.call(), sizeof(ClientAsyncResponseReader<R>)))
ClientAsyncResponseReader<R>(call, context, request, start);
}
};
} // namespace internal
/// Async API for client-side unary RPCs, where the message response
/// received from the server is of type \a R.
template <class R>
class ClientAsyncResponseReader final
: public ClientAsyncResponseReaderInterface<R> {
public:
// always allocated against a call arena, no memory free required
static void operator delete(void* ptr, std::size_t size) {
assert(size == sizeof(ClientAsyncResponseReader));
@ -137,13 +142,14 @@ class ClientAsyncResponseReader final
}
private:
friend class internal::ClientAsyncResponseReaderFactory<R>;
ClientContext* const context_;
Call call_;
::grpc::internal::Call call_;
bool started_;
template <class W>
ClientAsyncResponseReader(Call call, ClientContext* context, const W& request,
bool start)
ClientAsyncResponseReader(::grpc::internal::Call call, ClientContext* context,
const W& request, bool start)
: context_(context), call_(call), started_(start) {
// Bind the metadata at time of StartCallInternal but set up the rest here
// TODO(ctiller): don't assert
@ -162,19 +168,23 @@ class ClientAsyncResponseReader final
static void* operator new(std::size_t size);
static void* operator new(std::size_t size, void* p) { return p; }
SneakyCallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpClientSendClose>
::grpc::internal::SneakyCallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpClientSendClose>
init_buf;
CallOpSet<CallOpRecvInitialMetadata> meta_buf;
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>,
CallOpClientRecvStatus>
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
meta_buf;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpRecvMessage<R>,
::grpc::internal::CallOpClientRecvStatus>
finish_buf;
};
/// Async server-side API for handling unary calls, where the single
/// response message sent to the client is of type \a W.
template <class W>
class ServerAsyncResponseWriter final : public ServerAsyncStreamingInterface {
class ServerAsyncResponseWriter final
: public internal::ServerAsyncStreamingInterface {
public:
explicit ServerAsyncResponseWriter(ServerContext* ctx)
: call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
@ -262,13 +272,15 @@ class ServerAsyncResponseWriter final : public ServerAsyncStreamingInterface {
}
private:
void BindCall(Call* call) override { call_ = *call; }
void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
Call call_;
::grpc::internal::Call call_;
ServerContext* ctx_;
CallOpSet<CallOpSendInitialMetadata> meta_buf_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpServerSendStatus>
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
meta_buf_;
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpServerSendStatus>
finish_buf_;
};

@ -31,18 +31,19 @@
namespace grpc {
namespace internal {
class CallOpSendMessage;
template <class R>
class CallOpRecvMessage;
class CallOpGenericRecvMessage;
class MethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
class ServerStreamingHandler;
namespace CallOpGenericRecvMessageHelper {
template <class R>
class DeserializeFuncType;
} // namespace CallOpGenericRecvMessageHelper
} // namespace internal
/// A sequence of bytes.
class ByteBuffer final {
public:
@ -97,17 +98,17 @@ class ByteBuffer final {
private:
friend class SerializationTraits<ByteBuffer, void>;
friend class CallOpSendMessage;
friend class internal::CallOpSendMessage;
template <class R>
friend class CallOpRecvMessage;
friend class CallOpGenericRecvMessage;
friend class MethodHandler;
friend class internal::CallOpRecvMessage;
friend class internal::CallOpGenericRecvMessage;
friend class internal::MethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class RpcMethodHandler;
friend class internal::RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ServerStreamingHandler;
friend class internal::ServerStreamingHandler;
template <class R>
friend class CallOpGenericRecvMessageHelper::DeserializeFuncType;
friend class internal::DeserializeFuncType;
grpc_byte_buffer* buffer_;

@ -43,11 +43,13 @@
namespace grpc {
class ByteBuffer;
class Call;
class CallHook;
class CompletionQueue;
extern CoreCodegenInterface* g_core_codegen_interface;
namespace internal {
class Call;
class CallHook;
const char kBinaryErrorDetailsKey[] = "grpc-status-details-bin";
// TODO(yangg) if the map is changed before we send, the pointers will be a
@ -75,6 +77,7 @@ inline grpc_metadata* FillMetadataArray(
}
return metadata_array;
}
} // namespace internal
/// Per-message write options.
class WriteOptions {
@ -199,6 +202,7 @@ class WriteOptions {
bool last_message_;
};
namespace internal {
/// Default argument for CallOpSet. I is unused by the class, but can be
/// used for generating multiple names for the same thing.
template <int I>
@ -387,7 +391,6 @@ class CallOpRecvMessage {
bool allow_not_getting_message_;
};
namespace CallOpGenericRecvMessageHelper {
class DeserializeFunc {
public:
virtual Status Deserialize(ByteBuffer* buf) = 0;
@ -407,7 +410,6 @@ class DeserializeFuncType final : public DeserializeFunc {
private:
R* message_; // Not a managed pointer because management is external to this
};
} // namespace CallOpGenericRecvMessageHelper
class CallOpGenericRecvMessage {
public:
@ -418,8 +420,7 @@ class CallOpGenericRecvMessage {
void RecvMessage(R* message) {
// Use an explicit base class pointer to avoid resolution error in the
// following unique_ptr::reset for some old implementations.
CallOpGenericRecvMessageHelper::DeserializeFunc* func =
new CallOpGenericRecvMessageHelper::DeserializeFuncType<R>(message);
DeserializeFunc* func = new DeserializeFuncType<R>(message);
deserialize_.reset(func);
}
@ -459,7 +460,7 @@ class CallOpGenericRecvMessage {
}
private:
std::unique_ptr<CallOpGenericRecvMessageHelper::DeserializeFunc> deserialize_;
std::unique_ptr<DeserializeFunc> deserialize_;
ByteBuffer recv_buf_;
bool allow_not_getting_message_;
};
@ -714,7 +715,7 @@ class Call final {
grpc_call* call_;
int max_receive_message_size_;
};
} // namespace internal
} // namespace grpc
#endif // GRPCXX_IMPL_CODEGEN_CALL_H

@ -21,6 +21,7 @@
namespace grpc {
namespace internal {
class CallOpSetInterface;
class Call;
@ -31,6 +32,7 @@ class CallHook {
virtual ~CallHook() {}
virtual void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) = 0;
};
} // namespace internal
} // namespace grpc

@ -24,10 +24,8 @@
#include <grpc/impl/codegen/connectivity_state.h>
namespace grpc {
class Call;
class ChannelInterface;
class ClientContext;
class RpcMethod;
class CallOpSetInterface;
class CompletionQueue;
template <class R>
@ -36,14 +34,22 @@ template <class W>
class ClientWriter;
template <class W, class R>
class ClientReaderWriter;
namespace internal {
class Call;
class CallOpSetInterface;
class RpcMethod;
template <class InputMessage, class OutputMessage>
class BlockingUnaryCallImpl;
template <class R>
class ClientAsyncReader;
class ClientAsyncReaderFactory;
template <class W>
class ClientAsyncWriter;
class ClientAsyncWriterFactory;
template <class W, class R>
class ClientAsyncReaderWriter;
class ClientAsyncReaderWriterFactory;
template <class R>
class ClientAsyncResponseReader;
class ClientAsyncResponseReaderFactory;
} // namespace internal
/// Codegen interface for \a grpc::Channel.
class ChannelInterface {
@ -88,23 +94,21 @@ class ChannelInterface {
template <class W, class R>
friend class ::grpc::ClientReaderWriter;
template <class R>
friend class ::grpc::ClientAsyncReader;
friend class ::grpc::internal::ClientAsyncReaderFactory;
template <class W>
friend class ::grpc::ClientAsyncWriter;
friend class ::grpc::internal::ClientAsyncWriterFactory;
template <class W, class R>
friend class ::grpc::ClientAsyncReaderWriter;
friend class ::grpc::internal::ClientAsyncReaderWriterFactory;
template <class R>
friend class ::grpc::ClientAsyncResponseReader;
friend class ::grpc::internal::ClientAsyncResponseReaderFactory;
template <class InputMessage, class OutputMessage>
friend Status BlockingUnaryCall(ChannelInterface* channel,
const RpcMethod& method,
ClientContext* context,
const InputMessage& request,
OutputMessage* result);
friend class ::grpc::RpcMethod;
virtual Call CreateCall(const RpcMethod& method, ClientContext* context,
CompletionQueue* cq) = 0;
virtual void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) = 0;
friend class ::grpc::internal::BlockingUnaryCallImpl;
friend class ::grpc::internal::RpcMethod;
virtual internal::Call CreateCall(const internal::RpcMethod& method,
ClientContext* context,
CompletionQueue* cq) = 0;
virtual void PerformOpsOnCall(internal::CallOpSetInterface* ops,
internal::Call* call) = 0;
virtual void* RegisterMethod(const char* method) = 0;
virtual void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline,
@ -112,7 +116,6 @@ class ChannelInterface {
virtual bool WaitForStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline) = 0;
};
} // namespace grpc
#endif // GRPCXX_IMPL_CODEGEN_CHANNEL_INTERFACE_H

@ -60,7 +60,16 @@ class Channel;
class ChannelInterface;
class CompletionQueue;
class CallCredentials;
class ClientContext;
namespace internal {
class RpcMethod;
class CallOpClientRecvStatus;
class CallOpRecvInitialMetadata;
template <class InputMessage, class OutputMessage>
class BlockingUnaryCallImpl;
} // namespace internal
template <class R>
class ClientReader;
template <class W>
@ -345,8 +354,8 @@ class ClientContext {
ClientContext& operator=(const ClientContext&);
friend class ::grpc::testing::InteropClientContextInspector;
friend class CallOpClientRecvStatus;
friend class CallOpRecvInitialMetadata;
friend class ::grpc::internal::CallOpClientRecvStatus;
friend class ::grpc::internal::CallOpRecvInitialMetadata;
friend class Channel;
template <class R>
friend class ::grpc::ClientReader;
@ -363,11 +372,7 @@ class ClientContext {
template <class R>
friend class ::grpc::ClientAsyncResponseReader;
template <class InputMessage, class OutputMessage>
friend Status BlockingUnaryCall(ChannelInterface* channel,
const RpcMethod& method,
ClientContext* context,
const InputMessage& request,
OutputMessage* result);
friend class ::grpc::internal::BlockingUnaryCallImpl;
grpc_call* call() const { return call_; }
void set_call(grpc_call* call, const std::shared_ptr<Channel>& channel);
@ -399,8 +404,8 @@ class ClientContext {
mutable std::shared_ptr<const AuthContext> auth_context_;
struct census_context* census_context_;
std::multimap<grpc::string, grpc::string> send_initial_metadata_;
MetadataMap recv_initial_metadata_;
MetadataMap trailing_metadata_;
internal::MetadataMap recv_initial_metadata_;
internal::MetadataMap trailing_metadata_;
grpc_call* propagate_from_call_;
PropagationOptions propagation_options_;

@ -30,43 +30,60 @@ namespace grpc {
class Channel;
class ClientContext;
class CompletionQueue;
class RpcMethod;
namespace internal {
class RpcMethod;
/// Wrapper that performs a blocking unary call
template <class InputMessage, class OutputMessage>
Status BlockingUnaryCall(ChannelInterface* channel, const RpcMethod& method,
ClientContext* context, const InputMessage& request,
OutputMessage* result) {
CompletionQueue cq(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK,
GRPC_CQ_DEFAULT_POLLING}); // Pluckable completion queue
Call call(channel->CreateCall(method, context, &cq));
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpRecvInitialMetadata, CallOpRecvMessage<OutputMessage>,
CallOpClientSendClose, CallOpClientRecvStatus>
ops;
Status status = ops.SendMessage(request);
if (!status.ok()) {
return status;
}
ops.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
ops.RecvInitialMetadata(context);
ops.RecvMessage(result);
ops.ClientSendClose();
ops.ClientRecvStatus(context, &status);
call.PerformOps(&ops);
if (cq.Pluck(&ops)) {
if (!ops.got_message && status.ok()) {
return Status(StatusCode::UNIMPLEMENTED,
"No message returned for unary request");
return BlockingUnaryCallImpl<InputMessage, OutputMessage>(
channel, method, context, request, result)
.status();
};
template <class InputMessage, class OutputMessage>
class BlockingUnaryCallImpl {
public:
BlockingUnaryCallImpl(ChannelInterface* channel, const RpcMethod& method,
ClientContext* context, const InputMessage& request,
OutputMessage* result) {
CompletionQueue cq(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK,
GRPC_CQ_DEFAULT_POLLING}); // Pluckable completion queue
Call call(channel->CreateCall(method, context, &cq));
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpRecvInitialMetadata, CallOpRecvMessage<OutputMessage>,
CallOpClientSendClose, CallOpClientRecvStatus>
ops;
status_ = ops.SendMessage(request);
if (!status_.ok()) {
return;
}
ops.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
ops.RecvInitialMetadata(context);
ops.RecvMessage(result);
ops.ClientSendClose();
ops.ClientRecvStatus(context, &status_);
call.PerformOps(&ops);
if (cq.Pluck(&ops)) {
if (!ops.got_message && status_.ok()) {
status_ = Status(StatusCode::UNIMPLEMENTED,
"No message returned for unary request");
}
} else {
GPR_CODEGEN_ASSERT(!status_.ok());
}
} else {
GPR_CODEGEN_ASSERT(!status.ok());
}
return status;
}
Status status() { return status_; }
private:
Status status_;
};
} // namespace internal
} // namespace grpc
#endif // GRPCXX_IMPL_CODEGEN_CLIENT_UNARY_CALL_H
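An illustrative sketch, not part of this change, of how a caller (typically generated stub code) might invoke the wrapper above; CallEcho, EchoRequest, EchoResponse and the method name are hypothetical stand-ins for proto-generated artifacts.

#include <memory>

#include <grpc++/grpc++.h>

// EchoRequest/EchoResponse stand in for message types that have a
// grpc::SerializationTraits specialization (e.g. protobuf messages).
::grpc::Status CallEcho(const std::shared_ptr<::grpc::Channel>& channel,
                        const EchoRequest& request, EchoResponse* response) {
  static const ::grpc::internal::RpcMethod kMethod(
      "/example.EchoService/Echo", ::grpc::internal::RpcMethod::NORMAL_RPC);
  ::grpc::ClientContext context;
  // BlockingUnaryCall is now a thin wrapper: it constructs
  // BlockingUnaryCallImpl and returns its status(), as shown above.
  return ::grpc::internal::BlockingUnaryCall(channel.get(), kMethod, &context,
                                             request, response);
}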

@ -56,7 +56,19 @@ class ServerWriter;
namespace internal {
template <class W, class R>
class ServerReaderWriterBody;
}
} // namespace internal
class Channel;
class ChannelInterface;
class ClientContext;
class CompletionQueue;
class Server;
class ServerBuilder;
class ServerContext;
namespace internal {
class CompletionQueueTag;
class RpcMethod;
template <class ServiceType, class RequestType, class ResponseType>
class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
@ -66,16 +78,11 @@ class ServerStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
class BidiStreamingHandler;
class UnknownMethodHandler;
class Channel;
class ChannelInterface;
class ClientContext;
class CompletionQueueTag;
class CompletionQueue;
class RpcMethod;
class Server;
class ServerBuilder;
class ServerContext;
template <class Streamer, bool WriteNeeded>
class TemplatedBidiStreamingHandler;
template <class InputMessage, class OutputMessage>
class BlockingUnaryCallImpl;
} // namespace internal
extern CoreCodegenInterface* g_core_codegen_interface;
@ -109,6 +116,30 @@ class CompletionQueue : private GrpcLibraryCodegen {
TIMEOUT ///< deadline was reached.
};
/// EXPERIMENTAL
/// First executes \a F, then reads from the queue, blocking up to
/// \a deadline (or the queue's shutdown).
/// Both \a tag and \a ok are updated upon success (if an event is available
/// within the \a deadline). A \a tag points to an arbitrary location usually
/// employed to uniquely identify an event.
///
/// \param F[in] Function to execute before calling AsyncNext on this queue.
/// \param tag[out] Upon success, updated to point to the event's tag.
/// \param ok[out] Upon success, true if a regular event was read, false otherwise.
/// \param deadline[in] How long to block in wait for an event.
///
/// \return The type of event read.
template <typename T, typename F>
NextStatus DoThenAsyncNext(F&& f, void** tag, bool* ok, const T& deadline) {
CompletionQueueTLSCache cache = CompletionQueueTLSCache(this);
f();
if (cache.Flush(tag, ok)) {
return GOT_EVENT;
} else {
return AsyncNext(tag, ok, deadline);
}
}
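An illustrative sketch, not part of this header, of how DoThenAsyncNext might be used; it assumes cq is a grpc::CompletionQueue* the caller is polling, and the lambda body and deadline are hypothetical.

void* tag;
bool ok;
// Run f() first (e.g. to start another operation whose completion may land in
// the thread-local cache), then poll for an event.
::grpc::CompletionQueue::NextStatus r = cq->DoThenAsyncNext(
    [&] { /* enqueue more work here */ }, &tag, &ok,
    gpr_inf_future(GPR_CLOCK_REALTIME));
if (r == ::grpc::CompletionQueue::GOT_EVENT) {
  // handle the completed tag
}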
/// Read from the queue, blocking up to \a deadline (or the queue's shutdown).
/// Both \a tag and \a ok are updated upon success (if an event is available
/// within the \a deadline). A \a tag points to an arbitrary location usually
@ -196,28 +227,39 @@ class CompletionQueue : private GrpcLibraryCodegen {
template <class W, class R>
friend class ::grpc::internal::ServerReaderWriterBody;
template <class ServiceType, class RequestType, class ResponseType>
friend class RpcMethodHandler;
friend class ::grpc::internal::RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ClientStreamingHandler;
friend class ::grpc::internal::ClientStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ServerStreamingHandler;
friend class ::grpc::internal::ServerStreamingHandler;
template <class Streamer, bool WriteNeeded>
friend class TemplatedBidiStreamingHandler;
friend class UnknownMethodHandler;
friend class ::grpc::internal::TemplatedBidiStreamingHandler;
friend class ::grpc::internal::UnknownMethodHandler;
friend class ::grpc::Server;
friend class ::grpc::ServerContext;
template <class InputMessage, class OutputMessage>
friend Status BlockingUnaryCall(ChannelInterface* channel,
const RpcMethod& method,
ClientContext* context,
const InputMessage& request,
OutputMessage* result);
friend class ::grpc::internal::BlockingUnaryCallImpl;
/// EXPERIMENTAL
/// Creates a Thread Local cache to store the first event
/// queued on this completion queue from this thread. Once
/// initialized, it must be flushed on the same thread.
class CompletionQueueTLSCache {
public:
CompletionQueueTLSCache(CompletionQueue* cq);
~CompletionQueueTLSCache();
bool Flush(void** tag, bool* ok);
private:
CompletionQueue* cq_;
bool flushed_;
};
NextStatus AsyncNextInternal(void** tag, bool* ok, gpr_timespec deadline);
/// Wraps \a grpc_completion_queue_pluck.
/// \warning Must not be mixed with calls to \a Next.
bool Pluck(CompletionQueueTag* tag) {
bool Pluck(internal::CompletionQueueTag* tag) {
auto deadline =
g_core_codegen_interface->gpr_inf_future(GPR_CLOCK_REALTIME);
auto ev = g_core_codegen_interface->grpc_completion_queue_pluck(
@ -238,7 +280,7 @@ class CompletionQueue : private GrpcLibraryCodegen {
/// implementation to simply call the other TryPluck function with a zero
/// timeout, i.e.:
/// TryPluck(tag, gpr_time_0(GPR_CLOCK_REALTIME))
void TryPluck(CompletionQueueTag* tag) {
void TryPluck(internal::CompletionQueueTag* tag) {
auto deadline = g_core_codegen_interface->gpr_time_0(GPR_CLOCK_REALTIME);
auto ev = g_core_codegen_interface->grpc_completion_queue_pluck(
cq_, tag, deadline, nullptr);
@ -254,7 +296,7 @@ class CompletionQueue : private GrpcLibraryCodegen {
///
/// This expects tag->FinalizeResult (if called) to return 'false', i.e. it
/// expects that the tag is internal, not something that is returned to the user.
void TryPluck(CompletionQueueTag* tag, gpr_timespec deadline) {
void TryPluck(internal::CompletionQueueTag* tag, gpr_timespec deadline) {
auto ev = g_core_codegen_interface->grpc_completion_queue_pluck(
cq_, tag, deadline, nullptr);
if (ev.type == GRPC_QUEUE_TIMEOUT || ev.type == GRPC_QUEUE_SHUTDOWN) {

@ -21,6 +21,7 @@
namespace grpc {
namespace internal {
/// An interface allowing implementors to process and filter event tags.
class CompletionQueueTag {
public:
@ -31,6 +32,7 @@ class CompletionQueueTag {
/// queue
virtual bool FinalizeResult(void** tag, bool* status) = 0;
};
} // namespace internal
} // namespace grpc

@ -23,6 +23,7 @@
namespace grpc {
namespace internal {
class MetadataMap {
public:
MetadataMap() { memset(&arr_, 0, sizeof(arr_)); }
@ -50,6 +51,7 @@ class MetadataMap {
grpc_metadata_array arr_;
std::multimap<grpc::string_ref, grpc::string_ref> map_;
};
} // namespace internal
} // namespace grpc

@ -26,6 +26,7 @@
namespace grpc {
namespace internal {
/// A wrapper class of an application provided rpc method handler.
template <class ServiceType, class RequestType, class ResponseType>
class RpcMethodHandler : public MethodHandler {
@ -266,6 +267,7 @@ class UnknownMethodHandler : public MethodHandler {
}
};
} // namespace internal
} // namespace grpc
#endif // GRPCXX_IMPL_CODEGEN_METHOD_HANDLER_IMPL_H

@ -24,7 +24,7 @@
#include <grpc++/impl/codegen/channel_interface.h>
namespace grpc {
namespace internal {
/// Descriptor of an RPC method
class RpcMethod {
public:
@ -55,6 +55,7 @@ class RpcMethod {
void* const channel_tag_;
};
} // namespace internal
} // namespace grpc
#endif // GRPCXX_IMPL_CODEGEN_RPC_METHOD_H

@ -32,8 +32,8 @@
namespace grpc {
class ServerContext;
class StreamContextInterface;
namespace internal {
/// Base class for running an RPC handler.
class MethodHandler {
public:
@ -71,6 +71,7 @@ class RpcServiceMethod : public RpcMethod {
void* server_tag_;
std::unique_ptr<MethodHandler> handler_;
};
} // namespace internal
} // namespace grpc

@ -55,7 +55,6 @@ class ServerWriter;
namespace internal {
template <class W, class R>
class ServerReaderWriterBody;
}
template <class ServiceType, class RequestType, class ResponseType>
class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
@ -65,9 +64,11 @@ class ServerStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
class BidiStreamingHandler;
class UnknownMethodHandler;
template <class Streamer, bool WriteNeeded>
class TemplatedBidiStreamingHandler;
class Call;
class CallOpBuffer;
} // namespace internal
class CompletionQueue;
class Server;
class ServerInterface;
@ -247,14 +248,14 @@ class ServerContext {
template <class W, class R>
friend class ::grpc::internal::ServerReaderWriterBody;
template <class ServiceType, class RequestType, class ResponseType>
friend class RpcMethodHandler;
friend class ::grpc::internal::RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ClientStreamingHandler;
friend class ::grpc::internal::ClientStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ServerStreamingHandler;
friend class ::grpc::internal::ServerStreamingHandler;
template <class Streamer, bool WriteNeeded>
friend class TemplatedBidiStreamingHandler;
friend class UnknownMethodHandler;
friend class ::grpc::internal::TemplatedBidiStreamingHandler;
friend class ::grpc::internal::UnknownMethodHandler;
friend class ::grpc::ClientContext;
/// Prevent copying.
@ -263,9 +264,9 @@ class ServerContext {
class CompletionOp;
void BeginCompletionOp(Call* call);
void BeginCompletionOp(internal::Call* call);
/// Return the tag queued by BeginCompletionOp()
CompletionQueueTag* GetCompletionOpTag();
internal::CompletionQueueTag* GetCompletionOpTag();
ServerContext(gpr_timespec deadline, grpc_metadata_array* arr);
@ -282,7 +283,7 @@ class ServerContext {
CompletionQueue* cq_;
bool sent_initial_metadata_;
mutable std::shared_ptr<const AuthContext> auth_context_;
MetadataMap client_metadata_;
internal::MetadataMap client_metadata_;
std::multimap<grpc::string, grpc::string> initial_metadata_;
std::multimap<grpc::string, grpc::string> trailing_metadata_;
@ -290,7 +291,9 @@ class ServerContext {
grpc_compression_level compression_level_;
grpc_compression_algorithm compression_algorithm_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage> pending_ops_;
internal::CallOpSet<internal::CallOpSendInitialMetadata,
internal::CallOpSendMessage>
pending_ops_;
bool has_pending_ops_;
};

@ -30,20 +30,21 @@ namespace grpc {
class AsyncGenericService;
class Channel;
class GenericServerContext;
class RpcService;
class ServerAsyncStreamingInterface;
class ServerCompletionQueue;
class ServerContext;
class ServerCredentials;
class Service;
class ThreadPoolInterface;
extern CoreCodegenInterface* g_core_codegen_interface;
/// Models a gRPC server.
///
/// Servers are configured and started via \a grpc::ServerBuilder.
class ServerInterface : public CallHook {
namespace internal {
class ServerAsyncStreamingInterface;
} // namespace internal
class ServerInterface : public internal::CallHook {
public:
virtual ~ServerInterface() {}
@ -78,7 +79,7 @@ class ServerInterface : public CallHook {
virtual void Wait() = 0;
protected:
friend class Service;
friend class ::grpc::Service;
/// Register a service. This call does not take ownership of the service.
/// The service must exist for the lifetime of the Server instance.
@ -116,12 +117,13 @@ class ServerInterface : public CallHook {
virtual grpc_server* server() = 0;
virtual void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) = 0;
virtual void PerformOpsOnCall(internal::CallOpSetInterface* ops,
internal::Call* call) = 0;
class BaseAsyncRequest : public CompletionQueueTag {
class BaseAsyncRequest : public internal::CompletionQueueTag {
public:
BaseAsyncRequest(ServerInterface* server, ServerContext* context,
ServerAsyncStreamingInterface* stream,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq, void* tag,
bool delete_on_finalize);
virtual ~BaseAsyncRequest();
@ -131,7 +133,7 @@ class ServerInterface : public CallHook {
protected:
ServerInterface* const server_;
ServerContext* const context_;
ServerAsyncStreamingInterface* const stream_;
internal::ServerAsyncStreamingInterface* const stream_;
CompletionQueue* const call_cq_;
void* const tag_;
const bool delete_on_finalize_;
@ -141,7 +143,7 @@ class ServerInterface : public CallHook {
class RegisteredAsyncRequest : public BaseAsyncRequest {
public:
RegisteredAsyncRequest(ServerInterface* server, ServerContext* context,
ServerAsyncStreamingInterface* stream,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq, void* tag);
// uses BaseAsyncRequest::FinalizeResult
@ -155,7 +157,7 @@ class ServerInterface : public CallHook {
public:
NoPayloadAsyncRequest(void* registered_method, ServerInterface* server,
ServerContext* context,
ServerAsyncStreamingInterface* stream,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag)
: RegisteredAsyncRequest(server, context, stream, call_cq, tag) {
@ -170,7 +172,7 @@ class ServerInterface : public CallHook {
public:
PayloadAsyncRequest(void* registered_method, ServerInterface* server,
ServerContext* context,
ServerAsyncStreamingInterface* stream,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag,
Message* request)
@ -212,7 +214,7 @@ class ServerInterface : public CallHook {
void* const registered_method_;
ServerInterface* const server_;
ServerContext* const context_;
ServerAsyncStreamingInterface* const stream_;
internal::ServerAsyncStreamingInterface* const stream_;
CompletionQueue* const call_cq_;
ServerCompletionQueue* const notification_cq_;
void* const tag_;
@ -223,7 +225,7 @@ class ServerInterface : public CallHook {
class GenericAsyncRequest : public BaseAsyncRequest {
public:
GenericAsyncRequest(ServerInterface* server, GenericServerContext* context,
ServerAsyncStreamingInterface* stream,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag,
bool delete_on_finalize);
@ -235,8 +237,9 @@ class ServerInterface : public CallHook {
};
template <class Message>
void RequestAsyncCall(RpcServiceMethod* method, ServerContext* context,
ServerAsyncStreamingInterface* stream,
void RequestAsyncCall(internal::RpcServiceMethod* method,
ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag,
Message* message) {
@ -246,8 +249,9 @@ class ServerInterface : public CallHook {
message);
}
void RequestAsyncCall(RpcServiceMethod* method, ServerContext* context,
ServerAsyncStreamingInterface* stream,
void RequestAsyncCall(internal::RpcServiceMethod* method,
ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag) {
GPR_CODEGEN_ASSERT(method);
@ -256,7 +260,7 @@ class ServerInterface : public CallHook {
}
void RequestAsyncGenericCall(GenericServerContext* context,
ServerAsyncStreamingInterface* stream,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq,
void* tag) {

@ -28,13 +28,14 @@
namespace grpc {
class Call;
class CompletionQueue;
class Server;
class ServerInterface;
class ServerCompletionQueue;
class ServerContext;
namespace internal {
class Call;
class ServerAsyncStreamingInterface {
public:
virtual ~ServerAsyncStreamingInterface() {}
@ -48,9 +49,10 @@ class ServerAsyncStreamingInterface {
virtual void SendInitialMetadata(void* tag) = 0;
private:
friend class ServerInterface;
friend class ::grpc::ServerInterface;
virtual void BindCall(Call* call) = 0;
};
} // namespace internal
/// Descriptor of an RPC service and its various RPC methods
class Service {
@ -88,40 +90,38 @@ class Service {
protected:
template <class Message>
void RequestAsyncUnary(int index, ServerContext* context, Message* request,
ServerAsyncStreamingInterface* stream,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag) {
server_->RequestAsyncCall(methods_[index].get(), context, stream, call_cq,
notification_cq, tag, request);
}
void RequestAsyncClientStreaming(int index, ServerContext* context,
ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq,
void* tag) {
void RequestAsyncClientStreaming(
int index, ServerContext* context,
internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag) {
server_->RequestAsyncCall(methods_[index].get(), context, stream, call_cq,
notification_cq, tag);
}
template <class Message>
void RequestAsyncServerStreaming(int index, ServerContext* context,
Message* request,
ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq,
void* tag) {
void RequestAsyncServerStreaming(
int index, ServerContext* context, Message* request,
internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag) {
server_->RequestAsyncCall(methods_[index].get(), context, stream, call_cq,
notification_cq, tag, request);
}
void RequestAsyncBidiStreaming(int index, ServerContext* context,
ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq,
void* tag) {
void RequestAsyncBidiStreaming(
int index, ServerContext* context,
internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag) {
server_->RequestAsyncCall(methods_[index].get(), context, stream, call_cq,
notification_cq, tag);
}
void AddMethod(RpcServiceMethod* method) { methods_.emplace_back(method); }
void AddMethod(internal::RpcServiceMethod* method) {
methods_.emplace_back(method);
}
void MarkMethodAsync(int index) {
GPR_CODEGEN_ASSERT(
@ -139,7 +139,7 @@ class Service {
methods_[index].reset();
}
void MarkMethodStreamed(int index, MethodHandler* streamed_method) {
void MarkMethodStreamed(int index, internal::MethodHandler* streamed_method) {
GPR_CODEGEN_ASSERT(methods_[index] && methods_[index]->handler() &&
"Cannot mark an async or generic method Streamed");
methods_[index]->SetHandler(streamed_method);
@ -148,14 +148,14 @@ class Service {
// case of BIDI_STREAMING that has 1 read and 1 write, in that order,
// and split server-side streaming is BIDI_STREAMING with 1 read and
// any number of writes, in that order.
methods_[index]->SetMethodType(::grpc::RpcMethod::BIDI_STREAMING);
methods_[index]->SetMethodType(internal::RpcMethod::BIDI_STREAMING);
}
private:
friend class Server;
friend class ServerInterface;
ServerInterface* server_;
std::vector<std::unique_ptr<RpcServiceMethod>> methods_;
std::vector<std::unique_ptr<internal::RpcServiceMethod>> methods_;
};
} // namespace grpc

@ -30,6 +30,7 @@
namespace grpc {
namespace internal {
/// Common interface for all synchronous client side streaming.
class ClientStreamingInterface {
public:
@ -141,10 +142,12 @@ class WriterInterface {
}
};
} // namespace internal
/// Client-side interface for streaming reads of message of type \a R.
template <class R>
class ClientReaderInterface : public ClientStreamingInterface,
public ReaderInterface<R> {
class ClientReaderInterface : public internal::ClientStreamingInterface,
public internal::ReaderInterface<R> {
public:
/// Block to wait for initial metadata from server. The received metadata
/// can only be accessed after this call returns. Should only be called before
@ -153,35 +156,25 @@ class ClientReaderInterface : public ClientStreamingInterface,
virtual void WaitForInitialMetadata() = 0;
};
namespace internal {
template <class R>
class ClientReaderFactory {
public:
template <class W>
static ClientReader<R>* Create(ChannelInterface* channel,
const ::grpc::internal::RpcMethod& method,
ClientContext* context, const W& request) {
return new ClientReader<R>(channel, method, context, request);
}
};
} // namespace internal
/// Synchronous (blocking) client-side API for doing server-streaming RPCs,
/// where the stream of messages coming from the server has messages
/// of type \a R.
template <class R>
class ClientReader final : public ClientReaderInterface<R> {
public:
/// Block to create a stream and write the initial metadata and \a request
/// out. Note that \a context will be used to fill in custom initial
/// metadata used to send to the server when starting the call.
template <class W>
ClientReader(ChannelInterface* channel, const RpcMethod& method,
ClientContext* context, const W& request)
: context_(context),
cq_(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK,
GRPC_CQ_DEFAULT_POLLING}), // Pluckable cq
call_(channel->CreateCall(method, context, &cq_)) {
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpClientSendClose>
ops;
ops.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
// TODO(ctiller): don't assert
GPR_CODEGEN_ASSERT(ops.SendMessage(request).ok());
ops.ClientSendClose();
call_.PerformOps(&ops);
cq_.Pluck(&ops);
}
/// See the \a ClientStreamingInterface.WaitForInitialMetadata method for
/// semantics.
///
@ -192,7 +185,8 @@ class ClientReader final : public ClientReaderInterface<R> {
void WaitForInitialMetadata() override {
GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
CallOpSet<CallOpRecvInitialMetadata> ops;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
ops;
ops.RecvInitialMetadata(context_);
call_.PerformOps(&ops);
cq_.Pluck(&ops); /// status ignored
@ -209,7 +203,9 @@ class ClientReader final : public ClientReaderInterface<R> {
/// already received (if initial metadata is received, it can be then
/// accessed through the \a ClientContext associated with this call).
bool Read(R* msg) override {
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> ops;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpRecvMessage<R>>
ops;
if (!context_->initial_metadata_received_) {
ops.RecvInitialMetadata(context_);
}
@ -224,7 +220,7 @@ class ClientReader final : public ClientReaderInterface<R> {
/// The \a ClientContext associated with this call is updated with
/// possible metadata received from the server.
Status Finish() override {
CallOpSet<CallOpClientRecvStatus> ops;
::grpc::internal::CallOpSet<::grpc::internal::CallOpClientRecvStatus> ops;
Status status;
ops.ClientRecvStatus(context_, &status);
call_.PerformOps(&ops);
@ -233,15 +229,41 @@ class ClientReader final : public ClientReaderInterface<R> {
}
private:
friend class internal::ClientReaderFactory<R>;
ClientContext* context_;
CompletionQueue cq_;
Call call_;
::grpc::internal::Call call_;
/// Block to create a stream and write the initial metadata and \a request
/// out. Note that \a context will be used to fill in custom initial
/// metadata used to send to the server when starting the call.
template <class W>
ClientReader(::grpc::ChannelInterface* channel,
const ::grpc::internal::RpcMethod& method,
ClientContext* context, const W& request)
: context_(context),
cq_(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK,
GRPC_CQ_DEFAULT_POLLING}), // Pluckable cq
call_(channel->CreateCall(method, context, &cq_)) {
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpClientSendClose>
ops;
ops.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
// TODO(ctiller): don't assert
GPR_CODEGEN_ASSERT(ops.SendMessage(request).ok());
ops.ClientSendClose();
call_.PerformOps(&ops);
cq_.Pluck(&ops);
}
};
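An illustrative sketch, not part of this change, of the usual blocking read loop against a ClientReader<R> obtained from a generated stub; ListFeatures, Feature and Rectangle are hypothetical proto-generated names.

grpc::ClientContext context;
Rectangle rect;
std::unique_ptr<grpc::ClientReader<Feature>> reader(
    stub->ListFeatures(&context, rect));
Feature feature;
while (reader->Read(&feature)) {
  // consume one streamed message
}
grpc::Status status = reader->Finish();  // trailing status from the server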
/// Client-side interface for streaming writes of message type \a W.
template <class W>
class ClientWriterInterface : public ClientStreamingInterface,
public WriterInterface<W> {
class ClientWriterInterface : public internal::ClientStreamingInterface,
public internal::WriterInterface<W> {
public:
/// Half close writing from the client. (signal that the stream of messages
/// coming from the client is complete).
@ -252,37 +274,25 @@ class ClientWriterInterface : public ClientStreamingInterface,
virtual bool WritesDone() = 0;
};
namespace internal {
template <class W>
class ClientWriterFactory {
public:
template <class R>
static ClientWriter<W>* Create(::grpc::ChannelInterface* channel,
const ::grpc::internal::RpcMethod& method,
ClientContext* context, R* response) {
return new ClientWriter<W>(channel, method, context, response);
}
};
} // namespace internal
/// Synchronous (blocking) client-side API for doing client-streaming RPCs,
/// where the outgoing message stream coming from the client has messages of
/// type \a W.
template <class W>
class ClientWriter : public ClientWriterInterface<W> {
public:
/// Block to create a stream (i.e. send request headers and other initial
/// metadata to the server). Note that \a context will be used to fill
/// in custom initial metadata. \a response will be filled in with the
/// single expected response message from the server upon a successful
/// call to the \a Finish method of this instance.
template <class R>
ClientWriter(ChannelInterface* channel, const RpcMethod& method,
ClientContext* context, R* response)
: context_(context),
cq_(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK,
GRPC_CQ_DEFAULT_POLLING}), // Pluckable cq
call_(channel->CreateCall(method, context, &cq_)) {
finish_ops_.RecvMessage(response);
finish_ops_.AllowNoMessage();
if (!context_->initial_metadata_corked_) {
CallOpSet<CallOpSendInitialMetadata> ops;
ops.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
call_.PerformOps(&ops);
cq_.Pluck(&ops);
}
}
/// See the \a ClientStreamingInterface.WaitForInitialMetadata method for
/// semantics.
///
@ -292,7 +302,8 @@ class ClientWriter : public ClientWriterInterface<W> {
void WaitForInitialMetadata() {
GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
CallOpSet<CallOpRecvInitialMetadata> ops;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
ops;
ops.RecvInitialMetadata(context_);
call_.PerformOps(&ops);
cq_.Pluck(&ops); // status ignored
@ -304,10 +315,11 @@ class ClientWriter : public ClientWriterInterface<W> {
/// Side effect:
/// Also sends initial metadata if not already sent (using the
/// \a ClientContext associated with this call).
using WriterInterface<W>::Write;
using ::grpc::internal::WriterInterface<W>::Write;
bool Write(const W& msg, WriteOptions options) override {
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpClientSendClose>
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpClientSendClose>
ops;
if (options.is_last_message()) {
@ -328,7 +340,7 @@ class ClientWriter : public ClientWriterInterface<W> {
}
bool WritesDone() override {
CallOpSet<CallOpClientSendClose> ops;
::grpc::internal::CallOpSet<::grpc::internal::CallOpClientSendClose> ops;
ops.ClientSendClose();
call_.PerformOps(&ops);
return cq_.Pluck(&ops);
@ -352,21 +364,51 @@ class ClientWriter : public ClientWriterInterface<W> {
}
private:
friend class internal::ClientWriterFactory<W>;
/// Block to create a stream (i.e. send request headers and other initial
/// metadata to the server). Note that \a context will be used to fill
/// in custom initial metadata. \a response will be filled in with the
/// single expected response message from the server upon a successful
/// call to the \a Finish method of this instance.
template <class R>
ClientWriter(ChannelInterface* channel,
const ::grpc::internal::RpcMethod& method,
ClientContext* context, R* response)
: context_(context),
cq_(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK,
GRPC_CQ_DEFAULT_POLLING}), // Pluckable cq
call_(channel->CreateCall(method, context, &cq_)) {
finish_ops_.RecvMessage(response);
finish_ops_.AllowNoMessage();
if (!context_->initial_metadata_corked_) {
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
ops;
ops.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
call_.PerformOps(&ops);
cq_.Pluck(&ops);
}
}
ClientContext* context_;
CallOpSet<CallOpRecvInitialMetadata, CallOpGenericRecvMessage,
CallOpClientRecvStatus>
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpGenericRecvMessage,
::grpc::internal::CallOpClientRecvStatus>
finish_ops_;
CompletionQueue cq_;
Call call_;
::grpc::internal::Call call_;
};
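An illustrative sketch, not part of this change, of the matching client-streaming pattern with a ClientWriter<W>; RecordRoute, Point, RouteSummary and points are hypothetical.

grpc::ClientContext context;
RouteSummary summary;
std::unique_ptr<grpc::ClientWriter<Point>> writer(
    stub->RecordRoute(&context, &summary));
for (const Point& p : points) {
  if (!writer->Write(p)) break;  // stream closed by the server
}
writer->WritesDone();                    // half-close the client side
grpc::Status status = writer->Finish();  // also fills in 'summary'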
/// Client-side interface for bi-directional streaming with
/// client-to-server stream messages of type \a W and
/// server-to-client stream messages of type \a R.
template <class W, class R>
class ClientReaderWriterInterface : public ClientStreamingInterface,
public WriterInterface<W>,
public ReaderInterface<R> {
class ClientReaderWriterInterface : public internal::ClientStreamingInterface,
public internal::WriterInterface<W>,
public internal::ReaderInterface<R> {
public:
/// Block to wait for initial metadata from server. The received metadata
/// can only be accessed after this call returns. Should only be called before
@ -375,7 +417,7 @@ class ClientReaderWriterInterface : public ClientStreamingInterface,
virtual void WaitForInitialMetadata() = 0;
/// Half close writing from the client. (signal that the stream of messages
/// coming from the client is complete).
/// coming from the clinet is complete).
/// Blocks until currently-pending writes are completed.
/// Thread-safe with respect to \a ReaderInterface::Read
///
@ -383,6 +425,18 @@ class ClientReaderWriterInterface : public ClientStreamingInterface,
virtual bool WritesDone() = 0;
};
namespace internal {
template <class W, class R>
class ClientReaderWriterFactory {
public:
static ClientReaderWriter<W, R>* Create(
::grpc::ChannelInterface* channel,
const ::grpc::internal::RpcMethod& method, ClientContext* context) {
return new ClientReaderWriter<W, R>(channel, method, context);
}
};
} // namespace internal
/// Synchronous (blocking) client-side API for bi-directional streaming RPCs,
/// where the outgoing message stream coming from the client has messages of
/// type \a W, and the incoming messages stream coming from the server has
@ -390,25 +444,6 @@ class ClientReaderWriterInterface : public ClientStreamingInterface,
template <class W, class R>
class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
public:
/// Block to create a stream and write the initial metadata and \a request
/// out. Note that \a context will be used to fill in custom initial metadata
/// used to send to the server when starting the call.
ClientReaderWriter(ChannelInterface* channel, const RpcMethod& method,
ClientContext* context)
: context_(context),
cq_(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK,
GRPC_CQ_DEFAULT_POLLING}), // Pluckable cq
call_(channel->CreateCall(method, context, &cq_)) {
if (!context_->initial_metadata_corked_) {
CallOpSet<CallOpSendInitialMetadata> ops;
ops.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
call_.PerformOps(&ops);
cq_.Pluck(&ops);
}
}
/// Block waiting to read initial metadata from the server.
/// This call is optional, but if it is used, it cannot be used concurrently
/// with or after the \a Finish method.
@ -418,7 +453,8 @@ class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
void WaitForInitialMetadata() override {
GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
CallOpSet<CallOpRecvInitialMetadata> ops;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
ops;
ops.RecvInitialMetadata(context_);
call_.PerformOps(&ops);
cq_.Pluck(&ops); // status ignored
@ -434,7 +470,9 @@ class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
/// Also receives initial metadata if not already received (updates the \a
/// ClientContext associated with this call in that case).
bool Read(R* msg) override {
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> ops;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpRecvMessage<R>>
ops;
if (!context_->initial_metadata_received_) {
ops.RecvInitialMetadata(context_);
}
@ -448,10 +486,11 @@ class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
/// Side effect:
/// Also sends initial metadata if not already sent (using the
/// \a ClientContext associated with this call to fill in values).
using WriterInterface<W>::Write;
using ::grpc::internal::WriterInterface<W>::Write;
bool Write(const W& msg, WriteOptions options) override {
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpClientSendClose>
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
::grpc::internal::CallOpSendMessage,
::grpc::internal::CallOpClientSendClose>
ops;
if (options.is_last_message()) {
@ -472,7 +511,7 @@ class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
}
bool WritesDone() override {
CallOpSet<CallOpClientSendClose> ops;
::grpc::internal::CallOpSet<::grpc::internal::CallOpClientSendClose> ops;
ops.ClientSendClose();
call_.PerformOps(&ops);
return cq_.Pluck(&ops);
@ -484,7 +523,9 @@ class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
/// - the \a ClientContext associated with this call is updated with
/// possible trailing metadata sent from the server.
Status Finish() override {
CallOpSet<CallOpRecvInitialMetadata, CallOpClientRecvStatus> ops;
::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
::grpc::internal::CallOpClientRecvStatus>
ops;
if (!context_->initial_metadata_received_) {
ops.RecvInitialMetadata(context_);
}
@ -496,15 +537,38 @@ class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
}
private:
friend class internal::ClientReaderWriterFactory<W, R>;
ClientContext* context_;
CompletionQueue cq_;
Call call_;
::grpc::internal::Call call_;
/// Block to create a stream and write the initial metadata and \a request
/// out. Note that \a context will be used to fill in custom initial metadata
/// used to send to the server when starting the call.
ClientReaderWriter(::grpc::ChannelInterface* channel,
const ::grpc::internal::RpcMethod& method,
ClientContext* context)
: context_(context),
cq_(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK,
GRPC_CQ_DEFAULT_POLLING}), // Pluckable cq
call_(channel->CreateCall(method, context, &cq_)) {
if (!context_->initial_metadata_corked_) {
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
ops;
ops.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
call_.PerformOps(&ops);
cq_.Pluck(&ops);
}
}
};
/// Server-side interface for streaming reads of message of type \a R.
template <class R>
class ServerReaderInterface : public ServerStreamingInterface,
public ReaderInterface<R> {};
class ServerReaderInterface : public internal::ServerStreamingInterface,
public internal::ReaderInterface<R> {};
/// Synchronous (blocking) server-side API for doing client-streaming RPCs,
/// where the incoming message stream coming from the client has messages of
@ -512,15 +576,13 @@ class ServerReaderInterface : public ServerStreamingInterface,
template <class R>
class ServerReader final : public ServerReaderInterface<R> {
public:
ServerReader(Call* call, ServerContext* ctx) : call_(call), ctx_(ctx) {}
/// See the \a ServerStreamingInterface.SendInitialMetadata method
/// for semantics. Note that initial metadata will be affected by the
/// \a ServerContext associated with this call.
void SendInitialMetadata() override {
GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
CallOpSet<CallOpSendInitialMetadata> ops;
internal::CallOpSet<internal::CallOpSendInitialMetadata> ops;
ops.SendInitialMetadata(ctx_->initial_metadata_,
ctx_->initial_metadata_flags());
if (ctx_->compression_level_set()) {
@ -537,21 +599,27 @@ class ServerReader final : public ServerReaderInterface<R> {
}
bool Read(R* msg) override {
CallOpSet<CallOpRecvMessage<R>> ops;
internal::CallOpSet<internal::CallOpRecvMessage<R>> ops;
ops.RecvMessage(msg);
call_->PerformOps(&ops);
return call_->cq()->Pluck(&ops) && ops.got_message;
}
private:
Call* const call_;
internal::Call* const call_;
ServerContext* const ctx_;
template <class ServiceType, class RequestType, class ResponseType>
friend class internal::ClientStreamingHandler;
ServerReader(internal::Call* call, ServerContext* ctx)
: call_(call), ctx_(ctx) {}
};
/// Server-side interface for streaming writes of message of type \a W.
template <class W>
class ServerWriterInterface : public ServerStreamingInterface,
public WriterInterface<W> {};
class ServerWriterInterface : public internal::ServerStreamingInterface,
public internal::WriterInterface<W> {};
/// Synchronous (blocking) server-side API for doing
/// server-streaming RPCs, where the outgoing message stream coming from the
@ -559,8 +627,6 @@ class ServerWriterInterface : public ServerStreamingInterface,
template <class W>
class ServerWriter final : public ServerWriterInterface<W> {
public:
ServerWriter(Call* call, ServerContext* ctx) : call_(call), ctx_(ctx) {}
/// See the \a ServerStreamingInterface.SendInitialMetadata method
/// for semantics.
/// Note that initial metadata will be affected by the
@ -568,7 +634,7 @@ class ServerWriter final : public ServerWriterInterface<W> {
void SendInitialMetadata() override {
GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
CallOpSet<CallOpSendInitialMetadata> ops;
internal::CallOpSet<internal::CallOpSendInitialMetadata> ops;
ops.SendInitialMetadata(ctx_->initial_metadata_,
ctx_->initial_metadata_flags());
if (ctx_->compression_level_set()) {
@ -584,11 +650,12 @@ class ServerWriter final : public ServerWriterInterface<W> {
/// Side effect:
/// Also sends initial metadata if not already sent (using the
/// \a ClientContext associated with this call to fill in values).
using WriterInterface<W>::Write;
using internal::WriterInterface<W>::Write;
bool Write(const W& msg, WriteOptions options) override {
if (options.is_last_message()) {
options.set_buffer_hint();
}
if (!ctx_->pending_ops_.SendMessage(msg, options).ok()) {
return false;
}
@ -613,15 +680,21 @@ class ServerWriter final : public ServerWriterInterface<W> {
}
private:
Call* const call_;
internal::Call* const call_;
ServerContext* const ctx_;
template <class ServiceType, class RequestType, class ResponseType>
friend class internal::ServerStreamingHandler;
ServerWriter(internal::Call* call, ServerContext* ctx)
: call_(call), ctx_(ctx) {}
};
/// Server-side interface for bi-directional streaming.
template <class W, class R>
class ServerReaderWriterInterface : public ServerStreamingInterface,
public WriterInterface<W>,
public ReaderInterface<R> {};
class ServerReaderWriterInterface : public internal::ServerStreamingInterface,
public internal::WriterInterface<W>,
public internal::ReaderInterface<R> {};
/// Actual implementation of bi-directional streaming
namespace internal {
@ -688,6 +761,7 @@ class ServerReaderWriterBody final {
Call* const call_;
ServerContext* const ctx_;
};
} // namespace internal
/// Synchronous (blocking) server-side API for a bidirectional
@ -697,8 +771,6 @@ class ServerReaderWriterBody final {
template <class W, class R>
class ServerReaderWriter final : public ServerReaderWriterInterface<W, R> {
public:
ServerReaderWriter(Call* call, ServerContext* ctx) : body_(call, ctx) {}
/// See the \a ServerStreamingInterface.SendInitialMetadata method
/// for semantics. Note that initial metadata will be affected by the
/// \a ServerContext associated with this call.
@ -715,13 +787,18 @@ class ServerReaderWriter final : public ServerReaderWriterInterface<W, R> {
/// Side effect:
/// Also sends initial metadata if not already sent (using the \a
/// ServerContext associated with this call).
using WriterInterface<W>::Write;
using internal::WriterInterface<W>::Write;
bool Write(const W& msg, WriteOptions options) override {
return body_.Write(msg, options);
}
private:
internal::ServerReaderWriterBody<W, R> body_;
friend class internal::TemplatedBidiStreamingHandler<ServerReaderWriter<W, R>,
false>;
ServerReaderWriter(internal::Call* call, ServerContext* ctx)
: body_(call, ctx) {}
};
/// A class to represent a flow-controlled unary call. This is something
@ -736,9 +813,6 @@ template <class RequestType, class ResponseType>
class ServerUnaryStreamer final
: public ServerReaderWriterInterface<ResponseType, RequestType> {
public:
ServerUnaryStreamer(Call* call, ServerContext* ctx)
: body_(call, ctx), read_done_(false), write_done_(false) {}
/// Block to send initial metadata to client.
/// Implicit input parameter:
/// - the \a ServerContext associated with this call will be used for
@ -775,7 +849,7 @@ class ServerUnaryStreamer final
/// \param options The WriteOptions affecting the write operation.
///
/// \return \a true on success, \a false when the stream has been closed.
using WriterInterface<ResponseType>::Write;
using internal::WriterInterface<ResponseType>::Write;
bool Write(const ResponseType& response, WriteOptions options) override {
if (write_done_ || !read_done_) {
return false;
@ -788,6 +862,11 @@ class ServerUnaryStreamer final
internal::ServerReaderWriterBody<ResponseType, RequestType> body_;
bool read_done_;
bool write_done_;
friend class internal::TemplatedBidiStreamingHandler<
ServerUnaryStreamer<RequestType, ResponseType>, true>;
ServerUnaryStreamer(internal::Call* call, ServerContext* ctx)
: body_(call, ctx), read_done_(false), write_done_(false) {}
};
/// A class to represent a flow-controlled server-side streaming call.
@ -799,9 +878,6 @@ template <class RequestType, class ResponseType>
class ServerSplitStreamer final
: public ServerReaderWriterInterface<ResponseType, RequestType> {
public:
ServerSplitStreamer(Call* call, ServerContext* ctx)
: body_(call, ctx), read_done_(false) {}
/// Block to send initial metadata to client.
/// Implicit input parameter:
/// - the \a ServerContext associated with this call will be used for
@ -838,7 +914,7 @@ class ServerSplitStreamer final
/// \param options The WriteOptions affecting the write operation.
///
/// \return \a true on success, \a false when the stream has been closed.
using WriterInterface<ResponseType>::Write;
using internal::WriterInterface<ResponseType>::Write;
bool Write(const ResponseType& response, WriteOptions options) override {
return read_done_ && body_.Write(response, options);
}
@ -846,6 +922,11 @@ class ServerSplitStreamer final
private:
internal::ServerReaderWriterBody<ResponseType, RequestType> body_;
bool read_done_;
friend class internal::TemplatedBidiStreamingHandler<
ServerSplitStreamer<RequestType, ResponseType>, false>;
ServerSplitStreamer(internal::Call* call, ServerContext* ctx)
: body_(call, ctx), read_done_(false) {}
};
} // namespace grpc

@ -19,6 +19,8 @@
#ifndef GRPCXX_IMPL_CODEGEN_TIME_H
#define GRPCXX_IMPL_CODEGEN_TIME_H
#include <chrono>
#include <grpc++/impl/codegen/config.h>
#include <grpc/impl/codegen/grpc_types.h>
@ -59,10 +61,6 @@ class TimePoint<gpr_timespec> {
} // namespace grpc
#include <chrono>
#include <grpc/impl/codegen/grpc_types.h>
namespace grpc {
// from and to should be absolute time.

@ -175,7 +175,8 @@ class Server final : public ServerInterface, private GrpcLibraryCodegen {
/// \param num_cqs How many completion queues does \a cqs hold.
void Start(ServerCompletionQueue** cqs, size_t num_cqs) override;
void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) override;
void PerformOpsOnCall(internal::CallOpSetInterface* ops,
internal::Call* call) override;
void ShutdownInternal(gpr_timespec deadline) override;

@ -40,7 +40,6 @@ namespace grpc {
class AsyncGenericService;
class ResourceQuota;
class CompletionQueue;
class RpcService;
class Server;
class ServerCompletionQueue;
class ServerCredentials;

@ -16,10 +16,6 @@
*
*/
/** RPC-internal Census APIs. These are designed to be generic enough that
* they can (ultimately) be used in many different RPC systems (with differing
* implementations). */
#ifndef GRPC_CENSUS_H
#define GRPC_CENSUS_H
@ -29,439 +25,12 @@
extern "C" {
#endif
/** Identify census features that can be enabled via census_initialize(). */
enum census_features {
CENSUS_FEATURE_NONE = 0, /** Do not enable census. */
CENSUS_FEATURE_TRACING = 1, /** Enable census tracing. */
CENSUS_FEATURE_STATS = 2, /** Enable Census stats collection. */
CENSUS_FEATURE_CPU = 4, /** Enable Census CPU usage collection. */
CENSUS_FEATURE_ALL =
CENSUS_FEATURE_TRACING | CENSUS_FEATURE_STATS | CENSUS_FEATURE_CPU
};
/** Shutdown and startup census subsystem. The 'features' argument should be
* the OR (|) of census_features values. If census fails to initialize, then
* census_initialize() will return -1, otherwise the set of enabled features
* (which may be smaller than that provided in the `features` argument, see
* census_supported()) is returned. It is an error to call census_initialize()
* more than once (without an intervening census_shutdown()). These functions
* are not thread-safe. */
CENSUSAPI int census_initialize(int features);
CENSUSAPI void census_shutdown(void);
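An illustrative sketch, not part of this header, of enabling a subset of census features at startup; which requested features take effect depends on census_supported().

int enabled = census_initialize(CENSUS_FEATURE_TRACING | CENSUS_FEATURE_STATS);
if (enabled == -1) {
  /* census failed to initialize (e.g. it was already initialized) */
} else {
  /* 'enabled' may be a subset of the requested features */
}
/* ... later, at process teardown ... */
census_shutdown();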
/** Return the features supported by the current census implementation (not all
* features will be available on all platforms). */
CENSUSAPI int census_supported(void);
/** Return the census features currently enabled. */
CENSUSAPI int census_enabled(void);
/**
A Census Context is a handle used by Census to represent the current tracing
and stats collection information. Contexts should be propagated across RPC's
(this is the responsibility of the local RPC system). A context is typically
used as the first argument to most census functions. Conceptually, they
should be thought of as specific to a single RPC/thread. The user visible
context representation is that of a collection of key:value string pairs,
each of which is termed a 'tag'; these form the basis against which Census
metrics will be recorded. Keys are unique within a context. */
(this is the responsibility of the local RPC system). */
typedef struct census_context census_context;
/** A tag is a key:value pair. Both keys and values are nil-terminated strings,
containing printable ASCII characters (decimal 32-126). Keys must be at
least one character in length. Both keys and values can have at most
CENSUS_MAX_TAG_KV_LEN characters (including the terminating nil). The
maximum number of tags that can be propagated is
CENSUS_MAX_PROPAGATED_TAGS. Users should also remember that some systems
may have limits on, e.g., the number of bytes that can be transmitted as
metadata, and that larger tags means more memory consumed and time in
processing. */
typedef struct {
const char *key;
const char *value;
uint8_t flags;
} census_tag;
/** Maximum length of a tag's key or value. */
#define CENSUS_MAX_TAG_KV_LEN 255
/** Maximum number of propagatable tags. */
#define CENSUS_MAX_PROPAGATED_TAGS 255
/** Tag flags. */
#define CENSUS_TAG_PROPAGATE 1 /** Tag should be propagated over RPC */
#define CENSUS_TAG_STATS 2 /** Tag will be used for statistics aggregation */
#define CENSUS_TAG_RESERVED 4 /** Reserved for internal use. */
/** Flag values 4,8,16,32,64,128 are reserved for future/internal use. Clients
should not use or rely on their values. */
#define CENSUS_TAG_IS_PROPAGATED(flags) (flags & CENSUS_TAG_PROPAGATE)
#define CENSUS_TAG_IS_STATS(flags) (flags & CENSUS_TAG_STATS)
/** An instance of this structure is kept by every context, and records the
basic information associated with the creation of that context. */
typedef struct {
int n_propagated_tags; /** number of propagated tags */
int n_local_tags; /** number of non-propagated (local) tags */
int n_deleted_tags; /** number of tags that were deleted */
int n_added_tags; /** number of tags that were added */
int n_modified_tags; /** number of tags that were modified */
int n_invalid_tags; /** number of tags with bad keys or values (e.g.
longer than CENSUS_MAX_TAG_KV_LEN) */
int n_ignored_tags; /** number of tags ignored because of
CENSUS_MAX_PROPAGATED_TAGS limit. */
} census_context_status;
/** Create a new context, adding and removing tags from an existing context.
This will copy all tags from the 'tags' input, so it is recommended
to add as many tags in a single operation as is practical for the client.
@param base Base context to build upon. Can be NULL.
@param tags A set of tags to be added/changed/deleted. Tags with keys that
are in 'tags', but not 'base', are added to the context. Keys that are in
both 'tags' and 'base' will have their value/flags modified. Tags with keys
in both, but with NULL values, will be deleted from the context. Tags with
invalid (too long or short) keys or values will be ignored.
If adding a tag will result in more than CENSUS_MAX_PROPAGATED_TAGS in either
binary or non-binary tags, they will be ignored, as will deletions of
tags that don't exist.
@param ntags number of tags in 'tags'
@param status If not NULL, will return a pointer to a census_context_status
structure containing information about the new context and status of the
tags used in its creation.
@return A new, valid census_context.
*/
CENSUSAPI census_context *census_context_create(
const census_context *base, const census_tag *tags, int ntags,
census_context_status const **status);
/** Destroy a context. Once this function has been called, the context cannot
be reused. */
CENSUSAPI void census_context_destroy(census_context *context);
/** Get a pointer to the original status from the context creation. */
CENSUSAPI const census_context_status *census_context_get_status(
const census_context *context);
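An illustrative sketch, not part of this header, of building a context from tags and inspecting the creation status; the tag keys and values are arbitrary examples.

census_tag tags[] = {
    {"service", "example.EchoService", CENSUS_TAG_PROPAGATE},
    {"caller", "frontend", 0 /* local-only tag */}};
const census_context_status *status;
census_context *ctx =
    census_context_create(NULL /* no base context */, tags, 2, &status);
/* 'status' (also reachable later via census_context_get_status(ctx)) reports
   how many tags were propagated, local, invalid or ignored. */
census_context_destroy(ctx);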
/** Structure used for iterating over the tags in a context. API clients should
not use or reference internal fields - neither their contents or
presence/absence are guaranteed. */
typedef struct {
const census_context *context;
int base;
int index;
char *kvm;
} census_context_iterator;
/** Initialize a census_context_iterator. Must be called before first use. */
CENSUSAPI void census_context_initialize_iterator(
const census_context *context, census_context_iterator *iterator);
/** Get the contents of the "next" tag in the context. If there are no more
tags, returns 0 (and 'tag' contents will be unchanged), otherwise returns 1.
*/
CENSUSAPI int census_context_next_tag(census_context_iterator *iterator,
census_tag *tag);
/** Get a context tag by key. Returns 0 if the key is not present. */
CENSUSAPI int census_context_get_tag(const census_context *context,
const char *key, census_tag *tag);
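An illustrative sketch, not part of this header, of walking the tags in a context with the iterator API above; 'ctx' is assumed to be a valid census_context.

census_context_iterator it;
census_tag tag;
census_context_initialize_iterator(ctx, &it);
while (census_context_next_tag(&it, &tag)) {
  /* tag.key, tag.value and tag.flags describe one tag of 'ctx' */
}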
/** Tag set encode/decode functionality. These functions are intended
for use by RPC systems only, for purposes of transmitting/receiving contexts.
*/
/** Encode a context into a buffer.
@param context context to be encoded
@param buffer buffer into which the context will be encoded.
@param buf_size number of available bytes in buffer.
@return The number of buffer bytes consumed for the encoded context, or
zero if the buffer was of insufficient size. */
CENSUSAPI size_t census_context_encode(const census_context *context,
char *buffer, size_t buf_size);
/** Decode context buffer encoded with census_context_encode(). Returns NULL
if there is an error in parsing either buffer. */
CENSUSAPI census_context *census_context_decode(const char *buffer,
size_t size);
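An illustrative sketch, not part of this header, of round-tripping a context through the encode/decode functions, e.g. to ship it alongside RPC metadata; 'ctx' is assumed to be a valid census_context.

char buf[256];
size_t used = census_context_encode(ctx, buf, sizeof(buf));
if (used != 0) { /* 0 means 'buf' was too small */
  census_context *copy = census_context_decode(buf, used);
  if (copy != NULL) { /* NULL indicates a parse error */
    /* use 'copy' on the receiving side */
    census_context_destroy(copy);
  }
}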
/** Distributed traces can have a number of options. */
enum census_trace_mask_values {
CENSUS_TRACE_MASK_NONE = 0, /** Default, empty flags */
CENSUS_TRACE_MASK_IS_SAMPLED = 1 /** RPC tracing enabled for this context. */
};
/** Get the current trace mask associated with this context. The value returned
will be the logical OR of census_trace_mask_values values. */
CENSUSAPI int census_trace_mask(const census_context *context);
/** Set the trace mask associated with a context. */
CENSUSAPI void census_set_trace_mask(int trace_mask);
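An illustrative sketch, not part of this header, of checking and setting the trace mask described above; 'ctx' is assumed to be a valid census_context.

if (census_trace_mask(ctx) & CENSUS_TRACE_MASK_IS_SAMPLED) {
  /* RPCs under this context are being traced */
}
census_set_trace_mask(CENSUS_TRACE_MASK_IS_SAMPLED); /* enable sampling */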
/** The concept of "operation" is a fundamental concept for Census. In an RPC
system, an operation typically represents a single RPC, or a significant
sub-part thereof (e.g. a single logical "read" RPC to a distributed storage
system might do several other actions in parallel, from looking up metadata
indices to making requests of other services - each of these could be a
sub-operation within the larger RPC operation). Census uses operations for the
following:
CPU accounting: If enabled, census will measure the thread CPU time
consumed between operation start and end times.
Active operations: Census will maintain information on all currently
active operations.
Distributed tracing: Each operation serves as a logical trace span.
Stats collection: Stats are broken down by operation (e.g. latency
breakdown for each unique RPC path).
The following functions serve to delineate the start and stop points for
each logical operation. */
/**
This structure represents a timestamp as used by census to record the time
at which an operation begins.
*/
typedef struct {
/** Use gpr_timespec for default implementation. High performance
* implementations should use a cycle-counter based timestamp. */
gpr_timespec ts;
} census_timestamp;
/**
Mark the beginning of an RPC operation. The functions that record the start
of RPC operations (both client and server) may not be callable at the true
start time of the operation, because the information they need is not yet
available (e.g. the census context data will not be available in a server
RPC until at least initial metadata has been processed). To ensure
correct CPU accounting and latency recording, RPC systems can call this
function to get the timestamp of operation beginning. This can later be used
as an argument to census_start_{client,server}_rpc_op(). NB: for correct
CPU accounting, the system must guarantee that the same thread is used
for all request processing after this function is called.
@return A timestamp representing the operation start time.
*/
CENSUSAPI census_timestamp census_start_rpc_op_timestamp(void);
/**
Functions that map an RPC name ID to service/method names. Census
breaks down all RPC stats by service and method names. We leave the
definition and format of these to the RPC system. For efficiency purposes,
we encode these as a single 64 bit identifier, and allow the RPC system to
provide a structure for functions that can convert these to service and
method strings.
TODO(aveitch): Instead of providing this as an argument to the rpc_start_op()
functions, maybe it should be set once at census initialization.
*/
typedef struct {
const char *(*get_rpc_service_name)(int64_t id);
const char *(*get_rpc_method_name)(int64_t id);
} census_rpc_name_info;
/**
Start a client rpc operation. This function should be called as early in the
client RPC path as possible. This function will create a new context. If
the context argument is non-null, then the new context will inherit all
its properties, with the following changes:
- create a new operation ID for the new context, marking it as a child of
the previous operation.
- use the new RPC path and peer information for tracing and stats
collection purposes, rather than those from the original context
If the context argument is NULL, then a new root context is created. This
is particularly important for tracing purposes (the trace spans generated
will be unassociated with any other trace spans, except those
downstream). The trace_mask will be used for tracing operations associated
with the new context.
In some RPC systems (e.g. where load balancing is used), peer information
may not be available at the time the operation starts. In this case, use a
NULL value for peer, and set it later using the
census_set_rpc_client_peer() function.
@param context The parent context. Can be NULL.
@param rpc_name_id The rpc name identifier to be associated with this RPC.
@param rpc_name_info Used to decode rpc_name_id.
@param peer RPC peer. If not available at the time, NULL can be used,
and a later census_set_rpc_client_peer() call made.
@param trace_mask An OR of census_trace_mask_values values. Only used in
the creation of a new root context (context == NULL).
@param start_time A timestamp returned from census_start_rpc_op_timestamp().
Can be NULL. Used to set the true time the operation
begins.
@return A new census context.
*/
CENSUSAPI census_context *census_start_client_rpc_op(
const census_context *context, int64_t rpc_name_id,
const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask,
const census_timestamp *start_time);
/**
Add peer information to a context representing a client RPC operation.
*/
CENSUSAPI void census_set_rpc_client_peer(census_context *context,
const char *peer);
/**
Start a server RPC operation. Returns a new context to be used in future
census calls. If buffer is non-NULL, then the buffer contents should
represent the client context, as generated by census_context_encode().
If buffer is NULL, a new root context is created.
@param buffer Buffer containing bytes output from census_context_encode().
@param rpc_name_id The rpc name identifier to be associated with this RPC.
@param rpc_name_info Used to decode rpc_name_id.
@param peer RPC peer.
@param trace_mask An OR of census_trace_mask_values values. Only used in
the creation of a new root context (buffer == NULL).
@param start_time A timestamp returned from census_start_rpc_op_timestamp().
Can be NULL. Used to set the true time the operation
begins.
@return A new census context.
*/
CENSUSAPI census_context *census_start_server_rpc_op(
const char *buffer, int64_t rpc_name_id,
const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask,
census_timestamp *start_time);
/**
Start a new, non-RPC operation. In general, this function works very
similarly to census_start_client_rpc_op, with the primary difference being
the replacement of host/path information with the more generic family/name
tags. If the context argument is non-null, then the new context will
inherit all its properties, with the following changes:
- create a new operation ID for the new context, marking it as a child of
the previous operation.
- use the family and name information for tracing and stats collection
purposes, rather than those from the original context
If the context argument is NULL, then a new root context is created. This
is particularly important for tracing purposes (the trace spans generated
will be unassociated with any other trace spans, except those
downstream). The trace_mask will be used for tracing
operations associated with the new context.
@param context The base context. Can be NULL.
@param family Family name to associate with the trace
@param name Name within family to associate with traces/stats
@param trace_mask An OR of census_trace_mask_values values. Only used if
context is NULL.
@return A new census context.
*/
CENSUSAPI census_context *census_start_op(census_context *context,
const char *family, const char *name,
int trace_mask);
/**
End an operation started by any of the census_start_*_op*() calls. The
context used in this call will no longer be valid once this function
completes.
@param context Context associated with operation which is ending.
@param status status associated with the operation. Not interpreted by
census.
*/
CENSUSAPI void census_end_op(census_context *context, int status);
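/* Sketch of the client-side operation lifecycle described above. The name
   mapping functions, the rpc_name_id value and the peer string are
   hypothetical placeholders. */
static const char *example_service_name(int64_t id) {
  (void)id;
  return "Storage";
}
static const char *example_method_name(int64_t id) {
  (void)id;
  return "Read";
}

static void example_client_rpc_op(void) {
  census_rpc_name_info names = {example_service_name, example_method_name};
  /* Capture the start time as early as possible in the RPC path. */
  census_timestamp start = census_start_rpc_op_timestamp();
  /* NULL parent => new root context; request tracing via the trace mask. The
     peer is not yet known (e.g. load balancing), so pass NULL and set it
     later. */
  census_context *op = census_start_client_rpc_op(
      NULL /* context */, 1 /* rpc_name_id */, &names, NULL /* peer */,
      CENSUS_TRACE_MASK_IS_SAMPLED, &start);
  census_set_rpc_client_peer(op, "ipv4:10.0.0.1:443");
  /* ... issue the RPC ... */
  census_end_op(op, 0 /* status, not interpreted by census */);
}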
#define CENSUS_TRACE_RECORD_START_OP ((uint32_t)0)
#define CENSUS_TRACE_RECORD_END_OP ((uint32_t)1)
/** Insert a trace record into the trace stream. The record consists of an
arbitrary size buffer, the size of which is provided in 'n'.
@param context Trace context
@param type User-defined type to associate with trace entry.
@param buffer Pointer to buffer to use
@param n Number of bytes in buffer
*/
CENSUSAPI void census_trace_print(census_context *context, uint32_t type,
const char *buffer, size_t n);
/** Trace record. */
typedef struct {
census_timestamp timestamp; /** Time of record creation */
uint64_t trace_id; /** Trace ID associated with record */
uint64_t op_id; /** Operation ID associated with record */
uint32_t type; /** Type (as used in census_trace_print()) */
const char *buffer; /** Buffer (from census_trace_print()) */
size_t buf_size; /** Number of bytes inside buffer */
} census_trace_record;
/** Start a scan of existing trace records. While a scan is ongoing, addition
of new trace records will be blocked if the underlying trace buffers
fill up, so trace processing systems should endeavor to complete
reading as soon as possible.
@param consume if non-zero, indicates that reading records also "consumes"
the previously read record - i.e. releases space in the trace log
while scanning is ongoing.
@returns 0 on success, non-zero on failure (e.g. if a scan is already ongoing)
*/
CENSUSAPI int census_trace_scan_start(int consume);
/** Get a trace record. The data pointed to by the trace buffer is guaranteed
stable until the next census_get_trace_record() call (if the consume
argument to census_trace_scan_start was non-zero) or census_trace_scan_end()
is called (otherwise).
@param trace_record structure that will be filled in with oldest trace record.
@returns -1 if an error occurred (e.g. no previous call to
census_trace_scan_start()), 0 if there is no more trace data (and
trace_record will not be modified) or 1 otherwise.
*/
CENSUSAPI int census_get_trace_record(census_trace_record *trace_record);
/** End a scan previously started by census_trace_scan_start() */
CENSUSAPI void census_trace_scan_end();
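/* Sketch of recording a trace annotation and then draining the trace log via
   the scan API above (the record type value 42 is an arbitrary user-defined
   type): */
static void example_trace_scan(census_context *ctx) {
  static const char msg[] = "cache miss";
  census_trace_print(ctx, 42, msg, sizeof(msg) - 1);
  if (census_trace_scan_start(1 /* consume while reading */) != 0) {
    return; /* a scan is already in progress */
  }
  census_trace_record rec;
  while (census_get_trace_record(&rec) == 1) {
    /* rec.buffer holds rec.buf_size bytes recorded by census_trace_print(). */
  }
  census_trace_scan_end();
}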
/** Core stats collection APIs. The following concepts are used:
* Resource: Users record measurements for a single resource. Examples
include RPC latency, CPU seconds consumed, and bytes transmitted.
* Aggregation: An aggregation of a set of measurements. Census supports the
following aggregation types:
* Distribution - statistical distribution information, used for
recording average, standard deviation etc. Can include a histogram.
* Interval - a count of events that happen in a rolling time window.
* View: A view is a combination of a Resource, a set of tag keys and an
Aggregation. When a measurement for a Resource matches the View tags, it is
recorded (for each unique set of tag values) using the Aggregation type.
Each resource can have an arbitrary number of views by which it will be
broken down.
Census uses protos to define each of the above, and output results. This
ensures unification across the different language and runtime
implementations. The proto definitions can be found in src/proto/census.
*/
/** Define a new resource. `resource_pb` should contain an encoded Resource
protobuf, `resource_pb_size` being the size of the buffer. Returns a negative
value on error, or a non-negative (>= 0) resource id (for use in
census_delete_resource() and census_record_values()). In order to be valid, a
resource must have a name, and at least one numerator in its unit type. The
resource name must be unique, and an error will be returned if it is not. */
CENSUSAPI int32_t census_define_resource(const uint8_t *resource_pb,
size_t resource_pb_size);
/** Delete a resource created by census_define_resource(). */
CENSUSAPI void census_delete_resource(int32_t resource_id);
/** Determine the id of a resource, given its name. Returns -1 if the resource
does not exist. */
CENSUSAPI int32_t census_resource_id(const char *name);
/** A single value to be recorded comprises two parts: an ID for the particular
* resource and the value to be recorded against it. */
typedef struct {
int32_t resource_id;
double value;
} census_value;
/** Record new usage values against the given context. */
CENSUSAPI void census_record_values(census_context *context,
census_value *values, size_t nvalues);
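/* Sketch of recording a measurement against an existing resource; the
   "client_rpc_latency" name matches the base resource defined in
   base_resources.cc, and is otherwise illustrative. */
static void example_record_latency(census_context *ctx, double latency_secs) {
  int32_t id = census_resource_id("client_rpc_latency");
  if (id < 0) {
    return; /* resource has not been defined */
  }
  census_value v = {id, latency_secs};
  census_record_values(ctx, &v, 1);
}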
#ifdef __cplusplus
}
#endif

@ -143,6 +143,23 @@ GRPCAPI void grpc_completion_queue_shutdown(grpc_completion_queue *cq);
drained and no threads are executing grpc_completion_queue_next */
GRPCAPI void grpc_completion_queue_destroy(grpc_completion_queue *cq);
/*********** EXPERIMENTAL API ************/
/** Initializes a thread local cache for \a cq.
* grpc_completion_queue_thread_local_cache_flush() MUST be called on the same thread,
* with the same cq.
*/
GRPCAPI void grpc_completion_queue_thread_local_cache_init(
grpc_completion_queue *cq);
/*********** EXPERIMENTAL API ************/
/** Flushes the thread local cache for \a cq.
* Returns 1 if the cache contained an event. If there was an event
* in \a cq's thread-local cache, its tag is placed in \a tag, and \a ok is
* set to the event's success value.
*/
GRPCAPI int grpc_completion_queue_thread_local_cache_flush(
grpc_completion_queue *cq, void **tag, int *ok);
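/* Sketch of the experimental thread-local cache flow above. Illustrative
   only: init and flush must happen on the same thread, with the same cq. */
static void example_cq_tls_cache(grpc_completion_queue *cq) {
  grpc_completion_queue_thread_local_cache_init(cq);
  /* ... start a batch whose completion may land in this thread's cache ... */
  void *tag = NULL;
  int ok = 0;
  if (grpc_completion_queue_thread_local_cache_flush(cq, &tag, &ok)) {
    /* A cached event was pending: handle (tag, ok) here rather than waiting
       for grpc_completion_queue_next(). */
  }
}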
/** Create a completion queue alarm instance */
GRPCAPI grpc_alarm *grpc_alarm_create(void *reserved);

@ -316,6 +316,43 @@ typedef struct grpc_server_credentials grpc_server_credentials;
*/
GRPCAPI void grpc_server_credentials_release(grpc_server_credentials *creds);
/** Server certificate config object holds the server's public certificates and
associated private keys, as well as any CA certificates needed for client
certificate validation (if applicable). Create using
grpc_ssl_server_certificate_config_create(). */
typedef struct grpc_ssl_server_certificate_config
grpc_ssl_server_certificate_config;
/** Creates a grpc_ssl_server_certificate_config object.
- pem_roots_cert is the NULL-terminated string containing the PEM encoding of
the client root certificates. This parameter may be NULL if the server does
not want the client to be authenticated with SSL.
- pem_key_cert_pairs is an array of private key / certificate chain pairs for
the server. This parameter cannot be NULL.
- num_key_cert_pairs indicates the number of items in the pem_key_cert_pairs
array. It must be at least 1.
- It is the caller's responsibility to free this object via
grpc_ssl_server_certificate_config_destroy(). */
GRPCAPI grpc_ssl_server_certificate_config *
grpc_ssl_server_certificate_config_create(
const char *pem_root_certs,
const grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
size_t num_key_cert_pairs);
/** Destroys a grpc_ssl_server_certificate_config object. */
GRPCAPI void grpc_ssl_server_certificate_config_destroy(
grpc_ssl_server_certificate_config *config);
/** Callback to retrieve updated SSL server certificates, private keys, and
trusted CAs (for client authentication).
- user_data parameter, if not NULL, contains opaque data to be used by the
callback.
- Use grpc_ssl_server_certificate_config_create to create the config.
- The caller assumes ownership of the config. */
typedef grpc_ssl_certificate_config_reload_status (
*grpc_ssl_server_certificate_config_callback)(
void *user_data, grpc_ssl_server_certificate_config **config);
/** Deprecated in favor of grpc_ssl_server_credentials_create_ex.
Creates an SSL server_credentials object.
- pem_roots_cert is the NULL-terminated string containing the PEM encoding of
@ -332,7 +369,8 @@ GRPCAPI grpc_server_credentials *grpc_ssl_server_credentials_create(
const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
size_t num_key_cert_pairs, int force_client_auth, void *reserved);
/** Same as grpc_ssl_server_credentials_create method except uses
/** Deprecated in favor of grpc_ssl_server_credentials_create_with_options.
Same as grpc_ssl_server_credentials_create method except uses
grpc_ssl_client_certificate_request_type enum to support more ways to
authenticate client certificates. */
GRPCAPI grpc_server_credentials *grpc_ssl_server_credentials_create_ex(
@ -341,6 +379,40 @@ GRPCAPI grpc_server_credentials *grpc_ssl_server_credentials_create_ex(
grpc_ssl_client_certificate_request_type client_certificate_request,
void *reserved);
typedef struct grpc_ssl_server_credentials_options
grpc_ssl_server_credentials_options;
/** Creates an options object using a certificate config. Use this method when
the certificates and keys of the SSL server will not change during the
server's lifetime.
- Takes ownership of the certificate_config parameter. */
GRPCAPI grpc_ssl_server_credentials_options *
grpc_ssl_server_credentials_create_options_using_config(
grpc_ssl_client_certificate_request_type client_certificate_request,
grpc_ssl_server_certificate_config *certificate_config);
/** Creates an options object using a certificate config fetcher. Use this
method to reload the certificates and keys of the SSL server without
interrupting the operation of the server. Initial certificate config will be
fetched during server initialization.
- user_data parameter, if not NULL, contains opaque data which will be passed
to the fetcher (see definition of
grpc_ssl_server_certificate_config_callback). */
GRPCAPI grpc_ssl_server_credentials_options *
grpc_ssl_server_credentials_create_options_using_config_fetcher(
grpc_ssl_client_certificate_request_type client_certificate_request,
grpc_ssl_server_certificate_config_callback cb, void *user_data);
/** Destroys a grpc_ssl_server_credentials_options object. */
GRPCAPI void grpc_ssl_server_credentials_options_destroy(
grpc_ssl_server_credentials_options *options);
/** Creates an SSL server_credentials object using the provided options struct.
- Takes ownership of the options parameter. */
GRPCAPI grpc_server_credentials *
grpc_ssl_server_credentials_create_with_options(
grpc_ssl_server_credentials_options *options);
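/* Sketch of wiring a certificate config fetcher into server credentials using
   the options API above. The PEM strings are placeholders; the
   private_key/cert_chain member names of grpc_ssl_pem_key_cert_pair and the
   GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE constant (from
   grpc_security_constants.h) are assumed here. */
static grpc_ssl_certificate_config_reload_status example_cert_reload(
    void *user_data, grpc_ssl_server_certificate_config **config) {
  (void)user_data;
  /* Return ..._RELOAD_UNCHANGED when no rotation is needed. */
  grpc_ssl_pem_key_cert_pair pair = {.private_key = "<private key PEM>",
                                     .cert_chain = "<cert chain PEM>"};
  *config = grpc_ssl_server_certificate_config_create(
      NULL /* pem_root_certs: no client auth */, &pair, 1);
  return GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_NEW;
}

static grpc_server_credentials *example_server_credentials(void) {
  grpc_ssl_server_credentials_options *opts =
      grpc_ssl_server_credentials_create_options_using_config_fetcher(
          GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE, example_cert_reload,
          NULL /* user_data */);
  return grpc_ssl_server_credentials_create_with_options(opts);
}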
/** --- Server-side secure ports. --- */
/** Add a HTTP2 over an encrypted link over tcp listener.

@ -48,6 +48,13 @@ typedef enum {
GRPC_SSL_ROOTS_OVERRIDE_FAIL
} grpc_ssl_roots_override_result;
/** Callback results for dynamically loading a SSL certificate config. */
typedef enum {
GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_UNCHANGED,
GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_NEW,
GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_FAIL
} grpc_ssl_certificate_config_reload_status;
typedef enum {
/** Server does not request client certificate. A client can present a self
signed or signed certificates if it wishes to do so and they would be

@ -25,8 +25,6 @@ extern "C" {
/** Connectivity state of a channel. */
typedef enum {
/** channel has just been initialized */
GRPC_CHANNEL_INIT = -1,
/** channel is idle */
GRPC_CHANNEL_IDLE,
/** channel is connecting */

@ -274,7 +274,23 @@ GPRAPI intptr_t gpr_stats_read(const gpr_stats_counter *c);
#endif /* 0 */
#ifdef __cplusplus
}
} // extern "C"
namespace grpc_core {
class mu_guard {
public:
mu_guard(gpr_mu *mu) : mu_(mu) { gpr_mu_lock(mu); }
~mu_guard() { gpr_mu_unlock(mu_); }
mu_guard(const mu_guard &) = delete;
mu_guard &operator=(const mu_guard &) = delete;
private:
gpr_mu *const mu_;
};
} // namespace grpc_core
#endif
#endif /* GRPC_SUPPORT_SYNC_H */

@ -193,6 +193,7 @@
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/transport/bin_decoder.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/transport/bin_encoder.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/transport/chttp2_transport.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/transport/flow_control.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/transport/frame.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/transport/frame_data.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/transport/frame_goaway.h" role="src" />
@ -241,6 +242,7 @@
<file baseinstalldir="/" name="src/core/tsi/transport_security_adapter.h" role="src" />
<file baseinstalldir="/" name="src/core/tsi/transport_security_interface.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/server/chttp2_server.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/backup_poller.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/client_channel.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/client_channel_factory.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/connector.h" role="src" />
@ -394,28 +396,11 @@
<file baseinstalldir="/" name="third_party/nanopb/pb_decode.h" role="src" />
<file baseinstalldir="/" name="third_party/nanopb/pb_encode.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/subchannel_list.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/load_reporting/server_load_reporting_filter.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/load_reporting/server_load_reporting_plugin.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/aggregation.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/base_resources.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/census_interface.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/census_rpc_stats.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/gen/census.pb.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/gen/trace_context.pb.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/grpc_filter.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/intrusive_hash_map.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/intrusive_hash_map_internal.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/mlog.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/resource.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/rpc_metric_id.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/trace_context.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/trace_label.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/trace_propagation.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/trace_status.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/trace_string.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/tracing.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/max_age/max_age_filter.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/message_size/message_size_filter.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h" role="src" />
@ -614,6 +599,7 @@
<file baseinstalldir="/" name="src/core/tsi/transport_security_adapter.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/server/chttp2_server.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/backup_poller.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/channel_connectivity.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/client_channel.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/client_channel_factory.cc" role="src" />
@ -653,6 +639,7 @@
<file baseinstalldir="/" name="third_party/nanopb/pb_encode.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc" role="src" />
@ -662,21 +649,7 @@
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/load_reporting/server_load_reporting_filter.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/base_resources.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/context.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/gen/census.pb.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/gen/trace_context.pb.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/grpc_context.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/grpc_filter.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/grpc_plugin.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/initialize.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/intrusive_hash_map.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/mlog.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/operation.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/placeholders.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/resource.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/trace_context.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/census/tracing.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/max_age/max_age_filter.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/message_size/message_size_filter.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc" role="src" />

@ -140,7 +140,6 @@ grpc::string GetHeaderIncludes(grpc_generator::File *file,
printer->Print(vars, "namespace grpc {\n");
printer->Print(vars, "class CompletionQueue;\n");
printer->Print(vars, "class Channel;\n");
printer->Print(vars, "class RpcService;\n");
printer->Print(vars, "class ServerCompletionQueue;\n");
printer->Print(vars, "class ServerContext;\n");
printer->Print(vars, "} // namespace grpc\n\n");
@ -324,7 +323,8 @@ void PrintHeaderClientMethodInterfaces(
} else if (ServerOnlyStreaming(method)) {
printer->Print(
*vars,
"virtual ::grpc::ClientReaderInterface< $Response$>* $Method$Raw("
"virtual ::grpc::ClientReaderInterface< $Response$>* "
"$Method$Raw("
"::grpc::ClientContext* context, const $Request$& request) = 0;\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
@ -546,7 +546,8 @@ void PrintHeaderClientMethodData(grpc_generator::Printer *printer,
const grpc_generator::Method *method,
std::map<grpc::string, grpc::string> *vars) {
(*vars)["Method"] = method->name();
printer->Print(*vars, "const ::grpc::RpcMethod rpcmethod_$Method$_;\n");
printer->Print(*vars,
"const ::grpc::internal::RpcMethod rpcmethod_$Method$_;\n");
}
void PrintHeaderServerMethodSync(grpc_generator::Printer *printer,
@ -718,7 +719,7 @@ void PrintHeaderServerMethodStreamedUnary(
printer->Print(*vars,
"WithStreamedUnaryMethod_$Method$() {\n"
" ::grpc::Service::MarkMethodStreamed($Idx$,\n"
" new ::grpc::StreamedUnaryHandler< $Request$, "
" new ::grpc::internal::StreamedUnaryHandler< $Request$, "
"$Response$>(std::bind"
"(&WithStreamedUnaryMethod_$Method$<BaseClass>::"
"Streamed$Method$, this, std::placeholders::_1, "
@ -766,15 +767,16 @@ void PrintHeaderServerMethodSplitStreaming(
"{}\n");
printer->Print(" public:\n");
printer->Indent();
printer->Print(*vars,
"WithSplitStreamingMethod_$Method$() {\n"
" ::grpc::Service::MarkMethodStreamed($Idx$,\n"
" new ::grpc::SplitServerStreamingHandler< $Request$, "
"$Response$>(std::bind"
"(&WithSplitStreamingMethod_$Method$<BaseClass>::"
"Streamed$Method$, this, std::placeholders::_1, "
"std::placeholders::_2)));\n"
"}\n");
printer->Print(
*vars,
"WithSplitStreamingMethod_$Method$() {\n"
" ::grpc::Service::MarkMethodStreamed($Idx$,\n"
" new ::grpc::internal::SplitServerStreamingHandler< $Request$, "
"$Response$>(std::bind"
"(&WithSplitStreamingMethod_$Method$<BaseClass>::"
"Streamed$Method$, this, std::placeholders::_1, "
"std::placeholders::_2)));\n"
"}\n");
printer->Print(*vars,
"~WithSplitStreamingMethod_$Method$() override {\n"
" BaseClassMustBeDerivedFromService(this);\n"
@ -914,7 +916,8 @@ void PrintHeaderService(grpc_generator::Printer *printer,
" {\n public:\n");
printer->Indent();
printer->Print(
"Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel);\n");
"Stub(const std::shared_ptr< ::grpc::ChannelInterface>& "
"channel);\n");
for (int i = 0; i < service->method_count(); ++i) {
PrintHeaderClientMethod(printer, service->method(i).get(), vars, true);
}
@ -1185,10 +1188,9 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"::grpc::ClientContext* context, "
"const $Request$& request, $Response$* response) {\n");
printer->Print(*vars,
" return ::grpc::BlockingUnaryCall(channel_.get(), "
"rpcmethod_$Method$_, "
"context, request, response);\n"
"}\n\n");
" return ::grpc::internal::BlockingUnaryCall"
"(channel_.get(), rpcmethod_$Method$_, "
"context, request, response);\n}\n\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
(*vars)["AsyncStart"] = async_prefix.start;
@ -1198,25 +1200,27 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"ClientContext* context, "
"const $Request$& request, "
"::grpc::CompletionQueue* cq) {\n");
printer->Print(*vars,
" return "
"::grpc::ClientAsyncResponseReader< $Response$>::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, request, $AsyncStart$);\n"
"}\n\n");
printer->Print(
*vars,
" return "
"::grpc::internal::ClientAsyncResponseReaderFactory< $Response$>"
"::Create(channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, request, $AsyncStart$);\n"
"}\n\n");
}
} else if (ClientOnlyStreaming(method)) {
printer->Print(*vars,
"::grpc::ClientWriter< $Request$>* "
"$ns$$Service$::Stub::$Method$Raw("
"::grpc::ClientContext* context, $Response$* response) {\n");
printer->Print(*vars,
" return new ::grpc::ClientWriter< $Request$>("
"channel_.get(), "
"rpcmethod_$Method$_, "
"context, response);\n"
"}\n\n");
printer->Print(
*vars,
" return ::grpc::internal::ClientWriterFactory< $Request$>::Create("
"channel_.get(), "
"rpcmethod_$Method$_, "
"context, response);\n"
"}\n\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
(*vars)["AsyncStart"] = async_prefix.start;
@ -1227,12 +1231,13 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"$ns$$Service$::Stub::$AsyncPrefix$$Method$Raw("
"::grpc::ClientContext* context, $Response$* response, "
"::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
printer->Print(*vars,
" return ::grpc::ClientAsyncWriter< $Request$>::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, response, $AsyncStart$$AsyncCreateArgs$);\n"
"}\n\n");
printer->Print(
*vars,
" return ::grpc::internal::ClientAsyncWriterFactory< $Request$>"
"::Create(channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, response, $AsyncStart$$AsyncCreateArgs$);\n"
"}\n\n");
}
} else if (ServerOnlyStreaming(method)) {
printer->Print(
@ -1240,12 +1245,13 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"::grpc::ClientReader< $Response$>* "
"$ns$$Service$::Stub::$Method$Raw("
"::grpc::ClientContext* context, const $Request$& request) {\n");
printer->Print(*vars,
" return new ::grpc::ClientReader< $Response$>("
"channel_.get(), "
"rpcmethod_$Method$_, "
"context, request);\n"
"}\n\n");
printer->Print(
*vars,
" return ::grpc::internal::ClientReaderFactory< $Response$>::Create("
"channel_.get(), "
"rpcmethod_$Method$_, "
"context, request);\n"
"}\n\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
(*vars)["AsyncStart"] = async_prefix.start;
@ -1257,12 +1263,13 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"$ns$$Service$::Stub::$AsyncPrefix$$Method$Raw("
"::grpc::ClientContext* context, const $Request$& request, "
"::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
printer->Print(*vars,
" return ::grpc::ClientAsyncReader< $Response$>::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, request, $AsyncStart$$AsyncCreateArgs$);\n"
"}\n\n");
printer->Print(
*vars,
" return ::grpc::internal::ClientAsyncReaderFactory< $Response$>"
"::Create(channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, request, $AsyncStart$$AsyncCreateArgs$);\n"
"}\n\n");
}
} else if (method->BidiStreaming()) {
printer->Print(
@ -1270,8 +1277,8 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"::grpc::ClientReaderWriter< $Request$, $Response$>* "
"$ns$$Service$::Stub::$Method$Raw(::grpc::ClientContext* context) {\n");
printer->Print(*vars,
" return new ::grpc::ClientReaderWriter< "
"$Request$, $Response$>("
" return ::grpc::internal::ClientReaderWriterFactory< "
"$Request$, $Response$>::Create("
"channel_.get(), "
"rpcmethod_$Method$_, "
"context);\n"
@ -1286,14 +1293,14 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"$ns$$Service$::Stub::$AsyncPrefix$$Method$Raw(::grpc::"
"ClientContext* context, "
"::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
printer->Print(
*vars,
" return "
"::grpc::ClientAsyncReaderWriter< $Request$, $Response$>::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, $AsyncStart$$AsyncCreateArgs$);\n"
"}\n\n");
printer->Print(*vars,
" return "
"::grpc::internal::ClientAsyncReaderWriterFactory< "
"$Request$, $Response$>::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, $AsyncStart$$AsyncCreateArgs$);\n"
"}\n\n");
}
}
}
@ -1404,7 +1411,7 @@ void PrintSourceService(grpc_generator::Printer *printer,
printer->Print(*vars,
", rpcmethod_$Method$_("
"$prefix$$Service$_method_names[$Idx$], "
"::grpc::RpcMethod::$StreamingType$, "
"::grpc::internal::RpcMethod::$StreamingType$, "
"channel"
")\n");
}
@ -1427,38 +1434,38 @@ void PrintSourceService(grpc_generator::Printer *printer,
if (method->NoStreaming()) {
printer->Print(
*vars,
"AddMethod(new ::grpc::RpcServiceMethod(\n"
"AddMethod(new ::grpc::internal::RpcServiceMethod(\n"
" $prefix$$Service$_method_names[$Idx$],\n"
" ::grpc::RpcMethod::NORMAL_RPC,\n"
" new ::grpc::RpcMethodHandler< $ns$$Service$::Service, "
" ::grpc::internal::RpcMethod::NORMAL_RPC,\n"
" new ::grpc::internal::RpcMethodHandler< $ns$$Service$::Service, "
"$Request$, "
"$Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
} else if (ClientOnlyStreaming(method.get())) {
printer->Print(
*vars,
"AddMethod(new ::grpc::RpcServiceMethod(\n"
"AddMethod(new ::grpc::internal::RpcServiceMethod(\n"
" $prefix$$Service$_method_names[$Idx$],\n"
" ::grpc::RpcMethod::CLIENT_STREAMING,\n"
" new ::grpc::ClientStreamingHandler< "
" ::grpc::internal::RpcMethod::CLIENT_STREAMING,\n"
" new ::grpc::internal::ClientStreamingHandler< "
"$ns$$Service$::Service, $Request$, $Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
} else if (ServerOnlyStreaming(method.get())) {
printer->Print(
*vars,
"AddMethod(new ::grpc::RpcServiceMethod(\n"
"AddMethod(new ::grpc::internal::RpcServiceMethod(\n"
" $prefix$$Service$_method_names[$Idx$],\n"
" ::grpc::RpcMethod::SERVER_STREAMING,\n"
" new ::grpc::ServerStreamingHandler< "
" ::grpc::internal::RpcMethod::SERVER_STREAMING,\n"
" new ::grpc::internal::ServerStreamingHandler< "
"$ns$$Service$::Service, $Request$, $Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
} else if (method->BidiStreaming()) {
printer->Print(
*vars,
"AddMethod(new ::grpc::RpcServiceMethod(\n"
"AddMethod(new ::grpc::internal::RpcServiceMethod(\n"
" $prefix$$Service$_method_names[$Idx$],\n"
" ::grpc::RpcMethod::BIDI_STREAMING,\n"
" new ::grpc::BidiStreamingHandler< "
" ::grpc::internal::RpcMethod::BIDI_STREAMING,\n"
" new ::grpc::internal::BidiStreamingHandler< "
"$ns$$Service$::Service, $Request$, $Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
}

@ -1,61 +0,0 @@
<!---
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
-->
# Census - a resource measurement and tracing system
This directory contains code for Census, which will ultimately provide the
following features for any gRPC-using system:
* A [dapper](http://research.google.com/pubs/pub36356.html)-like tracing
system, enabling tracing across a distributed infrastructure.
* RPC statistics and measurements for key metrics, such as latency, bytes
transferred, number of errors etc.
* Resource measurement framework which can be used for measuring custom
metrics. Through the use of [tags](#Tags), these can be broken down across
the entire distributed stack.
* Easy integration of the above with
[Google Cloud Trace](https://cloud.google.com/tools/cloud-trace) and
[Google Cloud Monitoring](https://cloud.google.com/monitoring/).
## Concepts
### Context
### Operations
### Tags
### Metrics
## API
### Internal/RPC API
### External/Client API
### RPC API
## Files in this directory
Note that files and functions in this directory can be split into two
categories:
* Files that define core census library functions. Functions etc. in these
files are named census\_\*, and constitute the core census library
functionality. At some time in the future, these will become a standalone
library.
* Files that define functions etc. that provide a convenient interface between
grpc and the core census functionality. These files are all named
grpc\_\*.{c,h}, and define function names beginning with grpc\_census\_\*.

@ -1,51 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <stddef.h>
#ifndef GRPC_CORE_EXT_CENSUS_AGGREGATION_H
#define GRPC_CORE_EXT_CENSUS_AGGREGATION_H
/** Structure used to describe an aggregation type. */
struct census_aggregation_ops {
/* Create a new aggregation. The pointer returned can be used in future calls
to clone(), free(), record(), data() and reset(). */
void *(*create)(const void *create_arg);
/* Make a copy of an aggregation created by create() */
void *(*clone)(const void *aggregation);
/* Destroy an aggregation created by create() */
void (*free)(void *aggregation);
/* Record a new value against aggregation. */
void (*record)(void *aggregation, double value);
/* Return current aggregation data. The caller must cast this object into
the correct type for the aggregation result. The object returned can be
freed by using free_data(). */
void *(*data)(const void *aggregation);
/* free data returned by data() */
void (*free_data)(void *data);
/* Reset an aggregation to default (zero) values. */
void (*reset)(void *aggregation);
/* Merge 'from' aggregation into 'to'. Both aggregations must be compatible */
void (*merge)(void *to, const void *from);
/* Fill buffer with printable string version of aggregation contents. For
debugging only. Returns the number of bytes added to buffer (a value == n
implies the buffer was of insufficient size). */
size_t (*print)(const void *aggregation, char *buffer, size_t n);
};
#endif /* GRPC_CORE_EXT_CENSUS_AGGREGATION_H */

@ -1,56 +0,0 @@
/*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "src/core/ext/census/base_resources.h"
#include <stdio.h>
#include <string.h>
#include <grpc/census.h>
#include <grpc/support/log.h>
#include "src/core/ext/census/resource.h"
// Add base RPC resource definitions for use by RPC runtime.
//
// TODO(aveitch): All of these are currently hardwired definitions encoded in
// the code in this file. These should be converted to use an external
// configuration mechanism, in which these resources are defined in a text
// file, which is compiled to .pb format and read by still-to-be-written
// configuration functions.
// Define all base resources. This should be called by census initialization.
void define_base_resources() {
google_census_Resource_BasicUnit numerator =
google_census_Resource_BasicUnit_SECS;
resource r = {(char *)"client_rpc_latency", // name
(char *)"Client RPC latency in seconds", // description
0, // prefix
1, // n_numerators
&numerator, // numerators
0, // n_denominators
NULL}; // denominators
define_resource(&r);
r = {(char *)"server_rpc_latency", // name
(char *)"Server RPC latency in seconds", // description
0, // prefix
1, // n_numerators
&numerator, // numerators
0, // n_denominators
NULL}; // denominators
define_resource(&r);
}

@ -1,32 +0,0 @@
/*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_CENSUS_BASE_RESOURCES_H
#define GRPC_CORE_EXT_CENSUS_BASE_RESOURCES_H
#ifdef __cplusplus
extern "C" {
#endif
/* Define all base resources. This should be called by census initialization. */
void define_base_resources();
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_CENSUS_BASE_RESOURCES_H */

@ -1,33 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "src/core/ext/census/census_interface.h"
#include <grpc/support/log.h>
#include "src/core/ext/census/census_rpc_stats.h"
#include "src/core/ext/census/census_tracing.h"
void census_init(void) {
census_tracing_init();
census_stats_store_init();
}
void census_shutdown(void) {
census_stats_store_shutdown();
census_tracing_shutdown();
}

@ -1,69 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_CENSUS_CENSUS_INTERFACE_H
#define GRPC_CORE_EXT_CENSUS_CENSUS_INTERFACE_H
#include <grpc/support/port_platform.h>
/* Maximum length of an individual census trace annotation. */
#define CENSUS_MAX_ANNOTATION_LENGTH 200
#ifdef __cplusplus
extern "C" {
#endif
/* Structure of a census op id. Defined as a structure because a 64-bit integer
is not available on every platform for C89. */
typedef struct census_op_id {
uint32_t upper;
uint32_t lower;
} census_op_id;
typedef struct census_rpc_stats census_rpc_stats;
/* Initializes the Census library. No-op if Census is already initialized. */
void census_init(void);
/* Shuts down the Census library. */
void census_shutdown(void);
/* Annotates the grpc method name on a census_op_id. The method name has the
format <fully qualified rpc service name>/<rpc function name>. Returns 0 iff
op_id and method_name are both valid. op_id is valid after its creation and
before calling census_tracing_end_op().
TODO(hongyu): Figure out valid characters set for service name and command
name and document requirements here.*/
int census_add_method_tag(census_op_id op_id, const char *method_name);
/* Annotates tracing information to a specific op_id.
Up to CENSUS_MAX_ANNOTATION_LENGTH bytes are recorded. */
void census_tracing_print(census_op_id op_id, const char *annotation);
/* Starts tracing for an RPC. Returns a locally unique census_op_id */
census_op_id census_tracing_start_op(void);
/* Ends tracing. Calling this function will invalidate the input op_id. */
void census_tracing_end_op(census_op_id op_id);
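/* Sketch of the op-id tracing flow declared above (the method name and
   annotation are illustrative placeholders): */
static void example_trace_op(void) {
  census_op_id op = census_tracing_start_op();
  census_add_method_tag(op, "/storage.Storage/Read");
  census_tracing_print(op, "request received");
  census_tracing_end_op(op);
}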
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_CENSUS_CENSUS_INTERFACE_H */

@ -1,588 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/* Available log space is divided up in blocks of
CENSUS_LOG_2_MAX_RECORD_SIZE bytes. A block can be in one of the
following three data structures:
- Free blocks (free_block_list)
- Blocks with unread data (dirty_block_list)
- Blocks currently attached to cores (core_local_blocks[])
census_log_start_write() moves a block from core_local_blocks[] to the
end of dirty_block_list when block:
- is out-of-space OR
- has an incomplete record (an incomplete record occurs when a thread calls
census_log_start_write() and is context-switched before calling
census_log_end_write())
So, blocks in dirty_block_list are ordered, from oldest to newest, by time
when block is detached from the core.
census_log_read_next() first iterates over dirty_block_list and then
core_local_blocks[]. It moves completely read blocks from dirty_block_list
to free_block_list. Blocks in core_local_blocks[] are not freed, even when
completely read.
If log is configured to discard old records and free_block_list is empty,
census_log_start_write() iterates over dirty_block_list to allocate a
new block. It moves the oldest available block (no pending read/write) to
core_local_blocks[].
core_local_block_struct is used to implement a map from core id to the block
associated with that core. This mapping is advisory. It is possible that the
block returned by this mapping is no longer associated with that core. This
mapping is updated, lazily, by census_log_start_write().
Locking in block struct:
Exclusive g_log.lock must be held before calling any functions operating on
block structs except census_log_start_write() and
census_log_end_write().
Writes to a block are serialized via writer_lock.
census_log_start_write() acquires this lock and
census_log_end_write() releases it. On failure to acquire the lock,
writer allocates a new block for the current core and updates
core_local_block accordingly.
Simultaneous read and write access is allowed. Reader can safely read up to
committed bytes (bytes_committed).
reader_lock protects the block, currently being read, from getting recycled.
start_read() acquires reader_lock and end_read() releases the lock.
Read/write access to a block is disabled via try_disable_access(). It returns
with both writer_lock and reader_lock held. These locks are subsequently
released by enable_access() to enable access to the block.
A note on naming: Most function/struct names are prepended by cl_
(shorthand for census_log). Further, functions that manipulate structures
include the name of the structure, which will be passed as the first
argument. E.g. cl_block_initialize() will initialize a cl_block.
*/
#include "src/core/ext/census/census_log.h"
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/cpu.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
#include <string.h>
/* End of platform specific code */
typedef struct census_log_block_list_struct {
struct census_log_block_list_struct *next;
struct census_log_block_list_struct *prev;
struct census_log_block *block;
} cl_block_list_struct;
typedef struct census_log_block {
/* Pointer to underlying buffer */
char *buffer;
gpr_atm writer_lock;
gpr_atm reader_lock;
/* Keeps completely written bytes. Declared atomic because accessed
simultaneously by reader and writer. */
gpr_atm bytes_committed;
/* Bytes already read */
int32_t bytes_read;
/* Links for list */
cl_block_list_struct link;
/* We want this structure to be cacheline aligned. We assume the following
sizes for the various parts on 32/64bit systems:
type 32b size 64b size
char* 4 8
3x gpr_atm 12 24
int32_t 4 8 (assumes padding)
cl_block_list_struct 12 24
TOTAL 32 64
Depending on the size of our cacheline and the architecture, we
selectively add char buffering to this structure. The size is checked
via assert in census_log_initialize(). */
#if defined(GPR_ARCH_64)
#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 64)
#else
#if defined(GPR_ARCH_32)
#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 32)
#else
#error "Unknown architecture"
#endif
#endif
#if CL_BLOCK_PAD_SIZE > 0
char padding[CL_BLOCK_PAD_SIZE];
#endif
} cl_block;
/* A list of cl_blocks, doubly-linked through cl_block::link. */
typedef struct census_log_block_list {
int32_t count; /* Number of items in list. */
cl_block_list_struct ht; /* head/tail of linked list. */
} cl_block_list;
/* Cacheline aligned block pointers to avoid false sharing. Block pointer must
be initialized via set_block(), before calling other functions */
typedef struct census_log_core_local_block {
gpr_atm block;
/* Ensure cacheline alignment: we assume sizeof(gpr_atm) == 4 or 8 */
#if defined(GPR_ARCH_64)
#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 8)
#else
#if defined(GPR_ARCH_32)
#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 4)
#else
#error "Unknown architecture"
#endif
#endif
#if CL_CORE_LOCAL_BLOCK_PAD_SIZE > 0
char padding[CL_CORE_LOCAL_BLOCK_PAD_SIZE];
#endif
} cl_core_local_block;
struct census_log {
int discard_old_records;
/* Number of cores (aka hardware-contexts) */
unsigned num_cores;
/* number of CENSUS_LOG_2_MAX_RECORD_SIZE blocks in log */
int32_t num_blocks;
cl_block *blocks; /* Block metadata. */
cl_core_local_block *core_local_blocks; /* Keeps core to block mappings. */
gpr_mu lock;
int initialized; /* has log been initialized? */
/* Keeps the state of the reader iterator. A value of 0 indicates that
iterator has reached the end. census_log_init_reader() resets the
value to num_core to restart iteration. */
uint32_t read_iterator_state;
/* Points to the block being read. If non-NULL, the block is locked for
reading (block_being_read_->reader_lock is held). */
cl_block *block_being_read;
/* A non-zero value indicates that log is full. */
gpr_atm is_full;
char *buffer;
cl_block_list free_block_list;
cl_block_list dirty_block_list;
gpr_atm out_of_space_count;
};
/* Single internal log */
static struct census_log g_log;
/* Functions that operate on an atomic memory location used as a lock */
/* Returns non-zero if lock is acquired */
static int cl_try_lock(gpr_atm *lock) { return gpr_atm_acq_cas(lock, 0, 1); }
static void cl_unlock(gpr_atm *lock) { gpr_atm_rel_store(lock, 0); }
/* Functions that operate on cl_core_local_block's */
static void cl_core_local_block_set_block(cl_core_local_block *clb,
cl_block *block) {
gpr_atm_rel_store(&clb->block, (gpr_atm)block);
}
static cl_block *cl_core_local_block_get_block(cl_core_local_block *clb) {
return (cl_block *)gpr_atm_acq_load(&clb->block);
}
/* Functions that operate on cl_block_list_struct's */
static void cl_block_list_struct_initialize(cl_block_list_struct *bls,
cl_block *block) {
bls->next = bls->prev = bls;
bls->block = block;
}
/* Functions that operate on cl_block_list's */
static void cl_block_list_initialize(cl_block_list *list) {
list->count = 0;
cl_block_list_struct_initialize(&list->ht, NULL);
}
/* Returns head of *this, or NULL if empty. */
static cl_block *cl_block_list_head(cl_block_list *list) {
return list->ht.next->block;
}
/* Insert element *e after *pos. */
static void cl_block_list_insert(cl_block_list *list, cl_block_list_struct *pos,
cl_block_list_struct *e) {
list->count++;
e->next = pos->next;
e->prev = pos;
e->next->prev = e;
e->prev->next = e;
}
/* Insert block at the head of the list */
static void cl_block_list_insert_at_head(cl_block_list *list, cl_block *block) {
cl_block_list_insert(list, &list->ht, &block->link);
}
/* Insert block at the tail of the list */
static void cl_block_list_insert_at_tail(cl_block_list *list, cl_block *block) {
cl_block_list_insert(list, list->ht.prev, &block->link);
}
/* Removes block *b. Requires *b be in the list. */
static void cl_block_list_remove(cl_block_list *list, cl_block *b) {
list->count--;
b->link.next->prev = b->link.prev;
b->link.prev->next = b->link.next;
}
/* Functions that operate on cl_block's */
static void cl_block_initialize(cl_block *block, char *buffer) {
block->buffer = buffer;
gpr_atm_rel_store(&block->writer_lock, 0);
gpr_atm_rel_store(&block->reader_lock, 0);
gpr_atm_rel_store(&block->bytes_committed, 0);
block->bytes_read = 0;
cl_block_list_struct_initialize(&block->link, block);
}
/* Guards against exposing partially written buffer to the reader. */
static void cl_block_set_bytes_committed(cl_block *block,
int32_t bytes_committed) {
gpr_atm_rel_store(&block->bytes_committed, bytes_committed);
}
static int32_t cl_block_get_bytes_committed(cl_block *block) {
return gpr_atm_acq_load(&block->bytes_committed);
}
/* Tries to disable future read/write access to this block. Succeeds if:
- no in-progress write AND
- no in-progress read AND
- 'discard_data' set to true OR no unread data
On success, clears the block state and returns with writer_lock_ and
reader_lock_ held. These locks are released by a subsequent
cl_block_access_enable() call. */
static int cl_block_try_disable_access(cl_block *block, int discard_data) {
if (!cl_try_lock(&block->writer_lock)) {
return 0;
}
if (!cl_try_lock(&block->reader_lock)) {
cl_unlock(&block->writer_lock);
return 0;
}
if (!discard_data &&
(block->bytes_read != cl_block_get_bytes_committed(block))) {
cl_unlock(&block->reader_lock);
cl_unlock(&block->writer_lock);
return 0;
}
cl_block_set_bytes_committed(block, 0);
block->bytes_read = 0;
return 1;
}
static void cl_block_enable_access(cl_block *block) {
cl_unlock(&block->reader_lock);
cl_unlock(&block->writer_lock);
}
/* Returns with writer_lock held. */
static void *cl_block_start_write(cl_block *block, size_t size) {
int32_t bytes_committed;
if (!cl_try_lock(&block->writer_lock)) {
return NULL;
}
bytes_committed = cl_block_get_bytes_committed(block);
if (bytes_committed + size > CENSUS_LOG_MAX_RECORD_SIZE) {
cl_unlock(&block->writer_lock);
return NULL;
}
return block->buffer + bytes_committed;
}
/* Releases writer_lock and increments committed bytes by 'bytes_written'.
'bytes_written' must be <= 'size' specified in the corresponding
StartWrite() call. This function is thread-safe. */
static void cl_block_end_write(cl_block *block, size_t bytes_written) {
cl_block_set_bytes_committed(
block, cl_block_get_bytes_committed(block) + bytes_written);
cl_unlock(&block->writer_lock);
}
/* Returns a pointer to the first unread byte in buffer. The number of bytes
available are returned in 'bytes_available'. Acquires reader lock that is
released by a subsequent cl_block_end_read() call. Returns NULL if:
- read in progress
- no data available */
static void *cl_block_start_read(cl_block *block, size_t *bytes_available) {
void *record;
if (!cl_try_lock(&block->reader_lock)) {
return NULL;
}
/* bytes_committed may change from under us. Use bytes_available to update
bytes_read below. */
*bytes_available = cl_block_get_bytes_committed(block) - block->bytes_read;
if (*bytes_available == 0) {
cl_unlock(&block->reader_lock);
return NULL;
}
record = block->buffer + block->bytes_read;
block->bytes_read += *bytes_available;
return record;
}
static void cl_block_end_read(cl_block *block) {
cl_unlock(&block->reader_lock);
}
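/* Illustrative sketch (not part of the original file): how the block-level
   primitives above pair up. cl_block_start_write() and cl_block_start_read()
   only try-lock, so either may return NULL while another thread holds the
   corresponding lock, the block is out of space, or there is no committed
   data; the matching end call releases the lock. The block and payload are
   hypothetical, the function is never called, and error handling is elided. */
static void cl_block_usage_sketch(cl_block *block) {
  const char payload[] = "example-record";
  void *slot = cl_block_start_write(block, sizeof(payload));
  if (slot != NULL) {
    memcpy(slot, payload, sizeof(payload));
    /* Publishes the bytes to readers and releases writer_lock. */
    cl_block_end_write(block, sizeof(payload));
  }
  size_t bytes_available = 0;
  void *data = cl_block_start_read(block, &bytes_available);
  if (data != NULL) {
    /* 'bytes_available' committed-but-unread bytes start at 'data'. */
    cl_block_end_read(block);
  }
}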
/* Internal functions operating on g_log */
/* Allocates a new free block (or recycles an available dirty block if log is
configured to discard old records). Returns NULL if out-of-space. */
static cl_block *cl_allocate_block(void) {
cl_block *block = cl_block_list_head(&g_log.free_block_list);
if (block != NULL) {
cl_block_list_remove(&g_log.free_block_list, block);
return block;
}
if (!g_log.discard_old_records) {
/* No free block and log is configured to keep old records. */
return NULL;
}
/* Recycle dirty block. Start from the oldest. */
for (block = cl_block_list_head(&g_log.dirty_block_list); block != NULL;
block = block->link.next->block) {
if (cl_block_try_disable_access(block, 1 /* discard data */)) {
cl_block_list_remove(&g_log.dirty_block_list, block);
return block;
}
}
return NULL;
}
/* Allocates a new block and updates core id => block mapping. 'old_block'
points to the block that the caller thinks is attached to
'core_id'. 'old_block' may be NULL. Returns non-zero if:
- allocated a new block OR
- 'core_id' => 'old_block' mapping changed (another thread allocated a
block before lock was acquired). */
static int cl_allocate_core_local_block(int32_t core_id, cl_block *old_block) {
/* Now that we have the lock, check if core-local mapping has changed. */
cl_core_local_block *core_local_block = &g_log.core_local_blocks[core_id];
cl_block *block = cl_core_local_block_get_block(core_local_block);
if ((block != NULL) && (block != old_block)) {
return 1;
}
if (block != NULL) {
cl_core_local_block_set_block(core_local_block, NULL);
cl_block_list_insert_at_tail(&g_log.dirty_block_list, block);
}
block = cl_allocate_block();
if (block == NULL) {
gpr_atm_rel_store(&g_log.is_full, 1);
return 0;
}
cl_core_local_block_set_block(core_local_block, block);
cl_block_enable_access(block);
return 1;
}
static cl_block *cl_get_block(void *record) {
uintptr_t p = (uintptr_t)((char *)record - g_log.buffer);
uintptr_t index = p >> CENSUS_LOG_2_MAX_RECORD_SIZE;
return &g_log.blocks[index];
}
/* Gets the next block to read and tries to free 'prev' block (if not NULL).
Returns NULL if reached the end. */
static cl_block *cl_next_block_to_read(cl_block *prev) {
cl_block *block = NULL;
if (g_log.read_iterator_state == g_log.num_cores) {
/* We are traversing dirty list; find the next dirty block. */
if (prev != NULL) {
/* Try to free the previous block if it has no unread data. The block may
still have unread data if a previously incomplete record was completed
between read_next() calls. */
block = prev->link.next->block;
if (cl_block_try_disable_access(prev, 0 /* do not discard data */)) {
cl_block_list_remove(&g_log.dirty_block_list, prev);
cl_block_list_insert_at_head(&g_log.free_block_list, prev);
gpr_atm_rel_store(&g_log.is_full, 0);
}
} else {
block = cl_block_list_head(&g_log.dirty_block_list);
}
if (block != NULL) {
return block;
}
/* We are done with the dirty list; moving on to core-local blocks. */
}
while (g_log.read_iterator_state > 0) {
g_log.read_iterator_state--;
block = cl_core_local_block_get_block(
&g_log.core_local_blocks[g_log.read_iterator_state]);
if (block != NULL) {
return block;
}
}
return NULL;
}
/* External functions: primary stats_log interface */
void census_log_initialize(size_t size_in_mb, int discard_old_records) {
int32_t ix;
/* Check cacheline alignment. */
GPR_ASSERT(sizeof(cl_block) % GPR_CACHELINE_SIZE == 0);
GPR_ASSERT(sizeof(cl_core_local_block) % GPR_CACHELINE_SIZE == 0);
GPR_ASSERT(!g_log.initialized);
g_log.discard_old_records = discard_old_records;
g_log.num_cores = gpr_cpu_num_cores();
/* Ensure at least as many blocks as there are cores. */
g_log.num_blocks = GPR_MAX(
g_log.num_cores, (size_in_mb << 20) >> CENSUS_LOG_2_MAX_RECORD_SIZE);
gpr_mu_init(&g_log.lock);
g_log.read_iterator_state = 0;
g_log.block_being_read = NULL;
gpr_atm_rel_store(&g_log.is_full, 0);
g_log.core_local_blocks = (cl_core_local_block *)gpr_malloc_aligned(
g_log.num_cores * sizeof(cl_core_local_block), GPR_CACHELINE_SIZE_LOG);
memset(g_log.core_local_blocks, 0,
g_log.num_cores * sizeof(cl_core_local_block));
g_log.blocks = (cl_block *)gpr_malloc_aligned(
g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG);
memset(g_log.blocks, 0, g_log.num_blocks * sizeof(cl_block));
g_log.buffer = gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
memset(g_log.buffer, 0, g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
cl_block_list_initialize(&g_log.free_block_list);
cl_block_list_initialize(&g_log.dirty_block_list);
for (ix = 0; ix < g_log.num_blocks; ++ix) {
cl_block *block = g_log.blocks + ix;
cl_block_initialize(block,
g_log.buffer + (CENSUS_LOG_MAX_RECORD_SIZE * ix));
cl_block_try_disable_access(block, 1 /* discard data */);
cl_block_list_insert_at_tail(&g_log.free_block_list, block);
}
gpr_atm_rel_store(&g_log.out_of_space_count, 0);
g_log.initialized = 1;
}
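/* Worked example (illustrative, not part of the original file): with
   size_in_mb == 1 and CENSUS_LOG_2_MAX_RECORD_SIZE == 14,
   (1 << 20) >> 14 == 64, so a 1MB log is carved into 64 blocks of
   CENSUS_LOG_MAX_RECORD_SIZE (16KB) each; on a machine with more than 64
   cores, num_blocks is raised to the core count instead, one block per
   core. */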
void census_log_shutdown(void) {
GPR_ASSERT(g_log.initialized);
gpr_mu_destroy(&g_log.lock);
gpr_free_aligned(g_log.core_local_blocks);
g_log.core_local_blocks = NULL;
gpr_free_aligned(g_log.blocks);
g_log.blocks = NULL;
gpr_free(g_log.buffer);
g_log.buffer = NULL;
g_log.initialized = 0;
}
void *census_log_start_write(size_t size) {
/* Used to bound number of times block allocation is attempted. */
int32_t attempts_remaining = g_log.num_blocks;
/* TODO(aveitch): move this inside the do loop when current_cpu is fixed */
int32_t core_id = gpr_cpu_current_cpu();
GPR_ASSERT(g_log.initialized);
if (size > CENSUS_LOG_MAX_RECORD_SIZE) {
return NULL;
}
do {
int allocated;
void *record = NULL;
cl_block *block =
cl_core_local_block_get_block(&g_log.core_local_blocks[core_id]);
if (block && (record = cl_block_start_write(block, size))) {
return record;
}
/* Need to allocate a new block. We are here if:
- No block associated with the core OR
- Write in-progress on the block OR
- block is out of space */
if (gpr_atm_acq_load(&g_log.is_full)) {
gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
return NULL;
}
gpr_mu_lock(&g_log.lock);
allocated = cl_allocate_core_local_block(core_id, block);
gpr_mu_unlock(&g_log.lock);
if (!allocated) {
gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
return NULL;
}
} while (attempts_remaining--);
/* Give up. */
gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
return NULL;
}
void census_log_end_write(void *record, size_t bytes_written) {
GPR_ASSERT(g_log.initialized);
cl_block_end_write(cl_get_block(record), bytes_written);
}
void census_log_init_reader(void) {
GPR_ASSERT(g_log.initialized);
gpr_mu_lock(&g_log.lock);
/* If a block is locked for reading unlock it. */
if (g_log.block_being_read != NULL) {
cl_block_end_read(g_log.block_being_read);
g_log.block_being_read = NULL;
}
g_log.read_iterator_state = g_log.num_cores;
gpr_mu_unlock(&g_log.lock);
}
const void *census_log_read_next(size_t *bytes_available) {
GPR_ASSERT(g_log.initialized);
gpr_mu_lock(&g_log.lock);
if (g_log.block_being_read != NULL) {
cl_block_end_read(g_log.block_being_read);
}
do {
g_log.block_being_read = cl_next_block_to_read(g_log.block_being_read);
if (g_log.block_being_read != NULL) {
void *record =
cl_block_start_read(g_log.block_being_read, bytes_available);
if (record != NULL) {
gpr_mu_unlock(&g_log.lock);
return record;
}
}
} while (g_log.block_being_read != NULL);
gpr_mu_unlock(&g_log.lock);
return NULL;
}
size_t census_log_remaining_space(void) {
size_t space;
GPR_ASSERT(g_log.initialized);
gpr_mu_lock(&g_log.lock);
if (g_log.discard_old_records) {
/* Remaining space is not meaningful; just return the entire log space. */
space = g_log.num_blocks << CENSUS_LOG_2_MAX_RECORD_SIZE;
} else {
space = g_log.free_block_list.count * CENSUS_LOG_MAX_RECORD_SIZE;
}
gpr_mu_unlock(&g_log.lock);
return space;
}
int census_log_out_of_space_count(void) {
GPR_ASSERT(g_log.initialized);
return gpr_atm_acq_load(&g_log.out_of_space_count);
}

@@ -1,84 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_CENSUS_CENSUS_LOG_H
#define GRPC_CORE_EXT_CENSUS_CENSUS_LOG_H
#include <stddef.h>
/* Maximum record size, in bytes. */
#define CENSUS_LOG_2_MAX_RECORD_SIZE 14 /* 2^14 = 16KB */
#define CENSUS_LOG_MAX_RECORD_SIZE (1 << CENSUS_LOG_2_MAX_RECORD_SIZE)
#ifdef __cplusplus
extern "C" {
#endif
/* Initialize the statistics logging subsystem with the given log size. A log
size of 0 will result in the smallest possible log for the platform
(approximately CENSUS_LOG_MAX_RECORD_SIZE * gpr_cpu_num_cores()). If
discard_old_records is non-zero, then new records will displace older ones
when the log is full. This function must be called before any other
census_log functions.
*/
void census_log_initialize(size_t size_in_mb, int discard_old_records);
/* Shuts down the logging subsystem. The caller must ensure that:
- there are no in-progress or future calls to any census_log function
- there are no incomplete records
*/
void census_log_shutdown(void);
/* Allocates and returns a record of 'size' bytes and marks it in use. A
subsequent census_log_end_write() marks the record complete. The
'bytes_written' census_log_end_write() argument must be <=
'size'. Returns NULL if out-of-space AND:
- log is configured to keep old records OR
- all blocks are pinned by incomplete records.
*/
void *census_log_start_write(size_t size);
void census_log_end_write(void *record, size_t bytes_written);
/* census_log_read_next() iterates over blocks with data and, for each block,
returns a pointer to the first unread byte. The number of bytes that can be
read is returned in 'bytes_available'. The reader is expected to read all
available data; reading the data consumes it, i.e. it cannot be read again.
census_log_read_next() returns NULL once the end is reached, i.e. the last
block has been read. census_log_init_reader() starts the iteration or aborts
the current iteration.
*/
void census_log_init_reader(void);
const void *census_log_read_next(size_t *bytes_available);
/* Returns estimated remaining space across all blocks, in bytes. If log is
configured to discard old records, returns total log space. Otherwise,
returns space available in empty blocks (partially filled blocks are
treated as full).
*/
size_t census_log_remaining_space(void);
/* Returns the number of times census_log_start_write() failed due to
out-of-space. */
int census_log_out_of_space_count(void);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_CENSUS_CENSUS_LOG_H */
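/* Illustrative usage sketch (not part of the original header): one write
   followed by a full read pass through the API declared above. The 1MB log
   size and the payload are arbitrary; error handling is elided and
   <string.h> is assumed for memcpy().

     census_log_initialize(1, 1);  // 1MB log, discard old records when full
     const char payload[] = "example";
     void *record = census_log_start_write(sizeof(payload));
     if (record != NULL) {
       memcpy(record, payload, sizeof(payload));
       census_log_end_write(record, sizeof(payload));
     }
     census_log_init_reader();
     size_t bytes_available;
     const void *data;
     while ((data = census_log_read_next(&bytes_available)) != NULL) {
       // 'bytes_available' bytes at 'data' are consumed by this iteration.
     }
     census_log_shutdown();
*/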

@@ -1,238 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include "src/core/ext/census/census_interface.h"
#include "src/core/ext/census/census_rpc_stats.h"
#include "src/core/ext/census/census_tracing.h"
#include "src/core/ext/census/hash_table.h"
#include "src/core/ext/census/window_stats.h"
#include "src/core/lib/support/murmur_hash.h"
#include "src/core/lib/support/string.h"
#define NUM_INTERVALS 3
#define MINUTE_INTERVAL 0
#define HOUR_INTERVAL 1
#define TOTAL_INTERVAL 2
/* for easier typing */
typedef census_per_method_rpc_stats per_method_stats;
/* Ensure mu is only initialized once. */
static gpr_once g_stats_store_mu_init = GPR_ONCE_INIT;
/* Guards two stats stores. */
static gpr_mu g_mu;
static census_ht *g_client_stats_store = NULL;
static census_ht *g_server_stats_store = NULL;
static void init_mutex(void) { gpr_mu_init(&g_mu); }
static void init_mutex_once(void) {
gpr_once_init(&g_stats_store_mu_init, init_mutex);
}
static int cmp_str_keys(const void *k1, const void *k2) {
return strcmp((const char *)k1, (const char *)k2);
}
/* TODO(hongyu): replace it with cityhash64 */
static uint64_t simple_hash(const void *k) {
size_t len = strlen(k);
uint64_t higher = gpr_murmur_hash3((const char *)k, len / 2, 0);
return higher << 32 |
gpr_murmur_hash3((const char *)k + len / 2, len - len / 2, 0);
}
static void delete_stats(void *stats) {
census_window_stats_destroy((struct census_window_stats *)stats);
}
static void delete_key(void *key) { gpr_free(key); }
static const census_ht_option ht_opt = {
CENSUS_HT_POINTER /* key type */, 1999 /* n_of_buckets */,
simple_hash /* hash function */, cmp_str_keys /* key comparator */,
delete_stats /* data deleter */, delete_key /* key deleter */
};
static void init_rpc_stats(void *stats) {
memset(stats, 0, sizeof(census_rpc_stats));
}
static void stat_add_proportion(double p, void *base, const void *addme) {
census_rpc_stats *b = (census_rpc_stats *)base;
census_rpc_stats *a = (census_rpc_stats *)addme;
b->cnt += p * a->cnt;
b->rpc_error_cnt += p * a->rpc_error_cnt;
b->app_error_cnt += p * a->app_error_cnt;
b->elapsed_time_ms += p * a->elapsed_time_ms;
b->api_request_bytes += p * a->api_request_bytes;
b->wire_request_bytes += p * a->wire_request_bytes;
b->api_response_bytes += p * a->api_response_bytes;
b->wire_response_bytes += p * a->wire_response_bytes;
}
static void stat_add(void *base, const void *addme) {
stat_add_proportion(1.0, base, addme);
}
static gpr_timespec min_hour_total_intervals[3] = {
{60, 0}, {3600, 0}, {36000000, 0}};
static const census_window_stats_stat_info window_stats_settings = {
sizeof(census_rpc_stats), init_rpc_stats, stat_add, stat_add_proportion};
census_rpc_stats *census_rpc_stats_create_empty(void) {
census_rpc_stats *ret =
(census_rpc_stats *)gpr_malloc(sizeof(census_rpc_stats));
memset(ret, 0, sizeof(census_rpc_stats));
return ret;
}
void census_aggregated_rpc_stats_set_empty(census_aggregated_rpc_stats *data) {
int i = 0;
for (i = 0; i < data->num_entries; i++) {
if (data->stats[i].method != NULL) {
gpr_free((void *)data->stats[i].method);
}
}
if (data->stats != NULL) {
gpr_free(data->stats);
}
data->num_entries = 0;
data->stats = NULL;
}
static void record_stats(census_ht *store, census_op_id op_id,
const census_rpc_stats *stats) {
gpr_mu_lock(&g_mu);
if (store != NULL) {
census_trace_obj *trace = NULL;
census_internal_lock_trace_store();
trace = census_get_trace_obj_locked(op_id);
if (trace != NULL) {
const char *method_name = census_get_trace_method_name(trace);
struct census_window_stats *window_stats = NULL;
census_ht_key key;
key.ptr = (void *)method_name;
window_stats = census_ht_find(store, key);
census_internal_unlock_trace_store();
if (window_stats == NULL) {
window_stats = census_window_stats_create(3, min_hour_total_intervals,
30, &window_stats_settings);
key.ptr = gpr_strdup(key.ptr);
census_ht_insert(store, key, (void *)window_stats);
}
census_window_stats_add(window_stats, gpr_now(GPR_CLOCK_REALTIME), stats);
} else {
census_internal_unlock_trace_store();
}
}
gpr_mu_unlock(&g_mu);
}
void census_record_rpc_client_stats(census_op_id op_id,
const census_rpc_stats *stats) {
record_stats(g_client_stats_store, op_id, stats);
}
void census_record_rpc_server_stats(census_op_id op_id,
const census_rpc_stats *stats) {
record_stats(g_server_stats_store, op_id, stats);
}
/* Get stats from input stats store */
static void get_stats(census_ht *store, census_aggregated_rpc_stats *data) {
GPR_ASSERT(data != NULL);
if (data->num_entries != 0) {
census_aggregated_rpc_stats_set_empty(data);
}
gpr_mu_lock(&g_mu);
if (store != NULL) {
size_t n;
unsigned i, j;
gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
census_ht_kv *kv = census_ht_get_all_elements(store, &n);
if (kv != NULL) {
data->num_entries = n;
data->stats =
(per_method_stats *)gpr_malloc(sizeof(per_method_stats) * n);
for (i = 0; i < n; i++) {
census_window_stats_sums sums[NUM_INTERVALS];
for (j = 0; j < NUM_INTERVALS; j++) {
sums[j].statistic = (void *)census_rpc_stats_create_empty();
}
data->stats[i].method = gpr_strdup(kv[i].k.ptr);
census_window_stats_get_sums(kv[i].v, now, sums);
data->stats[i].minute_stats =
*(census_rpc_stats *)sums[MINUTE_INTERVAL].statistic;
data->stats[i].hour_stats =
*(census_rpc_stats *)sums[HOUR_INTERVAL].statistic;
data->stats[i].total_stats =
*(census_rpc_stats *)sums[TOTAL_INTERVAL].statistic;
for (j = 0; j < NUM_INTERVALS; j++) {
gpr_free(sums[j].statistic);
}
}
gpr_free(kv);
}
}
gpr_mu_unlock(&g_mu);
}
void census_get_client_stats(census_aggregated_rpc_stats *data) {
get_stats(g_client_stats_store, data);
}
void census_get_server_stats(census_aggregated_rpc_stats *data) {
get_stats(g_server_stats_store, data);
}
void census_stats_store_init(void) {
init_mutex_once();
gpr_mu_lock(&g_mu);
if (g_client_stats_store == NULL && g_server_stats_store == NULL) {
g_client_stats_store = census_ht_create(&ht_opt);
g_server_stats_store = census_ht_create(&ht_opt);
} else {
gpr_log(GPR_ERROR, "Census stats store already initialized.");
}
gpr_mu_unlock(&g_mu);
}
void census_stats_store_shutdown(void) {
init_mutex_once();
gpr_mu_lock(&g_mu);
if (g_client_stats_store != NULL) {
census_ht_destroy(g_client_stats_store);
g_client_stats_store = NULL;
} else {
gpr_log(GPR_ERROR, "Census client stats store not initialized.");
}
if (g_server_stats_store != NULL) {
census_ht_destroy(g_server_stats_store);
g_server_stats_store = NULL;
} else {
gpr_log(GPR_ERROR, "Census server stats store not initialized.");
}
gpr_mu_unlock(&g_mu);
}

@@ -1,86 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_CENSUS_CENSUS_RPC_STATS_H
#define GRPC_CORE_EXT_CENSUS_CENSUS_RPC_STATS_H
#include <grpc/support/port_platform.h>
#include "src/core/ext/census/census_interface.h"
#ifdef __cplusplus
extern "C" {
#endif
struct census_rpc_stats {
uint64_t cnt;
uint64_t rpc_error_cnt;
uint64_t app_error_cnt;
double elapsed_time_ms;
double api_request_bytes;
double wire_request_bytes;
double api_response_bytes;
double wire_response_bytes;
};
/* Creates an empty rpc stats object on heap. */
census_rpc_stats *census_rpc_stats_create_empty(void);
typedef struct census_per_method_rpc_stats {
const char *method;
census_rpc_stats minute_stats; /* cumulative stats in the past minute */
census_rpc_stats hour_stats; /* cumulative stats in the past hour */
census_rpc_stats total_stats; /* cumulative stats from last gc */
} census_per_method_rpc_stats;
typedef struct census_aggregated_rpc_stats {
int num_entries;
census_per_method_rpc_stats *stats;
} census_aggregated_rpc_stats;
/* Initializes an aggregated rpc stats object to an empty state. */
void census_aggregated_rpc_stats_set_empty(census_aggregated_rpc_stats *data);
/* Records client-side stats of an RPC. */
void census_record_rpc_client_stats(census_op_id op_id,
const census_rpc_stats *stats);
/* Records server-side stats of an RPC. */
void census_record_rpc_server_stats(census_op_id op_id,
const census_rpc_stats *stats);
/* The following two functions are intended for in-process queries of
per-service per-method stats from grpc implementations. */
/* Populates *data_map with server side aggregated per-service per-method
stats.
DO NOT CALL from outside of grpc code. */
void census_get_server_stats(census_aggregated_rpc_stats *data_map);
/* Populates *data_map with client side aggregated per-service per-method
stats.
DO NOT CALL from outside of grpc code. */
void census_get_client_stats(census_aggregated_rpc_stats *data_map);
void census_stats_store_init(void);
void census_stats_store_shutdown(void);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_CENSUS_CENSUS_RPC_STATS_H */
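/* Illustrative usage sketch (not part of the original header): reading the
   aggregated client-side stats and releasing them again. It assumes the
   stats store has already been initialized via census_stats_store_init().

     census_aggregated_rpc_stats agg = {0, NULL};
     census_get_client_stats(&agg);
     for (int i = 0; i < agg.num_entries; i++) {
       // agg.stats[i].method plus its minute/hour/total census_rpc_stats.
     }
     census_aggregated_rpc_stats_set_empty(&agg);  // frees methods and array
*/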

@@ -1,226 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "src/core/ext/census/census_tracing.h"
#include "src/core/ext/census/census_interface.h"
#include <stdio.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/sync.h>
#include "src/core/ext/census/hash_table.h"
#include "src/core/lib/support/string.h"
void census_trace_obj_destroy(census_trace_obj *obj) {
census_trace_annotation *p = obj->annotations;
while (p != NULL) {
census_trace_annotation *next = p->next;
gpr_free(p);
p = next;
}
gpr_free(obj->method);
gpr_free(obj);
}
static void delete_trace_obj(void *obj) {
census_trace_obj_destroy((census_trace_obj *)obj);
}
static const census_ht_option ht_opt = {
CENSUS_HT_UINT64 /* key type */,
571 /* n_of_buckets */,
NULL /* hash */,
NULL /* compare_keys */,
delete_trace_obj /* delete data */,
NULL /* delete key */
};
static gpr_once g_init_mutex_once = GPR_ONCE_INIT;
static gpr_mu g_mu; /* Guards following two static variables. */
static census_ht *g_trace_store = NULL;
static uint64_t g_id = 0;
static census_ht_key op_id_as_key(census_op_id *id) {
return *(census_ht_key *)id;
}
static uint64_t op_id_2_uint64(census_op_id *id) {
uint64_t ret;
memcpy(&ret, id, sizeof(census_op_id));
return ret;
}
static void init_mutex(void) { gpr_mu_init(&g_mu); }
static void init_mutex_once(void) {
gpr_once_init(&g_init_mutex_once, init_mutex);
}
census_op_id census_tracing_start_op(void) {
gpr_mu_lock(&g_mu);
{
census_trace_obj *ret = gpr_malloc(sizeof(census_trace_obj));
memset(ret, 0, sizeof(census_trace_obj));
g_id++;
memcpy(&ret->id, &g_id, sizeof(census_op_id));
ret->rpc_stats.cnt = 1;
ret->ts = gpr_now(GPR_CLOCK_REALTIME);
census_ht_insert(g_trace_store, op_id_as_key(&ret->id), (void *)ret);
gpr_log(GPR_DEBUG, "Start tracing for id %lu", g_id);
gpr_mu_unlock(&g_mu);
return ret->id;
}
}
int census_add_method_tag(census_op_id op_id, const char *method) {
int ret = 0;
census_trace_obj *trace = NULL;
gpr_mu_lock(&g_mu);
trace = census_ht_find(g_trace_store, op_id_as_key(&op_id));
if (trace == NULL) {
ret = 1;
} else {
trace->method = gpr_strdup(method);
}
gpr_mu_unlock(&g_mu);
return ret;
}
void census_tracing_print(census_op_id op_id, const char *anno_txt) {
census_trace_obj *trace = NULL;
gpr_mu_lock(&g_mu);
trace = census_ht_find(g_trace_store, op_id_as_key(&op_id));
if (trace != NULL) {
census_trace_annotation *anno = gpr_malloc(sizeof(census_trace_annotation));
anno->ts = gpr_now(GPR_CLOCK_REALTIME);
{
char *d = anno->txt;
const char *s = anno_txt;
int n = 0;
for (; n < CENSUS_MAX_ANNOTATION_LENGTH && *s != '\0'; ++n) {
*d++ = *s++;
}
*d = '\0';
}
anno->next = trace->annotations;
trace->annotations = anno;
}
gpr_mu_unlock(&g_mu);
}
void census_tracing_end_op(census_op_id op_id) {
census_trace_obj *trace = NULL;
gpr_mu_lock(&g_mu);
trace = census_ht_find(g_trace_store, op_id_as_key(&op_id));
if (trace != NULL) {
trace->rpc_stats.elapsed_time_ms = gpr_timespec_to_micros(
gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), trace->ts));
gpr_log(GPR_DEBUG, "End tracing for id %lu, method %s, latency %f us",
op_id_2_uint64(&op_id), trace->method,
trace->rpc_stats.elapsed_time_ms);
census_ht_erase(g_trace_store, op_id_as_key(&op_id));
}
gpr_mu_unlock(&g_mu);
}
void census_tracing_init(void) {
init_mutex_once();
gpr_mu_lock(&g_mu);
if (g_trace_store == NULL) {
g_id = 1;
g_trace_store = census_ht_create(&ht_opt);
} else {
gpr_log(GPR_ERROR, "Census trace store already initialized.");
}
gpr_mu_unlock(&g_mu);
}
void census_tracing_shutdown(void) {
gpr_mu_lock(&g_mu);
if (g_trace_store != NULL) {
census_ht_destroy(g_trace_store);
g_trace_store = NULL;
} else {
gpr_log(GPR_ERROR, "Census trace store is not initialized.");
}
gpr_mu_unlock(&g_mu);
}
void census_internal_lock_trace_store(void) { gpr_mu_lock(&g_mu); }
void census_internal_unlock_trace_store(void) { gpr_mu_unlock(&g_mu); }
census_trace_obj *census_get_trace_obj_locked(census_op_id op_id) {
if (g_trace_store == NULL) {
gpr_log(GPR_ERROR, "Census trace store is not initialized.");
return NULL;
}
return (census_trace_obj *)census_ht_find(g_trace_store,
op_id_as_key(&op_id));
}
const char *census_get_trace_method_name(const census_trace_obj *trace) {
return trace->method;
}
static census_trace_annotation *dup_annotation_chain(
census_trace_annotation *from) {
census_trace_annotation *ret = NULL;
census_trace_annotation **to = &ret;
for (; from != NULL; from = from->next) {
*to = gpr_malloc(sizeof(census_trace_annotation));
memcpy(*to, from, sizeof(census_trace_annotation));
to = &(*to)->next;
}
return ret;
}
static census_trace_obj *trace_obj_dup(census_trace_obj *from) {
census_trace_obj *to = NULL;
GPR_ASSERT(from != NULL);
to = gpr_malloc(sizeof(census_trace_obj));
to->id = from->id;
to->ts = from->ts;
to->rpc_stats = from->rpc_stats;
to->method = gpr_strdup(from->method);
to->annotations = dup_annotation_chain(from->annotations);
return to;
}
census_trace_obj **census_get_active_ops(int *num_active_ops) {
census_trace_obj **ret = NULL;
gpr_mu_lock(&g_mu);
if (g_trace_store != NULL) {
size_t n = 0;
census_ht_kv *all_kvs = census_ht_get_all_elements(g_trace_store, &n);
*num_active_ops = (int)n;
if (n != 0) {
size_t i = 0;
ret = gpr_malloc(sizeof(census_trace_obj *) * n);
for (i = 0; i < n; i++) {
ret[i] = trace_obj_dup((census_trace_obj *)all_kvs[i].v);
}
}
gpr_free(all_kvs);
}
gpr_mu_unlock(&g_mu);
return ret;
}

@@ -1,81 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_CENSUS_CENSUS_TRACING_H
#define GRPC_CORE_EXT_CENSUS_CENSUS_TRACING_H
#include <grpc/support/time.h>
#include "src/core/ext/census/census_rpc_stats.h"
/* WARNING: The data structures and APIs provided by this file are for the gRPC
library's internal use ONLY. They might be changed in backward-incompatible
ways and are not subject to any deprecation policy.
They are not recommended for external use.
*/
#ifdef __cplusplus
extern "C" {
#endif
/* Struct for a trace annotation. */
typedef struct census_trace_annotation {
gpr_timespec ts; /* timestamp of the annotation */
char txt[CENSUS_MAX_ANNOTATION_LENGTH + 1]; /* actual txt annotation */
struct census_trace_annotation *next;
} census_trace_annotation;
typedef struct census_trace_obj {
census_op_id id;
gpr_timespec ts;
census_rpc_stats rpc_stats;
char *method;
census_trace_annotation *annotations;
} census_trace_obj;
/* Deletes trace object. */
void census_trace_obj_destroy(census_trace_obj *obj);
/* Initializes trace store. This function is thread safe. */
void census_tracing_init(void);
/* Shuts down the trace store. This function is thread safe. */
void census_tracing_shutdown(void);
/* Gets trace obj corresponding to the input op_id. Returns NULL if trace store
is not initialized or the trace obj is not found. The trace store must be
locked before calling this function. */
census_trace_obj *census_get_trace_obj_locked(census_op_id op_id);
/* The following two functions acquire and release the trace store global lock.
They are for census internal use only. */
void census_internal_lock_trace_store(void);
void census_internal_unlock_trace_store(void);
/* Gets method name associated with the input trace object. */
const char *census_get_trace_method_name(const census_trace_obj *trace);
/* Returns an array of pointers to trace objects of currently active operations
and fills in the number of active operations. Returns NULL if there are no active
operations.
Caller owns the returned objects. */
census_trace_obj **census_get_active_ops(int *num_active_ops);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_CENSUS_CENSUS_TRACING_H */
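/* Illustrative usage sketch (not part of the original header): the typical
   lifetime of a traced operation. census_tracing_start_op(),
   census_add_method_tag(), census_tracing_print() and census_tracing_end_op()
   are declared elsewhere in this module (census_interface.h); the method name
   and annotation text below are arbitrary.

     census_tracing_init();
     census_op_id id = census_tracing_start_op();
     census_add_method_tag(id, "/example.Service/Method");
     census_tracing_print(id, "annotation text");
     census_tracing_end_op(id);
     census_tracing_shutdown();
*/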

@@ -1,496 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/census.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/useful.h>
#include <stdbool.h>
#include <string.h>
#include "src/core/lib/support/string.h"
// Functions in this file support the public context API, including
// encoding/decoding as part of context propagation across RPCs. The overall
// requirements (in approximate priority order) for the
// context representation:
// 1. Efficient conversion to/from wire format
// 2. Minimal bytes used on-wire
// 3. Efficient context creation
// 4. Efficient lookup of tag value for a key
// 5. Efficient iteration over tags
// 6. Minimal memory footprint
//
// Notes on tradeoffs/decisions:
// * Each tag includes a 1-byte key length as well as a nul-terminating byte.
//   These aid efficient parsing and allow key strings to be returned directly.
//   This is more important than saving a single byte per tag on the wire.
// * The wire encoding uses only single byte values. This eliminates the need
// to handle endian-ness conversions. It also means there is a hard upper
// limit of 255 for both CENSUS_MAX_TAG_KV_LEN and CENSUS_MAX_PROPAGATED_TAGS.
// * Keep all tag information (keys/values/flags) in a single memory buffer,
// that can be directly copied to the wire.
// min and max valid chars in tag keys and values. All printable ASCII is OK.
#define MIN_VALID_TAG_CHAR 32 // ' '
#define MAX_VALID_TAG_CHAR 126 // '~'
// Structure representing a set of tags. Essentially a count of number of tags
// present, and pointer to a chunk of memory that contains the per-tag details.
struct tag_set {
int ntags; // number of tags.
int ntags_alloc; // ntags + number of deleted tags (total number of tags
// in all of kvm). This will always be == ntags, except during the process
// of building a new tag set.
size_t kvm_size; // number of bytes allocated for key/value storage.
size_t kvm_used; // number of bytes of used key/value memory
char *kvm; // key/value memory. Consists of repeated entries of:
// Offset Size Description
// 0 1 Key length, including trailing 0. (K)
// 1 1 Value length, including trailing 0 (V)
// 2 1 Flags
// 3 K Key bytes
// 3 + K V Value bytes
//
// We refer to the first 3 entries as the 'tag header'. If extra values are
// introduced in the header, you will need to modify the TAG_HEADER_SIZE
// constant, the raw_tag structure (and everything that uses it) and the
// encode/decode functions appropriately.
};
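// Illustrative sketch (not part of the original file): the kvm bytes for a
// single tag with key "env", value "prod" and the CENSUS_TAG_PROPAGATE flag,
// laid out as described above. Both lengths include the trailing '\0', so the
// entry occupies 3 (tag header) + 4 + 5 == 12 bytes. The array is never
// referenced; it exists purely as documentation.
static const char example_kvm[] = {
    4, 5, CENSUS_TAG_PROPAGATE,  // tag header: key len, value len, flags
    'e', 'n', 'v', '\0',         // key bytes (length 4)
    'p', 'r', 'o', 'd', '\0'};   // value bytes (length 5)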
// Number of bytes in tag header.
#define TAG_HEADER_SIZE 3 // key length (1) + value length (1) + flags (1)
// Offsets to tag header entries.
#define KEY_LEN_OFFSET 0
#define VALUE_LEN_OFFSET 1
#define FLAG_OFFSET 2
// raw_tag represents the raw-storage form of a tag in the kvm of a tag_set.
struct raw_tag {
uint8_t key_len;
uint8_t value_len;
uint8_t flags;
char *key;
char *value;
};
// Use a reserved flag bit for indication of deleted tag.
#define CENSUS_TAG_DELETED CENSUS_TAG_RESERVED
#define CENSUS_TAG_IS_DELETED(flags) (flags & CENSUS_TAG_DELETED)
// Primary representation of a context. Composed of 2 underlying tag_set
// structs, one each for propagated and local (non-propagated) tags. This is
// to efficiently support tag encoding/decoding.
// TODO(aveitch): need to add tracing id's/structure.
struct census_context {
struct tag_set tags[2];
census_context_status status;
};
// Indices into the tags member of census_context
#define PROPAGATED_TAGS 0
#define LOCAL_TAGS 1
// Validate (check all characters are in range and size is less than limit) a
// key or value string. Returns 0 if the string is invalid, or the length
// (including terminator) if valid.
static size_t validate_tag(const char *kv) {
size_t len = 1;
char ch;
while ((ch = *kv++) != 0) {
if (ch < MIN_VALID_TAG_CHAR || ch > MAX_VALID_TAG_CHAR) {
return 0;
}
len++;
}
if (len > CENSUS_MAX_TAG_KV_LEN) {
return 0;
}
return len;
}
// Extract a raw tag given a pointer (raw) to the tag header. Allow for some
// extra bytes in the tag header (see encode/decode functions for usage: this
// allows for future expansion of the tag header).
static char *decode_tag(struct raw_tag *tag, char *header, int offset) {
tag->key_len = (uint8_t)(*header++);
tag->value_len = (uint8_t)(*header++);
tag->flags = (uint8_t)(*header++);
header += offset;
tag->key = header;
header += tag->key_len;
tag->value = header;
return header + tag->value_len;
}
// Make a copy (in 'to') of an existing tag_set.
static void tag_set_copy(struct tag_set *to, const struct tag_set *from) {
memcpy(to, from, sizeof(struct tag_set));
to->kvm = (char *)gpr_malloc(to->kvm_size);
memcpy(to->kvm, from->kvm, from->kvm_used);
}
// Delete a tag from a tag_set, if it exists (returns true if it did).
static bool tag_set_delete_tag(struct tag_set *tags, const char *key,
size_t key_len) {
char *kvp = tags->kvm;
for (int i = 0; i < tags->ntags_alloc; i++) {
uint8_t *flags = (uint8_t *)(kvp + FLAG_OFFSET);
struct raw_tag tag;
kvp = decode_tag(&tag, kvp, 0);
if (CENSUS_TAG_IS_DELETED(tag.flags)) continue;
if ((key_len == tag.key_len) && (memcmp(key, tag.key, key_len) == 0)) {
*flags |= CENSUS_TAG_DELETED;
tags->ntags--;
return true;
}
}
return false;
}
// Delete a tag from a context, return true if it existed.
static bool context_delete_tag(census_context *context, const census_tag *tag,
size_t key_len) {
return (
tag_set_delete_tag(&context->tags[LOCAL_TAGS], tag->key, key_len) ||
tag_set_delete_tag(&context->tags[PROPAGATED_TAGS], tag->key, key_len));
}
// Add a tag to a tag_set. Return true on success, false if the tag could
// not be added because of constraints on tag set size. This function should
// not be called if the tag may already exist (in a non-deleted state) in
// the tag_set, as that would result in two tags with the same key.
static bool tag_set_add_tag(struct tag_set *tags, const census_tag *tag,
size_t key_len, size_t value_len) {
if (tags->ntags == CENSUS_MAX_PROPAGATED_TAGS) {
return false;
}
const size_t tag_size = key_len + value_len + TAG_HEADER_SIZE;
if (tags->kvm_used + tag_size > tags->kvm_size) {
// allocate new memory if needed
tags->kvm_size += 2 * CENSUS_MAX_TAG_KV_LEN + TAG_HEADER_SIZE;
char *new_kvm = (char *)gpr_malloc(tags->kvm_size);
if (tags->kvm_used > 0) memcpy(new_kvm, tags->kvm, tags->kvm_used);
gpr_free(tags->kvm);
tags->kvm = new_kvm;
}
char *kvp = tags->kvm + tags->kvm_used;
*kvp++ = (char)key_len;
*kvp++ = (char)value_len;
// ensure reserved flags are not used.
*kvp++ = (char)(tag->flags & (CENSUS_TAG_PROPAGATE | CENSUS_TAG_STATS));
memcpy(kvp, tag->key, key_len);
kvp += key_len;
memcpy(kvp, tag->value, value_len);
tags->kvm_used += tag_size;
tags->ntags++;
tags->ntags_alloc++;
return true;
}
// Add/modify/delete a tag to/in a context. Caller must validate that tag key
// etc. are valid.
static void context_modify_tag(census_context *context, const census_tag *tag,
size_t key_len, size_t value_len) {
// First delete the tag if it is already present.
bool deleted = context_delete_tag(context, tag, key_len);
bool added = false;
if (CENSUS_TAG_IS_PROPAGATED(tag->flags)) {
added = tag_set_add_tag(&context->tags[PROPAGATED_TAGS], tag, key_len,
value_len);
} else {
added =
tag_set_add_tag(&context->tags[LOCAL_TAGS], tag, key_len, value_len);
}
if (deleted) {
context->status.n_modified_tags++;
} else {
if (added) {
context->status.n_added_tags++;
} else {
context->status.n_ignored_tags++;
}
}
}
// Remove memory used for deleted tags from a tag set. Basic algorithm:
// 1) Walk through the tag set to find the first deleted tag. Record where it
//    is.
// 2) Find the next not-deleted tag and copy everything in kvm from there to
//    the end "over" the deleted tags.
// 3) Repeat #1 and #2 until all tags have been seen.
// 4) If we are still looking for a not-deleted tag at the end, the entire tail
//    of kvm consists of deleted tags; just reduce the used amount of memory by
//    the appropriate amount.
static void tag_set_flatten(struct tag_set *tags) {
if (tags->ntags == tags->ntags_alloc) return;
bool found_deleted = false; // found a deleted tag.
char *kvp = tags->kvm;
char *dbase = NULL; // record location of deleted tag
for (int i = 0; i < tags->ntags_alloc; i++) {
struct raw_tag tag;
char *next_kvp = decode_tag(&tag, kvp, 0);
if (found_deleted) {
if (!CENSUS_TAG_IS_DELETED(tag.flags)) {
ptrdiff_t reduce = kvp - dbase; // #bytes in deleted tags
GPR_ASSERT(reduce > 0);
ptrdiff_t copy_size = tags->kvm + tags->kvm_used - kvp;
GPR_ASSERT(copy_size > 0);
memmove(dbase, kvp, (size_t)copy_size);
tags->kvm_used -= (size_t)reduce;
next_kvp -= reduce;
found_deleted = false;
}
} else {
if (CENSUS_TAG_IS_DELETED(tag.flags)) {
dbase = kvp;
found_deleted = true;
}
}
kvp = next_kvp;
}
if (found_deleted) {
GPR_ASSERT(dbase > tags->kvm);
tags->kvm_used = (size_t)(dbase - tags->kvm);
}
tags->ntags_alloc = tags->ntags;
}
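// Illustrative sketch (not part of the original file): if the kvm holds tags
// A, B, C in that order and B has been marked CENSUS_TAG_DELETED, flattening
// copies C's bytes down over B, shrinks kvm_used by B's size, and leaves
// ntags_alloc == ntags == 2 with A and C stored contiguously.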
census_context *census_context_create(const census_context *base,
const census_tag *tags, int ntags,
census_context_status const **status) {
census_context *context =
(census_context *)gpr_malloc(sizeof(census_context));
// If we are given a base, copy its tag sets into the new context. Otherwise
// zero/NULL everything.
if (base == NULL) {
memset(context, 0, sizeof(census_context));
} else {
tag_set_copy(&context->tags[PROPAGATED_TAGS], &base->tags[PROPAGATED_TAGS]);
tag_set_copy(&context->tags[LOCAL_TAGS], &base->tags[LOCAL_TAGS]);
memset(&context->status, 0, sizeof(context->status));
}
// Walk over the additional tags and, for those that aren't invalid, modify
// the context to add/replace/delete as required.
for (int i = 0; i < ntags; i++) {
const census_tag *tag = &tags[i];
size_t key_len = validate_tag(tag->key);
// ignore the tag if it is invalid or too short.
if (key_len <= 1) {
context->status.n_invalid_tags++;
} else {
if (tag->value != NULL) {
size_t value_len = validate_tag(tag->value);
if (value_len != 0) {
context_modify_tag(context, tag, key_len, value_len);
} else {
context->status.n_invalid_tags++;
}
} else {
if (context_delete_tag(context, tag, key_len)) {
context->status.n_deleted_tags++;
}
}
}
}
// Remove any deleted tags, update status if needed, and return.
tag_set_flatten(&context->tags[PROPAGATED_TAGS]);
tag_set_flatten(&context->tags[LOCAL_TAGS]);
context->status.n_propagated_tags = context->tags[PROPAGATED_TAGS].ntags;
context->status.n_local_tags = context->tags[LOCAL_TAGS].ntags;
if (status) {
*status = &context->status;
}
return context;
}
const census_context_status *census_context_get_status(
const census_context *context) {
return &context->status;
}
void census_context_destroy(census_context *context) {
gpr_free(context->tags[PROPAGATED_TAGS].kvm);
gpr_free(context->tags[LOCAL_TAGS].kvm);
gpr_free(context);
}
void census_context_initialize_iterator(const census_context *context,
census_context_iterator *iterator) {
iterator->context = context;
iterator->index = 0;
if (context->tags[PROPAGATED_TAGS].ntags != 0) {
iterator->base = PROPAGATED_TAGS;
iterator->kvm = context->tags[PROPAGATED_TAGS].kvm;
} else if (context->tags[LOCAL_TAGS].ntags != 0) {
iterator->base = LOCAL_TAGS;
iterator->kvm = context->tags[LOCAL_TAGS].kvm;
} else {
iterator->base = -1;
}
}
int census_context_next_tag(census_context_iterator *iterator,
census_tag *tag) {
if (iterator->base < 0) {
return 0;
}
struct raw_tag raw;
iterator->kvm = decode_tag(&raw, iterator->kvm, 0);
tag->key = raw.key;
tag->value = raw.value;
tag->flags = raw.flags;
if (++iterator->index == iterator->context->tags[iterator->base].ntags) {
do {
if (iterator->base == LOCAL_TAGS) {
iterator->base = -1;
return 1;
}
} while (iterator->context->tags[++iterator->base].ntags == 0);
iterator->index = 0;
iterator->kvm = iterator->context->tags[iterator->base].kvm;
}
return 1;
}
// Find a tag in a tag_set by key. Return true if found, false otherwise.
static bool tag_set_get_tag(const struct tag_set *tags, const char *key,
size_t key_len, census_tag *tag) {
char *kvp = tags->kvm;
for (int i = 0; i < tags->ntags; i++) {
struct raw_tag raw;
kvp = decode_tag(&raw, kvp, 0);
if (key_len == raw.key_len && memcmp(raw.key, key, key_len) == 0) {
tag->key = raw.key;
tag->value = raw.value;
tag->flags = raw.flags;
return true;
}
}
return false;
}
int census_context_get_tag(const census_context *context, const char *key,
census_tag *tag) {
size_t key_len = strlen(key) + 1;
if (key_len == 1) {
return 0;
}
if (tag_set_get_tag(&context->tags[PROPAGATED_TAGS], key, key_len, tag) ||
tag_set_get_tag(&context->tags[LOCAL_TAGS], key, key_len, tag)) {
return 1;
}
return 0;
}
// Context encoding and decoding functions.
//
// Wire format for tag_set's on the wire:
//
// First, a tag set header:
//
// offset bytes description
// 0 1 version number
// 1 1 number of bytes in this header. This allows for future
// expansion.
// 2 1 number of bytes in each tag header.
// 3 1 ntags value from tag set.
//
// This is followed by the key/value memory from struct tag_set.
#define ENCODED_VERSION 0 // Version number
#define ENCODED_HEADER_SIZE 4 // size of tag set header
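// Illustrative sketch (not part of the original file): the wire bytes that
// tag_set_encode() below would produce for a tag set holding only the
// propagated "env" / "prod" tag from the earlier kvm example. The 4-byte
// header is followed verbatim by the kvm contents, giving
// ENCODED_HEADER_SIZE + 12 == 16 bytes in total. The array is never
// referenced; it exists purely as documentation.
static const char example_encoded_tag_set[] = {
    ENCODED_VERSION, ENCODED_HEADER_SIZE, TAG_HEADER_SIZE, 1,  // set header
    4, 5, CENSUS_TAG_PROPAGATE,                                // tag header
    'e', 'n', 'v', '\0',                                       // key
    'p', 'r', 'o', 'd', '\0'};                                 // value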
// Encode a tag set. Returns 0 if buffer is too small.
static size_t tag_set_encode(const struct tag_set *tags, char *buffer,
size_t buf_size) {
if (buf_size < ENCODED_HEADER_SIZE + tags->kvm_used) {
return 0;
}
buf_size -= ENCODED_HEADER_SIZE;
*buffer++ = (char)ENCODED_VERSION;
*buffer++ = (char)ENCODED_HEADER_SIZE;
*buffer++ = (char)TAG_HEADER_SIZE;
*buffer++ = (char)tags->ntags;
if (tags->ntags == 0) {
return ENCODED_HEADER_SIZE;
}
memcpy(buffer, tags->kvm, tags->kvm_used);
return ENCODED_HEADER_SIZE + tags->kvm_used;
}
size_t census_context_encode(const census_context *context, char *buffer,
size_t buf_size) {
return tag_set_encode(&context->tags[PROPAGATED_TAGS], buffer, buf_size);
}
// Decode a tag set.
static void tag_set_decode(struct tag_set *tags, const char *buffer,
size_t size) {
uint8_t version = (uint8_t)(*buffer++);
uint8_t header_size = (uint8_t)(*buffer++);
uint8_t tag_header_size = (uint8_t)(*buffer++);
tags->ntags = tags->ntags_alloc = (int)(*buffer++);
if (tags->ntags == 0) {
tags->ntags_alloc = 0;
tags->kvm_size = 0;
tags->kvm_used = 0;
tags->kvm = NULL;
return;
}
if (header_size != ENCODED_HEADER_SIZE) {
GPR_ASSERT(version != ENCODED_VERSION);
GPR_ASSERT(ENCODED_HEADER_SIZE < header_size);
buffer += (header_size - ENCODED_HEADER_SIZE);
}
tags->kvm_used = size - header_size;
tags->kvm_size = tags->kvm_used + CENSUS_MAX_TAG_KV_LEN;
tags->kvm = (char *)gpr_malloc(tags->kvm_size);
if (tag_header_size != TAG_HEADER_SIZE) {
// something new in the tag information. I don't understand it, so
// don't copy it over.
GPR_ASSERT(version != ENCODED_VERSION);
GPR_ASSERT(tag_header_size > TAG_HEADER_SIZE);
char *kvp = tags->kvm;
for (int i = 0; i < tags->ntags; i++) {
memcpy(kvp, buffer, TAG_HEADER_SIZE);
kvp += header_size;
struct raw_tag raw;
buffer =
decode_tag(&raw, (char *)buffer, tag_header_size - TAG_HEADER_SIZE);
memcpy(kvp, raw.key, (size_t)raw.key_len + raw.value_len);
kvp += raw.key_len + raw.value_len;
}
} else {
memcpy(tags->kvm, buffer, tags->kvm_used);
}
}
census_context *census_context_decode(const char *buffer, size_t size) {
census_context *context =
(census_context *)gpr_malloc(sizeof(census_context));
memset(&context->tags[LOCAL_TAGS], 0, sizeof(struct tag_set));
if (buffer == NULL) {
memset(&context->tags[PROPAGATED_TAGS], 0, sizeof(struct tag_set));
} else {
tag_set_decode(&context->tags[PROPAGATED_TAGS], buffer, size);
}
memset(&context->status, 0, sizeof(context->status));
context->status.n_propagated_tags = context->tags[PROPAGATED_TAGS].ntags;
return context;
}
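// Illustrative usage sketch (not part of the original file): creating a
// context with one propagated tag, probing it, and round-tripping it through
// census_context_encode()/census_context_decode() above. The tag contents and
// buffer size are arbitrary, the function is never called, and error handling
// is elided.
static void census_context_usage_sketch(void) {
  census_tag tag;
  tag.key = "env";
  tag.value = "prod";
  tag.flags = CENSUS_TAG_PROPAGATE;
  const census_context_status *status;
  census_context *ctx = census_context_create(NULL, &tag, 1, &status);
  census_tag found;
  if (census_context_get_tag(ctx, "env", &found)) {
    // found.value now points at "prod" inside ctx's propagated tag set.
  }
  char buffer[64];
  size_t encoded = census_context_encode(ctx, buffer, sizeof(buffer));
  census_context *copy =
      (encoded != 0) ? census_context_decode(buffer, encoded) : NULL;
  if (copy != NULL) census_context_destroy(copy);
  census_context_destroy(ctx);
}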

@@ -1,10 +0,0 @@
Files generated for use by the Census stats and trace recording subsystem.
# Files
* census.pb.{h,c} - Generated from src/core/ext/census/census.proto, using the
script `tools/codegen/core/gen_nano_proto.sh src/proto/census/census.proto
$PWD/src/core/ext/census/gen src/core/ext/census/gen`
* trace_context.pb.{h,c} - Generated from
src/core/ext/census/trace_context.proto, using the script
`tools/codegen/core/gen_nano_proto.sh src/proto/census/trace_context.proto
$PWD/src/core/ext/census/gen src/core/ext/census/gen`

@@ -1,161 +0,0 @@
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/* Automatically generated nanopb constant definitions */
/* Generated by nanopb-0.3.5-dev */
#include "src/core/ext/census/gen/census.pb.h"
#if PB_PROTO_HEADER_VERSION != 30
#error Regenerate this file with the current version of nanopb generator.
#endif
const pb_field_t google_census_Duration_fields[3] = {
PB_FIELD( 1, INT64 , OPTIONAL, STATIC , FIRST, google_census_Duration, seconds, seconds, 0),
PB_FIELD( 2, INT32 , OPTIONAL, STATIC , OTHER, google_census_Duration, nanos, seconds, 0),
PB_LAST_FIELD
};
const pb_field_t google_census_Timestamp_fields[3] = {
PB_FIELD( 1, INT64 , OPTIONAL, STATIC , FIRST, google_census_Timestamp, seconds, seconds, 0),
PB_FIELD( 2, INT32 , OPTIONAL, STATIC , OTHER, google_census_Timestamp, nanos, seconds, 0),
PB_LAST_FIELD
};
const pb_field_t google_census_Resource_fields[4] = {
PB_FIELD( 1, STRING , OPTIONAL, CALLBACK, FIRST, google_census_Resource, name, name, 0),
PB_FIELD( 2, STRING , OPTIONAL, CALLBACK, OTHER, google_census_Resource, description, name, 0),
PB_FIELD( 3, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_Resource, unit, description, &google_census_Resource_MeasurementUnit_fields),
PB_LAST_FIELD
};
const pb_field_t google_census_Resource_MeasurementUnit_fields[4] = {
PB_FIELD( 1, INT32 , OPTIONAL, STATIC , FIRST, google_census_Resource_MeasurementUnit, prefix, prefix, 0),
PB_FIELD( 2, UENUM , REPEATED, CALLBACK, OTHER, google_census_Resource_MeasurementUnit, numerator, prefix, 0),
PB_FIELD( 3, UENUM , REPEATED, CALLBACK, OTHER, google_census_Resource_MeasurementUnit, denominator, numerator, 0),
PB_LAST_FIELD
};
const pb_field_t google_census_AggregationDescriptor_fields[4] = {
PB_FIELD( 1, UENUM , OPTIONAL, STATIC , FIRST, google_census_AggregationDescriptor, type, type, 0),
PB_ONEOF_FIELD(options, 2, MESSAGE , ONEOF, STATIC , OTHER, google_census_AggregationDescriptor, bucket_boundaries, type, &google_census_AggregationDescriptor_BucketBoundaries_fields),
PB_ONEOF_FIELD(options, 3, MESSAGE , ONEOF, STATIC , OTHER, google_census_AggregationDescriptor, interval_boundaries, type, &google_census_AggregationDescriptor_IntervalBoundaries_fields),
PB_LAST_FIELD
};
const pb_field_t google_census_AggregationDescriptor_BucketBoundaries_fields[2] = {
PB_FIELD( 1, DOUBLE , REPEATED, CALLBACK, FIRST, google_census_AggregationDescriptor_BucketBoundaries, bounds, bounds, 0),
PB_LAST_FIELD
};
const pb_field_t google_census_AggregationDescriptor_IntervalBoundaries_fields[2] = {
PB_FIELD( 1, DOUBLE , REPEATED, CALLBACK, FIRST, google_census_AggregationDescriptor_IntervalBoundaries, window_size, window_size, 0),
PB_LAST_FIELD
};
const pb_field_t google_census_Distribution_fields[5] = {
PB_FIELD( 1, INT64 , OPTIONAL, STATIC , FIRST, google_census_Distribution, count, count, 0),
PB_FIELD( 2, DOUBLE , OPTIONAL, STATIC , OTHER, google_census_Distribution, mean, count, 0),
PB_FIELD( 3, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_Distribution, range, mean, &google_census_Distribution_Range_fields),
PB_FIELD( 4, INT64 , REPEATED, CALLBACK, OTHER, google_census_Distribution, bucket_count, range, 0),
PB_LAST_FIELD
};
const pb_field_t google_census_Distribution_Range_fields[3] = {
PB_FIELD( 1, DOUBLE , OPTIONAL, STATIC , FIRST, google_census_Distribution_Range, min, min, 0),
PB_FIELD( 2, DOUBLE , OPTIONAL, STATIC , OTHER, google_census_Distribution_Range, max, min, 0),
PB_LAST_FIELD
};
const pb_field_t google_census_IntervalStats_fields[2] = {
PB_FIELD( 1, MESSAGE , REPEATED, CALLBACK, FIRST, google_census_IntervalStats, window, window, &google_census_IntervalStats_Window_fields),
PB_LAST_FIELD
};
const pb_field_t google_census_IntervalStats_Window_fields[4] = {
PB_FIELD( 1, MESSAGE , OPTIONAL, STATIC , FIRST, google_census_IntervalStats_Window, window_size, window_size, &google_census_Duration_fields),
PB_FIELD( 2, INT64 , OPTIONAL, STATIC , OTHER, google_census_IntervalStats_Window, count, window_size, 0),
PB_FIELD( 3, DOUBLE , OPTIONAL, STATIC , OTHER, google_census_IntervalStats_Window, mean, count, 0),
PB_LAST_FIELD
};
const pb_field_t google_census_Tag_fields[3] = {
PB_FIELD( 1, STRING , OPTIONAL, STATIC , FIRST, google_census_Tag, key, key, 0),
PB_FIELD( 2, STRING , OPTIONAL, STATIC , OTHER, google_census_Tag, value, key, 0),
PB_LAST_FIELD
};
const pb_field_t google_census_View_fields[6] = {
PB_FIELD( 1, STRING , OPTIONAL, CALLBACK, FIRST, google_census_View, name, name, 0),
PB_FIELD( 2, STRING , OPTIONAL, CALLBACK, OTHER, google_census_View, description, name, 0),
PB_FIELD( 3, STRING , OPTIONAL, CALLBACK, OTHER, google_census_View, resource_name, description, 0),
PB_FIELD( 4, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_View, aggregation, resource_name, &google_census_AggregationDescriptor_fields),
PB_FIELD( 5, STRING , REPEATED, CALLBACK, OTHER, google_census_View, tag_key, aggregation, 0),
PB_LAST_FIELD
};
const pb_field_t google_census_Aggregation_fields[7] = {
PB_FIELD( 1, STRING , OPTIONAL, CALLBACK, FIRST, google_census_Aggregation, name, name, 0),
PB_FIELD( 2, STRING , OPTIONAL, CALLBACK, OTHER, google_census_Aggregation, description, name, 0),
PB_ONEOF_FIELD(data, 3, UINT64 , ONEOF, STATIC , OTHER, google_census_Aggregation, count, description, 0),
PB_ONEOF_FIELD(data, 4, MESSAGE , ONEOF, STATIC , OTHER, google_census_Aggregation, distribution, description, &google_census_Distribution_fields),
PB_ONEOF_FIELD(data, 5, MESSAGE , ONEOF, STATIC , OTHER, google_census_Aggregation, interval_stats, description, &google_census_IntervalStats_fields),
PB_FIELD( 6, MESSAGE , REPEATED, CALLBACK, OTHER, google_census_Aggregation, tag, data.interval_stats, &google_census_Tag_fields),
PB_LAST_FIELD
};
const pb_field_t google_census_Metric_fields[5] = {
PB_FIELD( 1, STRING , OPTIONAL, CALLBACK, FIRST, google_census_Metric, view_name, view_name, 0),
PB_FIELD( 2, MESSAGE , REPEATED, CALLBACK, OTHER, google_census_Metric, aggregation, view_name, &google_census_Aggregation_fields),
PB_FIELD( 3, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_Metric, start, aggregation, &google_census_Timestamp_fields),
PB_FIELD( 4, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_Metric, end, start, &google_census_Timestamp_fields),
PB_LAST_FIELD
};
/* Check that field information fits in pb_field_t */
#if !defined(PB_FIELD_32BIT)
/* If you get an error here, it means that you need to define PB_FIELD_32BIT
* compile-time option. You can do that in pb.h or on compiler command line.
*
* The reason you need to do this is that some of your messages contain tag
* numbers or field sizes that are larger than what can fit in 8 or 16 bit
* field descriptors.
*/
PB_STATIC_ASSERT((pb_membersize(google_census_Resource, unit) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 65536 && pb_membersize(google_census_Resource, unit) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 65536 && pb_membersize(google_census_Distribution, range) < 65536 && pb_membersize(google_census_IntervalStats, window) < 65536 && pb_membersize(google_census_IntervalStats_Window, window_size) < 65536 && pb_membersize(google_census_View, aggregation) < 65536 && pb_membersize(google_census_Aggregation, data.distribution) < 65536 && pb_membersize(google_census_Aggregation, data.interval_stats) < 65536 && pb_membersize(google_census_Resource, unit) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 65536 && pb_membersize(google_census_Resource, unit) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 65536 && pb_membersize(google_census_Distribution, range) < 65536 && pb_membersize(google_census_IntervalStats, window) < 65536 && pb_membersize(google_census_IntervalStats_Window, window_size) < 65536 && pb_membersize(google_census_View, aggregation) < 65536 && pb_membersize(google_census_Aggregation, data.distribution) < 65536 && pb_membersize(google_census_Aggregation, data.interval_stats) < 65536 && pb_membersize(google_census_Aggregation, tag) < 65536 && pb_membersize(google_census_Metric, aggregation) < 65536 && pb_membersize(google_census_Metric, start) < 65536 && pb_membersize(google_census_Metric, end) < 65536), YOU_MUST_DEFINE_PB_FIELD_32BIT_FOR_MESSAGES_google_census_Duration_google_census_Timestamp_google_census_Resource_google_census_Resource_MeasurementUnit_google_census_AggregationDescriptor_google_census_AggregationDescriptor_BucketBoundaries_google_census_AggregationDescriptor_IntervalBoundaries_google_census_Distribution_google_census_Distribution_Range_google_census_IntervalStats_google_census_IntervalStats_Window_google_census_Tag_google_census_View_google_census_Aggregation_google_census_Metric)
#endif
#if !defined(PB_FIELD_16BIT) && !defined(PB_FIELD_32BIT)
/* If you get an error here, it means that you need to define PB_FIELD_16BIT
* compile-time option. You can do that in pb.h or on compiler command line.
*
* The reason you need to do this is that some of your messages contain tag
* numbers or field sizes that are larger than what can fit in the default
* 8 bit descriptors.
*/
PB_STATIC_ASSERT((pb_membersize(google_census_Resource, unit) < 256 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 256 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 256 && pb_membersize(google_census_Resource, unit) < 256 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 256 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 256 && pb_membersize(google_census_Distribution, range) < 256 && pb_membersize(google_census_IntervalStats, window) < 256 && pb_membersize(google_census_IntervalStats_Window, window_size) < 256 && pb_membersize(google_census_View, aggregation) < 256 && pb_membersize(google_census_Aggregation, data.distribution) < 256 && pb_membersize(google_census_Aggregation, data.interval_stats) < 256 && pb_membersize(google_census_Resource, unit) < 256 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 256 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 256 && pb_membersize(google_census_Resource, unit) < 256 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 256 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 256 && pb_membersize(google_census_Distribution, range) < 256 && pb_membersize(google_census_IntervalStats, window) < 256 && pb_membersize(google_census_IntervalStats_Window, window_size) < 256 && pb_membersize(google_census_View, aggregation) < 256 && pb_membersize(google_census_Aggregation, data.distribution) < 256 && pb_membersize(google_census_Aggregation, data.interval_stats) < 256 && pb_membersize(google_census_Aggregation, tag) < 256 && pb_membersize(google_census_Metric, aggregation) < 256 && pb_membersize(google_census_Metric, start) < 256 && pb_membersize(google_census_Metric, end) < 256), YOU_MUST_DEFINE_PB_FIELD_16BIT_FOR_MESSAGES_google_census_Duration_google_census_Timestamp_google_census_Resource_google_census_Resource_MeasurementUnit_google_census_AggregationDescriptor_google_census_AggregationDescriptor_BucketBoundaries_google_census_AggregationDescriptor_IntervalBoundaries_google_census_Distribution_google_census_Distribution_Range_google_census_IntervalStats_google_census_IntervalStats_Window_google_census_Tag_google_census_View_google_census_Aggregation_google_census_Metric)
#endif
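/* A hedged sketch (not part of the generated file) of how the PB_FIELD_32BIT
 * option referenced in the comments above is usually enabled: define it before
 * the first include of pb.h, or pass the equivalent -DPB_FIELD_32BIT compiler
 * flag so that every translation unit including pb.h agrees on the setting. */
#ifndef PB_FIELD_32BIT
#define PB_FIELD_32BIT 1
#endif
#include "third_party/nanopb/pb.h"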
/* On some platforms (such as AVR), double is really float.
* These are not directly supported by nanopb, but see example_avr_double.
* To get rid of this error, remove any double fields from your .proto.
*/
PB_STATIC_ASSERT(sizeof(double) == 8, DOUBLE_MUST_BE_8_BYTES)

@@ -1,280 +0,0 @@
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/* Automatically generated nanopb header */
/* Generated by nanopb-0.3.5-dev */
#ifndef GRPC_CORE_EXT_CENSUS_GEN_CENSUS_PB_H
#define GRPC_CORE_EXT_CENSUS_GEN_CENSUS_PB_H
#include "third_party/nanopb/pb.h"
#if PB_PROTO_HEADER_VERSION != 30
#error Regenerate this file with the current version of nanopb generator.
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* Enum definitions */
typedef enum _google_census_Resource_BasicUnit {
google_census_Resource_BasicUnit_UNKNOWN = 0,
google_census_Resource_BasicUnit_BITS = 1,
google_census_Resource_BasicUnit_BYTES = 2,
google_census_Resource_BasicUnit_SECS = 3,
google_census_Resource_BasicUnit_CORES = 4,
google_census_Resource_BasicUnit_MAX_UNITS = 5
} google_census_Resource_BasicUnit;
typedef enum _google_census_AggregationDescriptor_AggregationType {
google_census_AggregationDescriptor_AggregationType_UNKNOWN = 0,
google_census_AggregationDescriptor_AggregationType_COUNT = 1,
google_census_AggregationDescriptor_AggregationType_DISTRIBUTION = 2,
google_census_AggregationDescriptor_AggregationType_INTERVAL = 3
} google_census_AggregationDescriptor_AggregationType;
/* Struct definitions */
typedef struct _google_census_AggregationDescriptor_BucketBoundaries {
pb_callback_t bounds;
} google_census_AggregationDescriptor_BucketBoundaries;
typedef struct _google_census_AggregationDescriptor_IntervalBoundaries {
pb_callback_t window_size;
} google_census_AggregationDescriptor_IntervalBoundaries;
typedef struct _google_census_IntervalStats {
pb_callback_t window;
} google_census_IntervalStats;
typedef struct _google_census_AggregationDescriptor {
bool has_type;
google_census_AggregationDescriptor_AggregationType type;
pb_size_t which_options;
union {
google_census_AggregationDescriptor_BucketBoundaries bucket_boundaries;
google_census_AggregationDescriptor_IntervalBoundaries interval_boundaries;
} options;
} google_census_AggregationDescriptor;
typedef struct _google_census_Distribution_Range {
bool has_min;
double min;
bool has_max;
double max;
} google_census_Distribution_Range;
typedef struct _google_census_Duration {
bool has_seconds;
int64_t seconds;
bool has_nanos;
int32_t nanos;
} google_census_Duration;
typedef struct _google_census_Resource_MeasurementUnit {
bool has_prefix;
int32_t prefix;
pb_callback_t numerator;
pb_callback_t denominator;
} google_census_Resource_MeasurementUnit;
typedef struct _google_census_Tag {
bool has_key;
char key[255];
bool has_value;
char value[255];
} google_census_Tag;
typedef struct _google_census_Timestamp {
bool has_seconds;
int64_t seconds;
bool has_nanos;
int32_t nanos;
} google_census_Timestamp;
typedef struct _google_census_Distribution {
bool has_count;
int64_t count;
bool has_mean;
double mean;
bool has_range;
google_census_Distribution_Range range;
pb_callback_t bucket_count;
} google_census_Distribution;
typedef struct _google_census_IntervalStats_Window {
bool has_window_size;
google_census_Duration window_size;
bool has_count;
int64_t count;
bool has_mean;
double mean;
} google_census_IntervalStats_Window;
typedef struct _google_census_Metric {
pb_callback_t view_name;
pb_callback_t aggregation;
bool has_start;
google_census_Timestamp start;
bool has_end;
google_census_Timestamp end;
} google_census_Metric;
typedef struct _google_census_Resource {
pb_callback_t name;
pb_callback_t description;
bool has_unit;
google_census_Resource_MeasurementUnit unit;
} google_census_Resource;
typedef struct _google_census_View {
pb_callback_t name;
pb_callback_t description;
pb_callback_t resource_name;
bool has_aggregation;
google_census_AggregationDescriptor aggregation;
pb_callback_t tag_key;
} google_census_View;
typedef struct _google_census_Aggregation {
pb_callback_t name;
pb_callback_t description;
pb_size_t which_data;
union {
uint64_t count;
google_census_Distribution distribution;
google_census_IntervalStats interval_stats;
} data;
pb_callback_t tag;
} google_census_Aggregation;
/* Default values for struct fields */
/* Initializer values for message structs */
#define google_census_Duration_init_default {false, 0, false, 0}
#define google_census_Timestamp_init_default {false, 0, false, 0}
#define google_census_Resource_init_default {{{NULL}, NULL}, {{NULL}, NULL}, false, google_census_Resource_MeasurementUnit_init_default}
#define google_census_Resource_MeasurementUnit_init_default {false, 0, {{NULL}, NULL}, {{NULL}, NULL}}
#define google_census_AggregationDescriptor_init_default {false, (google_census_AggregationDescriptor_AggregationType)0, 0, {google_census_AggregationDescriptor_BucketBoundaries_init_default}}
#define google_census_AggregationDescriptor_BucketBoundaries_init_default {{{NULL}, NULL}}
#define google_census_AggregationDescriptor_IntervalBoundaries_init_default {{{NULL}, NULL}}
#define google_census_Distribution_init_default {false, 0, false, 0, false, google_census_Distribution_Range_init_default, {{NULL}, NULL}}
#define google_census_Distribution_Range_init_default {false, 0, false, 0}
#define google_census_IntervalStats_init_default {{{NULL}, NULL}}
#define google_census_IntervalStats_Window_init_default {false, google_census_Duration_init_default, false, 0, false, 0}
#define google_census_Tag_init_default {false, "", false, ""}
#define google_census_View_init_default {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, false, google_census_AggregationDescriptor_init_default, {{NULL}, NULL}}
#define google_census_Aggregation_init_default {{{NULL}, NULL}, {{NULL}, NULL}, 0, {0}, {{NULL}, NULL}}
#define google_census_Metric_init_default {{{NULL}, NULL}, {{NULL}, NULL}, false, google_census_Timestamp_init_default, false, google_census_Timestamp_init_default}
#define google_census_Duration_init_zero {false, 0, false, 0}
#define google_census_Timestamp_init_zero {false, 0, false, 0}
#define google_census_Resource_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, false, google_census_Resource_MeasurementUnit_init_zero}
#define google_census_Resource_MeasurementUnit_init_zero {false, 0, {{NULL}, NULL}, {{NULL}, NULL}}
#define google_census_AggregationDescriptor_init_zero {false, (google_census_AggregationDescriptor_AggregationType)0, 0, {google_census_AggregationDescriptor_BucketBoundaries_init_zero}}
#define google_census_AggregationDescriptor_BucketBoundaries_init_zero {{{NULL}, NULL}}
#define google_census_AggregationDescriptor_IntervalBoundaries_init_zero {{{NULL}, NULL}}
#define google_census_Distribution_init_zero {false, 0, false, 0, false, google_census_Distribution_Range_init_zero, {{NULL}, NULL}}
#define google_census_Distribution_Range_init_zero {false, 0, false, 0}
#define google_census_IntervalStats_init_zero {{{NULL}, NULL}}
#define google_census_IntervalStats_Window_init_zero {false, google_census_Duration_init_zero, false, 0, false, 0}
#define google_census_Tag_init_zero {false, "", false, ""}
#define google_census_View_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, false, google_census_AggregationDescriptor_init_zero, {{NULL}, NULL}}
#define google_census_Aggregation_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, 0, {0}, {{NULL}, NULL}}
#define google_census_Metric_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, false, google_census_Timestamp_init_zero, false, google_census_Timestamp_init_zero}
/* Field tags (for use in manual encoding/decoding) */
#define google_census_AggregationDescriptor_BucketBoundaries_bounds_tag 1
#define google_census_AggregationDescriptor_IntervalBoundaries_window_size_tag 1
#define google_census_IntervalStats_window_tag 1
#define google_census_AggregationDescriptor_bucket_boundaries_tag 2
#define google_census_AggregationDescriptor_interval_boundaries_tag 3
#define google_census_AggregationDescriptor_type_tag 1
#define google_census_Distribution_Range_min_tag 1
#define google_census_Distribution_Range_max_tag 2
#define google_census_Duration_seconds_tag 1
#define google_census_Duration_nanos_tag 2
#define google_census_Resource_MeasurementUnit_prefix_tag 1
#define google_census_Resource_MeasurementUnit_numerator_tag 2
#define google_census_Resource_MeasurementUnit_denominator_tag 3
#define google_census_Tag_key_tag 1
#define google_census_Tag_value_tag 2
#define google_census_Timestamp_seconds_tag 1
#define google_census_Timestamp_nanos_tag 2
#define google_census_Distribution_count_tag 1
#define google_census_Distribution_mean_tag 2
#define google_census_Distribution_range_tag 3
#define google_census_Distribution_bucket_count_tag 4
#define google_census_IntervalStats_Window_window_size_tag 1
#define google_census_IntervalStats_Window_count_tag 2
#define google_census_IntervalStats_Window_mean_tag 3
#define google_census_Metric_view_name_tag 1
#define google_census_Metric_aggregation_tag 2
#define google_census_Metric_start_tag 3
#define google_census_Metric_end_tag 4
#define google_census_Resource_name_tag 1
#define google_census_Resource_description_tag 2
#define google_census_Resource_unit_tag 3
#define google_census_View_name_tag 1
#define google_census_View_description_tag 2
#define google_census_View_resource_name_tag 3
#define google_census_View_aggregation_tag 4
#define google_census_View_tag_key_tag 5
#define google_census_Aggregation_count_tag 3
#define google_census_Aggregation_distribution_tag 4
#define google_census_Aggregation_interval_stats_tag 5
#define google_census_Aggregation_name_tag 1
#define google_census_Aggregation_description_tag 2
#define google_census_Aggregation_tag_tag 6
/* Struct field encoding specification for nanopb */
extern const pb_field_t google_census_Duration_fields[3];
extern const pb_field_t google_census_Timestamp_fields[3];
extern const pb_field_t google_census_Resource_fields[4];
extern const pb_field_t google_census_Resource_MeasurementUnit_fields[4];
extern const pb_field_t google_census_AggregationDescriptor_fields[4];
extern const pb_field_t google_census_AggregationDescriptor_BucketBoundaries_fields[2];
extern const pb_field_t google_census_AggregationDescriptor_IntervalBoundaries_fields[2];
extern const pb_field_t google_census_Distribution_fields[5];
extern const pb_field_t google_census_Distribution_Range_fields[3];
extern const pb_field_t google_census_IntervalStats_fields[2];
extern const pb_field_t google_census_IntervalStats_Window_fields[4];
extern const pb_field_t google_census_Tag_fields[3];
extern const pb_field_t google_census_View_fields[6];
extern const pb_field_t google_census_Aggregation_fields[7];
extern const pb_field_t google_census_Metric_fields[5];
/* Maximum encoded size of messages (where known) */
#define google_census_Duration_size 22
#define google_census_Timestamp_size 22
#define google_census_Distribution_Range_size 18
#define google_census_IntervalStats_Window_size 44
#define google_census_Tag_size 516
/* Message IDs (where set with "msgid" option) */
#ifdef PB_MSGID
#define CENSUS_MESSAGES \
#endif
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* GRPC_CORE_EXT_CENSUS_GEN_CENSUS_PB_H */

@@ -1,39 +0,0 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/* Automatically generated nanopb constant definitions */
/* Generated by nanopb-0.3.7-dev at Fri Jan 20 16:14:22 2017. */
#include "src/core/ext/census/gen/trace_context.pb.h"
/* @@protoc_insertion_point(includes) */
#if PB_PROTO_HEADER_VERSION != 30
#error Regenerate this file with the current version of nanopb generator.
#endif
const pb_field_t google_trace_TraceContext_fields[5] = {
PB_FIELD( 1, FIXED64 , OPTIONAL, STATIC , FIRST, google_trace_TraceContext, trace_id_hi, trace_id_hi, 0),
PB_FIELD( 2, FIXED64 , OPTIONAL, STATIC , OTHER, google_trace_TraceContext, trace_id_lo, trace_id_hi, 0),
PB_FIELD( 3, FIXED64 , OPTIONAL, STATIC , OTHER, google_trace_TraceContext, span_id, trace_id_lo, 0),
PB_FIELD( 4, FIXED32 , OPTIONAL, STATIC , OTHER, google_trace_TraceContext, span_options, span_id, 0),
PB_LAST_FIELD
};
/* @@protoc_insertion_point(eof) */

@@ -1,78 +0,0 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/* Automatically generated nanopb header */
/* Generated by nanopb-0.3.7-dev at Fri Jan 20 16:14:22 2017. */
#ifndef GRPC_CORE_EXT_CENSUS_GEN_TRACE_CONTEXT_PB_H
#define GRPC_CORE_EXT_CENSUS_GEN_TRACE_CONTEXT_PB_H
#include "third_party/nanopb/pb.h"
/* @@protoc_insertion_point(includes) */
#if PB_PROTO_HEADER_VERSION != 30
#error Regenerate this file with the current version of nanopb generator.
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* Struct definitions */
typedef struct _google_trace_TraceContext {
bool has_trace_id_hi;
uint64_t trace_id_hi;
bool has_trace_id_lo;
uint64_t trace_id_lo;
bool has_span_id;
uint64_t span_id;
bool has_span_options;
uint32_t span_options;
/* @@protoc_insertion_point(struct:google_trace_TraceContext) */
} google_trace_TraceContext;
/* Default values for struct fields */
/* Initializer values for message structs */
#define google_trace_TraceContext_init_default {false, 0, false, 0, false, 0, false, 0}
#define google_trace_TraceContext_init_zero {false, 0, false, 0, false, 0, false, 0}
/* Field tags (for use in manual encoding/decoding) */
#define google_trace_TraceContext_trace_id_hi_tag 1
#define google_trace_TraceContext_trace_id_lo_tag 2
#define google_trace_TraceContext_span_id_tag 3
#define google_trace_TraceContext_span_options_tag 4
/* Struct field encoding specification for nanopb */
extern const pb_field_t google_trace_TraceContext_fields[5];
/* Maximum encoded size of messages (where known) */
#define google_trace_TraceContext_size 32
/* Message IDs (where set with "msgid" option) */
#ifdef PB_MSGID
#define TRACE_CONTEXT_MESSAGES \
#endif
#ifdef __cplusplus
} /* extern "C" */
#endif
/* @@protoc_insertion_point(eof) */
#endif /* GRPC_CORE_EXT_CENSUS_GEN_TRACE_CONTEXT_PB_H */
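/* A small usage sketch (not part of the generated header above): encoding a
 * google_trace_TraceContext into a fixed buffer with the standard nanopb
 * output stream. The struct, field table and size constant come from the
 * header; the buffer and the field values here are illustrative only. */
#include "third_party/nanopb/pb_encode.h"
#include "src/core/ext/census/gen/trace_context.pb.h"
static bool encode_trace_context_sketch(uint8_t *buf, size_t buf_len,
                                        size_t *encoded_len) {
  google_trace_TraceContext ctx = google_trace_TraceContext_init_zero;
  ctx.has_trace_id_hi = true;
  ctx.trace_id_hi = 0x0123456789abcdefULL;
  ctx.has_trace_id_lo = true;
  ctx.trace_id_lo = 0xfedcba9876543210ULL;
  ctx.has_span_id = true;
  ctx.span_id = 1;
  /* google_trace_TraceContext_size (32) bounds the encoded size, so a buffer
     of at least that many bytes always suffices. */
  pb_ostream_t stream = pb_ostream_from_buffer(buf, buf_len);
  if (!pb_encode(&stream, google_trace_TraceContext_fields, &ctx)) {
    return false;
  }
  *encoded_len = stream.bytes_written;
  return true;
}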

@@ -24,9 +24,6 @@
void grpc_census_call_set_context(grpc_call *call, census_context *context) {
GRPC_API_TRACE("grpc_census_call_set_context(call=%p, census_context=%p)", 2,
(call, context));
if (census_enabled() == CENSUS_FEATURE_NONE) {
return;
}
if (context != NULL) {
grpc_call_context_set(call, GRPC_CONTEXT_TRACING, context, NULL);
}

@@ -1,196 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "src/core/ext/census/grpc_filter.h"
#include <stdio.h>
#include <string.h>
#include <grpc/census.h>
#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include "src/core/ext/census/census_interface.h"
#include "src/core/ext/census/census_rpc_stats.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/transport/static_metadata.h"
typedef struct call_data {
census_op_id op_id;
census_context *ctxt;
gpr_timespec start_ts;
int error;
/* recv callback */
grpc_metadata_batch *recv_initial_metadata;
grpc_closure *on_done_recv;
grpc_closure finish_recv;
} call_data;
typedef struct channel_data { uint8_t unused; } channel_data;
static void extract_and_annotate_method_tag(grpc_metadata_batch *md,
call_data *calld,
channel_data *chand) {
grpc_linked_mdelem *m;
for (m = md->list.head; m != NULL; m = m->next) {
if (grpc_slice_eq(GRPC_MDKEY(m->md), GRPC_MDSTR_PATH)) {
/* Add method tag here */
}
}
}
static void client_mutate_op(grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
if (op->send_initial_metadata) {
extract_and_annotate_method_tag(
op->payload->send_initial_metadata.send_initial_metadata, calld, chand);
}
}
static void client_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
client_mutate_op(elem, op);
grpc_call_next_op(exec_ctx, elem, op);
}
static void server_on_done_recv(grpc_exec_ctx *exec_ctx, void *ptr,
grpc_error *error) {
GPR_TIMER_BEGIN("census-server:server_on_done_recv", 0);
grpc_call_element *elem = (grpc_call_element *)ptr;
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
if (error == GRPC_ERROR_NONE) {
extract_and_annotate_method_tag(calld->recv_initial_metadata, calld, chand);
}
calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, error);
GPR_TIMER_END("census-server:server_on_done_recv", 0);
}
static void server_mutate_op(grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
call_data *calld = (call_data *)elem->call_data;
if (op->recv_initial_metadata) {
/* substitute our callback for the op callback */
calld->recv_initial_metadata =
op->payload->recv_initial_metadata.recv_initial_metadata;
calld->on_done_recv =
op->payload->recv_initial_metadata.recv_initial_metadata_ready;
op->payload->recv_initial_metadata.recv_initial_metadata_ready =
&calld->finish_recv;
}
}
static void server_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
/* TODO(ctiller): this code fails. I don't know why. I expect it's
incomplete, and someone should look at it soon.
call_data *calld = elem->call_data;
GPR_ASSERT((calld->op_id.upper != 0) || (calld->op_id.lower != 0)); */
server_mutate_op(elem, op);
grpc_call_next_op(exec_ctx, elem, op);
}
static grpc_error *client_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
call_data *d = (call_data *)elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
d->start_ts = args->start_time;
return GRPC_ERROR_NONE;
}
static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
call_data *d = (call_data *)elem->call_data;
GPR_ASSERT(d != NULL);
/* TODO(hongyu): record rpc client stats and census_rpc_end_op here */
}
static grpc_error *server_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
call_data *d = (call_data *)elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
d->start_ts = args->start_time;
/* TODO(hongyu): call census_tracing_start_op here. */
GRPC_CLOSURE_INIT(&d->finish_recv, server_on_done_recv, elem,
grpc_schedule_on_exec_ctx);
return GRPC_ERROR_NONE;
}
static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
call_data *d = (call_data *)elem->call_data;
GPR_ASSERT(d != NULL);
/* TODO(hongyu): record rpc server stats and census_tracing_end_op here */
}
static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
channel_data *chand = (channel_data *)elem->channel_data;
GPR_ASSERT(chand != NULL);
return GRPC_ERROR_NONE;
}
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
channel_data *chand = (channel_data *)elem->channel_data;
GPR_ASSERT(chand != NULL);
}
const grpc_channel_filter grpc_client_census_filter = {
client_start_transport_op,
grpc_channel_next_op,
sizeof(call_data),
client_init_call_elem,
grpc_call_stack_ignore_set_pollset_or_pollset_set,
client_destroy_call_elem,
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
grpc_channel_next_get_info,
"census-client"};
const grpc_channel_filter grpc_server_census_filter = {
server_start_transport_op,
grpc_channel_next_op,
sizeof(call_data),
server_init_call_elem,
grpc_call_stack_ignore_set_pollset_or_pollset_set,
server_destroy_call_elem,
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
grpc_channel_next_get_info,
"census-server"};

@@ -1,70 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/port_platform.h>
#include <limits.h>
#include <string.h>
#include <grpc/census.h>
#include "src/core/ext/census/grpc_filter.h"
#include "src/core/lib/channel/channel_stack_builder.h"
#include "src/core/lib/surface/channel_init.h"
static bool is_census_enabled(const grpc_channel_args *a) {
size_t i;
if (a == NULL) return 0;
for (i = 0; i < a->num_args; i++) {
if (0 == strcmp(a->args[i].key, GRPC_ARG_ENABLE_CENSUS)) {
return a->args[i].value.integer != 0 && census_enabled();
}
}
return census_enabled() && !grpc_channel_args_want_minimal_stack(a);
}
static bool maybe_add_census_filter(grpc_exec_ctx *exec_ctx,
grpc_channel_stack_builder *builder,
void *arg) {
const grpc_channel_args *args =
grpc_channel_stack_builder_get_channel_arguments(builder);
if (is_census_enabled(args)) {
return grpc_channel_stack_builder_prepend_filter(
builder, (const grpc_channel_filter *)arg, NULL, NULL);
}
return true;
}
extern "C" void census_grpc_plugin_init(void) {
/* Only initialize census if no one else has and some features are
* available. */
if (census_enabled() == CENSUS_FEATURE_NONE &&
census_supported() != CENSUS_FEATURE_NONE) {
if (census_initialize(census_supported())) { /* enable all features. */
gpr_log(GPR_ERROR, "Could not initialize census.");
}
}
grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MAX,
maybe_add_census_filter,
(void *)&grpc_client_census_filter);
grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX,
maybe_add_census_filter,
(void *)&grpc_server_census_filter);
}
extern "C" void census_grpc_plugin_shutdown(void) { census_shutdown(); }

@@ -1,288 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "src/core/ext/census/hash_table.h"
#include <stddef.h>
#include <stdio.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#define CENSUS_HT_NUM_BUCKETS 1999
/* A single hash table data entry */
typedef struct ht_entry {
census_ht_key key;
void *data;
struct ht_entry *next;
} ht_entry;
/* hash table bucket */
typedef struct bucket {
/* NULL if bucket is empty */
ht_entry *next;
/* -1 if all buckets are empty. */
int32_t prev_non_empty_bucket;
/* -1 if all buckets are empty. */
int32_t next_non_empty_bucket;
} bucket;
struct unresizable_hash_table {
/* Number of entries in the table */
size_t size;
/* Number of buckets */
uint32_t num_buckets;
/* Array of buckets initialized at creation time. Memory consumption is
16 bytes per bucket on a 64-bit platform. */
bucket *buckets;
/* Index of the first non-empty bucket. -1 iff size == 0. */
int32_t first_non_empty_bucket;
/* Index of the last non_empty bucket. -1 iff size == 0. */
int32_t last_non_empty_bucket;
/* Immutable options of this hash table, initialized at creation time. */
census_ht_option options;
};
typedef struct entry_locator {
int32_t bucket_idx;
int is_first_in_chain;
int found;
ht_entry *prev_entry;
} entry_locator;
/* Asserts if option is not valid. */
void check_options(const census_ht_option *option) {
GPR_ASSERT(option != NULL);
GPR_ASSERT(option->num_buckets > 0);
GPR_ASSERT(option->key_type == CENSUS_HT_UINT64 ||
option->key_type == CENSUS_HT_POINTER);
if (option->key_type == CENSUS_HT_UINT64) {
GPR_ASSERT(option->hash == NULL);
} else if (option->key_type == CENSUS_HT_POINTER) {
GPR_ASSERT(option->hash != NULL);
GPR_ASSERT(option->compare_keys != NULL);
}
}
#define REMOVE_NEXT(options, ptr) \
do { \
ht_entry *tmp = (ptr)->next; \
(ptr)->next = tmp->next; \
delete_entry(options, tmp); \
} while (0)
static void delete_entry(const census_ht_option *opt, ht_entry *p) {
if (opt->delete_data != NULL) {
opt->delete_data(p->data);
}
if (opt->delete_key != NULL) {
opt->delete_key(p->key.ptr);
}
gpr_free(p);
}
static uint64_t hash(const census_ht_option *opt, census_ht_key key) {
return opt->key_type == CENSUS_HT_UINT64 ? key.val : opt->hash(key.ptr);
}
census_ht *census_ht_create(const census_ht_option *option) {
int i;
census_ht *ret = NULL;
check_options(option);
ret = (census_ht *)gpr_malloc(sizeof(census_ht));
ret->size = 0;
ret->num_buckets = option->num_buckets;
ret->buckets = (bucket *)gpr_malloc(sizeof(bucket) * ret->num_buckets);
ret->options = *option;
/* initialize each bucket */
for (i = 0; i < ret->options.num_buckets; i++) {
ret->buckets[i].prev_non_empty_bucket = -1;
ret->buckets[i].next_non_empty_bucket = -1;
ret->buckets[i].next = NULL;
}
return ret;
}
static int32_t find_bucket_idx(const census_ht *ht, census_ht_key key) {
return hash(&ht->options, key) % ht->num_buckets;
}
static int keys_match(const census_ht_option *opt, const ht_entry *p,
const census_ht_key key) {
GPR_ASSERT(opt->key_type == CENSUS_HT_UINT64 ||
opt->key_type == CENSUS_HT_POINTER);
if (opt->key_type == CENSUS_HT_UINT64) return p->key.val == key.val;
return !opt->compare_keys((p->key).ptr, key.ptr);
}
static entry_locator ht_find(const census_ht *ht, census_ht_key key) {
entry_locator loc = {0, 0, 0, NULL};
int32_t idx = 0;
ht_entry *ptr = NULL;
GPR_ASSERT(ht != NULL);
idx = find_bucket_idx(ht, key);
ptr = ht->buckets[idx].next;
if (ptr == NULL) {
/* bucket is empty */
return loc;
}
if (keys_match(&ht->options, ptr, key)) {
loc.bucket_idx = idx;
loc.is_first_in_chain = 1;
loc.found = 1;
return loc;
} else {
for (; ptr->next != NULL; ptr = ptr->next) {
if (keys_match(&ht->options, ptr->next, key)) {
loc.bucket_idx = idx;
loc.is_first_in_chain = 0;
loc.found = 1;
loc.prev_entry = ptr;
return loc;
}
}
}
/* Could not find the key */
return loc;
}
void *census_ht_find(const census_ht *ht, census_ht_key key) {
entry_locator loc = ht_find(ht, key);
if (loc.found == 0) {
return NULL;
}
return loc.is_first_in_chain ? ht->buckets[loc.bucket_idx].next->data
: loc.prev_entry->next->data;
}
void census_ht_insert(census_ht *ht, census_ht_key key, void *data) {
int32_t idx = find_bucket_idx(ht, key);
ht_entry *ptr = NULL;
entry_locator loc = ht_find(ht, key);
if (loc.found) {
/* Replace old value with new value. */
ptr = loc.is_first_in_chain ? ht->buckets[loc.bucket_idx].next
: loc.prev_entry->next;
if (ht->options.delete_data != NULL) {
ht->options.delete_data(ptr->data);
}
ptr->data = data;
return;
}
/* first entry in the table. */
if (ht->size == 0) {
ht->buckets[idx].next_non_empty_bucket = -1;
ht->buckets[idx].prev_non_empty_bucket = -1;
ht->first_non_empty_bucket = idx;
ht->last_non_empty_bucket = idx;
} else if (ht->buckets[idx].next == NULL) {
/* first entry in the bucket. */
ht->buckets[ht->last_non_empty_bucket].next_non_empty_bucket = idx;
ht->buckets[idx].prev_non_empty_bucket = ht->last_non_empty_bucket;
ht->buckets[idx].next_non_empty_bucket = -1;
ht->last_non_empty_bucket = idx;
}
ptr = (ht_entry *)gpr_malloc(sizeof(ht_entry));
ptr->key = key;
ptr->data = data;
ptr->next = ht->buckets[idx].next;
ht->buckets[idx].next = ptr;
ht->size++;
}
void census_ht_erase(census_ht *ht, census_ht_key key) {
entry_locator loc = ht_find(ht, key);
if (loc.found == 0) {
/* noop if not found */
return;
}
ht->size--;
if (loc.is_first_in_chain) {
bucket *b = &ht->buckets[loc.bucket_idx];
GPR_ASSERT(b->next != NULL);
/* The only entry in the bucket */
if (b->next->next == NULL) {
int prev = b->prev_non_empty_bucket;
int next = b->next_non_empty_bucket;
if (prev != -1) {
ht->buckets[prev].next_non_empty_bucket = next;
} else {
ht->first_non_empty_bucket = next;
}
if (next != -1) {
ht->buckets[next].prev_non_empty_bucket = prev;
} else {
ht->last_non_empty_bucket = prev;
}
}
REMOVE_NEXT(&ht->options, b);
} else {
GPR_ASSERT(loc.prev_entry->next != NULL);
REMOVE_NEXT(&ht->options, loc.prev_entry);
}
}
/* Returns NULL if input table is empty. */
census_ht_kv *census_ht_get_all_elements(const census_ht *ht, size_t *num) {
census_ht_kv *ret = NULL;
int i = 0;
int32_t idx = -1;
GPR_ASSERT(ht != NULL && num != NULL);
*num = ht->size;
if (*num == 0) {
return NULL;
}
ret = (census_ht_kv *)gpr_malloc(sizeof(census_ht_kv) * ht->size);
idx = ht->first_non_empty_bucket;
while (idx >= 0) {
ht_entry *ptr = ht->buckets[idx].next;
for (; ptr != NULL; ptr = ptr->next) {
ret[i].k = ptr->key;
ret[i].v = ptr->data;
i++;
}
idx = ht->buckets[idx].next_non_empty_bucket;
}
return ret;
}
static void ht_delete_entry_chain(const census_ht_option *options,
ht_entry *first) {
if (first == NULL) {
return;
}
if (first->next != NULL) {
ht_delete_entry_chain(options, first->next);
}
delete_entry(options, first);
}
void census_ht_destroy(census_ht *ht) {
unsigned i;
for (i = 0; i < ht->num_buckets; ++i) {
ht_delete_entry_chain(&ht->options, ht->buckets[i].next);
}
gpr_free(ht->buckets);
gpr_free(ht);
}
size_t census_ht_get_size(const census_ht *ht) { return ht->size; }

@@ -1,124 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_CENSUS_HASH_TABLE_H
#define GRPC_CORE_EXT_CENSUS_HASH_TABLE_H
#include <stddef.h>
#include <grpc/support/port_platform.h>
#ifdef __cplusplus
extern "C" {
#endif
/* A chain-based hash table with a fixed number of buckets.
   You probably shouldn't use this code directly. It is implemented for the
   census trace store and stats store, where the number of entries in the
   table is on the scale of up to several thousand, entries are added and
   removed very frequently (~100k/s), and find() is called roughly several
   times as often as insert() and erase(). Compared to find(), the insert(),
   erase() and get_all_elements() operations are much less frequent (<1/s).
   Per-bucket memory overhead is about (8 + sizeof(intptr_t)) bytes.
   Per-entry memory overhead is about (8 + 2 * sizeof(intptr_t)) bytes.
   None of the functions are thread-safe. Synchronization is provided by the
   upper layer (the trace store and stats store).
*/
/* Opaque hash table struct */
typedef struct unresizable_hash_table census_ht;
/* Currently, the hash table can take two types of keys (uint64 for the trace
   store and const char* for the stats store). */
typedef union {
uint64_t val;
void *ptr;
} census_ht_key;
typedef enum census_ht_key_type {
CENSUS_HT_UINT64 = 0,
CENSUS_HT_POINTER = 1
} census_ht_key_type;
typedef struct census_ht_option {
/* Type of hash key */
census_ht_key_type key_type;
/* Desired number of buckets, preferably a prime number */
int32_t num_buckets;
/* Function to calculate a uint64 hash value of the key. Only takes effect if
   key_type is POINTER. */
uint64_t (*hash)(const void *);
/* Function to compare two keys, returns 0 iff equal. Only takes effect if
key_type is POINTER */
int (*compare_keys)(const void *k1, const void *k2);
/* Value deleter. NULL if no specialized delete function is needed. */
void (*delete_data)(void *);
/* Key deleter. NULL if table does not own the key. (e.g. key is part of the
value or key is not owned by the table.) */
void (*delete_key)(void *);
} census_ht_option;
/* Creates a hash table with a fixed number of buckets according to the
   settings specified in the 'options' arg. Function pointers "hash" and
   "compare_keys" must be provided if key_type is POINTER. Asserts if creation
   fails. */
census_ht *census_ht_create(const census_ht_option *options);
/* Deletes hash table instance. Frees all dynamic memory owned by ht.*/
void census_ht_destroy(census_ht *ht);
/* Inserts the input key-val pair into hash_table. If an entry with the same key
exists in the table, the corresponding value will be overwritten by the input
val. */
void census_ht_insert(census_ht *ht, census_ht_key key, void *val);
/* Returns pointer to data, returns NULL if not found. */
void *census_ht_find(const census_ht *ht, census_ht_key key);
/* Erase hash table entry with input key. Noop if key is not found. */
void census_ht_erase(census_ht *ht, census_ht_key key);
typedef struct census_ht_kv {
census_ht_key k;
void *v;
} census_ht_kv;
/* Returns an array of pointers to all values in the hash table. Order of the
elements can be arbitrary. Sets 'num' to the size of returned array. Caller
owns returned array. */
census_ht_kv *census_ht_get_all_elements(const census_ht *ht, size_t *num);
/* Returns number of elements kept. */
size_t census_ht_get_size(const census_ht *ht);
/* Functor applied on each key-value pair while iterating through entries in the
table. The functor should not mutate data. */
typedef void (*census_ht_itr_cb)(census_ht_key key, const void *val_ptr,
void *state);
/* Iterates through all key-value pairs in the hash_table. The callback function
should not invalidate data entries. */
uint64_t census_ht_for_all(const census_ht *ht, census_ht_itr_cb);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_CENSUS_HASH_TABLE_H */
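/* A brief usage sketch for the API above, using the uint64 key flavor; the
 * option values and the inserted payload are illustrative only. */
#include <grpc/support/alloc.h>
#include "src/core/ext/census/hash_table.h"
static void hash_table_usage_sketch(void) {
  census_ht_option option;
  option.key_type = CENSUS_HT_UINT64;
  option.num_buckets = 1999;     /* preferably prime, per the comment above */
  option.hash = NULL;            /* unused for CENSUS_HT_UINT64 keys */
  option.compare_keys = NULL;    /* likewise unused for uint64 keys */
  option.delete_data = gpr_free; /* table owns the values */
  option.delete_key = NULL;      /* uint64 keys are not owned */
  census_ht *ht = census_ht_create(&option);
  census_ht_key key;
  key.val = 42;
  census_ht_insert(ht, key, gpr_malloc(16));
  void *value = census_ht_find(ht, key); /* NULL if the key is absent */
  (void)value;
  census_ht_erase(ht, key); /* frees the value via delete_data */
  census_ht_destroy(ht);    /* frees any remaining entries and the table */
}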

@@ -1,51 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/census.h>
#include "src/core/ext/census/base_resources.h"
#include "src/core/ext/census/resource.h"
static int features_enabled = CENSUS_FEATURE_NONE;
int census_initialize(int features) {
if (features_enabled != CENSUS_FEATURE_NONE) {
// Must have been a previous call to census_initialize; return error
return -1;
}
features_enabled = features & CENSUS_FEATURE_ALL;
if (features & CENSUS_FEATURE_STATS) {
initialize_resources();
define_base_resources();
}
return features_enabled;
}
void census_shutdown(void) {
if (features_enabled & CENSUS_FEATURE_STATS) {
shutdown_resources();
}
features_enabled = CENSUS_FEATURE_NONE;
}
int census_supported(void) {
/* TODO(aveitch): improve this as we implement features... */
return CENSUS_FEATURE_NONE;
}
int census_enabled(void) { return features_enabled; }

@@ -1,305 +0,0 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "src/core/ext/census/intrusive_hash_map.h"
#include <string.h>
extern bool hm_index_compare(const hm_index *A, const hm_index *B);
/* Simple hashing function that takes lower 32 bits. */
static __inline uint32_t chunked_vector_hasher(uint64_t key) {
return (uint32_t)key;
}
/* Vector chunks are 1MiB divided by pointer size. */
static const size_t VECTOR_CHUNK_SIZE = (1 << 20) / sizeof(void *);
/* Helper functions which return buckets from the chunked vector. */
static __inline void **get_mutable_bucket(const chunked_vector *buckets,
uint32_t index) {
if (index < VECTOR_CHUNK_SIZE) {
return &buckets->first_[index];
}
size_t rest_index = (index - VECTOR_CHUNK_SIZE) / VECTOR_CHUNK_SIZE;
return &buckets->rest_[rest_index][index % VECTOR_CHUNK_SIZE];
}
static __inline void *get_bucket(const chunked_vector *buckets,
uint32_t index) {
if (index < VECTOR_CHUNK_SIZE) {
return buckets->first_[index];
}
size_t rest_index = (index - VECTOR_CHUNK_SIZE) / VECTOR_CHUNK_SIZE;
return buckets->rest_[rest_index][index % VECTOR_CHUNK_SIZE];
}
/* Helper function. */
static __inline size_t RestSize(const chunked_vector *vec) {
return (vec->size_ <= VECTOR_CHUNK_SIZE)
? 0
: (vec->size_ - VECTOR_CHUNK_SIZE - 1) / VECTOR_CHUNK_SIZE + 1;
}
/* Initialize chunked vector to size of 0. */
static void chunked_vector_init(chunked_vector *vec) {
vec->size_ = 0;
vec->first_ = NULL;
vec->rest_ = NULL;
}
/* Clear chunked vector and free all memory that has been allocated then
initialize chunked vector. */
static void chunked_vector_clear(chunked_vector *vec) {
if (vec->first_ != NULL) {
gpr_free(vec->first_);
}
if (vec->rest_ != NULL) {
size_t rest_size = RestSize(vec);
for (size_t i = 0; i < rest_size; ++i) {
if (vec->rest_[i] != NULL) {
gpr_free(vec->rest_[i]);
}
}
gpr_free(vec->rest_);
}
chunked_vector_init(vec);
}
/* Clear chunked vector and then resize it to n entries. Allow the first 1MB to
be read w/o an extra cache miss. The rest of the elements are stored in an
array of arrays to avoid large mallocs. */
static void chunked_vector_reset(chunked_vector *vec, size_t n) {
chunked_vector_clear(vec);
vec->size_ = n;
if (n <= VECTOR_CHUNK_SIZE) {
vec->first_ = (void **)gpr_malloc(sizeof(void *) * n);
memset(vec->first_, 0, sizeof(void *) * n);
} else {
vec->first_ = (void **)gpr_malloc(sizeof(void *) * VECTOR_CHUNK_SIZE);
memset(vec->first_, 0, sizeof(void *) * VECTOR_CHUNK_SIZE);
size_t rest_size = RestSize(vec);
vec->rest_ = (void ***)gpr_malloc(sizeof(void **) * rest_size);
memset(vec->rest_, 0, sizeof(void **) * rest_size);
int i = 0;
n -= VECTOR_CHUNK_SIZE;
while (n > 0) {
size_t this_size = GPR_MIN(n, VECTOR_CHUNK_SIZE);
vec->rest_[i] = (void **)gpr_malloc(sizeof(void *) * this_size);
memset(vec->rest_[i], 0, sizeof(void *) * this_size);
n -= this_size;
++i;
}
}
}
void intrusive_hash_map_init(intrusive_hash_map *hash_map,
uint32_t initial_log2_table_size) {
hash_map->log2_num_buckets = initial_log2_table_size;
hash_map->num_items = 0;
uint32_t num_buckets = (uint32_t)1 << hash_map->log2_num_buckets;
hash_map->extend_threshold = num_buckets >> 1;
chunked_vector_init(&hash_map->buckets);
chunked_vector_reset(&hash_map->buckets, num_buckets);
hash_map->hash_mask = num_buckets - 1;
}
bool intrusive_hash_map_empty(const intrusive_hash_map *hash_map) {
return hash_map->num_items == 0;
}
size_t intrusive_hash_map_size(const intrusive_hash_map *hash_map) {
return hash_map->num_items;
}
void intrusive_hash_map_end(const intrusive_hash_map *hash_map, hm_index *idx) {
idx->bucket_index = (uint32_t)hash_map->buckets.size_;
GPR_ASSERT(idx->bucket_index <= UINT32_MAX);
idx->item = NULL;
}
void intrusive_hash_map_next(const intrusive_hash_map *hash_map,
hm_index *idx) {
idx->item = idx->item->hash_link;
while (idx->item == NULL) {
idx->bucket_index++;
if (idx->bucket_index >= hash_map->buckets.size_) {
/* Reached end of table. */
idx->item = NULL;
return;
}
idx->item = (hm_item *)get_bucket(&hash_map->buckets, idx->bucket_index);
}
}
void intrusive_hash_map_begin(const intrusive_hash_map *hash_map,
hm_index *idx) {
for (uint32_t i = 0; i < hash_map->buckets.size_; ++i) {
if (get_bucket(&hash_map->buckets, i) != NULL) {
idx->bucket_index = i;
idx->item = (hm_item *)get_bucket(&hash_map->buckets, i);
return;
}
}
intrusive_hash_map_end(hash_map, idx);
}
hm_item *intrusive_hash_map_find(const intrusive_hash_map *hash_map,
uint64_t key) {
uint32_t index = chunked_vector_hasher(key) & hash_map->hash_mask;
hm_item *p = (hm_item *)get_bucket(&hash_map->buckets, index);
while (p != NULL) {
if (key == p->key) {
return p;
}
p = p->hash_link;
}
return NULL;
}
hm_item *intrusive_hash_map_erase(intrusive_hash_map *hash_map, uint64_t key) {
uint32_t index = chunked_vector_hasher(key) & hash_map->hash_mask;
hm_item **slot = (hm_item **)get_mutable_bucket(&hash_map->buckets, index);
hm_item *p = *slot;
if (p == NULL) {
return NULL;
}
if (key == p->key) {
*slot = p->hash_link;
p->hash_link = NULL;
hash_map->num_items--;
return p;
}
hm_item *prev = p;
p = p->hash_link;
while (p) {
if (key == p->key) {
prev->hash_link = p->hash_link;
p->hash_link = NULL;
hash_map->num_items--;
return p;
}
prev = p;
p = p->hash_link;
}
return NULL;
}
/* Insert an hm_item* into the underlying chunked vector. hash_mask is
* array_size-1. Returns true if it is a new hm_item and false if the hm_item
* already existed.
*/
static __inline bool intrusive_hash_map_internal_insert(chunked_vector *buckets,
uint32_t hash_mask,
hm_item *item) {
const uint64_t key = item->key;
uint32_t index = chunked_vector_hasher(key) & hash_mask;
hm_item **slot = (hm_item **)get_mutable_bucket(buckets, index);
hm_item *p = *slot;
item->hash_link = p;
/* Check to see if key already exists. */
while (p) {
if (p->key == key) {
return false;
}
p = p->hash_link;
}
/* Otherwise add new entry. */
*slot = item;
return true;
}
/* Extend the allocated number of elements in the hash map by a factor of 2. */
void intrusive_hash_map_extend(intrusive_hash_map *hash_map) {
uint32_t new_log2_num_buckets = 1 + hash_map->log2_num_buckets;
uint32_t new_num_buckets = (uint32_t)1 << new_log2_num_buckets;
GPR_ASSERT(new_num_buckets <= UINT32_MAX && new_num_buckets > 0);
chunked_vector new_buckets;
chunked_vector_init(&new_buckets);
chunked_vector_reset(&new_buckets, new_num_buckets);
uint32_t new_hash_mask = new_num_buckets - 1;
hm_index cur_idx;
hm_index end_idx;
intrusive_hash_map_end(hash_map, &end_idx);
intrusive_hash_map_begin(hash_map, &cur_idx);
while (!hm_index_compare(&cur_idx, &end_idx)) {
hm_item *new_item = cur_idx.item;
intrusive_hash_map_next(hash_map, &cur_idx);
intrusive_hash_map_internal_insert(&new_buckets, new_hash_mask, new_item);
}
/* Set values for new chunked_vector. extend_threshold is set to half of
* new_num_buckets. */
hash_map->log2_num_buckets = new_log2_num_buckets;
chunked_vector_clear(&hash_map->buckets);
hash_map->buckets = new_buckets;
hash_map->hash_mask = new_hash_mask;
hash_map->extend_threshold = new_num_buckets >> 1;
}
/* Insert an hm_item. The hm_item must remain live until it is removed from the
   table. The table does not take ownership of the hm_item; the caller must
   remove the hm_item from the table and delete it before the table itself is
   deleted. If the hm_item already exists, num_items is not changed. */
bool intrusive_hash_map_insert(intrusive_hash_map *hash_map, hm_item *item) {
if (hash_map->num_items >= hash_map->extend_threshold) {
intrusive_hash_map_extend(hash_map);
}
if (intrusive_hash_map_internal_insert(&hash_map->buckets,
hash_map->hash_mask, item)) {
hash_map->num_items++;
return true;
}
return false;
}
void intrusive_hash_map_clear(intrusive_hash_map *hash_map,
void (*free_object)(void *)) {
hm_index cur;
hm_index end;
intrusive_hash_map_end(hash_map, &end);
intrusive_hash_map_begin(hash_map, &cur);
while (!hm_index_compare(&cur, &end)) {
hm_index next = cur;
intrusive_hash_map_next(hash_map, &next);
if (cur.item != NULL) {
hm_item *item = intrusive_hash_map_erase(hash_map, cur.item->key);
(*free_object)((void *)item);
gpr_free(item);
}
cur = next;
}
}
void intrusive_hash_map_free(intrusive_hash_map *hash_map,
void (*free_object)(void *)) {
intrusive_hash_map_clear(hash_map, (*free_object));
hash_map->num_items = 0;
hash_map->extend_threshold = 0;
hash_map->log2_num_buckets = 0;
hash_map->hash_mask = 0;
chunked_vector_clear(&hash_map->buckets);
}

@@ -1,160 +0,0 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_H
#define GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_H
#include "src/core/ext/census/intrusive_hash_map_internal.h"
#ifdef __cplusplus
extern "C" {
#endif
/* intrusive_hash_map is a fast chained hash table. This hash map is faster than
* a dense hash map when the application calls insert and erase more often than
* find. When the workload is dominated by find() a dense hash map may be
* faster.
*
* intrusive_hash_map uses an intrusive header placed within a user defined
* struct. The header field IHM_key MUST be set to a valid value before
* insertion into the hash map or undefined behavior may occur. The header field
 * IHM_hash_link MUST be set to NULL initially.
*
* EXAMPLE USAGE:
*
* typedef struct string_item {
* INTRUSIVE_HASH_MAP_HEADER;
* // User data.
* char *str_buf;
* uint16_t len;
* } string_item;
*
* static string_item *make_string_item(uint64_t key, const char *buf,
* uint16_t len) {
* string_item *item = (string_item *)gpr_malloc(sizeof(string_item));
* item->IHM_key = key;
* item->IHM_hash_link = NULL;
* item->len = len;
* item->str_buf = (char *)malloc(len);
* memcpy(item->str_buf, buf, len);
* return item;
* }
*
* intrusive_hash_map hash_map;
* intrusive_hash_map_init(&hash_map, 4);
* string_item *new_item1 = make_string_item(10, "test1", 5);
* bool ok = intrusive_hash_map_insert(&hash_map, (hm_item *)new_item1);
*
* string_item *item1 =
* (string_item *)intrusive_hash_map_find(&hash_map, 10);
*/
/* Hash map item. Stores key and a pointer to the actual object. A user defined
* version of this can be passed in provided the first 2 entries (key and
* hash_link) are the same. These entries must be first in the user defined
* struct. Pointer to struct will need to be cast as (hm_item *) when passed to
* hash map. This allows it to be intrusive. */
typedef struct hm_item {
uint64_t key;
struct hm_item *hash_link;
/* Optional user defined data after this. */
} hm_item;
/* Macro provided for ease of use. This must be first in the user defined
* struct (i.e. uint64_t key and hm_item * must be the first two elements in
* that order). */
#define INTRUSIVE_HASH_MAP_HEADER \
uint64_t IHM_key; \
struct hm_item *IHM_hash_link
/* Index struct which acts as a pseudo-iterator within the hash map. */
typedef struct hm_index {
uint32_t bucket_index; // hash map bucket index.
hm_item *item; // Pointer to hm_item within the hash map.
} hm_index;
/* Returns true if two hm_indices point to the same object within the hash map
* and false otherwise. */
__inline bool hm_index_compare(const hm_index *A, const hm_index *B) {
return (A->item == B->item && A->bucket_index == B->bucket_index);
}
/*
* Helper functions for iterating over the hash map.
*/
/* On return idx will contain an invalid index which is always equal to
* hash_map->buckets.size_ */
void intrusive_hash_map_end(const intrusive_hash_map *hash_map, hm_index *idx);
/* Iterates index to the next valid entry in the hash map and stores the
* index within idx. If end of table is reached, idx will contain the same
* values as if intrusive_hash_map_end() was called. */
void intrusive_hash_map_next(const intrusive_hash_map *hash_map, hm_index *idx);
/* On return, idx will contain the index of the first non-null entry in the hash
* map. If the hash map is empty, idx will contain the same values as if
* intrusive_hash_map_end() was called. */
void intrusive_hash_map_begin(const intrusive_hash_map *hash_map,
hm_index *idx);
/* Initialize intrusive hash map data structure. This must be called before
* the hash map can be used. The initial size of an intrusive hash map will be
* 2^initial_log2_map_size (valid range is [0, 31]). */
void intrusive_hash_map_init(intrusive_hash_map *hash_map,
uint32_t initial_log2_map_size);
/* Returns true if the hash map is empty and false otherwise. */
bool intrusive_hash_map_empty(const intrusive_hash_map *hash_map);
/* Returns the number of elements currently in the hash map. */
size_t intrusive_hash_map_size(const intrusive_hash_map *hash_map);
/* Find a hm_item within the hash map by key. Returns NULL if item was not
* found. */
hm_item *intrusive_hash_map_find(const intrusive_hash_map *hash_map,
uint64_t key);
/* Erase the hm_item that corresponds with key. If the hm_item is found, return
* the pointer to the hm_item. Else returns NULL. */
hm_item *intrusive_hash_map_erase(intrusive_hash_map *hash_map, uint64_t key);
/* Attempts to insert a new hm_item into the hash map. If an element with the
* same key already exists, it will not insert the new item and return false.
* Otherwise, it will insert the new item and return true. */
bool intrusive_hash_map_insert(intrusive_hash_map *hash_map, hm_item *item);
/* Clears entire contents of the hash map, but leaves internal data structure
* untouched. Second argument takes a function pointer to a function that will
* free the object designated by the user and pointed to by hash_map->value. */
void intrusive_hash_map_clear(intrusive_hash_map *hash_map,
void (*free_object)(void *));
/* Erase all contents of hash map and free the memory. Hash map is invalid
* after calling this function and cannot be used until it has been
* reinitialized (intrusive_hash_map_init()). This function takes a function
* pointer to a function that will free the object designated by the user and
* pointed to by hash_map->value. */
void intrusive_hash_map_free(intrusive_hash_map *hash_map,
void (*free_object)(void *));
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_H */
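/* A short iteration sketch over the header above. The string_item type is the
 * hypothetical one from the EXAMPLE USAGE comment, repeated here so the
 * snippet stands alone; items are treated as read-only while iterating. */
#include "src/core/ext/census/intrusive_hash_map.h"
typedef struct string_item {
  INTRUSIVE_HASH_MAP_HEADER;
  /* User data. */
  char *str_buf;
  uint16_t len;
} string_item;
static void intrusive_hash_map_iterate_sketch(const intrusive_hash_map *map) {
  hm_index it;
  hm_index end;
  intrusive_hash_map_end(map, &end);
  for (intrusive_hash_map_begin(map, &it); !hm_index_compare(&it, &end);
       intrusive_hash_map_next(map, &it)) {
    /* it.item points at the intrusive header embedded in the stored object. */
    string_item *item = (string_item *)it.item;
    (void)item; /* read-only use of the item while iterating */
  }
}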

@@ -1,48 +0,0 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_INTERNAL_H
#define GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_INTERNAL_H
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/useful.h>
#include <stdbool.h>
/* The chunked vector is a data structure that allocates buckets for use in the
* hash map. ChunkedVector is logically equivalent to T*[N] (cast void* as
* T*). It's internally implemented as an array of 1MB arrays to avoid
* allocating large consecutive memory chunks. This is an internal data
* structure that should never be accessed directly. */
typedef struct chunked_vector {
size_t size_;
void **first_;
void ***rest_;
} chunked_vector;
/* Core intrusive hash map data structure. All internal elements are managed by
* functions and should not be altered manually. */
typedef struct intrusive_hash_map {
uint32_t num_items;
uint32_t extend_threshold;
uint32_t log2_num_buckets;
uint32_t hash_mask;
chunked_vector buckets;
} intrusive_hash_map;
#endif /* GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_INTERNAL_H */

@@ -1,586 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Implements an efficient in-memory log, optimized for multiple writers and
// a single reader. Available log space is divided up into blocks of
// CENSUS_LOG_MAX_RECORD_SIZE bytes. A block can be in one of the following
// three data structures:
// - Free blocks (free_block_list)
// - Blocks with unread data (dirty_block_list)
// - Blocks currently attached to cores (core_local_blocks[])
//
// census_log_start_write() moves a block from core_local_blocks[] to the end
// of dirty_block_list when the block:
// - is out-of-space OR
// - has an incomplete record (an incomplete record occurs when a thread calls
//   census_log_start_write() and is context-switched before calling
//   census_log_end_write()).
// So, blocks in dirty_block_list are ordered, from oldest to newest, by the
// time at which the block was detached from its core.
//
// census_log_read_next() first iterates over dirty_block_list and then
// core_local_blocks[]. It moves completely read blocks from dirty_block_list
// to free_block_list. Blocks in core_local_blocks[] are not freed, even when
// completely read.
//
// If the log is configured to discard old records and free_block_list is empty,
// census_log_start_write() iterates over dirty_block_list to allocate a
// new block. It moves the oldest available block (no pending read/write) to
// core_local_blocks[].
//
// core_local_block_struct is used to implement a map from core id to the block
// associated with that core. This mapping is advisory. It is possible that the
// block returned by this mapping is no longer associated with that core. This
// mapping is updated, lazily, by census_log_start_write().
//
// Locking in block struct:
//
// Exclusive g_log.lock must be held before calling any functions operating on
// block structs except census_log_start_write() and census_log_end_write().
//
// Writes to a block are serialized via writer_lock. census_log_start_write()
// acquires this lock and census_log_end_write() releases it. On failure to
// acquire the lock, writer allocates a new block for the current core and
// updates core_local_block accordingly.
//
// Simultaneous read and write access is allowed. Readers can safely read up to
// committed bytes (bytes_committed).
//
// reader_lock protects the block currently being read from being recycled.
// start_read() acquires reader_lock and end_read() releases it.
//
// Read/write access to a block is disabled via try_disable_access(). It returns
// with both writer_lock and reader_lock held. These locks are subsequently
// released by enable_access() to enable access to the block.
//
// A note on naming: most function/struct names are prefixed with cl_
// (shorthand for census_log). Further, functions that manipulate structures
// include the name of the structure, which is passed as the first argument.
// E.g. cl_block_initialize() initializes a cl_block.
#include "src/core/ext/census/mlog.h"
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/cpu.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
#include <stdbool.h>
#include <string.h>
// End of platform specific code
typedef struct census_log_block_list_struct {
struct census_log_block_list_struct* next;
struct census_log_block_list_struct* prev;
struct census_log_block* block;
} cl_block_list_struct;
typedef struct census_log_block {
// Pointer to underlying buffer.
char* buffer;
gpr_atm writer_lock;
gpr_atm reader_lock;
// Tracks the number of completely written bytes. Declared atomic because it
// is accessed simultaneously by the reader and a writer.
gpr_atm bytes_committed;
// Bytes already read.
size_t bytes_read;
// Links for list.
cl_block_list_struct link;
// We want this structure to be cacheline aligned. We assume the following
// sizes for the various parts on 32/64bit systems:
// type 32b size 64b size
// char* 4 8
// 3x gpr_atm 12 24
// size_t 4 8
// cl_block_list_struct 12 24
// TOTAL 32 64
//
// Depending on the cacheline size and the architecture, we selectively add
// char padding to this structure. The size is checked via assert in
// census_log_initialize().
#if defined(GPR_ARCH_64)
#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 64)
#else
#if defined(GPR_ARCH_32)
#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 32)
#else
#error "Unknown architecture"
#endif
#endif
#if CL_BLOCK_PAD_SIZE > 0
char padding[CL_BLOCK_PAD_SIZE];
#endif
} cl_block;
// A list of cl_blocks, doubly-linked through cl_block::link.
typedef struct census_log_block_list {
int32_t count; // Number of items in list.
cl_block_list_struct ht; // head/tail of linked list.
} cl_block_list;
// Cacheline-aligned block pointers to avoid false sharing. The block pointer
// must be initialized via set_block() before calling other functions.
typedef struct census_log_core_local_block {
gpr_atm block;
// Ensure cacheline alignment: we assume sizeof(gpr_atm) == 4 or 8
#if defined(GPR_ARCH_64)
#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 8)
#else
#if defined(GPR_ARCH_32)
#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 4)
#else
#error "Unknown architecture"
#endif
#endif
#if CL_CORE_LOCAL_BLOCK_PAD_SIZE > 0
char padding[CL_CORE_LOCAL_BLOCK_PAD_SIZE];
#endif
} cl_core_local_block;
struct census_log {
int discard_old_records;
// Number of cores (aka hardware-contexts)
unsigned num_cores;
// Number of blocks in the log, each CENSUS_LOG_MAX_RECORD_SIZE bytes.
uint32_t num_blocks;
cl_block* blocks; // Block metadata.
cl_core_local_block* core_local_blocks; // Keeps core to block mappings.
gpr_mu lock;
int initialized; // has log been initialized?
// Keeps the state of the reader iterator. A value of 0 indicates that the
// iterator has reached the end. census_log_init_reader() resets the value
// to num_cores to restart iteration.
uint32_t read_iterator_state;
// Points to the block being read. If non-NULL, the block is locked for
// reading (block_being_read->reader_lock is held).
cl_block* block_being_read;
char* buffer;
cl_block_list free_block_list;
cl_block_list dirty_block_list;
gpr_atm out_of_space_count;
};
// Single internal log.
static struct census_log g_log;
// Functions that operate on an atomic memory location used as a lock.
// Returns non-zero if lock is acquired.
static int cl_try_lock(gpr_atm* lock) { return gpr_atm_acq_cas(lock, 0, 1); }
static void cl_unlock(gpr_atm* lock) { gpr_atm_rel_store(lock, 0); }
// Functions that operate on cl_core_local_block's.
static void cl_core_local_block_set_block(cl_core_local_block* clb,
cl_block* block) {
gpr_atm_rel_store(&clb->block, (gpr_atm)block);
}
static cl_block* cl_core_local_block_get_block(cl_core_local_block* clb) {
return (cl_block*)gpr_atm_acq_load(&clb->block);
}
// Functions that operate on cl_block_list_struct's.
static void cl_block_list_struct_initialize(cl_block_list_struct* bls,
cl_block* block) {
bls->next = bls->prev = bls;
bls->block = block;
}
// Functions that operate on cl_block_list's.
static void cl_block_list_initialize(cl_block_list* list) {
list->count = 0;
cl_block_list_struct_initialize(&list->ht, NULL);
}
// Returns the head of the list, or NULL if empty.
static cl_block* cl_block_list_head(cl_block_list* list) {
return list->ht.next->block;
}
// Insert element *e after *pos.
static void cl_block_list_insert(cl_block_list* list, cl_block_list_struct* pos,
cl_block_list_struct* e) {
list->count++;
e->next = pos->next;
e->prev = pos;
e->next->prev = e;
e->prev->next = e;
}
// Insert block at the head of the list
static void cl_block_list_insert_at_head(cl_block_list* list, cl_block* block) {
cl_block_list_insert(list, &list->ht, &block->link);
}
// Insert block at the tail of the list.
static void cl_block_list_insert_at_tail(cl_block_list* list, cl_block* block) {
cl_block_list_insert(list, list->ht.prev, &block->link);
}
// Removes block *b. Requires *b be in the list.
static void cl_block_list_remove(cl_block_list* list, cl_block* b) {
list->count--;
b->link.next->prev = b->link.prev;
b->link.prev->next = b->link.next;
}
// Functions that operate on cl_block's
static void cl_block_initialize(cl_block* block, char* buffer) {
block->buffer = buffer;
gpr_atm_rel_store(&block->writer_lock, 0);
gpr_atm_rel_store(&block->reader_lock, 0);
gpr_atm_rel_store(&block->bytes_committed, 0);
block->bytes_read = 0;
cl_block_list_struct_initialize(&block->link, block);
}
// Guards against exposing partially written buffer to the reader.
static void cl_block_set_bytes_committed(cl_block* block,
size_t bytes_committed) {
gpr_atm_rel_store(&block->bytes_committed, (gpr_atm)bytes_committed);
}
static size_t cl_block_get_bytes_committed(cl_block* block) {
return (size_t)gpr_atm_acq_load(&block->bytes_committed);
}
// Tries to disable future read/write access to this block. Succeeds if:
// - no in-progress write AND
// - no in-progress read AND
// - 'discard_data' is set to true OR there is no unread data
// On success, clears the block state and returns with writer_lock and
// reader_lock held. These locks are released by a subsequent
// cl_block_enable_access() call.
static bool cl_block_try_disable_access(cl_block* block, int discard_data) {
if (!cl_try_lock(&block->writer_lock)) {
return false;
}
if (!cl_try_lock(&block->reader_lock)) {
cl_unlock(&block->writer_lock);
return false;
}
if (!discard_data &&
(block->bytes_read != cl_block_get_bytes_committed(block))) {
cl_unlock(&block->reader_lock);
cl_unlock(&block->writer_lock);
return false;
}
cl_block_set_bytes_committed(block, 0);
block->bytes_read = 0;
return true;
}
static void cl_block_enable_access(cl_block* block) {
cl_unlock(&block->reader_lock);
cl_unlock(&block->writer_lock);
}
// Returns with writer_lock held.
static void* cl_block_start_write(cl_block* block, size_t size) {
if (!cl_try_lock(&block->writer_lock)) {
return NULL;
}
size_t bytes_committed = cl_block_get_bytes_committed(block);
if (bytes_committed + size > CENSUS_LOG_MAX_RECORD_SIZE) {
cl_unlock(&block->writer_lock);
return NULL;
}
return block->buffer + bytes_committed;
}
// Releases writer_lock and increments committed bytes by 'bytes_written'.
// 'bytes_written' must be <= the 'size' specified in the corresponding
// cl_block_start_write() call. This function is thread-safe.
static void cl_block_end_write(cl_block* block, size_t bytes_written) {
cl_block_set_bytes_committed(
block, cl_block_get_bytes_committed(block) + bytes_written);
cl_unlock(&block->writer_lock);
}
// Returns a pointer to the first unread byte in the buffer. The number of
// bytes available is returned in 'bytes_available'. Acquires the reader lock,
// which is released by a subsequent cl_block_end_read() call. Returns NULL if:
// - a read is in progress
// - no data is available
static void* cl_block_start_read(cl_block* block, size_t* bytes_available) {
if (!cl_try_lock(&block->reader_lock)) {
return NULL;
}
// bytes_committed may change from under us. Use bytes_available to update
// bytes_read below.
size_t bytes_committed = cl_block_get_bytes_committed(block);
GPR_ASSERT(bytes_committed >= block->bytes_read);
*bytes_available = bytes_committed - block->bytes_read;
if (*bytes_available == 0) {
cl_unlock(&block->reader_lock);
return NULL;
}
void* record = block->buffer + block->bytes_read;
block->bytes_read += *bytes_available;
return record;
}
static void cl_block_end_read(cl_block* block) {
cl_unlock(&block->reader_lock);
}
// Internal functions operating on g_log
// Allocates a new free block (or recycles an available dirty block if log is
// configured to discard old records). Returns NULL if out-of-space.
static cl_block* cl_allocate_block(void) {
cl_block* block = cl_block_list_head(&g_log.free_block_list);
if (block != NULL) {
cl_block_list_remove(&g_log.free_block_list, block);
return block;
}
if (!g_log.discard_old_records) {
// No free block and log is configured to keep old records.
return NULL;
}
// Recycle dirty block. Start from the oldest.
for (block = cl_block_list_head(&g_log.dirty_block_list); block != NULL;
block = block->link.next->block) {
if (cl_block_try_disable_access(block, 1 /* discard data */)) {
cl_block_list_remove(&g_log.dirty_block_list, block);
return block;
}
}
return NULL;
}
// Allocates a new block and updates core id => block mapping. 'old_block'
// points to the block that the caller thinks is attached to
// 'core_id'. 'old_block' may be NULL. Returns true if:
// - allocated a new block OR
// - 'core_id' => 'old_block' mapping changed (another thread allocated a
// block before lock was acquired).
static bool cl_allocate_core_local_block(uint32_t core_id,
cl_block* old_block) {
// Now that we have the lock, check if core-local mapping has changed.
cl_core_local_block* core_local_block = &g_log.core_local_blocks[core_id];
cl_block* block = cl_core_local_block_get_block(core_local_block);
if ((block != NULL) && (block != old_block)) {
return true;
}
if (block != NULL) {
cl_core_local_block_set_block(core_local_block, NULL);
cl_block_list_insert_at_tail(&g_log.dirty_block_list, block);
}
block = cl_allocate_block();
if (block == NULL) {
return false;
}
cl_core_local_block_set_block(core_local_block, block);
cl_block_enable_access(block);
return true;
}
static cl_block* cl_get_block(void* record) {
uintptr_t p = (uintptr_t)((char*)record - g_log.buffer);
uintptr_t index = p >> CENSUS_LOG_2_MAX_RECORD_SIZE;
return &g_log.blocks[index];
}
// Gets the next block to read and tries to free 'prev' block (if not NULL).
// Returns NULL if reached the end.
static cl_block* cl_next_block_to_read(cl_block* prev) {
cl_block* block = NULL;
if (g_log.read_iterator_state == g_log.num_cores) {
// We are traversing dirty list; find the next dirty block.
if (prev != NULL) {
// Try to free the previous block if there is no unread data. This block
// may have unread data if a previously incomplete record completed
// between read_next() calls.
block = prev->link.next->block;
if (cl_block_try_disable_access(prev, 0 /* do not discard data */)) {
cl_block_list_remove(&g_log.dirty_block_list, prev);
cl_block_list_insert_at_head(&g_log.free_block_list, prev);
}
} else {
block = cl_block_list_head(&g_log.dirty_block_list);
}
if (block != NULL) {
return block;
}
// We are done with the dirty list; moving on to core-local blocks.
}
while (g_log.read_iterator_state > 0) {
g_log.read_iterator_state--;
block = cl_core_local_block_get_block(
&g_log.core_local_blocks[g_log.read_iterator_state]);
if (block != NULL) {
return block;
}
}
return NULL;
}
#define CL_LOG_2_MB 20 // 2^20 = 1MB
// External functions: primary stats_log interface
void census_log_initialize(size_t size_in_mb, int discard_old_records) {
// Check cacheline alignment.
GPR_ASSERT(sizeof(cl_block) % GPR_CACHELINE_SIZE == 0);
GPR_ASSERT(sizeof(cl_core_local_block) % GPR_CACHELINE_SIZE == 0);
GPR_ASSERT(!g_log.initialized);
g_log.discard_old_records = discard_old_records;
g_log.num_cores = gpr_cpu_num_cores();
// Ensure that we will not get any overflow in calculating num_blocks.
GPR_ASSERT(CL_LOG_2_MB >= CENSUS_LOG_2_MAX_RECORD_SIZE);
GPR_ASSERT(size_in_mb < 1000);
// Ensure at least 2x as many blocks as there are cores.
g_log.num_blocks =
(uint32_t)GPR_MAX(2 * g_log.num_cores, (size_in_mb << CL_LOG_2_MB) >>
CENSUS_LOG_2_MAX_RECORD_SIZE);
gpr_mu_init(&g_log.lock);
g_log.read_iterator_state = 0;
g_log.block_being_read = NULL;
g_log.core_local_blocks = (cl_core_local_block*)gpr_malloc_aligned(
g_log.num_cores * sizeof(cl_core_local_block), GPR_CACHELINE_SIZE_LOG);
memset(g_log.core_local_blocks, 0,
g_log.num_cores * sizeof(cl_core_local_block));
g_log.blocks = (cl_block*)gpr_malloc_aligned(
g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG);
memset(g_log.blocks, 0, g_log.num_blocks * sizeof(cl_block));
g_log.buffer =
(char*)gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
memset(g_log.buffer, 0, g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
cl_block_list_initialize(&g_log.free_block_list);
cl_block_list_initialize(&g_log.dirty_block_list);
for (uint32_t i = 0; i < g_log.num_blocks; ++i) {
cl_block* block = g_log.blocks + i;
cl_block_initialize(block, g_log.buffer + (CENSUS_LOG_MAX_RECORD_SIZE * i));
cl_block_try_disable_access(block, 1 /* discard data */);
cl_block_list_insert_at_tail(&g_log.free_block_list, block);
}
gpr_atm_rel_store(&g_log.out_of_space_count, 0);
g_log.initialized = 1;
}
void census_log_shutdown(void) {
GPR_ASSERT(g_log.initialized);
gpr_mu_destroy(&g_log.lock);
gpr_free_aligned(g_log.core_local_blocks);
g_log.core_local_blocks = NULL;
gpr_free_aligned(g_log.blocks);
g_log.blocks = NULL;
gpr_free(g_log.buffer);
g_log.buffer = NULL;
g_log.initialized = 0;
}
void* census_log_start_write(size_t size) {
// Used to bound number of times block allocation is attempted.
GPR_ASSERT(size > 0);
GPR_ASSERT(g_log.initialized);
if (size > CENSUS_LOG_MAX_RECORD_SIZE) {
return NULL;
}
uint32_t attempts_remaining = g_log.num_blocks;
uint32_t core_id = gpr_cpu_current_cpu();
do {
void* record = NULL;
cl_block* block =
cl_core_local_block_get_block(&g_log.core_local_blocks[core_id]);
if (block && (record = cl_block_start_write(block, size))) {
return record;
}
// Need to allocate a new block. We are here if:
// - No block associated with the core OR
// - Write in-progress on the block OR
// - block is out of space
gpr_mu_lock(&g_log.lock);
bool allocated = cl_allocate_core_local_block(core_id, block);
gpr_mu_unlock(&g_log.lock);
if (!allocated) {
gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
return NULL;
}
} while (attempts_remaining--);
// Give up.
gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
return NULL;
}
void census_log_end_write(void* record, size_t bytes_written) {
GPR_ASSERT(g_log.initialized);
cl_block_end_write(cl_get_block(record), bytes_written);
}
void census_log_init_reader(void) {
GPR_ASSERT(g_log.initialized);
gpr_mu_lock(&g_log.lock);
// If a block is locked for reading, unlock it.
if (g_log.block_being_read != NULL) {
cl_block_end_read(g_log.block_being_read);
g_log.block_being_read = NULL;
}
g_log.read_iterator_state = g_log.num_cores;
gpr_mu_unlock(&g_log.lock);
}
const void* census_log_read_next(size_t* bytes_available) {
GPR_ASSERT(g_log.initialized);
gpr_mu_lock(&g_log.lock);
if (g_log.block_being_read != NULL) {
cl_block_end_read(g_log.block_being_read);
}
do {
g_log.block_being_read = cl_next_block_to_read(g_log.block_being_read);
if (g_log.block_being_read != NULL) {
void* record =
cl_block_start_read(g_log.block_being_read, bytes_available);
if (record != NULL) {
gpr_mu_unlock(&g_log.lock);
return record;
}
}
} while (g_log.block_being_read != NULL);
gpr_mu_unlock(&g_log.lock);
return NULL;
}
size_t census_log_remaining_space(void) {
GPR_ASSERT(g_log.initialized);
size_t space = 0;
gpr_mu_lock(&g_log.lock);
if (g_log.discard_old_records) {
// Remaining space is not meaningful; just return the entire log space.
space = g_log.num_blocks << CENSUS_LOG_2_MAX_RECORD_SIZE;
} else {
GPR_ASSERT(g_log.free_block_list.count >= 0);
space = (size_t)g_log.free_block_list.count * CENSUS_LOG_MAX_RECORD_SIZE;
}
gpr_mu_unlock(&g_log.lock);
return space;
}
int64_t census_log_out_of_space_count(void) {
GPR_ASSERT(g_log.initialized);
return gpr_atm_acq_load(&g_log.out_of_space_count);
}

@ -1,88 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/* A very fast in-memory log, optimized for multiple writers. */
#ifndef GRPC_CORE_EXT_CENSUS_MLOG_H
#define GRPC_CORE_EXT_CENSUS_MLOG_H
#include <grpc/support/port_platform.h>
#include <stddef.h>
/* Maximum record size, in bytes. */
#define CENSUS_LOG_2_MAX_RECORD_SIZE 14 /* 2^14 = 16KB */
#define CENSUS_LOG_MAX_RECORD_SIZE (1 << CENSUS_LOG_2_MAX_RECORD_SIZE)
#ifdef __cplusplus
extern "C" {
#endif
/* Initialize the statistics logging subsystem with the given log size. A log
size of 0 will result in the smallest possible log for the platform
(approximately CENSUS_LOG_MAX_RECORD_SIZE * gpr_cpu_num_cores()). If
discard_old_records is non-zero, then new records will displace older ones
when the log is full. This function must be called before any other
census_log functions.
*/
void census_log_initialize(size_t size_in_mb, int discard_old_records);
/* Shut down the logging subsystem. The caller must ensure that:
- there are no in-progress or future calls to any census_log functions
- there are no incomplete records
*/
void census_log_shutdown(void);
/* Allocates and returns a record of 'size' bytes and marks it in use. A
subsequent census_log_end_write() marks the record complete. The
'bytes_written' census_log_end_write() argument must be <=
'size'. Returns NULL if out of space AND:
- the log is configured to keep old records OR
- all blocks are pinned by incomplete records.
*/
void* census_log_start_write(size_t size);
void census_log_end_write(void* record, size_t bytes_written);
void census_log_init_reader(void);
/* census_log_read_next() iterates over blocks with data and, for each block,
returns a pointer to the first unread byte. The number of bytes that can be
read is returned in 'bytes_available'. The reader is expected to read all
available data. Reading the data consumes it, i.e. it cannot be read again.
census_log_read_next() returns NULL when the end is reached, i.e. the last
block has been read. census_log_init_reader() starts the iteration or aborts
the current iteration.
*/
const void* census_log_read_next(size_t* bytes_available);
/* Returns estimated remaining space across all blocks, in bytes. If log is
configured to discard old records, returns total log space. Otherwise,
returns space available in empty blocks (partially filled blocks are
treated as full).
*/
size_t census_log_remaining_space(void);
/* Returns the number of times census_log_start_write() failed due to
out-of-space. */
int64_t census_log_out_of_space_count(void);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_CENSUS_MLOG_H */
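/* Editor's note: illustrative usage sketch, not part of the original sources.
 * It exercises only the census_log functions declared above: one writer
 * appends a record, then a reader drains the log. */
#include "src/core/ext/census/mlog.h"
#include <grpc/support/log.h>
#include <string.h>

static void census_log_example(void) {
  census_log_initialize(1 /* size_in_mb */, 1 /* discard_old_records */);

  /* Reserve space, fill it, then commit. bytes_written must be <= the
     reserved size. */
  const char msg[] = "hello";
  void *record = census_log_start_write(sizeof(msg));
  if (record != NULL) {
    memcpy(record, msg, sizeof(msg));
    census_log_end_write(record, sizeof(msg));
  }

  /* Drain the log. Each census_log_read_next() call returns one block's
     unread bytes; NULL signals the end of the iteration. */
  census_log_init_reader();
  size_t bytes_available;
  const void *data;
  while ((data = census_log_read_next(&bytes_available)) != NULL) {
    gpr_log(GPR_INFO, "read %lu bytes at %p", (unsigned long)bytes_available,
            data);
  }

  census_log_shutdown();
}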

@ -1,48 +0,0 @@
/*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/census.h>
/* TODO(aveitch): These are all placeholder implementations. */
census_timestamp census_start_rpc_op_timestamp(void) {
census_timestamp ct;
/* TODO(aveitch): assumes gpr_timespec implementation of census_timestamp. */
ct.ts = gpr_now(GPR_CLOCK_MONOTONIC);
return ct;
}
census_context *census_start_client_rpc_op(
const census_context *context, int64_t rpc_name_id,
const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask,
const census_timestamp *start_time) {
return NULL;
}
census_context *census_start_server_rpc_op(
const char *buffer, int64_t rpc_name_id,
const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask,
census_timestamp *start_time) {
return NULL;
}
census_context *census_start_op(census_context *context, const char *family,
const char *name, int trace_mask) {
return NULL;
}
void census_end_op(census_context *context, int status) {}

@ -1,49 +0,0 @@
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/census.h>
#include <grpc/support/log.h>
/* Placeholders for the pending APIs */
int census_get_trace_record(census_trace_record *trace_record) {
(void)trace_record;
abort();
}
void census_record_values(census_context *context, census_value *values,
size_t nvalues) {
(void)context;
(void)values;
(void)nvalues;
abort();
}
void census_set_rpc_client_peer(census_context *context, const char *peer) {
(void)context;
(void)peer;
abort();
}
void census_trace_scan_end() { abort(); }
int census_trace_scan_start(int consume) {
(void)consume;
abort();
}

@ -1,303 +0,0 @@
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "src/core/ext/census/resource.h"
#include "third_party/nanopb/pb_decode.h"
#include <grpc/census.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <stdbool.h>
#include <string.h>
// Protect local resource data structures.
static gpr_mu resource_lock;
// Deleting and creating resources are relatively rare events, and should not
// be done in the critical path of performance-sensitive code. We record the
// currently used resource ids in a simple array, and just search it each time
// we need to assign a new id or look up a resource.
static resource **resources = NULL;
// Number of entries in *resources
static size_t n_resources = 0;
// Number of defined resources
static size_t n_defined_resources = 0;
void initialize_resources(void) {
gpr_mu_init(&resource_lock);
gpr_mu_lock(&resource_lock);
GPR_ASSERT(resources == NULL && n_resources == 0 && n_defined_resources == 0);
gpr_mu_unlock(&resource_lock);
}
// Delete a resource given its ID. The ID must be a valid resource ID. Must be
// called with resource_lock held.
static void delete_resource_locked(size_t rid) {
GPR_ASSERT(resources[rid] != NULL);
gpr_free(resources[rid]->name);
gpr_free(resources[rid]->description);
gpr_free(resources[rid]->numerators);
gpr_free(resources[rid]->denominators);
gpr_free(resources[rid]);
resources[rid] = NULL;
n_defined_resources--;
}
void shutdown_resources(void) {
gpr_mu_lock(&resource_lock);
for (size_t i = 0; i < n_resources; i++) {
if (resources[i] != NULL) {
delete_resource_locked(i);
}
}
GPR_ASSERT(n_defined_resources == 0);
gpr_free(resources);
resources = NULL;
n_resources = 0;
gpr_mu_unlock(&resource_lock);
}
// Check the contents of string fields in a resource proto.
static bool validate_string(pb_istream_t *stream, const pb_field_t *field,
void **arg) {
resource *vresource = (resource *)*arg;
switch (field->tag) {
case google_census_Resource_name_tag:
// Name must have at least one character
if (stream->bytes_left == 0) {
gpr_log(GPR_INFO, "Zero-length Resource name.");
return false;
}
vresource->name = (char *)gpr_malloc(stream->bytes_left + 1);
vresource->name[stream->bytes_left] = '\0';
if (!pb_read(stream, (uint8_t *)vresource->name, stream->bytes_left)) {
return false;
}
// Can't have same name as an existing resource.
for (size_t i = 0; i < n_resources; i++) {
resource *compare = resources[i];
if (compare == vresource || compare == NULL) continue;
if (strcmp(compare->name, vresource->name) == 0) {
gpr_log(GPR_INFO, "Duplicate Resource name %s.", vresource->name);
return false;
}
}
break;
case google_census_Resource_description_tag:
if (stream->bytes_left == 0) {
return true;
}
vresource->description = (char *)gpr_malloc(stream->bytes_left + 1);
vresource->description[stream->bytes_left] = '\0';
if (!pb_read(stream, (uint8_t *)vresource->description,
stream->bytes_left)) {
return false;
}
break;
default:
// No other string fields in Resource. Print warning and skip.
gpr_log(GPR_INFO, "Unknown string field type in Resource protobuf.");
if (!pb_read(stream, NULL, stream->bytes_left)) {
return false;
}
break;
}
return true;
}
// Decode numerators/denominators in a stream. The `count` and `bup`
// (BasicUnit pointer) are pointers to the appropriate fields in a resource
// struct.
static bool validate_units_helper(pb_istream_t *stream, int *count,
google_census_Resource_BasicUnit **bup) {
while (stream->bytes_left) {
(*count)++;
// Have to allocate a new array of values. Normal case is 0 or 1, so
// this should normally not be an issue.
google_census_Resource_BasicUnit *new_bup =
(google_census_Resource_BasicUnit *)gpr_malloc(
(size_t)*count * sizeof(google_census_Resource_BasicUnit));
if (*count != 1) {
memcpy(new_bup, *bup,
(size_t)(*count - 1) * sizeof(google_census_Resource_BasicUnit));
gpr_free(*bup);
}
*bup = new_bup;
uint64_t value;
if (!pb_decode_varint(stream, &value)) {
return false;
}
*(*bup + *count - 1) = (google_census_Resource_BasicUnit)value;
}
return true;
}
// Validate units field of a Resource proto.
static bool validate_units(pb_istream_t *stream, const pb_field_t *field,
void **arg) {
resource *vresource = (resource *)(*arg);
switch (field->tag) {
case google_census_Resource_MeasurementUnit_numerator_tag:
return validate_units_helper(stream, &vresource->n_numerators,
&vresource->numerators);
break;
case google_census_Resource_MeasurementUnit_denominator_tag:
return validate_units_helper(stream, &vresource->n_denominators,
&vresource->denominators);
break;
default:
gpr_log(GPR_ERROR, "Unknown field type.");
return false;
break;
}
return true;
}
// Validate the contents of a Resource proto. `id` is the intended resource id.
static bool validate_resource_pb(const uint8_t *resource_pb,
size_t resource_pb_size, size_t id) {
GPR_ASSERT(id < n_resources);
if (resource_pb == NULL) {
return false;
}
google_census_Resource vresource;
vresource.name.funcs.decode = &validate_string;
vresource.name.arg = resources[id];
vresource.description.funcs.decode = &validate_string;
vresource.description.arg = resources[id];
vresource.unit.numerator.funcs.decode = &validate_units;
vresource.unit.numerator.arg = resources[id];
vresource.unit.denominator.funcs.decode = &validate_units;
vresource.unit.denominator.arg = resources[id];
pb_istream_t stream =
pb_istream_from_buffer((uint8_t *)resource_pb, resource_pb_size);
if (!pb_decode(&stream, google_census_Resource_fields, &vresource)) {
return false;
}
// A Resource must have a name and a unit, with at least one numerator.
return (resources[id]->name != NULL && vresource.has_unit &&
resources[id]->n_numerators > 0);
}
// Allocate a blank resource, and return associated ID. Must be called with
// resource_lock held.
size_t allocate_resource(void) {
// use next_id to optimize expected placement of next new resource.
static size_t next_id = 0;
size_t id = n_resources; // resource ID - initialize to invalid value.
// Expand resources if needed.
if (n_resources == n_defined_resources) {
size_t new_n_resources = n_resources ? n_resources * 2 : 2;
resource **new_resources =
(resource **)gpr_malloc(new_n_resources * sizeof(resource *));
if (n_resources != 0) {
memcpy(new_resources, resources, n_resources * sizeof(resource *));
}
memset(new_resources + n_resources, 0,
(new_n_resources - n_resources) * sizeof(resource *));
gpr_free(resources);
resources = new_resources;
n_resources = new_n_resources;
id = n_defined_resources;
} else {
GPR_ASSERT(n_defined_resources < n_resources);
// Find a free id.
for (size_t base = 0; base < n_resources; base++) {
id = (next_id + base) % n_resources;
if (resources[id] == NULL) break;
}
}
GPR_ASSERT(id < n_resources && resources[id] == NULL);
resources[id] = (resource *)gpr_malloc(sizeof(resource));
memset(resources[id], 0, sizeof(resource));
n_defined_resources++;
next_id = (id + 1) % n_resources;
return id;
}
int32_t census_define_resource(const uint8_t *resource_pb,
size_t resource_pb_size) {
if (resource_pb == NULL) {
return -1;
}
gpr_mu_lock(&resource_lock);
size_t id = allocate_resource();
// Validate pb, extract name.
if (!validate_resource_pb(resource_pb, resource_pb_size, id)) {
delete_resource_locked(id);
gpr_mu_unlock(&resource_lock);
return -1;
}
gpr_mu_unlock(&resource_lock);
return (int32_t)id;
}
void census_delete_resource(int32_t rid) {
gpr_mu_lock(&resource_lock);
if (rid >= 0 && (size_t)rid < n_resources && resources[rid] != NULL) {
delete_resource_locked((size_t)rid);
}
gpr_mu_unlock(&resource_lock);
}
int32_t census_resource_id(const char *name) {
gpr_mu_lock(&resource_lock);
for (int32_t id = 0; (size_t)id < n_resources; id++) {
if (resources[id] != NULL && strcmp(resources[id]->name, name) == 0) {
gpr_mu_unlock(&resource_lock);
return id;
}
}
gpr_mu_unlock(&resource_lock);
return -1;
}
int32_t define_resource(const resource *base) {
GPR_ASSERT(base != NULL && base->name != NULL && base->n_numerators > 0 &&
base->numerators != NULL);
gpr_mu_lock(&resource_lock);
size_t id = allocate_resource();
size_t len = strlen(base->name) + 1;
resources[id]->name = (char *)gpr_malloc(len);
memcpy(resources[id]->name, base->name, len);
if (base->description) {
len = strlen(base->description) + 1;
resources[id]->description = (char *)gpr_malloc(len);
memcpy(resources[id]->description, base->description, len);
}
resources[id]->prefix = base->prefix;
resources[id]->n_numerators = base->n_numerators;
len = (size_t)base->n_numerators * sizeof(*base->numerators);
resources[id]->numerators =
(google_census_Resource_BasicUnit *)gpr_malloc(len);
memcpy(resources[id]->numerators, base->numerators, len);
resources[id]->n_denominators = base->n_denominators;
if (base->n_denominators != 0) {
len = (size_t)base->n_denominators * sizeof(*base->denominators);
resources[id]->denominators =
(google_census_Resource_BasicUnit *)gpr_malloc(len);
memcpy(resources[id]->denominators, base->denominators, len);
}
gpr_mu_unlock(&resource_lock);
return (int32_t)id;
}

@ -1,56 +0,0 @@
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/* Census-internal resource definition and manipulation functions. */
#ifndef GRPC_CORE_EXT_CENSUS_RESOURCE_H
#define GRPC_CORE_EXT_CENSUS_RESOURCE_H
#include <grpc/grpc.h>
#include "src/core/ext/census/gen/census.pb.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Internal representation of a resource. */
typedef struct {
char *name;
char *description;
int32_t prefix;
int n_numerators;
google_census_Resource_BasicUnit *numerators;
int n_denominators;
google_census_Resource_BasicUnit *denominators;
} resource;
/* Initialize and shutdown the resources subsystem. */
void initialize_resources(void);
void shutdown_resources(void);
/* Add a new resource, given a proposed resource structure. Returns the
resource ID, or a negative value on failure.
TODO(aveitch): this function exists to support addition of the base
resources. It should be removed when we have the ability to add resources
from configuration files. */
int32_t define_resource(const resource *base);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_CENSUS_RESOURCE_H */
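/* Editor's note: illustrative usage sketch, not part of the original sources.
 * It fills in the resource struct declared above and registers it via
 * define_resource(). The enum value google_census_Resource_BasicUnit_SECS is
 * assumed to be one of the BasicUnit values generated in census.pb.h. */
#include "src/core/ext/census/resource.h"
#include <grpc/support/log.h>

static void define_latency_resource(void) {
  google_census_Resource_BasicUnit numerator =
      google_census_Resource_BasicUnit_SECS; /* assumed enum value */
  resource r;
  r.name = (char *)"example_rpc_latency";
  r.description = (char *)"Example RPC latency in seconds";
  r.prefix = 0;
  r.n_numerators = 1;
  r.numerators = &numerator;
  r.n_denominators = 0;
  r.denominators = NULL;
  int32_t id = define_resource(&r);
  GPR_ASSERT(id >= 0);
}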

@ -1,36 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_CENSUS_RPC_METRIC_ID_H
#define GRPC_CORE_EXT_CENSUS_RPC_METRIC_ID_H
/* Metric IDs used for RPC measurements. */
/* Count of client requests sent. */
#define CENSUS_METRIC_RPC_CLIENT_REQUESTS ((uint32_t)0)
/* Count of server requests sent. */
#define CENSUS_METRIC_RPC_SERVER_REQUESTS ((uint32_t)1)
/* Client error counts. */
#define CENSUS_METRIC_RPC_CLIENT_ERRORS ((uint32_t)2)
/* Server error counts. */
#define CENSUS_METRIC_RPC_SERVER_ERRORS ((uint32_t)3)
/* Client side request latency. */
#define CENSUS_METRIC_RPC_CLIENT_LATENCY ((uint32_t)4)
/* Server side request latency. */
#define CENSUS_METRIC_RPC_SERVER_LATENCY ((uint32_t)5)
#endif /* GRPC_CORE_EXT_CENSUS_RPC_METRIC_ID_H */

@ -1,71 +0,0 @@
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "src/core/ext/census/trace_context.h"
#include <grpc/census.h>
#include <grpc/support/log.h>
#include <stdbool.h>
#include "third_party/nanopb/pb_decode.h"
#include "third_party/nanopb/pb_encode.h"
// This function assumes the TraceContext is valid.
size_t encode_trace_context(google_trace_TraceContext *ctxt, uint8_t *buffer,
const size_t buf_size) {
// Create a stream that will write to our buffer.
pb_ostream_t stream = pb_ostream_from_buffer(buffer, buf_size);
// encode message
bool status = pb_encode(&stream, google_trace_TraceContext_fields, ctxt);
if (!status) {
gpr_log(GPR_DEBUG, "TraceContext encoding failed: %s",
PB_GET_ERROR(&stream));
return 0;
}
return stream.bytes_written;
}
bool decode_trace_context(google_trace_TraceContext *ctxt, uint8_t *buffer,
const size_t nbytes) {
// Create a stream that reads nbytes from the buffer.
pb_istream_t stream = pb_istream_from_buffer(buffer, nbytes);
// decode message
bool status = pb_decode(&stream, google_trace_TraceContext_fields, ctxt);
if (!status) {
gpr_log(GPR_DEBUG, "TraceContext decoding failed: %s",
PB_GET_ERROR(&stream));
return false;
}
// check fields
if (!ctxt->has_trace_id_hi || !ctxt->has_trace_id_lo) {
gpr_log(GPR_DEBUG, "Invalid TraceContext: missing trace_id");
return false;
}
if (!ctxt->has_span_id) {
gpr_log(GPR_DEBUG, "Invalid TraceContext: missing span_id");
return false;
}
return true;
}

@ -1,64 +0,0 @@
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/* Functions for manipulating trace contexts as defined in
src/proto/census/trace.proto */
#ifndef GRPC_CORE_EXT_CENSUS_TRACE_CONTEXT_H
#define GRPC_CORE_EXT_CENSUS_TRACE_CONTEXT_H
#include "src/core/ext/census/gen/trace_context.pb.h"
/* Span option flags. */
#define SPAN_OPTIONS_IS_SAMPLED 0x01
/* Maximum number of bytes required to encode a TraceContext (31)
1 byte for trace_id field
1 byte for trace_id length
1 byte for trace_id.hi field
8 bytes for trace_id.hi (uint64_t)
1 byte for trace_id.lo field
8 bytes for trace_id.lo (uint64_t)
1 byte for span_id field
8 bytes for span_id (uint64_t)
1 byte for is_sampled field
1 byte for is_sampled (bool) */
#define TRACE_MAX_CONTEXT_SIZE 31
#ifdef __cplusplus
extern "C" {
#endif
/* Encode a trace context (ctxt) into proto format to the buffer provided. The
size of buffer must be at least TRACE_MAX_CONTEXT_SIZE. On success, returns the
number of bytes successfully encoded into buffer. On failure, returns 0. */
size_t encode_trace_context(google_trace_TraceContext *ctxt, uint8_t *buffer,
const size_t buf_size);
/* Decode a proto-encoded TraceContext from the provided buffer into the
TraceContext structure (ctxt). The function expects to be supplied the number
of bytes to be read from the buffer (nbytes). It also validates that the
TraceContext has a span_id and a trace_id, and returns false if either of
these is missing. Returns true on success and false otherwise. */
bool decode_trace_context(google_trace_TraceContext *ctxt, uint8_t *buffer,
const size_t nbytes);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_CENSUS_TRACE_CONTEXT_H */
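/* Editor's note: illustrative round-trip sketch, not part of the original
 * sources. The paired has_ flags and value fields follow the usual nanopb
 * convention and mirror the fields checked by decode_trace_context(); treat
 * the exact generated field names as an assumption. */
#include "src/core/ext/census/trace_context.h"
#include <grpc/support/log.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static void trace_context_round_trip(void) {
  google_trace_TraceContext ctxt;
  memset(&ctxt, 0, sizeof(ctxt));
  ctxt.has_trace_id_hi = true;
  ctxt.trace_id_hi = 0x0123456789abcdefULL;
  ctxt.has_trace_id_lo = true;
  ctxt.trace_id_lo = 0xfedcba9876543210ULL;
  ctxt.has_span_id = true;
  ctxt.span_id = 42;

  uint8_t buffer[TRACE_MAX_CONTEXT_SIZE];
  size_t nbytes = encode_trace_context(&ctxt, buffer, sizeof(buffer));
  GPR_ASSERT(nbytes > 0 && nbytes <= TRACE_MAX_CONTEXT_SIZE);

  google_trace_TraceContext decoded;
  memset(&decoded, 0, sizeof(decoded));
  GPR_ASSERT(decode_trace_context(&decoded, buffer, nbytes));
  GPR_ASSERT(decoded.span_id == ctxt.span_id);
}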

@ -1,46 +0,0 @@
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_CENSUS_TRACE_LABEL_H
#define GRPC_CORE_EXT_CENSUS_TRACE_LABEL_H
#include "src/core/ext/census/trace_string.h"
/* Trace label (key/value pair) stores a label name and the label value. The
value can be one of trace_string/int64_t/bool. */
typedef struct trace_label {
trace_string key;
enum label_type {
/* Unknown value for debugging/error purposes */
LABEL_UNKNOWN = 0,
/* A string value */
LABEL_STRING = 1,
/* An integer value. */
LABEL_INT = 2,
/* A boolean value. */
LABEL_BOOL = 3,
} value_type;
union value {
trace_string label_str;
int64_t label_int;
bool label_bool;
} value;
} trace_label;
#endif /* GRPC_CORE_EXT_CENSUS_TRACE_LABEL_H */
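/* Editor's note: illustrative sketch, not part of the original sources. It
 * constructs a trace_label carrying a string value, using only the fields
 * declared above (key, value_type, and the value union). */
#include <stdbool.h>
#include <string.h>
#include "src/core/ext/census/trace_label.h"

static trace_label make_string_label(const char *key, const char *value) {
  trace_label label;
  label.key.string = (char *)key;
  label.key.length = strlen(key);
  label.value_type = LABEL_STRING;
  label.value.label_str.string = (char *)value;
  label.value.label_str.length = strlen(value);
  return label;
}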

@ -1,56 +0,0 @@
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_CENSUS_TRACE_PROPAGATION_H
#define GRPC_CORE_EXT_CENSUS_TRACE_PROPAGATION_H
#include "src/core/ext/census/tracing.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Encoding and decoding functions for receiving and sending trace contexts
over the wire. Only RPC libraries should be calling these
functions. These functions return the number of bytes encoded/decoded
(0 if a failure has occurred). buf_size indicates the size of the
input/output buffer. trace_span_context is a struct that includes the
trace ID, span ID, and a set of option flags (is_sampled, etc.). */
/* Converts a span context to a binary byte buffer. */
size_t trace_span_context_to_binary(const trace_span_context *ctxt,
uint8_t *buf, size_t buf_size);
/* Reads a binary byte buffer and populates a span context structure. */
size_t binary_to_trace_span_context(const uint8_t *buf, size_t buf_size,
trace_span_context *ctxt);
/* Converts a span context to an http metadata compatible string. */
size_t trace_span_context_to_http_format(const trace_span_context *ctxt,
char *buf, size_t buf_size);
/* Reads an http metadata compatible string and populates a span context
structure. */
size_t http_format_to_trace_span_context(const char *buf, size_t buf_size,
trace_span_context *ctxt);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_CENSUS_TRACE_PROPAGATION_H */
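/* Editor's note: illustrative sketch of the intended call pattern, not part of
 * the original sources. It relies only on the declarations above and on the
 * trace_span_context struct from tracing.h; buffer sizing reuses
 * TRACE_MAX_CONTEXT_SIZE from trace_context.h. */
#include "src/core/ext/census/trace_propagation.h"
#include "src/core/ext/census/trace_context.h" /* for TRACE_MAX_CONTEXT_SIZE */
#include <grpc/support/log.h>
#include <stdint.h>
#include <string.h>

static void propagate_span_context(const trace_span_context *outgoing,
                                   trace_span_context *incoming) {
  /* Sender: serialize the span context into a wire buffer. */
  uint8_t wire[TRACE_MAX_CONTEXT_SIZE];
  size_t nbytes = trace_span_context_to_binary(outgoing, wire, sizeof(wire));
  GPR_ASSERT(nbytes > 0); /* 0 would indicate an encoding failure */

  /* Receiver: rebuild a span context from the received bytes. */
  memset(incoming, 0, sizeof(*incoming));
  GPR_ASSERT(binary_to_trace_span_context(wire, nbytes, incoming) > 0);
}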

@ -1,30 +0,0 @@
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_CENSUS_TRACE_STATUS_H
#define GRPC_CORE_EXT_CENSUS_TRACE_STATUS_H
#include "src/core/ext/census/trace_string.h"
/* Stores a status code and status message for a trace. */
typedef struct trace_status {
int64_t errorCode;
trace_string errorMessage;
} trace_status;
#endif /* GRPC_CORE_EXT_CENSUS_TRACE_STATUS_H */

@ -1,35 +0,0 @@
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_CENSUS_TRACE_STRING_H
#define GRPC_CORE_EXT_CENSUS_TRACE_STRING_H
#include <grpc/slice.h>
/* String struct for tracing messages. Since this is a C API, we do not have
access to a string class. This is intended for use by higher level
languages which wrap around the C API, as most of them have a string class.
This will also be more efficient when copying, as we have an explicitly
specified length. Also, grpc_slice has reference counting which allows for
interning. */
typedef struct trace_string {
char *string;
size_t length;
} trace_string;
#endif /* GRPC_CORE_EXT_CENSUS_TRACE_STRING_H */

@ -1,55 +0,0 @@
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "src/core/ext/census/tracing.h"
#include <grpc/census.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/ext/census/mlog.h"
void trace_start_span(const trace_span_context *span_ctxt,
const trace_string name, const start_span_options *opts,
trace_span_context *new_span_ctxt,
bool has_remote_parent) {
// Noop implementation.
}
void trace_add_span_annotation(const trace_string description,
const trace_label *labels, const size_t n_labels,
trace_span_context *span_ctxt) {
// Noop implementation.
}
void trace_add_span_network_event_annotation(const trace_string description,
const trace_label *labels,
const size_t n_labels,
const gpr_timespec timestamp,
bool sent, uint64_t id,
trace_span_context *span_ctxt) {
// Noop implementation.
}
void trace_add_span_labels(const trace_label *labels, const size_t n_labels,
trace_span_context *span_ctxt) {
// Noop implementation.
}
void trace_end_span(const trace_status *status, trace_span_context *span_ctxt) {
// Noop implementation.
}

@ -1,117 +0,0 @@
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_CENSUS_TRACING_H
#define GRPC_CORE_EXT_CENSUS_TRACING_H
#include <grpc/support/time.h>
#include <stdbool.h>
#include "src/core/ext/census/trace_context.h"
#include "src/core/ext/census/trace_label.h"
#include "src/core/ext/census/trace_status.h"
#ifdef __cplusplus
extern "C" {
#endif
/* This is the low-level tracing API that other languages will interface with.
It is not intended to be accessed by the end user, and has therefore been
designed with performance in mind rather than ease of use. */
/* The tracing level. */
enum TraceLevel {
/* Annotations on this context will be silently discarded. */
NO_TRACING = 0,
/* Annotations will not be saved to a persistent store. They will be
available via local APIs only. This setting is not propagated to child
spans. */
TRANSIENT_TRACING = 1,
/* Annotations are recorded for the entire distributed trace and they are
saved to a persistent store. This setting is propagated to child spans. */
PERSISTENT_TRACING = 2,
};
typedef struct trace_span_context {
/* Trace span context stores Span ID, Trace ID, and option flags. */
/* Trace ID is 128 bits split into 2 64-bit chunks (hi and lo). */
uint64_t trace_id_hi;
uint64_t trace_id_lo;
/* Span ID is 64 bits. */
uint64_t span_id;
/* Span-options is 32-bit value which contains flag options. */
uint32_t span_options;
} trace_span_context;
typedef struct start_span_options {
/* If set, this will override the Span.local_start_time for the Span. */
gpr_timespec local_start_timestamp;
/* Linked spans can be used to identify spans that are linked to this span in
a different trace. This can be used (for example) in batching operations,
where a single batch handler processes multiple requests from different
traces. If set, this points to a list of Spans that are linked to the created
Span. */
trace_span_context *linked_spans;
/* The number of linked spans. */
size_t n_linked_spans;
} start_span_options;
/* Create a new child Span (or root if parent is NULL), with parent being the
designated Span. The child span will have the provided name and starting
span options (optional). The bool has_remote_parent marks whether the
context refers to a remote parent span or not. */
void trace_start_span(const trace_span_context *span_ctxt,
const trace_string name, const start_span_options *opts,
trace_span_context *new_span_ctxt,
bool has_remote_parent);
/* Add a new Annotation to the Span. Annotations consist of a description
(trace_string) and a set of n labels (trace_label). This can be populated
with arbitrary user data. */
void trace_add_span_annotation(const trace_string description,
const trace_label *labels, const size_t n_labels,
trace_span_context *span_ctxt);
/* Add a new NetworkEvent annotation to a Span. This function is only intended
to be used by RPC systems (either client or server), not by higher level
applications. The timestamp type will be system-defined. The sent argument
designates whether this is a network send event (client request, server
reply) or a receive event (server request, client reply). The id argument
corresponds to Span.Annotation.NetworkEvent.id from the data model, and serves
to uniquely identify each network message. */
void trace_add_span_network_event(const trace_string description,
const trace_label *labels,
const size_t n_labels,
const gpr_timespec timestamp, bool sent,
uint64_t id, trace_span_context *span_ctxt);
/* Add a set of labels to the Span. These will correspond to the field
Span.labels in the data model. */
void trace_add_span_labels(const trace_label *labels, const size_t n_labels,
trace_span_context *span_ctxt);
/* Mark the end of Span Execution with the given status. Only the timing of the
first EndSpan call for a given Span will be recorded, and implementations are
free to ignore all further calls using the Span. EndSpanOptions can
optionally be NULL. */
void trace_end_span(const trace_status *status, trace_span_context *span_ctxt);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_CENSUS_TRACING_H */
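/* Editor's note: illustrative usage sketch, not part of the original sources.
 * It strings together the declarations above (the shipped tracing.cc bodies
 * are no-ops, so this only demonstrates the intended call sequence). */
#include "src/core/ext/census/tracing.h"
#include <string.h>

static trace_string make_trace_string(const char *s) {
  trace_string ts;
  ts.string = (char *)s;
  ts.length = strlen(s);
  return ts;
}

static void traced_operation(const trace_span_context *parent) {
  /* Start a child span of `parent` (pass NULL for a root span). */
  trace_span_context span;
  memset(&span, 0, sizeof(span));
  trace_start_span(parent, make_trace_string("example.Operation"),
                   NULL /* default start_span_options */, &span,
                   false /* parent is local */);

  /* Attach a free-form annotation to the span. */
  trace_add_span_annotation(make_trace_string("work started"), NULL, 0, &span);

  /* ... do the actual work ... */

  /* Close the span with an OK status. */
  trace_status status;
  status.errorCode = 0;
  status.errorMessage = make_trace_string("OK");
  trace_end_span(&status, &span);
}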

@ -1,301 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "src/core/ext/census/window_stats.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#include <math.h>
#include <stddef.h>
#include <string.h>
/* typedefs make typing long names easier. Use cws (for census_window_stats) */
typedef census_window_stats_stat_info cws_stat_info;
typedef struct census_window_stats_sum cws_sum;
/* Each interval is composed of a number of buckets, which hold a count of
entries and a single statistic */
typedef struct census_window_stats_bucket {
int64_t count;
void *statistic;
} cws_bucket;
/* Each interval has a set of buckets, and the variables needed to keep
track of their current state */
typedef struct census_window_stats_interval_stats {
/* The buckets. There will be 'granularity' + 1 of these. */
cws_bucket *buckets;
/* Index of the bucket containing the smallest time interval. */
int bottom_bucket;
/* The smallest time storable in the current window. */
int64_t bottom;
/* The largest time storable in the current window + 1ns */
int64_t top;
/* The width of each bucket in ns. */
int64_t width;
} cws_interval_stats;
typedef struct census_window_stats {
/* Number of intervals. */
int nintervals;
/* Number of buckets in each interval. 'granularity' + 1. */
int nbuckets;
/* Record of stat_info. */
cws_stat_info stat_info;
/* Stats for each interval. */
cws_interval_stats *interval_stats;
/* The time the newest stat was recorded. */
int64_t newest_time;
} window_stats;
/* Calculate an actual bucket index from a logical index 'IDX'. Other
parameters supply information on the interval struct and overall stats. */
#define BUCKET_IDX(IS, IDX, WSTATS) \
((IS->bottom_bucket + (IDX)) % WSTATS->nbuckets)
/* The maximum seconds value we can have in a valid timespec. More than this
will result in overflow in timespec_to_ns(). This works out to ~292 years.
TODO: consider using doubles instead of int64. */
static int64_t max_seconds = (GPR_INT64_MAX - GPR_NS_PER_SEC) / GPR_NS_PER_SEC;
static int64_t timespec_to_ns(const gpr_timespec ts) {
if (ts.tv_sec > max_seconds) {
return GPR_INT64_MAX - 1;
}
return ts.tv_sec * GPR_NS_PER_SEC + ts.tv_nsec;
}
static void cws_initialize_statistic(void *statistic,
const cws_stat_info *stat_info) {
if (stat_info->stat_initialize == NULL) {
memset(statistic, 0, stat_info->stat_size);
} else {
stat_info->stat_initialize(statistic);
}
}
/* Create and initialize a statistic */
static void *cws_create_statistic(const cws_stat_info *stat_info) {
void *stat = gpr_malloc(stat_info->stat_size);
cws_initialize_statistic(stat, stat_info);
return stat;
}
window_stats *census_window_stats_create(int nintervals,
const gpr_timespec intervals[],
int granularity,
const cws_stat_info *stat_info) {
window_stats *ret;
int i;
/* validate inputs */
GPR_ASSERT(nintervals > 0 && granularity > 2 && intervals != NULL &&
stat_info != NULL);
for (i = 0; i < nintervals; i++) {
int64_t ns = timespec_to_ns(intervals[i]);
GPR_ASSERT(intervals[i].tv_sec >= 0 && intervals[i].tv_nsec >= 0 &&
intervals[i].tv_nsec < GPR_NS_PER_SEC && ns >= 100 &&
granularity * 10 <= ns);
}
/* Allocate and initialize relevant data structures */
ret = (window_stats *)gpr_malloc(sizeof(window_stats));
ret->nintervals = nintervals;
ret->nbuckets = granularity + 1;
ret->stat_info = *stat_info;
ret->interval_stats =
(cws_interval_stats *)gpr_malloc(nintervals * sizeof(cws_interval_stats));
for (i = 0; i < nintervals; i++) {
int64_t size_ns = timespec_to_ns(intervals[i]);
cws_interval_stats *is = ret->interval_stats + i;
cws_bucket *buckets = is->buckets =
(cws_bucket *)gpr_malloc(ret->nbuckets * sizeof(cws_bucket));
int b;
for (b = 0; b < ret->nbuckets; b++) {
buckets[b].statistic = cws_create_statistic(stat_info);
buckets[b].count = 0;
}
is->bottom_bucket = 0;
is->bottom = 0;
is->width = size_ns / granularity;
/* Check for possible overflow issues, and maximize interval size if the
user requested something large enough. */
if ((GPR_INT64_MAX - is->width) > size_ns) {
is->top = size_ns + is->width;
} else {
is->top = GPR_INT64_MAX;
is->width = GPR_INT64_MAX / (granularity + 1);
}
/* If size doesn't divide evenly, we can have a width slightly too small;
better to have it slightly large. */
if ((size_ns - (granularity + 1) * is->width) > 0) {
is->width += 1;
}
}
ret->newest_time = 0;
return ret;
}
/* When we try adding a measurement above the current interval range, we
need to "shift" the buckets sufficiently to cover the new range. */
static void cws_shift_buckets(const window_stats *wstats,
cws_interval_stats *is, int64_t when_ns) {
int i;
/* number of bucket time widths to "shift" */
int shift;
/* number of buckets to clear */
int nclear;
GPR_ASSERT(when_ns >= is->top);
/* number of bucket time widths to "shift" */
shift = ((when_ns - is->top) / is->width) + 1;
/* number of buckets to clear - limited by actual number of buckets */
nclear = GPR_MIN(shift, wstats->nbuckets);
for (i = 0; i < nclear; i++) {
int b = BUCKET_IDX(is, i, wstats);
is->buckets[b].count = 0;
cws_initialize_statistic(is->buckets[b].statistic, &wstats->stat_info);
}
/* adjust top/bottom times and current bottom bucket */
is->bottom_bucket = BUCKET_IDX(is, shift, wstats);
is->top += shift * is->width;
is->bottom += shift * is->width;
}
void census_window_stats_add(window_stats *wstats, const gpr_timespec when,
const void *stat_value) {
int i;
int64_t when_ns = timespec_to_ns(when);
GPR_ASSERT(wstats->interval_stats != NULL);
for (i = 0; i < wstats->nintervals; i++) {
cws_interval_stats *is = wstats->interval_stats + i;
cws_bucket *bucket;
if (when_ns < is->bottom) { /* Below smallest time in interval: drop */
continue;
}
if (when_ns >= is->top) { /* above limit: shift buckets */
cws_shift_buckets(wstats, is, when_ns);
}
/* Add the stat. */
GPR_ASSERT(is->bottom <= when_ns && when_ns < is->top);
bucket = is->buckets +
BUCKET_IDX(is, (when_ns - is->bottom) / is->width, wstats);
bucket->count++;
wstats->stat_info.stat_add(bucket->statistic, stat_value);
}
if (when_ns > wstats->newest_time) {
wstats->newest_time = when_ns;
}
}
/* Add a specific bucket contents to an accumulating total. */
static void cws_add_bucket_to_sum(cws_sum *sum, const cws_bucket *bucket,
const cws_stat_info *stat_info) {
sum->count += bucket->count;
stat_info->stat_add(sum->statistic, bucket->statistic);
}
/* Add a proportion to an accumulating sum. */
static void cws_add_proportion_to_sum(double p, cws_sum *sum,
const cws_bucket *bucket,
const cws_stat_info *stat_info) {
sum->count += p * bucket->count;
stat_info->stat_add_proportion(p, sum->statistic, bucket->statistic);
}
void census_window_stats_get_sums(const window_stats *wstats,
const gpr_timespec when, cws_sum sums[]) {
int i;
int64_t when_ns = timespec_to_ns(when);
GPR_ASSERT(wstats->interval_stats != NULL);
for (i = 0; i < wstats->nintervals; i++) {
int when_bucket;
int new_bucket;
double last_proportion = 1.0;
double bottom_proportion;
cws_interval_stats *is = wstats->interval_stats + i;
cws_sum *sum = sums + i;
sum->count = 0;
cws_initialize_statistic(sum->statistic, &wstats->stat_info);
if (when_ns < is->bottom) {
continue;
}
if (when_ns >= is->top) {
cws_shift_buckets(wstats, is, when_ns);
}
/* Calculating which buckets to use, and what proportion of each, can get
complicated. Essentially there are two cases:
1) If the "top" bucket (new_bucket, where the newest additions to the
recorded stats are entered) corresponds to 'when', then we need to
take a proportion of it (if when < newest_time) or the whole bucket.
We also (possibly) need to take a corresponding proportion of the
bottom bucket.
2) In all other cases, we just take a straight proportion.
*/
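/* Worked example with illustrative numbers (not from the original source):
suppose is->bottom = 0, is->width = 10ns, wstats->nbuckets = 5,
wstats->newest_time = 47 and when_ns = 44. Then when_bucket == new_bucket
== 4 and bottom_bucket_time == 40, so last_proportion = (44 - 40) /
(47 - 40) ~= 0.57 of the top bucket is used; because when_bucket ==
nbuckets - 1, the bottom bucket contributes bottom_proportion =
(10 - 4) / 10 = 0.6, and buckets 1..3 are added in full. */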
when_bucket = (when_ns - is->bottom) / is->width;
new_bucket = (wstats->newest_time - is->bottom) / is->width;
if (new_bucket == when_bucket) {
int64_t bottom_bucket_time = is->bottom + when_bucket * is->width;
if (when_ns < wstats->newest_time) {
last_proportion = (double)(when_ns - bottom_bucket_time) /
(double)(wstats->newest_time - bottom_bucket_time);
bottom_proportion =
(double)(is->width - (when_ns - bottom_bucket_time)) / is->width;
} else {
bottom_proportion =
(double)(is->width - (wstats->newest_time - bottom_bucket_time)) /
is->width;
}
} else {
last_proportion =
(double)(when_ns + 1 - is->bottom - when_bucket * is->width) /
is->width;
bottom_proportion = 1.0 - last_proportion;
}
cws_add_proportion_to_sum(last_proportion, sum,
is->buckets + BUCKET_IDX(is, when_bucket, wstats),
&wstats->stat_info);
if (when_bucket != 0) { /* last bucket isn't also bottom bucket */
int b;
/* Add all of "bottom" bucket if we are looking at a subset of the
full interval, or a proportion if we are adding full interval. */
cws_add_proportion_to_sum(
(when_bucket == wstats->nbuckets - 1 ? bottom_proportion : 1.0), sum,
is->buckets + is->bottom_bucket, &wstats->stat_info);
/* Add all the remaining buckets (everything but top and bottom). */
for (b = 1; b < when_bucket; b++) {
cws_add_bucket_to_sum(sum, is->buckets + BUCKET_IDX(is, b, wstats),
&wstats->stat_info);
}
}
}
}
void census_window_stats_destroy(window_stats *wstats) {
int i;
GPR_ASSERT(wstats->interval_stats != NULL);
for (i = 0; i < wstats->nintervals; i++) {
int b;
for (b = 0; b < wstats->nbuckets; b++) {
gpr_free(wstats->interval_stats[i].buckets[b].statistic);
}
gpr_free(wstats->interval_stats[i].buckets);
}
gpr_free(wstats->interval_stats);
/* Ensure any use-after free triggers assert. */
wstats->interval_stats = NULL;
gpr_free(wstats);
}
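
A quick way to sanity-check the bucket sizing above is to mirror the arithmetic from census_window_stats_create() in isolation. The following is a minimal, self-contained sketch with illustrative values (a one-minute interval and granularity 4); it is not part of the gRPC sources:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* One-minute interval, granularity 4 => 5 buckets (granularity + 1). */
  const int64_t size_ns = 60LL * 1000 * 1000 * 1000;
  const int granularity = 4;
  int64_t width = size_ns / granularity; /* 15,000,000,000 ns per bucket */
  /* Round the width up if the interval does not divide evenly. */
  if (size_ns - (int64_t)(granularity + 1) * width > 0) width += 1;
  int64_t top = size_ns + width; /* largest storable time + 1 ns */
  printf("width=%" PRId64 " top=%" PRId64 "\n", width, top);
  return 0;
}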

@ -1,166 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_CENSUS_WINDOW_STATS_H
#define GRPC_CORE_EXT_CENSUS_WINDOW_STATS_H
#include <grpc/support/time.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Keep rolling sums of a user-defined statistic (containing a number of
measurements) over a number of time intervals ("windows"). For example,
you can use a window_stats object to answer questions such as
"Approximately how many RPCs/s did I receive over the past minute, and
approximately how many bytes did I send out over that period?".
The type of data to record, and the time intervals to keep are specified
when creating the object via a call to census_window_stats_create().
A window's interval is divided into one or more "buckets"; the interval
must be divisible by the number of buckets. Internally, these buckets
control the granularity of window_stats' measurements. Increasing the
number of buckets lets the object respond more quickly to changes in the
overall rate of data added into the object, at the cost of additional
memory usage.
Here's some code which keeps one minute/hour measurements for two values
(latency in seconds and bytes transferred), with each interval divided into
4 buckets.
typedef struct my_stat {
double latency;
int bytes;
} my_stat;
void add_my_stat(void* base, const void* addme) {
my_stat* b = (my_stat*)base;
const my_stat* a = (const my_stat*)addme;
b->latency += a->latency;
b->bytes += a->bytes;
}
void add_proportion_my_stat(double p, void* base, const void* addme) {
my_stat* b = (my_stat*)base;
const my_stat* a = (const my_stat*)addme;
b->latency += p * a->latency;
b->bytes += p * a->bytes;
}
#define kNumIntervals 2
#define kMinInterval 0
#define kHourInterval 1
#define kNumBuckets 4
const struct census_window_stats_stat_info kMyStatInfo
= { sizeof(my_stat), NULL, add_my_stat, add_proportion_my_stat };
gpr_timespec intervals[kNumIntervals] = {{60, 0}, {3600, 0}};
my_stat stat;
my_stat sums[kNumIntervals];
census_window_stats_sums result[kNumIntervals];
struct census_window_stats* stats
= census_window_stats_create(kNumIntervals, intervals, kNumBuckets,
&kMyStatInfo);
// Record a new event, taking 15.3ms, transferring 1784 bytes.
stat.latency = 0.153;
stat.bytes = 1784;
census_window_stats_add(stats, gpr_now(GPR_CLOCK_REALTIME), &stat);
// Get sums and print them out
result[kMinInterval].statistic = &sums[kMinInterval];
result[kHourInterval].statistic = &sums[kHourInterval];
census_window_stats_get_sums(stats, gpr_now(GPR_CLOCK_REALTIME), result);
printf("%d events/min, average time %gs, average bytes %g\n",
result[kMinInterval].count,
(my_stat*)result[kMinInterval].statistic->latency /
result[kMinInterval].count,
(my_stat*)result[kMinInterval].statistic->bytes /
result[kMinInterval].count
);
printf("%d events/hr, average time %gs, average bytes %g\n",
result[kHourInterval].count,
(my_stat*)result[kHourInterval].statistic->latency /
result[kHourInterval].count,
(my_stat*)result[kHourInterval].statistic->bytes /
result[kHourInterval].count
);
*/
/* Opaque structure for representing window_stats object */
struct census_window_stats;
/* Information provided by API user on the information they want to record */
typedef struct census_window_stats_stat_info {
/* Number of bytes in user-defined object. */
size_t stat_size;
/* Function to initialize a user-defined statistics object. If this is set
* to NULL, then the object will be zero-initialized. */
void (*stat_initialize)(void *stat);
/* Function to add one user-defined statistics object ('addme') to 'base' */
void (*stat_add)(void *base, const void *addme);
/* As for the previous function, but only add a proportion 'p'. This API will
currently only use 'p' values in the range [0,1], but other values are
possible in the future, and should be supported. */
void (*stat_add_proportion)(double p, void *base, const void *addme);
} census_window_stats_stat_info;
/* Create a new window_stats object. 'nintervals' is the number of
'intervals', and must be >= 1. 'granularity' is the number of buckets, with
a larger number using more memory, but providing greater accuracy of
results. 'granularity' should be > 2. We also require that each interval be
at least 10 * 'granularity' nanoseconds in size. 'stat_info' contains
information about the statistic to be gathered. Intervals greater than ~292
years will be treated as essentially infinite in size. This function will
GPR_ASSERT() if the object cannot be created or any of the parameters have
invalid values. This function is thread-safe. */
struct census_window_stats *census_window_stats_create(
int nintervals, const gpr_timespec intervals[], int granularity,
const census_window_stats_stat_info *stat_info);
/* Add a new measurement (in 'stat_value'), as of a given time ('when').
This function is thread-compatible. */
void census_window_stats_add(struct census_window_stats *wstats,
const gpr_timespec when, const void *stat_value);
/* Structure used to record a single interval's sum for a given statistic */
typedef struct census_window_stats_sum {
/* Total count of samples. Note that because some internal interpolation
is performed, the count of samples returned for each interval may not be an
integral value. */
double count;
/* Sum for statistic */
void *statistic;
} census_window_stats_sums;
/* Retrieve a set of all values stored in a window_stats object 'wstats'. The
number of 'sums' MUST be the same as the number 'nintervals' used in
census_window_stats_create(). This function is thread-compatible. */
void census_window_stats_get_sums(const struct census_window_stats *wstats,
const gpr_timespec when,
struct census_window_stats_sum sums[]);
/* Destroy a window_stats object. Once this function has been called, the
object will no longer be usable from any of the above functions (and
calling them will most likely result in a NULL-pointer dereference or
assertion failure). This function is thread-compatible. */
void census_window_stats_destroy(struct census_window_stats *wstats);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_CENSUS_WINDOW_STATS_H */

@ -0,0 +1,158 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "src/core/ext/filters/client_channel/backup_poller.h"
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/support/env.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/completion_queue.h"
#define DEFAULT_POLL_INTERVAL_MS 5000
typedef struct backup_poller {
grpc_timer polling_timer;
grpc_closure run_poller_closure;
grpc_closure shutdown_closure;
gpr_mu* pollset_mu;
grpc_pollset* pollset; // guarded by pollset_mu
bool shutting_down; // guarded by pollset_mu
gpr_refcount refs;
gpr_refcount shutdown_refs;
} backup_poller;
static gpr_once g_once = GPR_ONCE_INIT;
static gpr_mu g_poller_mu;
static backup_poller* g_poller = NULL; // guarded by g_poller_mu
// g_poll_interval_ms is set only once, the first time
// grpc_client_channel_start_backup_polling() is called; after that it is
// treated as const.
static int g_poll_interval_ms = DEFAULT_POLL_INTERVAL_MS;
static void init_globals() {
gpr_mu_init(&g_poller_mu);
char* env = gpr_getenv("GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS");
if (env != NULL) {
int poll_interval_ms = gpr_parse_nonnegative_int(env);
if (poll_interval_ms == -1) {
gpr_log(GPR_ERROR,
"Invalid GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS: %s, "
"default value %d will be used.",
env, g_poll_interval_ms);
} else {
g_poll_interval_ms = poll_interval_ms;
}
}
gpr_free(env);
}
static void backup_poller_shutdown_unref(grpc_exec_ctx* exec_ctx,
backup_poller* p) {
if (gpr_unref(&p->shutdown_refs)) {
grpc_pollset_destroy(exec_ctx, p->pollset);
gpr_free(p->pollset);
gpr_free(p);
}
}
static void done_poller(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
backup_poller_shutdown_unref(exec_ctx, (backup_poller*)arg);
}
static void g_poller_unref(grpc_exec_ctx* exec_ctx) {
if (gpr_unref(&g_poller->refs)) {
gpr_mu_lock(&g_poller_mu);
backup_poller* p = g_poller;
g_poller = NULL;
gpr_mu_unlock(&g_poller_mu);
gpr_mu_lock(p->pollset_mu);
p->shutting_down = true;
grpc_pollset_shutdown(exec_ctx, p->pollset,
GRPC_CLOSURE_INIT(&p->shutdown_closure, done_poller,
p, grpc_schedule_on_exec_ctx));
gpr_mu_unlock(p->pollset_mu);
grpc_timer_cancel(exec_ctx, &p->polling_timer);
}
}
static void run_poller(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
backup_poller* p = (backup_poller*)arg;
if (error != GRPC_ERROR_NONE) {
if (error != GRPC_ERROR_CANCELLED) {
GRPC_LOG_IF_ERROR("run_poller", GRPC_ERROR_REF(error));
}
backup_poller_shutdown_unref(exec_ctx, p);
return;
}
gpr_mu_lock(p->pollset_mu);
if (p->shutting_down) {
gpr_mu_unlock(p->pollset_mu);
backup_poller_shutdown_unref(exec_ctx, p);
return;
}
grpc_error* err = grpc_pollset_work(exec_ctx, p->pollset, NULL,
grpc_exec_ctx_now(exec_ctx));
gpr_mu_unlock(p->pollset_mu);
GRPC_LOG_IF_ERROR("Run client channel backup poller", err);
grpc_timer_init(exec_ctx, &p->polling_timer,
grpc_exec_ctx_now(exec_ctx) + g_poll_interval_ms,
&p->run_poller_closure);
}
void grpc_client_channel_start_backup_polling(
grpc_exec_ctx* exec_ctx, grpc_pollset_set* interested_parties) {
gpr_once_init(&g_once, init_globals);
if (g_poll_interval_ms == 0) {
return;
}
gpr_mu_lock(&g_poller_mu);
if (g_poller == NULL) {
g_poller = (backup_poller*)gpr_zalloc(sizeof(backup_poller));
g_poller->pollset = (grpc_pollset*)gpr_zalloc(grpc_pollset_size());
g_poller->shutting_down = false;
grpc_pollset_init(g_poller->pollset, &g_poller->pollset_mu);
gpr_ref_init(&g_poller->refs, 0);
// one for timer cancellation, one for pollset shutdown
gpr_ref_init(&g_poller->shutdown_refs, 2);
GRPC_CLOSURE_INIT(&g_poller->run_poller_closure, run_poller, g_poller,
grpc_schedule_on_exec_ctx);
grpc_timer_init(exec_ctx, &g_poller->polling_timer,
grpc_exec_ctx_now(exec_ctx) + g_poll_interval_ms,
&g_poller->run_poller_closure);
}
gpr_ref(&g_poller->refs);
gpr_mu_unlock(&g_poller_mu);
grpc_pollset_set_add_pollset(exec_ctx, interested_parties, g_poller->pollset);
}
void grpc_client_channel_stop_backup_polling(
grpc_exec_ctx* exec_ctx, grpc_pollset_set* interested_parties) {
if (g_poll_interval_ms == 0) {
return;
}
grpc_pollset_set_del_pollset(exec_ctx, interested_parties, g_poller->pollset);
g_poller_unref(exec_ctx);
}
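
Note that init_globals() above reads GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS exactly once (guarded by gpr_once), so the variable has to be set before the first client channel triggers backup polling; a value of 0 disables backup polling entirely. A minimal sketch of hypothetical application code, using POSIX setenv() rather than any gRPC helper:

#include <stdlib.h>
#include <grpc/grpc.h>

int main(void) {
  /* Must happen before any channel is created: the value is cached the first
     time grpc_client_channel_start_backup_polling() runs. A value of 0 would
     disable backup polling. */
  setenv("GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS", "1000", 1 /* overwrite */);
  grpc_init();
  /* ... create channels and issue RPCs here ... */
  grpc_shutdown();
  return 0;
}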

@ -16,22 +16,19 @@
*
*/
#ifndef GRPC_CORE_EXT_CENSUS_GRPC_FILTER_H
#define GRPC_CORE_EXT_CENSUS_GRPC_FILTER_H
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_BACKUP_POLLER_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_BACKUP_POLLER_H
#include <grpc/grpc.h>
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Start polling \a interested_parties periodically in the timer thread */
void grpc_client_channel_start_backup_polling(
grpc_exec_ctx* exec_ctx, grpc_pollset_set* interested_parties);
/* Census filters: provides tracing and stats collection functionalities. It
needs to reside right below the surface filter in the channel stack. */
extern const grpc_channel_filter grpc_client_census_filter;
extern const grpc_channel_filter grpc_server_census_filter;
/* Stop polling \a interested_parties */
void grpc_client_channel_stop_backup_polling(
grpc_exec_ctx* exec_ctx, grpc_pollset_set* interested_parties);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_CENSUS_GRPC_FILTER_H */
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_BACKUP_POLLER_H */

@ -31,6 +31,7 @@
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
#include "src/core/ext/filters/client_channel/backup_poller.h"
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
@ -712,6 +713,7 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
chand->interested_parties = grpc_pollset_set_create();
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
"client_channel");
grpc_client_channel_start_backup_polling(exec_ctx, chand->interested_parties);
// Record client channel factory.
const grpc_arg *arg = grpc_channel_args_find(args->channel_args,
GRPC_ARG_CLIENT_CHANNEL_FACTORY);
@ -790,6 +792,7 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
if (chand->method_params_table != NULL) {
grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
}
grpc_client_channel_stop_backup_polling(exec_ctx, chand->interested_parties);
grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
grpc_pollset_set_destroy(exec_ctx, chand->interested_parties);
GRPC_COMBINER_UNREF(exec_ctx, chand->combiner, "client_channel");
@ -898,7 +901,7 @@ static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx,
call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: failing %" PRIdPTR " pending batches: %s",
"chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
elem->channel_data, calld, calld->waiting_for_pick_batches_count,
grpc_error_string(error));
}
@ -940,7 +943,7 @@ static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx,
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIdPTR
gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIuPTR
" pending batches to subchannel_call=%p",
chand, calld, calld->waiting_for_pick_batches_count,
calld->subchannel_call);
@ -1205,6 +1208,9 @@ static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
"Pick cancelled", &error, 1));
}
static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem);
static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_error *error) {
@ -1228,7 +1234,7 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
chand, calld);
}
async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
} else {
} else if (chand->lb_policy != NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
chand, calld);
@ -1242,6 +1248,30 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_NONE);
}
}
// TODO(roth): It should be impossible for chand->lb_policy to be NULL
// here, so the rest of this code should never actually be executed.
// However, we have reports of a crash on iOS that triggers this case,
// so we are temporarily adding this to restore branches that were
// removed in https://github.com/grpc/grpc/pull/12297. Need to figure
// out what is actually causing this to occur and then figure out the
// right way to deal with it.
else if (chand->resolver != NULL) {
// No LB policy, so try again.
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: resolver returned but no LB policy, "
"trying again",
chand, calld);
}
pick_after_resolver_result_start_locked(exec_ctx, elem);
} else {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver disconnected", chand,
calld);
}
async_pick_done_locked(
exec_ctx, elem, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
}
}
static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
@ -1599,8 +1629,8 @@ int grpc_client_channel_num_external_connectivity_watchers(
return count;
}
static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
static void on_external_watch_complete_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
external_connectivity_watcher *w = (external_connectivity_watcher *)arg;
grpc_closure *follow_up = w->on_complete;
grpc_polling_entity_del_from_pollset_set(exec_ctx, &w->pollent,
@ -1619,8 +1649,8 @@ static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (w->state != NULL) {
external_connectivity_watcher_list_append(w->chand, w);
GRPC_CLOSURE_RUN(exec_ctx, w->watcher_timer_init, GRPC_ERROR_NONE);
GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete, w,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete_locked, w,
grpc_combiner_scheduler(w->chand->combiner));
grpc_connectivity_state_notify_on_state_change(
exec_ctx, &w->chand->state_tracker, w->state, &w->my_closure);
} else {

@ -611,7 +611,6 @@ static void update_lb_connectivity_status_locked(
case GRPC_CHANNEL_SHUTDOWN:
GPR_ASSERT(rr_state_error != GRPC_ERROR_NONE);
break;
case GRPC_CHANNEL_INIT:
case GRPC_CHANNEL_IDLE:
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_READY:
@ -1027,15 +1026,19 @@ static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
while (pp != NULL) {
pending_pick *next = pp->next;
*pp->target = NULL;
GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
GRPC_ERROR_NONE);
GRPC_CLOSURE_SCHED(
exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
gpr_free(pp);
pp = next;
}
while (pping != NULL) {
pending_ping *next = pping->next;
GRPC_CLOSURE_SCHED(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
GRPC_ERROR_NONE);
GRPC_CLOSURE_SCHED(
exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
gpr_free(pping);
pping = next;
}
}
@ -1786,7 +1789,6 @@ static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
// embedded RR policy. Note that the current RR policy, if any, will stay in
// effect until an update from the new lb_call is received.
switch (glb_policy->lb_channel_connectivity) {
case GRPC_CHANNEL_INIT:
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_TRANSIENT_FAILURE: {
/* resub. */
@ -1803,9 +1805,8 @@ static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
break;
}
case GRPC_CHANNEL_IDLE:
// lb channel inactive (probably shutdown prior to update). Restart lb
// call to kick the lb channel into gear.
GPR_ASSERT(glb_policy->lb_call == NULL);
// lb channel inactive (probably shutdown prior to update). Restart lb
// call to kick the lb channel into gear.
/* fallthrough */
case GRPC_CHANNEL_READY:
if (glb_policy->lb_call != NULL) {

@ -20,6 +20,7 @@
#include <grpc/support/alloc.h>
#include "src/core/ext/filters/client_channel/lb_policy/subchannel_list.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/ext/filters/client_channel/subchannel_index.h"
@ -42,99 +43,73 @@ typedef struct {
/** base policy: must be first */
grpc_lb_policy base;
/** all our subchannels */
grpc_subchannel **subchannels;
grpc_subchannel **new_subchannels;
size_t num_subchannels;
size_t num_new_subchannels;
grpc_closure connectivity_changed;
/** remaining members are protected by the combiner */
/** the selected channel */
grpc_connected_subchannel *selected;
/** the subchannel key for \a selected, or NULL if \a selected not set */
const grpc_subchannel_key *selected_key;
grpc_lb_subchannel_list *subchannel_list;
/** latest pending subchannel list */
grpc_lb_subchannel_list *latest_pending_subchannel_list;
/** selected subchannel in \a subchannel_list */
grpc_lb_subchannel_data *selected;
/** have we started picking? */
bool started_picking;
/** are we shut down? */
bool shutdown;
/** are we updating the selected subchannel? */
bool updating_selected;
/** are we updating the subchannel candidates? */
bool updating_subchannels;
/** args from the latest update received while already updating, or NULL */
grpc_lb_policy_args *pending_update_args;
/** which subchannel are we watching? */
size_t checking_subchannel;
/** what is the connectivity of that channel? */
grpc_connectivity_state checking_connectivity;
/** list of picks that are waiting on connectivity */
pending_pick *pending_picks;
/** our connectivity state tracker */
grpc_connectivity_state_tracker state_tracker;
} pick_first_lb_policy;
static void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
GPR_ASSERT(p->subchannel_list == NULL);
GPR_ASSERT(p->latest_pending_subchannel_list == NULL);
GPR_ASSERT(p->pending_picks == NULL);
for (size_t i = 0; i < p->num_subchannels; i++) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i], "pick_first_destroy");
}
if (p->selected != NULL) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, p->selected,
"picked_first_destroy");
}
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
grpc_subchannel_index_unref();
if (p->pending_update_args != NULL) {
grpc_channel_args_destroy(exec_ctx, p->pending_update_args->args);
gpr_free(p->pending_update_args);
}
gpr_free(p->subchannels);
gpr_free(p->new_subchannels);
gpr_free(p);
grpc_subchannel_index_unref();
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG, "Pick First %p destroyed.", (void *)p);
}
}
static void pf_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
p->shutdown = true;
pp = p->pending_picks;
p->pending_picks = NULL;
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"), "shutdown");
/* cancel subscription */
if (p->selected != NULL) {
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, p->selected, NULL, NULL, &p->connectivity_changed);
} else if (p->num_subchannels > 0 && p->started_picking) {
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel], NULL, NULL,
&p->connectivity_changed);
static void shutdown_locked(grpc_exec_ctx *exec_ctx, pick_first_lb_policy *p,
grpc_error *error) {
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG, "Pick First %p Shutting down", p);
}
while (pp != NULL) {
pending_pick *next = pp->next;
p->shutdown = true;
pending_pick *pp;
while ((pp = p->pending_picks) != NULL) {
p->pending_picks = pp->next;
*pp->target = NULL;
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_REF(error));
gpr_free(pp);
pp = next;
}
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
"shutdown");
if (p->subchannel_list != NULL) {
grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
"pf_shutdown");
p->subchannel_list = NULL;
}
if (p->latest_pending_subchannel_list != NULL) {
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->latest_pending_subchannel_list, "pf_shutdown");
p->latest_pending_subchannel_list = NULL;
}
GRPC_ERROR_UNREF(error);
}
static void pf_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
shutdown_locked(exec_ctx, (pick_first_lb_policy *)pol,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"));
}
static void pf_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_connected_subchannel **target,
grpc_error *error) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
pp = p->pending_picks;
pending_pick *pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
pending_pick *next = pp->next;
@ -158,8 +133,7 @@ static void pf_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
uint32_t initial_metadata_flags_eq,
grpc_error *error) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
pp = p->pending_picks;
pending_pick *pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
pending_pick *next = pp->next;
@ -181,15 +155,12 @@ static void pf_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
static void start_picking_locked(grpc_exec_ctx *exec_ctx,
pick_first_lb_policy *p) {
p->started_picking = true;
if (p->subchannels != NULL) {
GPR_ASSERT(p->num_subchannels > 0);
p->checking_subchannel = 0;
p->checking_connectivity = GRPC_CHANNEL_IDLE;
GRPC_LB_POLICY_WEAK_REF(&p->base, "pick_first_connectivity");
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel],
p->base.interested_parties, &p->checking_connectivity,
&p->connectivity_changed);
if (p->subchannel_list != NULL && p->subchannel_list->num_subchannels > 0) {
p->subchannel_list->checking_subchannel = 0;
grpc_lb_subchannel_list_ref_for_connectivity_watch(
p->subchannel_list, "connectivity_watch+start_picking");
grpc_lb_subchannel_data_start_connectivity_watch(
exec_ctx, &p->subchannel_list->subchannels[0]);
}
}
@ -206,19 +177,17 @@ static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_call_context_element *context, void **user_data,
grpc_closure *on_complete) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
/* Check atomically for a selected channel */
// If we have a selected subchannel already, return synchronously.
if (p->selected != NULL) {
*target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected, "picked");
*target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected->connected_subchannel,
"picked");
return 1;
}
/* No subchannel selected yet, so try again */
// No subchannel selected yet, so handle asynchronously.
if (!p->started_picking) {
start_picking_locked(exec_ctx, p);
}
pp = (pending_pick *)gpr_malloc(sizeof(*pp));
pending_pick *pp = (pending_pick *)gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->target = target;
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
@ -227,19 +196,15 @@ static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
return 0;
}
static void destroy_subchannels_locked(grpc_exec_ctx *exec_ctx,
pick_first_lb_policy *p) {
size_t num_subchannels = p->num_subchannels;
grpc_subchannel **subchannels = p->subchannels;
p->num_subchannels = 0;
p->subchannels = NULL;
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "destroy_subchannels");
for (size_t i = 0; i < num_subchannels; i++) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannels[i], "pick_first");
static void destroy_unselected_subchannels_locked(grpc_exec_ctx *exec_ctx,
pick_first_lb_policy *p) {
for (size_t i = 0; i < p->subchannel_list->num_subchannels; ++i) {
grpc_lb_subchannel_data *sd = &p->subchannel_list->subchannels[i];
if (p->selected != sd) {
grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
"selected_different_subchannel");
}
}
gpr_free(subchannels);
}
static grpc_connectivity_state pf_check_connectivity_locked(
@ -261,46 +226,24 @@ static void pf_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_closure *closure) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
if (p->selected) {
grpc_connected_subchannel_ping(exec_ctx, p->selected, closure);
grpc_connected_subchannel_ping(exec_ctx, p->selected->connected_subchannel,
closure);
} else {
GRPC_CLOSURE_SCHED(exec_ctx, closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected"));
}
}
/* unsubscribe all subchannels */
static void stop_connectivity_watchers(grpc_exec_ctx *exec_ctx,
pick_first_lb_policy *p) {
if (p->num_subchannels > 0) {
GPR_ASSERT(p->selected == NULL);
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG, "Pick First %p unsubscribing from subchannel %p",
(void *)p, (void *)p->subchannels[p->checking_subchannel]);
}
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel], NULL, NULL,
&p->connectivity_changed);
p->updating_subchannels = true;
} else if (p->selected != NULL) {
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG,
"Pick First %p unsubscribing from selected subchannel %p",
(void *)p, (void *)p->selected);
}
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, p->selected, NULL, NULL, &p->connectivity_changed);
p->updating_selected = true;
}
}
static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
/* true upon success */
static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_args *args) {
pick_first_lb_policy *p = (pick_first_lb_policy *)policy;
const grpc_arg *arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
if (p->subchannels == NULL) {
if (p->subchannel_list == NULL) {
// If we don't have a current subchannel list, go into TRANSIENT FAILURE.
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
@ -317,270 +260,222 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
}
const grpc_lb_addresses *addresses =
(const grpc_lb_addresses *)arg->value.pointer.p;
if (addresses->num_addresses == 0) {
// Empty update. Unsubscribe from all current subchannels and put the
// channel in TRANSIENT_FAILURE.
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO, "Pick First %p received update with %lu addresses",
(void *)p, (unsigned long)addresses->num_addresses);
}
grpc_lb_subchannel_list *subchannel_list = grpc_lb_subchannel_list_create(
exec_ctx, &p->base, &grpc_lb_pick_first_trace, addresses, args,
pf_connectivity_changed_locked);
if (subchannel_list->num_subchannels == 0) {
// Empty update or no valid subchannels. Unsubscribe from all current
// subchannels and put the channel in TRANSIENT_FAILURE.
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"),
"pf_update_empty");
stop_connectivity_watchers(exec_ctx, p);
if (p->subchannel_list != NULL) {
grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
"sl_shutdown_empty_update");
}
p->subchannel_list = subchannel_list; // Empty list.
p->selected = NULL;
return;
}
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO, "Pick First %p received update with %lu addresses",
(void *)p, (unsigned long)addresses->num_addresses);
}
grpc_subchannel_args *sc_args = (grpc_subchannel_args *)gpr_zalloc(
sizeof(*sc_args) * addresses->num_addresses);
/* We remove the following keys in order for subchannel keys belonging to
* subchannels point to the same address to match. */
static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
GRPC_ARG_LB_ADDRESSES};
size_t sc_args_count = 0;
/* Create list of subchannel args for new addresses in \a args. */
for (size_t i = 0; i < addresses->num_addresses; i++) {
// If there were any balancer, we would have chosen grpclb policy instead.
GPR_ASSERT(!addresses->addresses[i].is_balancer);
if (addresses->addresses[i].user_data != NULL) {
gpr_log(GPR_ERROR,
"This LB policy doesn't support user data. It will be ignored");
if (p->selected == NULL) {
// We don't yet have a selected subchannel, so replace the current
// subchannel list immediately.
if (p->subchannel_list != NULL) {
grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
"pf_update_before_selected");
}
grpc_arg addr_arg =
grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
grpc_channel_args *new_args = grpc_channel_args_copy_and_add_and_remove(
args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg,
1);
gpr_free(addr_arg.value.string);
sc_args[sc_args_count++].args = new_args;
}
/* Check if p->selected is amongst them. If so, we are done. */
if (p->selected != NULL) {
GPR_ASSERT(p->selected_key != NULL);
for (size_t i = 0; i < sc_args_count; i++) {
grpc_subchannel_key *ith_sc_key = grpc_subchannel_key_create(&sc_args[i]);
const bool found_selected =
grpc_subchannel_key_compare(p->selected_key, ith_sc_key) == 0;
grpc_subchannel_key_destroy(exec_ctx, ith_sc_key);
if (found_selected) {
p->subchannel_list = subchannel_list;
} else {
// We do have a selected subchannel.
// Check if it's present in the new list. If so, we're done.
for (size_t i = 0; i < subchannel_list->num_subchannels; ++i) {
grpc_lb_subchannel_data *sd = &subchannel_list->subchannels[i];
if (sd->subchannel == p->selected->subchannel) {
// The currently selected subchannel is in the update: we are done.
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO,
"Pick First %p found already selected subchannel %p amongst "
"updates. Update done.",
(void *)p, (void *)p->selected);
"Pick First %p found already selected subchannel %p "
"at update index %" PRIuPTR " of %" PRIuPTR "; update done",
p, p->selected->subchannel, i,
subchannel_list->num_subchannels);
}
for (size_t j = 0; j < sc_args_count; j++) {
grpc_channel_args_destroy(exec_ctx,
(grpc_channel_args *)sc_args[j].args);
grpc_lb_subchannel_list_ref_for_connectivity_watch(
subchannel_list, "connectivity_watch+replace_selected");
grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
if (p->subchannel_list != NULL) {
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->subchannel_list, "pf_update_includes_selected");
}
p->subchannel_list = subchannel_list;
if (p->selected->connected_subchannel != NULL) {
sd->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
p->selected->connected_subchannel, "pf_update_includes_selected");
}
p->selected = sd;
destroy_unselected_subchannels_locked(exec_ctx, p);
// If there was a previously pending update (which may or may
// not have contained the currently selected subchannel), drop
// it, so that it doesn't override what we've done here.
if (p->latest_pending_subchannel_list != NULL) {
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->latest_pending_subchannel_list,
"pf_update_includes_selected+outdated");
p->latest_pending_subchannel_list = NULL;
}
gpr_free(sc_args);
return;
}
}
}
// We only check for already running updates here because if the previous
// steps were successful, the update can be considered done without any
// interference (ie, no callbacks were scheduled).
if (p->updating_selected || p->updating_subchannels) {
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO,
"Update already in progress for pick first %p. Deferring update.",
(void *)p);
}
if (p->pending_update_args != NULL) {
grpc_channel_args_destroy(exec_ctx, p->pending_update_args->args);
gpr_free(p->pending_update_args);
}
p->pending_update_args =
(grpc_lb_policy_args *)gpr_zalloc(sizeof(*p->pending_update_args));
p->pending_update_args->client_channel_factory =
args->client_channel_factory;
p->pending_update_args->args = grpc_channel_args_copy(args->args);
p->pending_update_args->combiner = args->combiner;
return;
}
/* Create the subchannels for the new subchannel args/addresses. */
grpc_subchannel **new_subchannels =
(grpc_subchannel **)gpr_zalloc(sizeof(*new_subchannels) * sc_args_count);
size_t num_new_subchannels = 0;
for (size_t i = 0; i < sc_args_count; i++) {
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
exec_ctx, args->client_channel_factory, &sc_args[i]);
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
char *address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
gpr_log(GPR_INFO,
"Pick First %p created subchannel %p for address uri %s",
(void *)p, (void *)subchannel, address_uri);
gpr_free(address_uri);
// Not keeping the previous selected subchannel, so set the latest
// pending subchannel list to the new subchannel list. We will wait
// for it to report READY before swapping it into the current
// subchannel list.
if (p->latest_pending_subchannel_list != NULL) {
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG,
"Pick First %p Shutting down latest pending subchannel list "
"%p, about to be replaced by newer latest %p",
(void *)p, (void *)p->latest_pending_subchannel_list,
(void *)subchannel_list);
}
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->latest_pending_subchannel_list,
"sl_outdated_dont_smash");
}
grpc_channel_args_destroy(exec_ctx, (grpc_channel_args *)sc_args[i].args);
if (subchannel != NULL) new_subchannels[num_new_subchannels++] = subchannel;
p->latest_pending_subchannel_list = subchannel_list;
}
gpr_free(sc_args);
if (num_new_subchannels == 0) {
gpr_free(new_subchannels);
// Empty update. Unsubscribe from all current subchannels and put the
// channel in TRANSIENT_FAILURE.
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("No valid addresses in update"),
"pf_update_no_valid_addresses");
stop_connectivity_watchers(exec_ctx, p);
return;
}
/* Destroy the current subchannels. Repurpose pf_shutdown/destroy. */
stop_connectivity_watchers(exec_ctx, p);
/* Save new subchannels. The switch over will happen in
* pf_connectivity_changed_locked */
if (p->updating_selected || p->updating_subchannels) {
p->num_new_subchannels = num_new_subchannels;
p->new_subchannels = new_subchannels;
} else { /* nothing is updating. Get things moving from here */
p->num_subchannels = num_new_subchannels;
p->subchannels = new_subchannels;
p->new_subchannels = NULL;
p->num_new_subchannels = 0;
if (p->started_picking) {
p->checking_subchannel = 0;
p->checking_connectivity = GRPC_CHANNEL_IDLE;
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel],
p->base.interested_parties, &p->checking_connectivity,
&p->connectivity_changed);
}
// If we've started picking, start trying to connect to the first
// subchannel in the new list.
if (p->started_picking) {
grpc_lb_subchannel_list_ref_for_connectivity_watch(
subchannel_list, "connectivity_watch+update");
grpc_lb_subchannel_data_start_connectivity_watch(
exec_ctx, &subchannel_list->subchannels[0]);
}
}
static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
pick_first_lb_policy *p = (pick_first_lb_policy *)arg;
grpc_subchannel *selected_subchannel;
pending_pick *pp;
grpc_lb_subchannel_data *sd = (grpc_lb_subchannel_data *)arg;
pick_first_lb_policy *p = (pick_first_lb_policy *)sd->subchannel_list->policy;
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(
GPR_DEBUG,
"Pick First %p connectivity changed. Updating selected: %d; Updating "
"subchannels: %d; Checking %lu index (%lu total); State: %d; ",
(void *)p, p->updating_selected, p->updating_subchannels,
(unsigned long)p->checking_subchannel,
(unsigned long)p->num_subchannels, p->checking_connectivity);
}
bool restart = false;
if (p->updating_selected && error != GRPC_ERROR_NONE) {
/* Captured the unsubscription for p->selected */
GPR_ASSERT(p->selected != NULL);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, p->selected,
"pf_update_connectivity");
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG, "Pick First %p unreffing selected subchannel %p",
(void *)p, (void *)p->selected);
}
p->updating_selected = false;
if (p->num_new_subchannels == 0) {
p->selected = NULL;
return;
}
restart = true;
gpr_log(GPR_DEBUG,
"Pick First %p connectivity changed for subchannel %p (%" PRIuPTR
" of %" PRIuPTR
"), subchannel_list %p: state=%s p->shutdown=%d "
"sd->subchannel_list->shutting_down=%d error=%s",
(void *)p, (void *)sd->subchannel,
sd->subchannel_list->checking_subchannel,
sd->subchannel_list->num_subchannels, (void *)sd->subchannel_list,
grpc_connectivity_state_name(sd->pending_connectivity_state_unsafe),
p->shutdown, sd->subchannel_list->shutting_down,
grpc_error_string(error));
}
if (p->updating_subchannels && error != GRPC_ERROR_NONE) {
/* Captured the unsubscription for the checking subchannel */
GPR_ASSERT(p->selected == NULL);
for (size_t i = 0; i < p->num_subchannels; i++) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i],
"pf_update_connectivity");
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG, "Pick First %p unreffing subchannel %p", (void *)p,
(void *)p->subchannels[i]);
}
}
gpr_free(p->subchannels);
p->subchannels = NULL;
p->num_subchannels = 0;
p->updating_subchannels = false;
if (p->num_new_subchannels == 0) return;
restart = true;
}
if (restart) {
p->selected = NULL;
p->selected_key = NULL;
GPR_ASSERT(p->new_subchannels != NULL);
GPR_ASSERT(p->num_new_subchannels > 0);
p->num_subchannels = p->num_new_subchannels;
p->subchannels = p->new_subchannels;
p->num_new_subchannels = 0;
p->new_subchannels = NULL;
if (p->started_picking) {
/* If we were picking, continue to do so over the new subchannels,
* starting from the 0th index. */
p->checking_subchannel = 0;
p->checking_connectivity = GRPC_CHANNEL_IDLE;
/* reuses the weak ref from start_picking_locked */
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel],
p->base.interested_parties, &p->checking_connectivity,
&p->connectivity_changed);
}
if (p->pending_update_args != NULL) {
const grpc_lb_policy_args *args = p->pending_update_args;
p->pending_update_args = NULL;
pf_update_locked(exec_ctx, &p->base, args);
}
// If the policy is shutting down, unref and return.
if (p->shutdown) {
grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, "pf_shutdown");
grpc_lb_subchannel_list_unref_for_connectivity_watch(
exec_ctx, sd->subchannel_list, "pf_shutdown");
return;
}
GRPC_ERROR_REF(error);
if (p->shutdown) {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
GRPC_ERROR_UNREF(error);
// If the subchannel list is shutting down, stop watching.
if (sd->subchannel_list->shutting_down || error == GRPC_ERROR_CANCELLED) {
grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, "pf_sl_shutdown");
grpc_lb_subchannel_list_unref_for_connectivity_watch(
exec_ctx, sd->subchannel_list, "pf_sl_shutdown");
return;
} else if (p->selected != NULL) {
if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
/* if the selected channel goes bad, we're done */
p->checking_connectivity = GRPC_CHANNEL_SHUTDOWN;
}
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
p->checking_connectivity, GRPC_ERROR_REF(error),
"selected_changed");
if (p->checking_connectivity != GRPC_CHANNEL_SHUTDOWN) {
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, p->selected, p->base.interested_parties,
&p->checking_connectivity, &p->connectivity_changed);
}
// If we're still here, the notification must be for a subchannel in
// either the current or latest pending subchannel lists.
GPR_ASSERT(sd->subchannel_list == p->subchannel_list ||
sd->subchannel_list == p->latest_pending_subchannel_list);
// Update state.
sd->curr_connectivity_state = sd->pending_connectivity_state_unsafe;
// Handle updates for the currently selected subchannel.
if (p->selected == sd) {
// If the new state is anything other than READY and there is a
// pending update, switch to the pending update.
if (sd->curr_connectivity_state != GRPC_CHANNEL_READY &&
p->latest_pending_subchannel_list != NULL) {
p->selected = NULL;
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->subchannel_list, "selected_not_ready+switch_to_update");
p->subchannel_list = p->latest_pending_subchannel_list;
p->latest_pending_subchannel_list = NULL;
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "selected_not_ready+switch_to_update");
} else {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
/* if the selected channel goes bad, we're done */
sd->curr_connectivity_state = GRPC_CHANNEL_SHUTDOWN;
}
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
sd->curr_connectivity_state,
GRPC_ERROR_REF(error), "selected_changed");
if (sd->curr_connectivity_state != GRPC_CHANNEL_SHUTDOWN) {
// Renew notification.
grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
} else {
grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
grpc_lb_subchannel_list_unref_for_connectivity_watch(
exec_ctx, sd->subchannel_list, "pf_selected_shutdown");
shutdown_locked(exec_ctx, p, GRPC_ERROR_REF(error));
}
}
} else {
loop:
switch (p->checking_connectivity) {
case GRPC_CHANNEL_INIT:
GPR_UNREACHABLE_CODE(return );
case GRPC_CHANNEL_READY:
return;
}
// If we get here, there are two possible cases:
// 1. We do not currently have a selected subchannel, and the update is
// for a subchannel in p->subchannel_list that we're trying to
// connect to. The goal here is to find a subchannel that we can
// select.
// 2. We do currently have a selected subchannel, and the update is
// for a subchannel in p->latest_pending_subchannel_list. The
// goal here is to find a subchannel from the update that we can
// select in place of the current one.
if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE ||
sd->curr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
}
while (true) {
switch (sd->curr_connectivity_state) {
case GRPC_CHANNEL_READY: {
// Case 2. Promote p->latest_pending_subchannel_list to
// p->subchannel_list.
if (sd->subchannel_list == p->latest_pending_subchannel_list) {
GPR_ASSERT(p->subchannel_list != NULL);
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->subchannel_list, "finish_update");
p->subchannel_list = p->latest_pending_subchannel_list;
p->latest_pending_subchannel_list = NULL;
}
// Cases 1 and 2.
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_READY, GRPC_ERROR_NONE,
"connecting_ready");
selected_subchannel = p->subchannels[p->checking_subchannel];
p->selected = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_subchannel_get_connected_subchannel(selected_subchannel),
"picked_first");
sd->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_subchannel_get_connected_subchannel(sd->subchannel),
"connected");
p->selected = sd;
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO,
"Pick First %p selected subchannel %p (connected %p)",
(void *)p, (void *)selected_subchannel, (void *)p->selected);
gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", (void *)p,
(void *)sd->subchannel);
}
p->selected_key = grpc_subchannel_get_key(selected_subchannel);
/* drop the pick list: we are connected now */
GRPC_LB_POLICY_WEAK_REF(&p->base, "destroy_subchannels");
destroy_subchannels_locked(exec_ctx, p);
/* update any calls that were waiting for a pick */
// Drop all other subchannels, since we are now connected.
destroy_unselected_subchannels_locked(exec_ctx, p);
// Update any calls that were waiting for a pick.
pending_pick *pp;
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected, "picked");
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
p->selected->connected_subchannel, "picked");
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO,
"Servicing pending pick with selected subchannel %p",
@ -589,76 +484,86 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
}
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, p->selected, p->base.interested_parties,
&p->checking_connectivity, &p->connectivity_changed);
break;
case GRPC_CHANNEL_TRANSIENT_FAILURE:
p->checking_subchannel =
(p->checking_subchannel + 1) % p->num_subchannels;
if (p->checking_subchannel == 0) {
/* only trigger transient failure when we've tried all alternatives
*/
// Renew notification.
grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
return;
}
case GRPC_CHANNEL_TRANSIENT_FAILURE: {
do {
sd->subchannel_list->checking_subchannel =
(sd->subchannel_list->checking_subchannel + 1) %
sd->subchannel_list->num_subchannels;
sd = &sd->subchannel_list
->subchannels[sd->subchannel_list->checking_subchannel];
} while (sd->subchannel == NULL);
// Case 1: Only set state to TRANSIENT_FAILURE if we've tried
// all subchannels.
if (sd->subchannel_list->checking_subchannel == 0 &&
sd->subchannel_list == p->subchannel_list) {
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "connecting_transient_failure");
}
sd->curr_connectivity_state =
grpc_subchannel_check_connectivity(sd->subchannel, &error);
GRPC_ERROR_UNREF(error);
p->checking_connectivity = grpc_subchannel_check_connectivity(
p->subchannels[p->checking_subchannel], &error);
if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel],
p->base.interested_parties, &p->checking_connectivity,
&p->connectivity_changed);
} else {
goto loop;
if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
// Reuses the connectivity refs from the previous watch.
grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
return;
}
break;
break; // Go back to top of loop.
}
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_IDLE:
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_CONNECTING,
GRPC_ERROR_REF(error), "connecting_changed");
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel],
p->base.interested_parties, &p->checking_connectivity,
&p->connectivity_changed);
break;
case GRPC_CHANNEL_SHUTDOWN:
p->num_subchannels--;
GPR_SWAP(grpc_subchannel *, p->subchannels[p->checking_subchannel],
p->subchannels[p->num_subchannels]);
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[p->num_subchannels],
"pick_first");
if (p->num_subchannels == 0) {
case GRPC_CHANNEL_IDLE: {
// Only update connectivity state in case 1.
if (sd->subchannel_list == p->subchannel_list) {
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick first exhausted channels", &error, 1),
"no_more_channels");
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = NULL;
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
}
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base,
"pick_first_connectivity");
} else {
exec_ctx, &p->state_tracker, GRPC_CHANNEL_CONNECTING,
GRPC_ERROR_REF(error), "connecting_changed");
}
// Renew notification.
grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
return;
}
case GRPC_CHANNEL_SHUTDOWN: {
grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
"pf_candidate_shutdown");
// Advance to next subchannel and check its state.
grpc_lb_subchannel_data *original_sd = sd;
do {
sd->subchannel_list->checking_subchannel =
(sd->subchannel_list->checking_subchannel + 1) %
sd->subchannel_list->num_subchannels;
sd = &sd->subchannel_list
->subchannels[sd->subchannel_list->checking_subchannel];
} while (sd->subchannel == NULL && sd != original_sd);
if (sd == original_sd) {
grpc_lb_subchannel_list_unref_for_connectivity_watch(
exec_ctx, sd->subchannel_list, "pf_candidate_shutdown");
shutdown_locked(exec_ctx, p,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick first exhausted channels", &error, 1));
return;
}
if (sd->subchannel_list == p->subchannel_list) {
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "subchannel_failed");
p->checking_subchannel %= p->num_subchannels;
GRPC_ERROR_UNREF(error);
p->checking_connectivity = grpc_subchannel_check_connectivity(
p->subchannels[p->checking_subchannel], &error);
goto loop;
}
sd->curr_connectivity_state =
grpc_subchannel_check_connectivity(sd->subchannel, &error);
GRPC_ERROR_UNREF(error);
if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
// Reuses the connectivity refs from the previous watch.
grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
return;
}
// For any other state, go back to top of loop.
// We will reuse the connectivity refs from the previous watch.
}
}
}
GRPC_ERROR_UNREF(error);
}
static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
@ -688,8 +593,6 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
pf_update_locked(exec_ctx, &p->base, args);
grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable, args->combiner);
grpc_subchannel_index_ref();
GRPC_CLOSURE_INIT(&p->connectivity_changed, pf_connectivity_changed_locked, p,
grpc_combiner_scheduler(args->combiner));
return &p->base;
}

@ -28,6 +28,7 @@
#include <grpc/support/alloc.h>
#include "src/core/ext/filters/client_channel/lb_policy/subchannel_list.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/ext/filters/client_channel/subchannel_index.h"
@ -64,12 +65,11 @@ typedef struct pending_pick {
grpc_closure *on_complete;
} pending_pick;
typedef struct rr_subchannel_list rr_subchannel_list;
typedef struct round_robin_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
rr_subchannel_list *subchannel_list;
grpc_lb_subchannel_list *subchannel_list;
/** have we started picking? */
bool started_picking;
@ -89,157 +89,9 @@ typedef struct round_robin_lb_policy {
* lists if they equal \a latest_pending_subchannel_list. In other words,
* racing callbacks that reference outdated subchannel lists won't perform any
* update. */
rr_subchannel_list *latest_pending_subchannel_list;
grpc_lb_subchannel_list *latest_pending_subchannel_list;
} round_robin_lb_policy;
typedef struct {
/** backpointer to owning subchannel list */
rr_subchannel_list *subchannel_list;
/** subchannel itself */
grpc_subchannel *subchannel;
/** notification that connectivity has changed on subchannel */
grpc_closure connectivity_changed_closure;
/** last observed connectivity. Not updated by
* \a grpc_subchannel_notify_on_state_change. Used to determine the previous
* state while processing the new state in \a rr_connectivity_changed */
grpc_connectivity_state prev_connectivity_state;
/** current connectivity state. Updated by \a
* grpc_subchannel_notify_on_state_change */
grpc_connectivity_state curr_connectivity_state;
/** connectivity state to be updated by the watcher, not guarded by
* the combiner. Will be moved to curr_connectivity_state inside of
* the combiner by rr_connectivity_changed_locked(). */
grpc_connectivity_state pending_connectivity_state_unsafe;
/** the subchannel's target user data */
void *user_data;
/** vtable to operate over \a user_data */
const grpc_lb_user_data_vtable *user_data_vtable;
} subchannel_data;
struct rr_subchannel_list {
/** backpointer to owning policy */
round_robin_lb_policy *policy;
/** all our subchannels */
size_t num_subchannels;
subchannel_data *subchannels;
/** how many subchannels are in state READY */
size_t num_ready;
/** how many subchannels are in state TRANSIENT_FAILURE */
size_t num_transient_failures;
/** how many subchannels are in state SHUTDOWN */
size_t num_shutdown;
/** how many subchannels are in state IDLE */
size_t num_idle;
/** There will be one ref for each entry in subchannels for which there is a
* pending connectivity state watcher callback. */
gpr_refcount refcount;
/** Is this list shutting down? This may be true due to the shutdown of the
* policy itself or because a newer update has arrived while this one hadn't
* finished processing. */
bool shutting_down;
};
static rr_subchannel_list *rr_subchannel_list_create(round_robin_lb_policy *p,
size_t num_subchannels) {
rr_subchannel_list *subchannel_list =
(rr_subchannel_list *)gpr_zalloc(sizeof(*subchannel_list));
subchannel_list->policy = p;
subchannel_list->subchannels =
(subchannel_data *)gpr_zalloc(sizeof(subchannel_data) * num_subchannels);
subchannel_list->num_subchannels = num_subchannels;
gpr_ref_init(&subchannel_list->refcount, 1);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_INFO, "[RR %p] Created subchannel list %p for %lu subchannels",
(void *)p, (void *)subchannel_list, (unsigned long)num_subchannels);
}
return subchannel_list;
}
static void rr_subchannel_list_destroy(grpc_exec_ctx *exec_ctx,
rr_subchannel_list *subchannel_list) {
GPR_ASSERT(subchannel_list->shutting_down);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_INFO, "[RR %p] Destroying subchannel_list %p",
(void *)subchannel_list->policy, (void *)subchannel_list);
}
for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
subchannel_data *sd = &subchannel_list->subchannels[i];
if (sd->subchannel != NULL) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel,
"rr_subchannel_list_destroy");
}
sd->subchannel = NULL;
if (sd->user_data != NULL) {
GPR_ASSERT(sd->user_data_vtable != NULL);
sd->user_data_vtable->destroy(exec_ctx, sd->user_data);
sd->user_data = NULL;
}
}
gpr_free(subchannel_list->subchannels);
gpr_free(subchannel_list);
}
static void rr_subchannel_list_ref(rr_subchannel_list *subchannel_list,
const char *reason) {
gpr_ref_non_zero(&subchannel_list->refcount);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
gpr_log(GPR_INFO, "[RR %p] subchannel_list %p REF %lu->%lu (%s)",
(void *)subchannel_list->policy, (void *)subchannel_list,
(unsigned long)(count - 1), (unsigned long)count, reason);
}
}
static void rr_subchannel_list_unref(grpc_exec_ctx *exec_ctx,
rr_subchannel_list *subchannel_list,
const char *reason) {
const bool done = gpr_unref(&subchannel_list->refcount);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
gpr_log(GPR_INFO, "[RR %p] subchannel_list %p UNREF %lu->%lu (%s)",
(void *)subchannel_list->policy, (void *)subchannel_list,
(unsigned long)(count + 1), (unsigned long)count, reason);
}
if (done) {
rr_subchannel_list_destroy(exec_ctx, subchannel_list);
}
}
/** Mark \a subchannel_list as discarded. Unsubscribes all its subchannels. The
* watcher's callback will ultimately unref \a subchannel_list. */
static void rr_subchannel_list_shutdown_and_unref(
grpc_exec_ctx *exec_ctx, rr_subchannel_list *subchannel_list,
const char *reason) {
GPR_ASSERT(!subchannel_list->shutting_down);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "[RR %p] Shutting down subchannel_list %p (%s)",
(void *)subchannel_list->policy, (void *)subchannel_list, reason);
}
GPR_ASSERT(!subchannel_list->shutting_down);
subchannel_list->shutting_down = true;
for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
subchannel_data *sd = &subchannel_list->subchannels[i];
if (sd->subchannel != NULL) { // if the subchannel isn't shut down, unsubscribe.
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(
GPR_DEBUG,
"[RR %p] Unsubscribing from subchannel %p as part of shutting down "
"subchannel_list %p",
(void *)subchannel_list->policy, (void *)sd->subchannel,
(void *)subchannel_list);
}
grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL,
NULL,
&sd->connectivity_changed_closure);
}
}
rr_subchannel_list_unref(exec_ctx, subchannel_list, reason);
}
/** Returns the index into p->subchannel_list->subchannels of the next
* subchannel in READY state, or p->subchannel_list->num_subchannels if no
* subchannel is READY.
@ -299,8 +151,8 @@ static void update_last_ready_subchannel_index_locked(round_robin_lb_policy *p,
"[RR %p] setting last_ready_subchannel_index=%lu (SC %p, CSC %p)",
(void *)p, (unsigned long)last_ready_index,
(void *)p->subchannel_list->subchannels[last_ready_index].subchannel,
(void *)grpc_subchannel_get_connected_subchannel(
p->subchannel_list->subchannels[last_ready_index].subchannel));
(void *)p->subchannel_list->subchannels[last_ready_index]
.connected_subchannel);
}
}
@ -310,42 +162,47 @@ static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_log(GPR_DEBUG, "[RR %p] Destroying Round Robin policy at %p",
(void *)pol, (void *)pol);
}
GPR_ASSERT(p->subchannel_list == NULL);
GPR_ASSERT(p->latest_pending_subchannel_list == NULL);
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
grpc_subchannel_index_unref();
gpr_free(p);
}
static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
static void shutdown_locked(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p,
grpc_error *error) {
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "[RR %p] Shutting down Round Robin policy at %p",
(void *)pol, (void *)pol);
gpr_log(GPR_DEBUG, "[RR %p] Shutting down", p);
}
p->shutdown = true;
pending_pick *pp;
while ((pp = p->pending_picks)) {
while ((pp = p->pending_picks) != NULL) {
p->pending_picks = pp->next;
*pp->target = NULL;
GRPC_CLOSURE_SCHED(
exec_ctx, pp->on_complete,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_REF(error));
gpr_free(pp);
}
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "rr_shutdown");
const bool latest_is_current =
p->subchannel_list == p->latest_pending_subchannel_list;
rr_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
"sl_shutdown_rr_shutdown");
p->subchannel_list = NULL;
if (!latest_is_current && p->latest_pending_subchannel_list != NULL &&
!p->latest_pending_subchannel_list->shutting_down) {
rr_subchannel_list_shutdown_and_unref(exec_ctx,
p->latest_pending_subchannel_list,
"sl_shutdown_pending_rr_shutdown");
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
"rr_shutdown");
if (p->subchannel_list != NULL) {
grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
"sl_shutdown_rr_shutdown");
p->subchannel_list = NULL;
}
if (p->latest_pending_subchannel_list != NULL) {
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->latest_pending_subchannel_list,
"sl_shutdown_pending_rr_shutdown");
p->latest_pending_subchannel_list = NULL;
}
GRPC_ERROR_UNREF(error);
}
static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
shutdown_locked(exec_ctx, p,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
}
static void rr_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
@ -400,13 +257,10 @@ static void start_picking_locked(grpc_exec_ctx *exec_ctx,
round_robin_lb_policy *p) {
p->started_picking = true;
for (size_t i = 0; i < p->subchannel_list->num_subchannels; i++) {
subchannel_data *sd = &p->subchannel_list->subchannels[i];
GRPC_LB_POLICY_WEAK_REF(&p->base, "start_picking_locked");
rr_subchannel_list_ref(sd->subchannel_list, "started_picking");
grpc_subchannel_notify_on_state_change(
exec_ctx, sd->subchannel, p->base.interested_parties,
&sd->pending_connectivity_state_unsafe,
&sd->connectivity_changed_closure);
grpc_lb_subchannel_list_ref_for_connectivity_watch(p->subchannel_list,
"connectivity_watch");
grpc_lb_subchannel_data_start_connectivity_watch(
exec_ctx, &p->subchannel_list->subchannels[i]);
}
}
@ -431,10 +285,10 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
if (next_ready_index < p->subchannel_list->num_subchannels) {
/* readily available, report right away */
subchannel_data *sd = &p->subchannel_list->subchannels[next_ready_index];
*target = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_subchannel_get_connected_subchannel(sd->subchannel),
"rr_picked");
grpc_lb_subchannel_data *sd =
&p->subchannel_list->subchannels[next_ready_index];
*target =
GRPC_CONNECTED_SUBCHANNEL_REF(sd->connected_subchannel, "rr_picked");
if (user_data != NULL) {
*user_data = sd->user_data;
}
@ -465,8 +319,8 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
return 0;
}
static void update_state_counters_locked(subchannel_data *sd) {
rr_subchannel_list *subchannel_list = sd->subchannel_list;
static void update_state_counters_locked(grpc_lb_subchannel_data *sd) {
grpc_lb_subchannel_list *subchannel_list = sd->subchannel_list;
if (sd->prev_connectivity_state == GRPC_CHANNEL_READY) {
GPR_ASSERT(subchannel_list->num_ready > 0);
--subchannel_list->num_ready;
@ -480,6 +334,7 @@ static void update_state_counters_locked(subchannel_data *sd) {
GPR_ASSERT(subchannel_list->num_idle > 0);
--subchannel_list->num_idle;
}
sd->prev_connectivity_state = sd->curr_connectivity_state;
if (sd->curr_connectivity_state == GRPC_CHANNEL_READY) {
++subchannel_list->num_ready;
} else if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
@ -492,12 +347,12 @@ static void update_state_counters_locked(subchannel_data *sd) {
}
/** Sets the policy's connectivity status based on that of the passed-in \a sd
* (the subchannel_data associated with the updated subchannel) and the
* (the grpc_lb_subchannel_data associated with the updated subchannel) and the
* subchannel list \a sd belongs to (sd->subchannel_list). \a error will only be
* used upon policy transition to TRANSIENT_FAILURE or SHUTDOWN. Returns the
* connectivity status set. */
static grpc_connectivity_state update_lb_connectivity_status_locked(
grpc_exec_ctx *exec_ctx, subchannel_data *sd, grpc_error *error) {
grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd, grpc_error *error) {
/* In priority order. The first rule to match terminates the search (ie, if we
* are on rule n, all previous rules were unfulfilled).
*
@ -519,8 +374,8 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
* CHECK: p->num_idle == p->subchannel_list->num_subchannels.
*/
grpc_connectivity_state new_state = sd->curr_connectivity_state;
rr_subchannel_list *subchannel_list = sd->subchannel_list;
round_robin_lb_policy *p = subchannel_list->policy;
grpc_lb_subchannel_list *subchannel_list = sd->subchannel_list;
round_robin_lb_policy *p = (round_robin_lb_policy *)subchannel_list->policy;
if (subchannel_list->num_ready > 0) { /* 1) READY */
grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_READY,
GRPC_ERROR_NONE, "rr_ready");
@ -556,8 +411,9 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
subchannel_data *sd = (subchannel_data *)arg;
round_robin_lb_policy *p = sd->subchannel_list->policy;
grpc_lb_subchannel_data *sd = (grpc_lb_subchannel_data *)arg;
round_robin_lb_policy *p =
(round_robin_lb_policy *)sd->subchannel_list->policy;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(
GPR_DEBUG,
@ -572,71 +428,50 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
// If the policy is shutting down, unref and return.
if (p->shutdown) {
rr_subchannel_list_unref(exec_ctx, sd->subchannel_list,
"pol_shutdown+started_picking");
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pol_shutdown");
grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, "rr_shutdown");
grpc_lb_subchannel_list_unref_for_connectivity_watch(
exec_ctx, sd->subchannel_list, "rr_shutdown");
return;
}
if (sd->subchannel_list->shutting_down && error == GRPC_ERROR_CANCELLED) {
// the subchannel list associated with sd has been discarded. This callback
// corresponds to the unsubscription. The unrefs correspond to the picking
// ref (start_picking_locked or update_started_picking).
rr_subchannel_list_unref(exec_ctx, sd->subchannel_list,
"sl_shutdown+started_picking");
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "sl_shutdown+picking");
return;
}
// Dispose of outdated subchannel lists.
if (sd->subchannel_list != p->subchannel_list &&
sd->subchannel_list != p->latest_pending_subchannel_list) {
const char *reason = NULL;
if (sd->subchannel_list->shutting_down) {
reason = "sl_outdated_straggler";
rr_subchannel_list_unref(exec_ctx, sd->subchannel_list, reason);
} else {
reason = "sl_outdated";
rr_subchannel_list_shutdown_and_unref(exec_ctx, sd->subchannel_list,
reason);
}
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, reason);
// If the subchannel list is shutting down, stop watching.
if (sd->subchannel_list->shutting_down || error == GRPC_ERROR_CANCELLED) {
grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, "rr_sl_shutdown");
grpc_lb_subchannel_list_unref_for_connectivity_watch(
exec_ctx, sd->subchannel_list, "rr_sl_shutdown");
return;
}
// If we're still here, the notification must be for a subchannel in
// either the current or latest pending subchannel lists.
GPR_ASSERT(sd->subchannel_list == p->subchannel_list ||
sd->subchannel_list == p->latest_pending_subchannel_list);
// Now that we're inside the combiner, copy the pending connectivity
// state (which was set by the connectivity state watcher) to
// curr_connectivity_state, which is what we use inside of the combiner.
sd->curr_connectivity_state = sd->pending_connectivity_state_unsafe;
// Update state counters and determine new overall state.
update_state_counters_locked(sd);
sd->prev_connectivity_state = sd->curr_connectivity_state;
const grpc_connectivity_state new_policy_connectivity_state =
update_lb_connectivity_status_locked(exec_ctx, sd, GRPC_ERROR_REF(error));
// If the sd's new state is SHUTDOWN, unref the subchannel, and if the new
// policy's state is SHUTDOWN, clean up.
if (sd->curr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "rr_subchannel_shutdown");
sd->subchannel = NULL;
if (sd->user_data != NULL) {
GPR_ASSERT(sd->user_data_vtable != NULL);
sd->user_data_vtable->destroy(exec_ctx, sd->user_data);
sd->user_data = NULL;
}
grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
"rr_connectivity_shutdown");
grpc_lb_subchannel_list_unref_for_connectivity_watch(
exec_ctx, sd->subchannel_list, "rr_connectivity_shutdown");
if (new_policy_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
// the policy is shutting down. Flush all the pending picks...
pending_pick *pp;
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = NULL;
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
}
shutdown_locked(exec_ctx, p, GRPC_ERROR_REF(error));
}
rr_subchannel_list_unref(exec_ctx, sd->subchannel_list,
"sd_shutdown+started_picking");
// unref the "rr_connectivity_update" weak ref from start_picking.
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base,
"rr_connectivity_sd_shutdown");
} else { // sd not in SHUTDOWN
if (sd->curr_connectivity_state == GRPC_CHANNEL_READY) {
if (sd->connected_subchannel == NULL) {
sd->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_subchannel_get_connected_subchannel(sd->subchannel),
"connected");
}
if (sd->subchannel_list != p->subchannel_list) {
// promote sd->subchannel_list to p->subchannel_list.
// sd->subchannel_list must be equal to
@ -657,8 +492,8 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
if (p->subchannel_list != NULL) {
// dispose of the current subchannel_list
rr_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
"sl_phase_out_shutdown");
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->subchannel_list, "sl_phase_out_shutdown");
}
p->subchannel_list = p->latest_pending_subchannel_list;
p->latest_pending_subchannel_list = NULL;
@ -668,7 +503,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
* p->pending_picks. This preemptively replicates rr_pick()'s actions. */
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
GPR_ASSERT(next_ready_index < p->subchannel_list->num_subchannels);
subchannel_data *selected =
grpc_lb_subchannel_data *selected =
&p->subchannel_list->subchannels[next_ready_index];
if (p->pending_picks != NULL) {
// if the selected subchannel is going to be used for the pending
@ -679,8 +514,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_subchannel_get_connected_subchannel(selected->subchannel),
"rr_picked");
selected->connected_subchannel, "rr_picked");
if (pp->user_data != NULL) {
*pp->user_data = selected->user_data;
}
@ -695,12 +529,8 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(pp);
}
}
/* renew notification: reuses the "rr_connectivity_update" weak ref on the
* policy as well as the sd->subchannel_list ref. */
grpc_subchannel_notify_on_state_change(
exec_ctx, sd->subchannel, p->base.interested_parties,
&sd->pending_connectivity_state_unsafe,
&sd->connectivity_changed_closure);
// Renew notification.
grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
}
}
@ -724,13 +554,12 @@ static void rr_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
if (next_ready_index < p->subchannel_list->num_subchannels) {
subchannel_data *selected =
grpc_lb_subchannel_data *selected =
&p->subchannel_list->subchannels[next_ready_index];
grpc_connected_subchannel *target = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_subchannel_get_connected_subchannel(selected->subchannel),
"rr_picked");
selected->connected_subchannel, "rr_ping");
grpc_connected_subchannel_ping(exec_ctx, target, closure);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_picked");
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_ping");
} else {
GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Round Robin not connected"));
@ -743,130 +572,68 @@ static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_arg *arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
gpr_log(GPR_ERROR, "[RR %p] update provided no addresses; ignoring", p);
// If we don't have a current subchannel list, go into TRANSIENT_FAILURE.
// Otherwise, keep using the current subchannel list (ignore this update).
if (p->subchannel_list == NULL) {
// If we don't have a current subchannel list, go into TRANSIENT FAILURE.
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
"rr_update_missing");
} else {
// otherwise, keep using the current subchannel list (ignore this update).
gpr_log(GPR_ERROR,
"[RR %p] No valid LB addresses channel arg for update, ignoring.",
(void *)p);
}
return;
}
grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p;
rr_subchannel_list *subchannel_list =
rr_subchannel_list_create(p, addresses->num_addresses);
if (addresses->num_addresses == 0) {
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "[RR %p] received update with %" PRIuPTR " addresses", p,
addresses->num_addresses);
}
grpc_lb_subchannel_list *subchannel_list = grpc_lb_subchannel_list_create(
exec_ctx, &p->base, &grpc_lb_round_robin_trace, addresses, args,
rr_connectivity_changed_locked);
if (subchannel_list->num_subchannels == 0) {
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"),
"rr_update_empty");
if (p->subchannel_list != NULL) {
rr_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
"sl_shutdown_empty_update");
grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
"sl_shutdown_empty_update");
}
p->subchannel_list = subchannel_list; // empty list
return;
}
size_t subchannel_index = 0;
if (p->latest_pending_subchannel_list != NULL && p->started_picking) {
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG,
"[RR %p] Shutting down latest pending subchannel list %p, about "
"to be replaced by newer latest %p",
(void *)p, (void *)p->latest_pending_subchannel_list,
(void *)subchannel_list);
}
rr_subchannel_list_shutdown_and_unref(
exec_ctx, p->latest_pending_subchannel_list, "sl_outdated_dont_smash");
}
p->latest_pending_subchannel_list = subchannel_list;
grpc_subchannel_args sc_args;
/* We need to remove the LB addresses in order to be able to compare the
* subchannel keys of subchannels from a different batch of addresses. */
static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
GRPC_ARG_LB_ADDRESSES};
/* Create subchannels for addresses in the update. */
for (size_t i = 0; i < addresses->num_addresses; i++) {
// If there were any balancers, we would have chosen the grpclb policy instead.
GPR_ASSERT(!addresses->addresses[i].is_balancer);
memset(&sc_args, 0, sizeof(grpc_subchannel_args));
grpc_arg addr_arg =
grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
grpc_channel_args *new_args = grpc_channel_args_copy_and_add_and_remove(
args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg,
1);
gpr_free(addr_arg.value.string);
sc_args.args = new_args;
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
exec_ctx, args->client_channel_factory, &sc_args);
grpc_channel_args_destroy(exec_ctx, new_args);
grpc_error *error;
// Get the connectivity state of the subchannel. Already existing ones may
// be in a state other than INIT.
const grpc_connectivity_state subchannel_connectivity_state =
grpc_subchannel_check_connectivity(subchannel, &error);
if (error != GRPC_ERROR_NONE) {
// The subchannel is in error (e.g. shutting down). Ignore it.
GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannel, "new_sc_connectivity_error");
GRPC_ERROR_UNREF(error);
continue;
}
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
char *address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
gpr_log(
GPR_DEBUG,
"[RR %p] index %lu: Created subchannel %p for address uri %s into "
"subchannel_list %p. Connectivity state %s",
(void *)p, (unsigned long)subchannel_index, (void *)subchannel,
address_uri, (void *)subchannel_list,
grpc_connectivity_state_name(subchannel_connectivity_state));
gpr_free(address_uri);
}
subchannel_data *sd = &subchannel_list->subchannels[subchannel_index++];
sd->subchannel_list = subchannel_list;
sd->subchannel = subchannel;
GRPC_CLOSURE_INIT(&sd->connectivity_changed_closure,
rr_connectivity_changed_locked, sd,
grpc_combiner_scheduler(args->combiner));
/* use some sentinel value outside of the range of
* grpc_connectivity_state to signal an undefined previous state. We
* won't be referring to this value again and it'll be overwritten after
* the first call to rr_connectivity_changed_locked */
sd->prev_connectivity_state = GRPC_CHANNEL_INIT;
sd->curr_connectivity_state = subchannel_connectivity_state;
sd->user_data_vtable = addresses->user_data_vtable;
if (sd->user_data_vtable != NULL) {
sd->user_data =
sd->user_data_vtable->copy(addresses->addresses[i].user_data);
if (p->started_picking) {
if (p->latest_pending_subchannel_list != NULL) {
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG,
"[RR %p] Shutting down latest pending subchannel list %p, "
"about to be replaced by newer latest %p",
(void *)p, (void *)p->latest_pending_subchannel_list,
(void *)subchannel_list);
}
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->latest_pending_subchannel_list, "sl_outdated");
}
if (p->started_picking) {
rr_subchannel_list_ref(sd->subchannel_list, "update_started_picking");
GRPC_LB_POLICY_WEAK_REF(&p->base, "rr_connectivity_update");
/* 2. Watch every new subchannel. A subchannel list becomes active the
p->latest_pending_subchannel_list = subchannel_list;
for (size_t i = 0; i < subchannel_list->num_subchannels; ++i) {
/* Watch every new subchannel. A subchannel list becomes active the
* moment one of its subchannels is READY. At that moment, we swap
* p->subchannel_list for sd->subchannel_list, provided the subchannel
* list is still valid (ie, isn't shutting down) */
grpc_subchannel_notify_on_state_change(
exec_ctx, sd->subchannel, p->base.interested_parties,
&sd->pending_connectivity_state_unsafe,
&sd->connectivity_changed_closure);
grpc_lb_subchannel_list_ref_for_connectivity_watch(subchannel_list,
"connectivity_watch");
grpc_lb_subchannel_data_start_connectivity_watch(
exec_ctx, &subchannel_list->subchannels[i]);
}
}
if (!p->started_picking) {
} else {
// The policy isn't picking yet. Save the update for later, disposing of
// the previous version, if any.
if (p->subchannel_list != NULL) {
rr_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
"rr_update_before_started_picking");
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->subchannel_list, "rr_update_before_started_picking");
}
p->subchannel_list = subchannel_list;
p->latest_pending_subchannel_list = NULL;
}
}

@ -0,0 +1,265 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <string.h>
#include <grpc/support/alloc.h>
#include "src/core/ext/filters/client_channel/lb_policy/subchannel_list.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/transport/connectivity_state.h"
void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx *exec_ctx,
grpc_lb_subchannel_data *sd,
const char *reason) {
if (sd->subchannel != NULL) {
if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
gpr_log(
GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR
" of %" PRIuPTR " (subchannel %p): unreffing subchannel",
sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel);
}
GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, reason);
sd->subchannel = NULL;
if (sd->connected_subchannel != NULL) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, sd->connected_subchannel,
reason);
sd->connected_subchannel = NULL;
}
if (sd->user_data != NULL) {
GPR_ASSERT(sd->user_data_vtable != NULL);
sd->user_data_vtable->destroy(exec_ctx, sd->user_data);
sd->user_data = NULL;
}
}
}
void grpc_lb_subchannel_data_start_connectivity_watch(
grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd) {
if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): requesting connectivity change notification",
sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
sd->subchannel_list,
(size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel);
}
sd->connectivity_notification_pending = true;
grpc_subchannel_notify_on_state_change(
exec_ctx, sd->subchannel, sd->subchannel_list->policy->interested_parties,
&sd->pending_connectivity_state_unsafe,
&sd->connectivity_changed_closure);
}
void grpc_lb_subchannel_data_stop_connectivity_watch(
grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd) {
if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
gpr_log(
GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): stopping connectivity watch",
sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel);
}
GPR_ASSERT(sd->connectivity_notification_pending);
sd->connectivity_notification_pending = false;
}
grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *p, grpc_tracer_flag *tracer,
const grpc_lb_addresses *addresses, const grpc_lb_policy_args *args,
grpc_iomgr_cb_func connectivity_changed_cb) {
grpc_lb_subchannel_list *subchannel_list =
(grpc_lb_subchannel_list *)gpr_zalloc(sizeof(*subchannel_list));
if (GRPC_TRACER_ON(*tracer)) {
gpr_log(GPR_DEBUG,
"[%s %p] Creating subchannel list %p for %" PRIuPTR " subchannels",
tracer->name, p, subchannel_list, addresses->num_addresses);
}
subchannel_list->policy = p;
subchannel_list->tracer = tracer;
gpr_ref_init(&subchannel_list->refcount, 1);
subchannel_list->subchannels = (grpc_lb_subchannel_data *)gpr_zalloc(
sizeof(grpc_lb_subchannel_data) * addresses->num_addresses);
// We need to remove the LB addresses in order to be able to compare the
// subchannel keys of subchannels from a different batch of addresses.
static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
GRPC_ARG_LB_ADDRESSES};
// Create a subchannel for each address.
grpc_subchannel_args sc_args;
size_t subchannel_index = 0;
for (size_t i = 0; i < addresses->num_addresses; i++) {
// If there were any balancers, we would have chosen the grpclb policy instead.
GPR_ASSERT(!addresses->addresses[i].is_balancer);
memset(&sc_args, 0, sizeof(grpc_subchannel_args));
grpc_arg addr_arg =
grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
grpc_channel_args *new_args = grpc_channel_args_copy_and_add_and_remove(
args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg,
1);
gpr_free(addr_arg.value.string);
sc_args.args = new_args;
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
exec_ctx, args->client_channel_factory, &sc_args);
grpc_channel_args_destroy(exec_ctx, new_args);
if (subchannel == NULL) {
// Subchannel could not be created.
if (GRPC_TRACER_ON(*tracer)) {
char *address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
gpr_log(GPR_DEBUG,
"[%s %p] could not create subchannel for address uri %s, "
"ignoring",
tracer->name, subchannel_list->policy, address_uri);
gpr_free(address_uri);
}
continue;
}
if (GRPC_TRACER_ON(*tracer)) {
char *address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
gpr_log(GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR
": Created subchannel %p for address uri %s",
tracer->name, p, subchannel_list, subchannel_index, subchannel,
address_uri);
gpr_free(address_uri);
}
grpc_lb_subchannel_data *sd =
&subchannel_list->subchannels[subchannel_index++];
sd->subchannel_list = subchannel_list;
sd->subchannel = subchannel;
GRPC_CLOSURE_INIT(&sd->connectivity_changed_closure,
connectivity_changed_cb, sd,
grpc_combiner_scheduler(args->combiner));
// We assume that the current state is IDLE. If not, we'll get a
// callback telling us that.
sd->prev_connectivity_state = GRPC_CHANNEL_IDLE;
sd->curr_connectivity_state = GRPC_CHANNEL_IDLE;
sd->pending_connectivity_state_unsafe = GRPC_CHANNEL_IDLE;
sd->user_data_vtable = addresses->user_data_vtable;
if (sd->user_data_vtable != NULL) {
sd->user_data =
sd->user_data_vtable->copy(addresses->addresses[i].user_data);
}
}
subchannel_list->num_subchannels = subchannel_index;
subchannel_list->num_idle = subchannel_index;
return subchannel_list;
}
static void subchannel_list_destroy(grpc_exec_ctx *exec_ctx,
grpc_lb_subchannel_list *subchannel_list) {
if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
gpr_log(GPR_DEBUG, "[%s %p] Destroying subchannel_list %p",
subchannel_list->tracer->name, subchannel_list->policy,
subchannel_list);
}
for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
grpc_lb_subchannel_data *sd = &subchannel_list->subchannels[i];
grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
"subchannel_list_destroy");
}
gpr_free(subchannel_list->subchannels);
gpr_free(subchannel_list);
}
void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list *subchannel_list,
const char *reason) {
gpr_ref_non_zero(&subchannel_list->refcount);
if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
gpr_log(GPR_DEBUG, "[%s %p] subchannel_list %p REF %lu->%lu (%s)",
subchannel_list->tracer->name, subchannel_list->policy,
subchannel_list, (unsigned long)(count - 1), (unsigned long)count,
reason);
}
}
void grpc_lb_subchannel_list_unref(grpc_exec_ctx *exec_ctx,
grpc_lb_subchannel_list *subchannel_list,
const char *reason) {
const bool done = gpr_unref(&subchannel_list->refcount);
if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
gpr_log(GPR_DEBUG, "[%s %p] subchannel_list %p UNREF %lu->%lu (%s)",
subchannel_list->tracer->name, subchannel_list->policy,
subchannel_list, (unsigned long)(count + 1), (unsigned long)count,
reason);
}
if (done) {
subchannel_list_destroy(exec_ctx, subchannel_list);
}
}
void grpc_lb_subchannel_list_ref_for_connectivity_watch(
grpc_lb_subchannel_list *subchannel_list, const char *reason) {
GRPC_LB_POLICY_WEAK_REF(subchannel_list->policy, reason);
grpc_lb_subchannel_list_ref(subchannel_list, reason);
}
void grpc_lb_subchannel_list_unref_for_connectivity_watch(
grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
const char *reason) {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, subchannel_list->policy, reason);
grpc_lb_subchannel_list_unref(exec_ctx, subchannel_list, reason);
}
static void subchannel_data_cancel_connectivity_watch(
grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd, const char *reason) {
if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
gpr_log(
GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): canceling connectivity watch (%s)",
sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel, reason);
}
grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
&sd->connectivity_changed_closure);
}
void grpc_lb_subchannel_list_shutdown_and_unref(
grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
const char *reason) {
if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
gpr_log(GPR_DEBUG, "[%s %p] Shutting down subchannel_list %p (%s)",
subchannel_list->tracer->name, subchannel_list->policy,
subchannel_list, reason);
}
GPR_ASSERT(!subchannel_list->shutting_down);
subchannel_list->shutting_down = true;
for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
grpc_lb_subchannel_data *sd = &subchannel_list->subchannels[i];
// If there's a pending notification for this subchannel, cancel it;
// the callback is responsible for unreffing the subchannel.
// Otherwise, unref the subchannel directly.
if (sd->connectivity_notification_pending) {
subchannel_data_cancel_connectivity_watch(exec_ctx, sd, reason);
} else if (sd->subchannel != NULL) {
grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, reason);
}
}
grpc_lb_subchannel_list_unref(exec_ctx, subchannel_list, reason);
}

@ -0,0 +1,153 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_SUBCHANNEL_LIST_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_SUBCHANNEL_LIST_H
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/transport/connectivity_state.h"
// TODO(roth): This code is intended to be shared between pick_first and
// round_robin. However, the interface needs more work to provide clean
// encapsulation. For example, the structs here have some fields that are
// only used in one of the two (e.g., the state counters in
// grpc_lb_subchannel_list and the prev_connectivity_state field in
// grpc_lb_subchannel_data are only used in round_robin, and the
// checking_subchannel field in grpc_lb_subchannel_list is only used by
// pick_first). Also, there is probably some code duplication between the
// connectivity state notification callbacks in pick_first and round_robin
// that could be refactored and moved here. This needs to be cleaned up in a
// future PR.
#ifdef __cplusplus
extern "C" {
#endif
typedef struct grpc_lb_subchannel_list grpc_lb_subchannel_list;
typedef struct {
/** backpointer to owning subchannel list */
grpc_lb_subchannel_list *subchannel_list;
/** subchannel itself */
grpc_subchannel *subchannel;
grpc_connected_subchannel *connected_subchannel;
/** Is a connectivity notification pending? */
bool connectivity_notification_pending;
/** notification that connectivity has changed on subchannel */
grpc_closure connectivity_changed_closure;
/** previous and current connectivity states. Updated by
* \a connectivity_changed_closure based on
* \a pending_connectivity_state_unsafe. */
grpc_connectivity_state prev_connectivity_state;
grpc_connectivity_state curr_connectivity_state;
/** connectivity state to be updated by
* grpc_subchannel_notify_on_state_change(), not guarded by
* the combiner. To be copied to \a curr_connectivity_state by
* \a connectivity_changed_closure. */
grpc_connectivity_state pending_connectivity_state_unsafe;
/** the subchannel's target user data */
void *user_data;
/** vtable to operate over \a user_data */
const grpc_lb_user_data_vtable *user_data_vtable;
} grpc_lb_subchannel_data;
/// Unrefs the subchannel contained in sd.
void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx *exec_ctx,
grpc_lb_subchannel_data *sd,
const char *reason);
/// Starts watching the connectivity state of the subchannel.
/// The connectivity_changed_cb callback must either invoke
/// grpc_lb_subchannel_data_stop_connectivity_watch() or call
/// grpc_lb_subchannel_data_start_connectivity_watch() again (see the example
/// callback sketch after these declarations).
void grpc_lb_subchannel_data_start_connectivity_watch(
grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd);
/// Stops watching the connectivity state of the subchannel.
void grpc_lb_subchannel_data_stop_connectivity_watch(
grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd);
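// A minimal sketch of a connectivity_changed_cb that honors the contract
// above, as it would appear in an LB policy's .cc file. It is modeled on
// rr_connectivity_changed_locked in this PR; the function name and the
// "example_*" reason strings are illustrative only, and all policy-specific
// state handling is elided.
static void example_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
                                                void *arg, grpc_error *error) {
  grpc_lb_subchannel_data *sd = (grpc_lb_subchannel_data *)arg;
  if (sd->subchannel_list->shutting_down || error == GRPC_ERROR_CANCELLED) {
    // The watch is over: stop it and drop the subchannel and watch refs.
    grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
    grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, "example_shutdown");
    grpc_lb_subchannel_list_unref_for_connectivity_watch(
        exec_ctx, sd->subchannel_list, "example_shutdown");
    return;
  }
  // Inside the combiner, adopt the state reported by the watcher.
  sd->curr_connectivity_state = sd->pending_connectivity_state_unsafe;
  // ... policy-specific handling of the new state would go here ...
  // Renew the notification, reusing the refs taken for the previous watch.
  grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
}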
struct grpc_lb_subchannel_list {
/** backpointer to owning policy */
grpc_lb_policy *policy;
grpc_tracer_flag *tracer;
/** all our subchannels */
size_t num_subchannels;
grpc_lb_subchannel_data *subchannels;
/** Index into subchannels of the one we're currently checking.
* Used when connecting to subchannels serially instead of in parallel. */
// TODO(roth): When we have time, we can probably make this go away
// and compute the index dynamically by subtracting
// subchannel_list->subchannels from the subchannel_data pointer.
size_t checking_subchannel;
/** how many subchannels are in state READY */
size_t num_ready;
/** how many subchannels are in state TRANSIENT_FAILURE */
size_t num_transient_failures;
/** how many subchannels are in state SHUTDOWN */
size_t num_shutdown;
/** how many subchannels are in state IDLE */
size_t num_idle;
/** There will be one ref for each entry in subchannels for which there is a
* pending connectivity state watcher callback. */
gpr_refcount refcount;
/** Is this list shutting down? This may be true due to the shutdown of the
* policy itself or because a newer update arrived before this one had
* finished being processed. */
bool shutting_down;
};
grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *p, grpc_tracer_flag *tracer,
const grpc_lb_addresses *addresses, const grpc_lb_policy_args *args,
grpc_iomgr_cb_func connectivity_changed_cb);
void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list *subchannel_list,
const char *reason);
void grpc_lb_subchannel_list_unref(grpc_exec_ctx *exec_ctx,
grpc_lb_subchannel_list *subchannel_list,
const char *reason);
/// Takes and releases refs needed for a connectivity notification.
/// This includes a ref to subchannel_list and a weak ref to the LB policy.
void grpc_lb_subchannel_list_ref_for_connectivity_watch(
grpc_lb_subchannel_list *subchannel_list, const char *reason);
void grpc_lb_subchannel_list_unref_for_connectivity_watch(
grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
const char *reason);
/// Mark subchannel_list as discarded. Unsubscribes all its subchannels. The
/// connectivity state notification callback will ultimately unref it.
void grpc_lb_subchannel_list_shutdown_and_unref(
grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
const char *reason);
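// Typical call sequence from an LB policy, assuming a policy `p` that embeds
// a grpc_lb_policy as `p->base` (a sketch based on the round_robin changes in
// this PR; `my_connectivity_changed_locked` and the reason strings are
// illustrative only):
//
//   grpc_lb_subchannel_list *sl = grpc_lb_subchannel_list_create(
//       exec_ctx, &p->base, tracer, addresses, args,
//       my_connectivity_changed_locked);
//   for (size_t i = 0; i < sl->num_subchannels; ++i) {
//     grpc_lb_subchannel_list_ref_for_connectivity_watch(sl,
//                                                        "connectivity_watch");
//     grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx,
//                                                      &sl->subchannels[i]);
//   }
//   // ... later, when shutting down the policy or replacing the list:
//   grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, sl, "sl_shutdown");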
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_SUBCHANNEL_LIST_H */

@ -127,8 +127,8 @@ void grpc_connected_subchannel_process_transport_op(
grpc_connectivity_state grpc_subchannel_check_connectivity(
grpc_subchannel *channel, grpc_error **error);
/** call notify when the connectivity state of a channel changes from *state.
Updates *state with the new state of the channel */
/** Calls notify when the connectivity state of a channel becomes different
from *state. Updates *state with the new state of the channel. */
void grpc_subchannel_notify_on_state_change(
grpc_exec_ctx *exec_ctx, grpc_subchannel *channel,
grpc_pollset_set *interested_parties, grpc_connectivity_state *state,

@ -115,6 +115,8 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
}
memset(c->result, 0, sizeof(*c->result));
} else {
grpc_endpoint_delete_from_pollset_set(exec_ctx, args->endpoint,
c->args.interested_parties);
c->result->transport =
grpc_create_chttp2_transport(exec_ctx, args->args, args->endpoint, 1);
GPR_ASSERT(c->result->transport);
@ -136,6 +138,8 @@ static void start_handshake_locked(grpc_exec_ctx *exec_ctx,
c->handshake_mgr = grpc_handshake_manager_create();
grpc_handshakers_add(exec_ctx, HANDSHAKER_CLIENT, c->args.channel_args,
c->handshake_mgr);
grpc_endpoint_add_to_pollset_set(exec_ctx, c->endpoint,
c->args.interested_parties);
grpc_handshake_manager_do_handshake(
exec_ctx, c->handshake_mgr, c->endpoint, c->args.channel_args,
c->args.deadline, NULL /* acceptor */, on_handshake_done, c);

@ -54,7 +54,6 @@
#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_impl.h"
#define DEFAULT_WINDOW 65535
#define DEFAULT_CONNECTION_WINDOW_TARGET (1024 * 1024)
#define MAX_WINDOW 0x7fffffffu
#define MAX_WRITE_BUFFER_SIZE (64 * 1024 * 1024)
@ -222,7 +221,7 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx,
t->write_cb_pool = next;
}
t->flow_control.bdp_estimator.Destroy();
t->flow_control.Destroy();
GRPC_ERROR_UNREF(t->closed_with_error);
gpr_free(t->ping_acks);
@ -282,10 +281,6 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->endpoint_reading = 1;
t->next_stream_id = is_client ? 1 : 2;
t->is_client = is_client;
t->flow_control.remote_window = DEFAULT_WINDOW;
t->flow_control.announced_window = DEFAULT_WINDOW;
t->flow_control.target_initial_window_size = DEFAULT_WINDOW;
t->flow_control.t = t;
t->deframe_state = is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0;
t->is_first_frame = true;
grpc_connectivity_state_init(
@ -325,8 +320,6 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
keepalive_watchdog_fired_locked, t,
grpc_combiner_scheduler(t->combiner));
t->flow_control.bdp_estimator.Init(t->peer_string);
grpc_chttp2_goaway_parser_init(&t->goaway_parser);
grpc_chttp2_hpack_parser_init(exec_ctx, &t->hpack_parser);
@ -350,8 +343,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
window -- this should by rights be 0 */
t->force_send_settings = 1 << GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
t->sent_local_settings = 0;
t->write_buffer_size = DEFAULT_WINDOW;
t->flow_control.enable_bdp_probe = true;
t->write_buffer_size = grpc_core::chttp2::kDefaultWindow;
if (is_client) {
grpc_slice_buffer_add(&t->outbuf, grpc_slice_from_copied_string(
@ -396,6 +388,8 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->opt_target = GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY;
bool enable_bdp = true;
if (channel_args) {
for (i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key,
@ -456,8 +450,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
&channel_args->args[i], {0, 0, MAX_WRITE_BUFFER_SIZE});
} else if (0 ==
strcmp(channel_args->args[i].key, GRPC_ARG_HTTP2_BDP_PROBE)) {
t->flow_control.enable_bdp_probe =
grpc_channel_arg_get_integer(&channel_args->args[i], {1, 0, 1});
enable_bdp = grpc_channel_arg_get_bool(&channel_args->args[i], true);
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_KEEPALIVE_TIME_MS)) {
const int value = grpc_channel_arg_get_integer(
@ -552,6 +545,8 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
}
t->flow_control.Init(exec_ctx, t, enable_bdp);
/* No pings allowed before receiving a header or data frame. */
t->ping_state.pings_before_data_required = 0;
t->ping_state.is_delayed_ping_timer_set = false;
@ -572,15 +567,13 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED;
}
if (t->flow_control.enable_bdp_probe) {
if (enable_bdp) {
GRPC_CHTTP2_REF_TRANSPORT(t, "bdp_ping");
schedule_bdp_ping_locked(exec_ctx, t);
}
grpc_chttp2_act_on_flowctl_action(
exec_ctx,
grpc_chttp2_flowctl_get_action(exec_ctx, &t->flow_control, NULL), t,
NULL);
grpc_chttp2_act_on_flowctl_action(
exec_ctx, t->flow_control->PeriodicUpdate(exec_ctx), t, NULL);
}
grpc_chttp2_initiate_write(exec_ctx, t,
GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE);
@ -718,7 +711,7 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
post_destructive_reclaimer(exec_ctx, t);
}
s->flow_control.s = s;
s->flow_control.Init(t->flow_control.get(), s);
GPR_TIMER_END("init_stream", 0);
return 0;
@ -769,7 +762,7 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
GRPC_ERROR_UNREF(s->write_closed_error);
GRPC_ERROR_UNREF(s->byte_stream_error);
grpc_chttp2_flowctl_destroy_stream(&t->flow_control, &s->flow_control);
s->flow_control.Destroy();
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "stream");
@ -1638,13 +1631,10 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
if (s->id != 0) {
if (!s->read_closed) {
already_received = s->frame_storage.length;
grpc_chttp2_flowctl_incoming_bs_update(
&t->flow_control, &s->flow_control, GRPC_HEADER_SIZE_IN_BYTES,
already_received);
grpc_chttp2_act_on_flowctl_action(
exec_ctx, grpc_chttp2_flowctl_get_action(exec_ctx, &t->flow_control,
&s->flow_control),
t, s);
s->flow_control->IncomingByteStreamUpdate(GRPC_HEADER_SIZE_IN_BYTES,
already_received);
grpc_chttp2_act_on_flowctl_action(exec_ctx,
s->flow_control->MakeAction(), t, s);
}
}
grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s);
@ -2420,49 +2410,44 @@ static void end_all_the_calls(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
* INPUT PROCESSING - PARSING
*/
void grpc_chttp2_act_on_flowctl_action(grpc_exec_ctx *exec_ctx,
grpc_chttp2_flowctl_action action,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
switch (action.send_stream_update) {
case GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED:
break;
case GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY:
grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
grpc_chttp2_initiate_write(
exec_ctx, t, GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL);
template <class F>
static void WithUrgency(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_core::chttp2::FlowControlAction::Urgency urgency,
grpc_chttp2_initiate_write_reason reason, F action) {
switch (urgency) {
case grpc_core::chttp2::FlowControlAction::Urgency::NO_ACTION_NEEDED:
break;
case GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE:
grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
case grpc_core::chttp2::FlowControlAction::Urgency::UPDATE_IMMEDIATELY:
grpc_chttp2_initiate_write(exec_ctx, t, reason);
// fallthrough
case grpc_core::chttp2::FlowControlAction::Urgency::QUEUE_UPDATE:
action();
break;
}
switch (action.send_transport_update) {
case GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED:
break;
case GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY:
grpc_chttp2_initiate_write(
exec_ctx, t, GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL);
break;
// this is the same as no action because every time the transport enters the
// writing path it may do an update
case GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE:
break;
}
if (action.send_setting_update != GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED) {
if (action.initial_window_size > 0) {
queue_setting_update(exec_ctx, t,
GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
(uint32_t)action.initial_window_size);
}
if (action.max_frame_size > 0) {
queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE,
(uint32_t)action.max_frame_size);
}
if (action.send_setting_update == GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY) {
grpc_chttp2_initiate_write(exec_ctx, t,
GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS);
}
}
}
void grpc_chttp2_act_on_flowctl_action(
grpc_exec_ctx *exec_ctx, const grpc_core::chttp2::FlowControlAction &action,
grpc_chttp2_transport *t, grpc_chttp2_stream *s) {
WithUrgency(
exec_ctx, t, action.send_stream_update(),
GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL,
[exec_ctx, t, s]() { grpc_chttp2_mark_stream_writable(exec_ctx, t, s); });
WithUrgency(exec_ctx, t, action.send_transport_update(),
GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL, []() {});
WithUrgency(exec_ctx, t, action.send_initial_window_update(),
GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS,
[exec_ctx, t, &action]() {
queue_setting_update(exec_ctx, t,
GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
action.initial_window_size());
});
WithUrgency(
exec_ctx, t, action.send_max_frame_size_update(),
GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS, [exec_ctx, t, &action]() {
queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE,
action.max_frame_size());
});
}
static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx,
@ -2518,7 +2503,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *errors[3] = {GRPC_ERROR_REF(error), GRPC_ERROR_NONE,
GRPC_ERROR_NONE};
for (; i < t->read_buffer.count && errors[1] == GRPC_ERROR_NONE; i++) {
t->flow_control.bdp_estimator->AddIncomingBytes(
t->flow_control->bdp_estimator()->AddIncomingBytes(
(int64_t)GRPC_SLICE_LENGTH(t->read_buffer.slices[i]));
errors[1] =
grpc_chttp2_perform_read(exec_ctx, t, t->read_buffer.slices[i]);
@ -2535,8 +2520,8 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
GPR_TIMER_END("reading_action.parse", 0);
GPR_TIMER_BEGIN("post_parse_locked", 0);
if (t->flow_control.initial_window_update != 0) {
if (t->flow_control.initial_window_update > 0) {
if (t->initial_window_update != 0) {
if (t->initial_window_update > 0) {
grpc_chttp2_stream *s;
while (grpc_chttp2_list_pop_stalled_by_stream(t, &s)) {
grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
@ -2545,7 +2530,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING);
}
}
t->flow_control.initial_window_update = 0;
t->initial_window_update = 0;
}
GPR_TIMER_END("post_parse_locked", 0);
}
@ -2568,10 +2553,8 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
if (keep_reading) {
grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer,
&t->read_action_locked);
grpc_chttp2_act_on_flowctl_action(
exec_ctx,
grpc_chttp2_flowctl_get_action(exec_ctx, &t->flow_control, NULL), t,
NULL);
grpc_chttp2_act_on_flowctl_action(exec_ctx, t->flow_control->MakeAction(),
t, NULL);
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keep_reading");
} else {
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "reading_action");
@ -2588,7 +2571,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
// that kicks off finishes, it's unreffed
static void schedule_bdp_ping_locked(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t) {
t->flow_control.bdp_estimator->SchedulePing();
t->flow_control->bdp_estimator()->SchedulePing();
send_ping_locked(exec_ctx, t, &t->start_bdp_ping_locked,
&t->finish_bdp_ping_locked);
}
@ -2604,7 +2587,7 @@ static void start_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_WAITING) {
grpc_timer_cancel(exec_ctx, &t->keepalive_ping_timer);
}
t->flow_control.bdp_estimator->StartPing();
t->flow_control->bdp_estimator()->StartPing();
}
static void finish_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
@ -2618,7 +2601,10 @@ static void finish_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "bdp_ping");
return;
}
grpc_millis next_ping = t->flow_control.bdp_estimator->CompletePing(exec_ctx);
grpc_millis next_ping =
t->flow_control->bdp_estimator()->CompletePing(exec_ctx);
grpc_chttp2_act_on_flowctl_action(
exec_ctx, t->flow_control->PeriodicUpdate(exec_ctx), t, nullptr);
GPR_ASSERT(!t->have_next_bdp_ping_timer);
t->have_next_bdp_ping_timer = true;
grpc_timer_init(exec_ctx, &t->next_bdp_ping_timer, next_ping,
@ -2844,13 +2830,10 @@ static void incoming_byte_stream_next_locked(grpc_exec_ctx *exec_ctx,
size_t cur_length = s->frame_storage.length;
if (!s->read_closed) {
grpc_chttp2_flowctl_incoming_bs_update(&t->flow_control, &s->flow_control,
bs->next_action.max_size_hint,
cur_length);
grpc_chttp2_act_on_flowctl_action(
exec_ctx, grpc_chttp2_flowctl_get_action(exec_ctx, &t->flow_control,
&s->flow_control),
t, s);
s->flow_control->IncomingByteStreamUpdate(bs->next_action.max_size_hint,
cur_length);
grpc_chttp2_act_on_flowctl_action(exec_ctx, s->flow_control->MakeAction(),
t, s);
}
GPR_ASSERT(s->unprocessed_incoming_frames_buffer.length == 0);
if (s->frame_storage.length > 0) {

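Note on the rewritten action dispatch above: the three near-identical switch statements in grpc_chttp2_act_on_flowctl_action are collapsed into a single WithUrgency template that takes the update-specific work as a callable, with UPDATE_IMMEDIATELY deliberately falling through to QUEUE_UPDATE so an immediate update also performs the queued work. A minimal standalone sketch of that pattern follows; MiniUrgency and InitiateWrite are illustrative names for this sketch only, not the gRPC types:
#include <cstdio>
enum class MiniUrgency { NO_ACTION_NEEDED, UPDATE_IMMEDIATELY, QUEUE_UPDATE };
// Stand-in for grpc_chttp2_initiate_write in this sketch.
static void InitiateWrite(const char* reason) {
  std::printf("initiate write: %s\n", reason);
}
template <class F>
static void WithUrgency(MiniUrgency urgency, const char* reason, F action) {
  switch (urgency) {
    case MiniUrgency::NO_ACTION_NEEDED:
      break;
    case MiniUrgency::UPDATE_IMMEDIATELY:
      InitiateWrite(reason);
    // fallthrough: an immediate update also queues its payload
    case MiniUrgency::QUEUE_UPDATE:
      action();
      break;
  }
}
int main() {
  WithUrgency(MiniUrgency::UPDATE_IMMEDIATELY, "stream flow control",
              [] { std::printf("mark stream writable\n"); });
  WithUrgency(MiniUrgency::QUEUE_UPDATE, "send settings",
              [] { std::printf("queue INITIAL_WINDOW_SIZE setting\n"); });
}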
@ -16,7 +16,7 @@
*
*/
#include "src/core/ext/transport/chttp2/transport/internal.h"
#include "src/core/ext/transport/chttp2/transport/flow_control.h"
#include <inttypes.h>
#include <limits.h>
@ -28,38 +28,15 @@
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
#include "src/core/ext/transport/chttp2/transport/internal.h"
#include "src/core/lib/support/string.h"
static uint32_t grpc_chttp2_target_announced_window(
const grpc_chttp2_transport_flowctl* tfc);
#ifndef NDEBUG
typedef struct {
int64_t remote_window;
int64_t target_window;
int64_t announced_window;
int64_t remote_window_delta;
int64_t local_window_delta;
int64_t announced_window_delta;
uint32_t local_init_window;
uint32_t local_max_frame;
} shadow_flow_control;
static void pretrace(shadow_flow_control* shadow_fc,
grpc_chttp2_transport_flowctl* tfc,
grpc_chttp2_stream_flowctl* sfc) {
shadow_fc->remote_window = tfc->remote_window;
shadow_fc->target_window = grpc_chttp2_target_announced_window(tfc);
shadow_fc->announced_window = tfc->announced_window;
if (sfc != NULL) {
shadow_fc->remote_window_delta = sfc->remote_window_delta;
shadow_fc->local_window_delta = sfc->local_window_delta;
shadow_fc->announced_window_delta = sfc->announced_window_delta;
}
}
namespace grpc_core {
namespace chttp2 {
namespace {
#define TRACE_PADDING 30
static constexpr const int kTracePadding = 30;
static char* fmt_int64_diff_str(int64_t old_val, int64_t new_val) {
char* str;
@ -68,7 +45,7 @@ static char* fmt_int64_diff_str(int64_t old_val, int64_t new_val) {
} else {
gpr_asprintf(&str, "%" PRId64 "", old_val);
}
char* str_lp = gpr_leftpad(str, ' ', TRACE_PADDING);
char* str_lp = gpr_leftpad(str, ' ', kTracePadding);
gpr_free(str);
return str_lp;
}
@ -80,47 +57,58 @@ static char* fmt_uint32_diff_str(uint32_t old_val, uint32_t new_val) {
} else {
gpr_asprintf(&str, "%" PRIu32 "", old_val);
}
char* str_lp = gpr_leftpad(str, ' ', TRACE_PADDING);
char* str_lp = gpr_leftpad(str, ' ', kTracePadding);
gpr_free(str);
return str_lp;
}
} // namespace
void FlowControlTrace::Init(const char* reason, TransportFlowControl* tfc,
StreamFlowControl* sfc) {
tfc_ = tfc;
sfc_ = sfc;
reason_ = reason;
remote_window_ = tfc->remote_window();
target_window_ = tfc->target_window();
announced_window_ = tfc->announced_window();
if (sfc != nullptr) {
remote_window_delta_ = sfc->remote_window_delta();
local_window_delta_ = sfc->local_window_delta();
announced_window_delta_ = sfc->announced_window_delta();
}
}
static void posttrace(shadow_flow_control* shadow_fc,
grpc_chttp2_transport_flowctl* tfc,
grpc_chttp2_stream_flowctl* sfc, const char* reason) {
void FlowControlTrace::Finish() {
uint32_t acked_local_window =
tfc->t->settings[GRPC_SENT_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
tfc_->transport()->settings[GRPC_SENT_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
uint32_t remote_window =
tfc->t->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
char* trw_str =
fmt_int64_diff_str(shadow_fc->remote_window, tfc->remote_window);
char* tlw_str = fmt_int64_diff_str(shadow_fc->target_window,
grpc_chttp2_target_announced_window(tfc));
tfc_->transport()->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
char* trw_str = fmt_int64_diff_str(remote_window_, tfc_->remote_window());
char* tlw_str = fmt_int64_diff_str(target_window_, tfc_->target_window());
char* taw_str =
fmt_int64_diff_str(shadow_fc->announced_window, tfc->announced_window);
fmt_int64_diff_str(announced_window_, tfc_->announced_window());
char* srw_str;
char* slw_str;
char* saw_str;
if (sfc != NULL) {
srw_str = fmt_int64_diff_str(shadow_fc->remote_window_delta + remote_window,
sfc->remote_window_delta + remote_window);
slw_str =
fmt_int64_diff_str(shadow_fc->local_window_delta + acked_local_window,
sfc->local_window_delta + acked_local_window);
saw_str = fmt_int64_diff_str(
shadow_fc->announced_window_delta + acked_local_window,
sfc->announced_window_delta + acked_local_window);
if (sfc_ != nullptr) {
srw_str = fmt_int64_diff_str(remote_window_delta_ + remote_window,
sfc_->remote_window_delta() + remote_window);
slw_str = fmt_int64_diff_str(local_window_delta_ + acked_local_window,
sfc_->local_window_delta() + acked_local_window);
saw_str = fmt_int64_diff_str(announced_window_delta_ + acked_local_window,
sfc_->announced_window_delta() + acked_local_window);
} else {
srw_str = gpr_leftpad("", ' ', TRACE_PADDING);
slw_str = gpr_leftpad("", ' ', TRACE_PADDING);
saw_str = gpr_leftpad("", ' ', TRACE_PADDING);
srw_str = gpr_leftpad("", ' ', kTracePadding);
slw_str = gpr_leftpad("", ' ', kTracePadding);
saw_str = gpr_leftpad("", ' ', kTracePadding);
}
gpr_log(GPR_DEBUG,
"%p[%u][%s] | %s | trw:%s, ttw:%s, taw:%s, srw:%s, slw:%s, saw:%s",
tfc, sfc != NULL ? sfc->s->id : 0, tfc->t->is_client ? "cli" : "svr",
reason, trw_str, tlw_str, taw_str, srw_str, slw_str, saw_str);
tfc_, sfc_ != nullptr ? sfc_->stream()->id : 0,
tfc_->transport()->is_client ? "cli" : "svr", reason_, trw_str,
tlw_str, taw_str, srw_str, slw_str, saw_str);
gpr_free(trw_str);
gpr_free(tlw_str);
gpr_free(taw_str);
@ -129,13 +117,13 @@ static void posttrace(shadow_flow_control* shadow_fc,
gpr_free(saw_str);
}
static const char* urgency_to_string(grpc_chttp2_flowctl_urgency urgency) {
switch (urgency) {
case GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED:
const char* FlowControlAction::UrgencyString(Urgency u) {
switch (u) {
case Urgency::NO_ACTION_NEEDED:
return "no action";
case GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY:
case Urgency::UPDATE_IMMEDIATELY:
return "update immediately";
case GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE:
case Urgency::QUEUE_UPDATE:
return "queue update";
default:
GPR_UNREACHABLE_CODE(return "unknown");
@ -143,209 +131,132 @@ static const char* urgency_to_string(grpc_chttp2_flowctl_urgency urgency) {
GPR_UNREACHABLE_CODE(return "unknown");
}
static void trace_action(grpc_chttp2_transport_flowctl* tfc,
grpc_chttp2_flowctl_action action) {
void FlowControlAction::Trace(grpc_chttp2_transport* t) const {
char* iw_str = fmt_uint32_diff_str(
tfc->t->settings[GRPC_SENT_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
action.initial_window_size);
t->settings[GRPC_SENT_SETTINGS][GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
initial_window_size_);
char* mf_str = fmt_uint32_diff_str(
tfc->t->settings[GRPC_SENT_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
action.max_frame_size);
gpr_log(GPR_DEBUG, "t[%s], s[%s], settings[%s] iw:%s mf:%s",
urgency_to_string(action.send_transport_update),
urgency_to_string(action.send_stream_update),
urgency_to_string(action.send_setting_update), iw_str, mf_str);
t->settings[GRPC_SENT_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
max_frame_size_);
gpr_log(GPR_DEBUG, "t[%s], s[%s], iw:%s:%s mf:%s:%s",
UrgencyString(send_transport_update_),
UrgencyString(send_stream_update_),
UrgencyString(send_initial_window_update_), iw_str,
UrgencyString(send_max_frame_size_update_), mf_str);
gpr_free(iw_str);
gpr_free(mf_str);
}
#define PRETRACE(tfc, sfc) \
shadow_flow_control shadow_fc; \
GRPC_FLOW_CONTROL_IF_TRACING(pretrace(&shadow_fc, tfc, sfc))
#define POSTTRACE(tfc, sfc, reason) \
GRPC_FLOW_CONTROL_IF_TRACING(posttrace(&shadow_fc, tfc, sfc, reason))
#define TRACEACTION(tfc, action) \
GRPC_FLOW_CONTROL_IF_TRACING(trace_action(tfc, action))
#else
#define PRETRACE(tfc, sfc)
#define POSTTRACE(tfc, sfc, reason)
#define TRACEACTION(tfc, action)
#endif
/* How many bytes of incoming flow control would we like to advertise */
static uint32_t grpc_chttp2_target_announced_window(
const grpc_chttp2_transport_flowctl* tfc) {
return (uint32_t)GPR_MIN((int64_t)((1u << 31) - 1),
tfc->announced_stream_total_over_incoming_window +
tfc->target_initial_window_size);
}
// we have sent data on the wire, we must track this in our bookkeeping for the
// remote peer's flow control.
void grpc_chttp2_flowctl_sent_data(grpc_chttp2_transport_flowctl* tfc,
grpc_chttp2_stream_flowctl* sfc,
int64_t size) {
PRETRACE(tfc, sfc);
tfc->remote_window -= size;
sfc->remote_window_delta -= size;
POSTTRACE(tfc, sfc, " data sent");
}
static void announced_window_delta_preupdate(grpc_chttp2_transport_flowctl* tfc,
grpc_chttp2_stream_flowctl* sfc) {
if (sfc->announced_window_delta > 0) {
tfc->announced_stream_total_over_incoming_window -=
sfc->announced_window_delta;
} else {
tfc->announced_stream_total_under_incoming_window +=
-sfc->announced_window_delta;
}
}
static void announced_window_delta_postupdate(
grpc_chttp2_transport_flowctl* tfc, grpc_chttp2_stream_flowctl* sfc) {
if (sfc->announced_window_delta > 0) {
tfc->announced_stream_total_over_incoming_window +=
sfc->announced_window_delta;
} else {
tfc->announced_stream_total_under_incoming_window -=
-sfc->announced_window_delta;
TransportFlowControl::TransportFlowControl(grpc_exec_ctx* exec_ctx,
const grpc_chttp2_transport* t,
bool enable_bdp_probe)
: t_(t),
enable_bdp_probe_(enable_bdp_probe),
bdp_estimator_(t->peer_string),
pid_controller_(grpc_core::PidController::Args()
.set_gain_p(4)
.set_gain_i(8)
.set_gain_d(0)
.set_initial_control_value(TargetLogBdp())
.set_min_control_value(-1)
.set_max_control_value(25)
.set_integral_range(10)),
last_pid_update_(grpc_exec_ctx_now(exec_ctx)) {}
uint32_t TransportFlowControl::MaybeSendUpdate(bool writing_anyway) {
FlowControlTrace trace("t updt sent", this, nullptr);
const uint32_t target_announced_window = (const uint32_t)target_window();
if ((writing_anyway || announced_window_ <= target_announced_window / 2) &&
announced_window_ != target_announced_window) {
const uint32_t announce = (uint32_t)GPR_CLAMP(
target_announced_window - announced_window_, 0, UINT32_MAX);
announced_window_ += announce;
return announce;
}
return 0;
}
// We have received data from the wire. We must track this in our own flow
// control bookkeeping.
// Returns an error if the incoming frame violates our flow control.
grpc_error* grpc_chttp2_flowctl_recv_data(grpc_chttp2_transport_flowctl* tfc,
grpc_chttp2_stream_flowctl* sfc,
int64_t incoming_frame_size) {
uint32_t sent_init_window =
tfc->t->settings[GRPC_SENT_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
uint32_t acked_init_window =
tfc->t->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
PRETRACE(tfc, sfc);
if (incoming_frame_size > tfc->announced_window) {
grpc_error* TransportFlowControl::ValidateRecvData(
int64_t incoming_frame_size) {
if (incoming_frame_size > announced_window_) {
char* msg;
gpr_asprintf(&msg,
"frame of size %" PRId64 " overflows local window of %" PRId64,
incoming_frame_size, tfc->announced_window);
incoming_frame_size, announced_window_);
grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
return GRPC_ERROR_NONE;
}
if (sfc != NULL) {
int64_t acked_stream_window =
sfc->announced_window_delta + acked_init_window;
int64_t sent_stream_window = sfc->announced_window_delta + sent_init_window;
if (incoming_frame_size > acked_stream_window) {
if (incoming_frame_size <= sent_stream_window) {
gpr_log(
GPR_ERROR,
"Incoming frame of size %" PRId64
" exceeds local window size of %" PRId64
".\n"
"The (un-acked, future) window size would be %" PRId64
" which is not exceeded.\n"
"This would usually cause a disconnection, but allowing it due to"
"broken HTTP2 implementations in the wild.\n"
"See (for example) https://github.com/netty/netty/issues/6520.",
incoming_frame_size, acked_stream_window, sent_stream_window);
} else {
char* msg;
gpr_asprintf(&msg, "frame of size %" PRId64
" overflows local window of %" PRId64,
incoming_frame_size, acked_stream_window);
grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
}
StreamFlowControl::StreamFlowControl(TransportFlowControl* tfc,
const grpc_chttp2_stream* s)
: tfc_(tfc), s_(s) {}
announced_window_delta_preupdate(tfc, sfc);
sfc->announced_window_delta -= incoming_frame_size;
announced_window_delta_postupdate(tfc, sfc);
sfc->local_window_delta -= incoming_frame_size;
}
grpc_error* StreamFlowControl::RecvData(int64_t incoming_frame_size) {
FlowControlTrace trace(" data recv", tfc_, this);
tfc->announced_window -= incoming_frame_size;
grpc_error* error = GRPC_ERROR_NONE;
error = tfc_->ValidateRecvData(incoming_frame_size);
if (error != GRPC_ERROR_NONE) return error;
POSTTRACE(tfc, sfc, " data recv");
return GRPC_ERROR_NONE;
}
// Returns a non zero announce integer if we should send a transport window
// update
uint32_t grpc_chttp2_flowctl_maybe_send_transport_update(
grpc_chttp2_transport_flowctl* tfc, bool writing_anyway) {
PRETRACE(tfc, NULL);
uint32_t target_announced_window = grpc_chttp2_target_announced_window(tfc);
uint32_t threshold_to_send_transport_window_update =
tfc->t->outbuf.count > 0 ? 3 * target_announced_window / 4
: target_announced_window / 2;
if ((writing_anyway ||
tfc->announced_window <= threshold_to_send_transport_window_update) &&
tfc->announced_window != target_announced_window) {
uint32_t announce = (uint32_t)GPR_CLAMP(
target_announced_window - tfc->announced_window, 0, UINT32_MAX);
tfc->announced_window += announce;
POSTTRACE(tfc, NULL, "t updt sent");
return announce;
uint32_t sent_init_window =
tfc_->transport()->settings[GRPC_SENT_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
uint32_t acked_init_window =
tfc_->transport()->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
int64_t acked_stream_window = announced_window_delta_ + acked_init_window;
int64_t sent_stream_window = announced_window_delta_ + sent_init_window;
if (incoming_frame_size > acked_stream_window) {
if (incoming_frame_size <= sent_stream_window) {
gpr_log(GPR_ERROR,
"Incoming frame of size %" PRId64
" exceeds local window size of %" PRId64
".\n"
"The (un-acked, future) window size would be %" PRId64
" which is not exceeded.\n"
"This would usually cause a disconnection, but allowing it due to"
"broken HTTP2 implementations in the wild.\n"
"See (for example) https://github.com/netty/netty/issues/6520.",
incoming_frame_size, acked_stream_window, sent_stream_window);
} else {
char* msg;
gpr_asprintf(&msg, "frame of size %" PRId64
" overflows local window of %" PRId64,
incoming_frame_size, acked_stream_window);
grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
}
GRPC_FLOW_CONTROL_IF_TRACING(
gpr_log(GPR_DEBUG, "%p[0][%s] will not send transport update", tfc,
tfc->t->is_client ? "cli" : "svr"));
return 0;
UpdateAnnouncedWindowDelta(tfc_, -incoming_frame_size);
local_window_delta_ -= incoming_frame_size;
tfc_->CommitRecvData(incoming_frame_size);
return GRPC_ERROR_NONE;
}
// Returns a non zero announce integer if we should send a stream window update
uint32_t grpc_chttp2_flowctl_maybe_send_stream_update(
grpc_chttp2_transport_flowctl* tfc, grpc_chttp2_stream_flowctl* sfc) {
PRETRACE(tfc, sfc);
if (sfc->local_window_delta > sfc->announced_window_delta) {
uint32_t StreamFlowControl::MaybeSendUpdate() {
FlowControlTrace trace("s updt sent", tfc_, this);
if (local_window_delta_ > announced_window_delta_) {
uint32_t announce = (uint32_t)GPR_CLAMP(
sfc->local_window_delta - sfc->announced_window_delta, 0, UINT32_MAX);
announced_window_delta_preupdate(tfc, sfc);
sfc->announced_window_delta += announce;
announced_window_delta_postupdate(tfc, sfc);
POSTTRACE(tfc, sfc, "s updt sent");
local_window_delta_ - announced_window_delta_, 0, UINT32_MAX);
UpdateAnnouncedWindowDelta(tfc_, announce);
return announce;
}
GRPC_FLOW_CONTROL_IF_TRACING(
gpr_log(GPR_DEBUG, "%p[%u][%s] will not send stream update", tfc,
sfc->s->id, tfc->t->is_client ? "cli" : "svr"));
return 0;
}
// we have received a WINDOW_UPDATE frame for a transport
void grpc_chttp2_flowctl_recv_transport_update(
grpc_chttp2_transport_flowctl* tfc, uint32_t size) {
PRETRACE(tfc, NULL);
tfc->remote_window += size;
POSTTRACE(tfc, NULL, "t updt recv");
}
// we have received a WINDOW_UPDATE frame for a stream
void grpc_chttp2_flowctl_recv_stream_update(grpc_chttp2_transport_flowctl* tfc,
grpc_chttp2_stream_flowctl* sfc,
uint32_t size) {
PRETRACE(tfc, sfc);
sfc->remote_window_delta += size;
POSTTRACE(tfc, sfc, "s updt recv");
}
void grpc_chttp2_flowctl_incoming_bs_update(grpc_chttp2_transport_flowctl* tfc,
grpc_chttp2_stream_flowctl* sfc,
size_t max_size_hint,
size_t have_already) {
PRETRACE(tfc, sfc);
void StreamFlowControl::IncomingByteStreamUpdate(size_t max_size_hint,
size_t have_already) {
FlowControlTrace trace("app st recv", tfc_, this);
uint32_t max_recv_bytes;
uint32_t sent_init_window =
tfc->t->settings[GRPC_SENT_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
tfc_->transport()->settings[GRPC_SENT_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
/* clamp max recv hint to an allowable size */
if (max_size_hint >= UINT32_MAX - sent_init_window) {
@ -363,65 +274,18 @@ void grpc_chttp2_flowctl_incoming_bs_update(grpc_chttp2_transport_flowctl* tfc,
/* add some small lookahead to keep pipelines flowing */
GPR_ASSERT(max_recv_bytes <= UINT32_MAX - sent_init_window);
if (sfc->local_window_delta < max_recv_bytes) {
if (local_window_delta_ < max_recv_bytes) {
uint32_t add_max_recv_bytes =
(uint32_t)(max_recv_bytes - sfc->local_window_delta);
sfc->local_window_delta += add_max_recv_bytes;
}
POSTTRACE(tfc, sfc, "app st recv");
}
void grpc_chttp2_flowctl_destroy_stream(grpc_chttp2_transport_flowctl* tfc,
grpc_chttp2_stream_flowctl* sfc) {
announced_window_delta_preupdate(tfc, sfc);
}
// Returns an urgency with which to make an update
static grpc_chttp2_flowctl_urgency delta_is_significant(
const grpc_chttp2_transport_flowctl* tfc, int32_t value,
grpc_chttp2_setting_id setting_id) {
int64_t delta = (int64_t)value -
(int64_t)tfc->t->settings[GRPC_LOCAL_SETTINGS][setting_id];
// TODO(ncteisen): tune this
if (delta != 0 && (delta <= -value / 5 || delta >= value / 5)) {
return GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE;
} else {
return GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED;
}
}
// Takes in a target and uses the pid controller to return a stabilized
// guess at the new bdp.
static double get_pid_controller_guess(grpc_exec_ctx* exec_ctx,
grpc_chttp2_transport_flowctl* tfc,
double target) {
grpc_millis now = grpc_exec_ctx_now(exec_ctx);
if (!tfc->pid_controller_initialized) {
tfc->last_pid_update = now;
tfc->pid_controller_initialized = true;
tfc->pid_controller.Init(grpc_core::PidController::Args()
.set_gain_p(4)
.set_gain_i(8)
.set_gain_d(0)
.set_initial_control_value(target)
.set_min_control_value(-1)
.set_max_control_value(25)
.set_integral_range(10));
return pow(2, target);
(uint32_t)(max_recv_bytes - local_window_delta_);
local_window_delta_ += add_max_recv_bytes;
}
double bdp_error = target - tfc->pid_controller->last_control_value();
double dt = (double)(now - tfc->last_pid_update) * 1e-3;
double log2_bdp_guess = tfc->pid_controller->Update(bdp_error, dt);
tfc->last_pid_update = now;
return pow(2, log2_bdp_guess);
}
// Take in a target and modifies it based on the memory pressure of the system
static double get_target_under_memory_pressure(
grpc_chttp2_transport_flowctl* tfc, double target) {
static double AdjustForMemoryPressure(grpc_resource_quota* quota,
double target) {
// do not increase window under heavy memory pressure.
double memory_pressure = grpc_resource_quota_get_memory_pressure(
grpc_resource_user_quota(grpc_endpoint_get_resource_user(tfc->t->ep)));
double memory_pressure = grpc_resource_quota_get_memory_pressure(quota);
static const double kLowMemPressure = 0.1;
static const double kZeroTarget = 22;
static const double kHighMemPressure = 0.8;
@ -436,75 +300,82 @@ static double get_target_under_memory_pressure(
return target;
}
grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_action(
grpc_exec_ctx* exec_ctx, grpc_chttp2_transport_flowctl* tfc,
grpc_chttp2_stream_flowctl* sfc) {
grpc_chttp2_flowctl_action action;
memset(&action, 0, sizeof(action));
double TransportFlowControl::TargetLogBdp() {
return AdjustForMemoryPressure(
grpc_resource_user_quota(grpc_endpoint_get_resource_user(t_->ep)),
1 + log2(bdp_estimator_.EstimateBdp()));
}
double TransportFlowControl::SmoothLogBdp(grpc_exec_ctx* exec_ctx,
double value) {
grpc_millis now = grpc_exec_ctx_now(exec_ctx);
double bdp_error = value - pid_controller_.last_control_value();
const double dt = (double)(now - last_pid_update_) * 1e-3;
last_pid_update_ = now;
return pid_controller_.Update(bdp_error, dt);
}
FlowControlAction::Urgency TransportFlowControl::DeltaUrgency(
int32_t value, grpc_chttp2_setting_id setting_id) {
int64_t delta =
(int64_t)value - (int64_t)t_->settings[GRPC_LOCAL_SETTINGS][setting_id];
// TODO(ncteisen): tune this
if (sfc != NULL && !sfc->s->read_closed) {
uint32_t sent_init_window =
tfc->t->settings[GRPC_SENT_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
if ((int64_t)sfc->local_window_delta >
(int64_t)sfc->announced_window_delta &&
(int64_t)sfc->announced_window_delta + sent_init_window <=
sent_init_window / 2) {
action.send_stream_update = GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY;
} else if (sfc->local_window_delta > sfc->announced_window_delta) {
action.send_stream_update = GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE;
}
if (delta != 0 && (delta <= -value / 5 || delta >= value / 5)) {
return FlowControlAction::Urgency::QUEUE_UPDATE;
} else {
return FlowControlAction::Urgency::NO_ACTION_NEEDED;
}
if (tfc->enable_bdp_probe) {
}
FlowControlAction TransportFlowControl::PeriodicUpdate(
grpc_exec_ctx* exec_ctx) {
FlowControlAction action;
if (enable_bdp_probe_) {
// get bdp estimate and update initial_window accordingly.
int64_t estimate = -1;
if (tfc->bdp_estimator->EstimateBdp(&estimate)) {
double target = 1 + log2((double)estimate);
// target might change based on how much memory pressure we are under
// TODO(ncteisen): experiment with setting target to be huge under low
// memory pressure.
target = get_target_under_memory_pressure(tfc, target);
// run our target through the pid controller to stabilize change.
// TODO(ncteisen): experiment with other controllers here.
double bdp_guess = get_pid_controller_guess(exec_ctx, tfc, target);
// Though initial window 'could' drop to 0, we keep the floor at 128
tfc->target_initial_window_size =
(int32_t)GPR_CLAMP(bdp_guess, 128, INT32_MAX);
grpc_chttp2_flowctl_urgency init_window_update_urgency =
delta_is_significant(tfc, tfc->target_initial_window_size,
GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE);
if (init_window_update_urgency != GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED) {
action.send_setting_update = init_window_update_urgency;
action.initial_window_size = (uint32_t)tfc->target_initial_window_size;
}
}
// target might change based on how much memory pressure we are under
// TODO(ncteisen): experiment with setting target to be huge under low
// memory pressure.
const double target = pow(2, SmoothLogBdp(exec_ctx, TargetLogBdp()));
// Though initial window 'could' drop to 0, we keep the floor at 128
target_initial_window_size_ = (int32_t)GPR_CLAMP(target, 128, INT32_MAX);
action.set_send_initial_window_update(
DeltaUrgency(target_initial_window_size_,
GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE),
target_initial_window_size_);
// get bandwidth estimate and update max_frame accordingly.
double bw_dbl = -1;
if (tfc->bdp_estimator->EstimateBandwidth(&bw_dbl)) {
// we target the max of BDP or bandwidth in microseconds.
int32_t frame_size = (int32_t)GPR_CLAMP(
GPR_MAX((int32_t)GPR_CLAMP(bw_dbl, 0, INT_MAX) / 1000,
tfc->target_initial_window_size),
16384, 16777215);
grpc_chttp2_flowctl_urgency frame_size_urgency = delta_is_significant(
tfc, frame_size, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE);
if (frame_size_urgency != GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED) {
if (frame_size_urgency > action.send_setting_update) {
action.send_setting_update = frame_size_urgency;
}
action.max_frame_size = (uint32_t)frame_size;
}
}
double bw_dbl = bdp_estimator_.EstimateBandwidth();
// we target the max of BDP or bandwidth in microseconds.
int32_t frame_size = (int32_t)GPR_CLAMP(
GPR_MAX((int32_t)GPR_CLAMP(bw_dbl, 0, INT_MAX) / 1000,
target_initial_window_size_),
16384, 16777215);
action.set_send_max_frame_size_update(
DeltaUrgency(frame_size, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE),
frame_size);
}
uint32_t target_announced_window = grpc_chttp2_target_announced_window(tfc);
if (tfc->announced_window < target_announced_window / 2) {
action.send_transport_update = GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY;
return UpdateAction(action);
}
FlowControlAction StreamFlowControl::UpdateAction(FlowControlAction action) {
// TODO(ncteisen): tune this
if (!s_->read_closed) {
uint32_t sent_init_window =
tfc_->transport()->settings[GRPC_SENT_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
if (local_window_delta_ > announced_window_delta_ &&
announced_window_delta_ + sent_init_window <= sent_init_window / 2) {
action.set_send_stream_update(
FlowControlAction::Urgency::UPDATE_IMMEDIATELY);
} else if (local_window_delta_ > announced_window_delta_) {
action.set_send_stream_update(FlowControlAction::Urgency::QUEUE_UPDATE);
}
}
TRACEACTION(tfc, action);
return action;
}
} // namespace chttp2
} // namespace grpc_core

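For reference, the transport-level update policy encoded in TransportFlowControl::MaybeSendUpdate above: once the announced window has fallen to half of the target (or a write is going out anyway), announce enough bytes to top the window back up to the target. A minimal standalone sketch of that policy, with simplified bookkeeping (no clamping, no tracing); MiniTransportWindow is an illustrative name for this sketch, not a gRPC type:
#include <cstdint>
#include <cstdio>
struct MiniTransportWindow {
  int64_t target_window = 65535;     // kDefaultWindow
  int64_t announced_window = 65535;  // shrinks as data is received
  uint32_t MaybeSendUpdate(bool writing_anyway) {
    if ((writing_anyway || announced_window <= target_window / 2) &&
        announced_window != target_window) {
      const uint32_t announce = (uint32_t)(target_window - announced_window);
      announced_window += announce;
      return announce;  // bytes to put into a WINDOW_UPDATE frame
    }
    return 0;  // not worth a frame yet
  }
};
int main() {
  MiniTransportWindow w;
  w.announced_window -= 40000;                    // peer sent us 40000 bytes
  std::printf("%u\n", w.MaybeSendUpdate(false));  // prints 40000
}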
@ -0,0 +1,336 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FLOW_CONTROL_H
#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FLOW_CONTROL_H
#include <stdint.h>
#include <grpc/support/useful.h>
#include "src/core/ext/transport/chttp2/transport/http2_settings.h"
#include "src/core/lib/support/manual_constructor.h"
#include "src/core/lib/transport/bdp_estimator.h"
#include "src/core/lib/transport/pid_controller.h"
struct grpc_chttp2_transport;
struct grpc_chttp2_stream;
extern "C" grpc_tracer_flag grpc_flowctl_trace;
namespace grpc {
namespace testing {
class TrickledCHTTP2; // to make this a friend
} // namespace testing
} // namespace grpc
namespace grpc_core {
namespace chttp2 {
static constexpr uint32_t kDefaultWindow = 65535;
class TransportFlowControl;
class StreamFlowControl;
class FlowControlAction {
public:
enum class Urgency : uint8_t {
// Nothing to be done.
NO_ACTION_NEEDED = 0,
// Initiate a write to update the initial window immediately.
UPDATE_IMMEDIATELY,
// Push the flow control update into a send buffer, to be sent
// out the next time a write is initiated.
QUEUE_UPDATE,
};
Urgency send_stream_update() const { return send_stream_update_; }
Urgency send_transport_update() const { return send_transport_update_; }
Urgency send_initial_window_update() const {
return send_initial_window_update_;
}
Urgency send_max_frame_size_update() const {
return send_max_frame_size_update_;
}
uint32_t initial_window_size() const { return initial_window_size_; }
uint32_t max_frame_size() const { return max_frame_size_; }
FlowControlAction& set_send_stream_update(Urgency u) {
send_stream_update_ = u;
return *this;
}
FlowControlAction& set_send_transport_update(Urgency u) {
send_transport_update_ = u;
return *this;
}
FlowControlAction& set_send_initial_window_update(Urgency u,
uint32_t update) {
send_initial_window_update_ = u;
initial_window_size_ = update;
return *this;
}
FlowControlAction& set_send_max_frame_size_update(Urgency u,
uint32_t update) {
send_max_frame_size_update_ = u;
max_frame_size_ = update;
return *this;
}
static const char* UrgencyString(Urgency u);
void Trace(grpc_chttp2_transport* t) const;
private:
Urgency send_stream_update_ = Urgency::NO_ACTION_NEEDED;
Urgency send_transport_update_ = Urgency::NO_ACTION_NEEDED;
Urgency send_initial_window_update_ = Urgency::NO_ACTION_NEEDED;
Urgency send_max_frame_size_update_ = Urgency::NO_ACTION_NEEDED;
uint32_t initial_window_size_ = 0;
uint32_t max_frame_size_ = 0;
};
class FlowControlTrace {
public:
FlowControlTrace(const char* reason, TransportFlowControl* tfc,
StreamFlowControl* sfc) {
if (enabled_) Init(reason, tfc, sfc);
}
~FlowControlTrace() {
if (enabled_) Finish();
}
private:
void Init(const char* reason, TransportFlowControl* tfc,
StreamFlowControl* sfc);
void Finish();
const bool enabled_ = GRPC_TRACER_ON(grpc_flowctl_trace);
TransportFlowControl* tfc_;
StreamFlowControl* sfc_;
const char* reason_;
int64_t remote_window_;
int64_t target_window_;
int64_t announced_window_;
int64_t remote_window_delta_;
int64_t local_window_delta_;
int64_t announced_window_delta_;
};
class TransportFlowControl {
public:
TransportFlowControl(grpc_exec_ctx* exec_ctx, const grpc_chttp2_transport* t,
bool enable_bdp_probe);
~TransportFlowControl() {}
bool bdp_probe() const { return enable_bdp_probe_; }
// returns an announce if we should send a transport update to our peer,
// else returns zero; writing_anyway indicates if a write would happen
// regardless of the send - if it is false and this function returns non-zero,
// this announce will cause a write to occur
uint32_t MaybeSendUpdate(bool writing_anyway);
// Reads the flow control data and returns an actionable struct that will
// tell chttp2 exactly what it needs to do
FlowControlAction MakeAction() { return UpdateAction(FlowControlAction()); }
// Call periodically (at a low-ish rate, 100ms - 10s makes sense)
// to perform more complex flow control calculations and return an action
// to let chttp2 change its parameters
FlowControlAction PeriodicUpdate(grpc_exec_ctx* exec_ctx);
void StreamSentData(int64_t size) { remote_window_ -= size; }
grpc_error* ValidateRecvData(int64_t incoming_frame_size);
void CommitRecvData(int64_t incoming_frame_size) {
announced_window_ -= incoming_frame_size;
}
grpc_error* RecvData(int64_t incoming_frame_size) {
FlowControlTrace trace(" data recv", this, nullptr);
grpc_error* error = ValidateRecvData(incoming_frame_size);
if (error != GRPC_ERROR_NONE) return error;
CommitRecvData(incoming_frame_size);
return GRPC_ERROR_NONE;
}
// we have received a WINDOW_UPDATE frame for a transport
void RecvUpdate(uint32_t size) {
FlowControlTrace trace("t updt recv", this, nullptr);
remote_window_ += size;
}
int64_t remote_window() const { return remote_window_; }
int64_t target_window() const {
return (uint32_t)GPR_MIN((int64_t)((1u << 31) - 1),
announced_stream_total_over_incoming_window_ +
target_initial_window_size_);
}
int64_t announced_window() const { return announced_window_; }
const grpc_chttp2_transport* transport() const { return t_; }
void PreUpdateAnnouncedWindowOverIncomingWindow(int64_t delta) {
if (delta > 0) {
announced_stream_total_over_incoming_window_ -= delta;
} else {
announced_stream_total_under_incoming_window_ += -delta;
}
}
void PostUpdateAnnouncedWindowOverIncomingWindow(int64_t delta) {
if (delta > 0) {
announced_stream_total_over_incoming_window_ += delta;
} else {
announced_stream_total_under_incoming_window_ -= -delta;
}
}
BdpEstimator* bdp_estimator() { return &bdp_estimator_; }
void TestOnlyForceHugeWindow() {
announced_window_ = 1024 * 1024 * 1024;
remote_window_ = 1024 * 1024 * 1024;
}
private:
friend class ::grpc::testing::TrickledCHTTP2;
double TargetLogBdp();
double SmoothLogBdp(grpc_exec_ctx* exec_ctx, double value);
FlowControlAction::Urgency DeltaUrgency(int32_t value,
grpc_chttp2_setting_id setting_id);
FlowControlAction UpdateAction(FlowControlAction action) {
if (announced_window_ < target_window() / 2) {
action.set_send_transport_update(
FlowControlAction::Urgency::UPDATE_IMMEDIATELY);
}
return action;
}
const grpc_chttp2_transport* const t_;
/** Our bookkeeping for the remote peer's available window */
int64_t remote_window_ = kDefaultWindow;
/** calculating what we should give for local window:
we track the total amount of flow control over initial window size
across all streams: this is data that we want to receive right now (it
has an outstanding read)
and the total amount of flow control under initial window size across all
streams: this is data we've read early
we want to adjust incoming_window such that:
incoming_window = total_over - max(bdp - total_under, 0) */
int64_t announced_stream_total_over_incoming_window_ = 0;
int64_t announced_stream_total_under_incoming_window_ = 0;
/** This is our window according to what we have sent to our remote peer. The
* difference between this and target window is what we use to decide when
* to send WINDOW_UPDATE frames. */
int64_t announced_window_ = kDefaultWindow;
int32_t target_initial_window_size_ = kDefaultWindow;
/** should we probe bdp? */
const bool enable_bdp_probe_;
/* bdp estimation */
grpc_core::BdpEstimator bdp_estimator_;
/* pid controller */
grpc_core::PidController pid_controller_;
grpc_millis last_pid_update_ = 0;
};
class StreamFlowControl {
public:
StreamFlowControl(TransportFlowControl* tfc, const grpc_chttp2_stream* s);
~StreamFlowControl() {
tfc_->PreUpdateAnnouncedWindowOverIncomingWindow(announced_window_delta_);
}
FlowControlAction UpdateAction(FlowControlAction action);
FlowControlAction MakeAction() { return UpdateAction(tfc_->MakeAction()); }
// we have sent data on the wire, we must track this in our bookkeeping for
// the remote peer's flow control.
void SentData(int64_t outgoing_frame_size) {
FlowControlTrace tracer(" data sent", tfc_, this);
tfc_->StreamSentData(outgoing_frame_size);
remote_window_delta_ -= outgoing_frame_size;
}
// we have received data from the wire
grpc_error* RecvData(int64_t incoming_frame_size);
// returns an announce if we should send a stream update to our peer, else
// returns zero
uint32_t MaybeSendUpdate();
// we have received a WINDOW_UPDATE frame for a stream
void RecvUpdate(uint32_t size) {
FlowControlTrace trace("s updt recv", tfc_, this);
remote_window_delta_ += size;
}
// the application is asking for a certain amount of bytes
void IncomingByteStreamUpdate(size_t max_size_hint, size_t have_already);
int64_t remote_window_delta() const { return remote_window_delta_; }
int64_t local_window_delta() const { return local_window_delta_; }
int64_t announced_window_delta() const { return announced_window_delta_; }
const grpc_chttp2_stream* stream() const { return s_; }
void TestOnlyForceHugeWindow() {
announced_window_delta_ = 1024 * 1024 * 1024;
local_window_delta_ = 1024 * 1024 * 1024;
remote_window_delta_ = 1024 * 1024 * 1024;
}
private:
friend class ::grpc::testing::TrickledCHTTP2;
TransportFlowControl* const tfc_;
const grpc_chttp2_stream* const s_;
void UpdateAnnouncedWindowDelta(TransportFlowControl* tfc, int64_t change) {
tfc->PreUpdateAnnouncedWindowOverIncomingWindow(announced_window_delta_);
announced_window_delta_ += change;
tfc->PostUpdateAnnouncedWindowOverIncomingWindow(announced_window_delta_);
}
/** window available for us to send to peer, over or under the initial
* window
* size of the transport... ie:
* remote_window = remote_window_delta + transport.initial_window_size */
int64_t remote_window_delta_ = 0;
/** window available for peer to send to us (as a delta on
* transport.initial_window_size)
* local_window = local_window_delta + transport.initial_window_size */
int64_t local_window_delta_ = 0;
/** window available for peer to send to us over this stream that we have
* announced to the peer */
int64_t announced_window_delta_ = 0;
};
} // namespace chttp2
} // namespace grpc_core
#endif

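A minimal standalone sketch of the over/under bookkeeping that feeds TransportFlowControl::target_window() in the header above: each stream's announced window delta is folded out of the transport aggregate before it changes and folded back in afterwards, so the transport target tracks what streams currently want to receive. MiniTransport and MiniStream are illustrative names for this sketch, not the gRPC classes:
#include <algorithm>
#include <cstdint>
#include <cstdio>
struct MiniTransport {
  int64_t over = 0;   // announced_stream_total_over_incoming_window_
  int64_t under = 0;  // announced_stream_total_under_incoming_window_
  int32_t target_initial_window = 65535;
  void PreUpdate(int64_t delta) {
    if (delta > 0) over -= delta; else under += -delta;
  }
  void PostUpdate(int64_t delta) {
    if (delta > 0) over += delta; else under -= -delta;
  }
  int64_t target_window() const {
    return std::min<int64_t>((1u << 31) - 1, over + target_initial_window);
  }
};
struct MiniStream {
  explicit MiniStream(MiniTransport* transport) : t(transport) {}
  MiniTransport* t;
  int64_t announced_window_delta = 0;
  void UpdateAnnouncedWindowDelta(int64_t change) {
    t->PreUpdate(announced_window_delta);   // remove old delta from aggregate
    announced_window_delta += change;
    t->PostUpdate(announced_window_delta);  // add new delta back in
  }
};
int main() {
  MiniTransport t;
  MiniStream s(&t);
  s.UpdateAnnouncedWindowDelta(+16384);  // stream announces extra credit
  std::printf("%lld\n", (long long)t.target_window());  // 65535 + 16384
}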