Merge remote-tracking branch 'upstream/master' into support/xamarin

pull/15969/head
Jan Tattermusch 6 years ago
commit 3511f45964
  1. 58
      BUILD
  2. 6
      CMakeLists.txt
  3. 6
      Makefile
  4. 4
      README.md
  5. 43
      TROUBLESHOOTING.md
  6. 2
      build.yaml
  7. 1
      config.m4
  8. 1
      config.w32
  9. 19
      doc/interop-test-descriptions.md
  10. 50
      doc/keepalive.md
  11. 1
      gRPC-C++.podspec
  12. 3
      gRPC-Core.podspec
  13. 2
      grpc.def
  14. 2
      grpc.gemspec
  15. 4
      grpc.gyp
  16. 23
      include/grpc/grpc.h
  17. 4
      include/grpc/impl/codegen/grpc_types.h
  18. 2
      include/grpc/support/string_util.h
  19. 53
      include/grpcpp/ext/server_load_reporting.h
  20. 2
      include/grpcpp/impl/codegen/completion_queue.h
  21. 11
      include/grpcpp/impl/codegen/method_handler_impl.h
  22. 2
      include/grpcpp/impl/codegen/server_context.h
  23. 2
      package.xml
  24. 2
      requirements.txt
  25. 5
      src/android/test/interop/app/src/main/cpp/grpc-interop.cc
  26. 420
      src/boringssl/crypto_test_data.cc
  27. 1190
      src/boringssl/err_data.c
  28. 481
      src/core/ext/filters/client_channel/client_channel.cc
  29. 5
      src/core/ext/filters/client_channel/client_channel.h
  30. 113
      src/core/ext/filters/client_channel/client_channel_channelz.cc
  31. 71
      src/core/ext/filters/client_channel/client_channel_channelz.h
  32. 9
      src/core/ext/filters/client_channel/client_channel_plugin.cc
  33. 15
      src/core/ext/filters/client_channel/lb_policy.h
  34. 29
      src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
  35. 59
      src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
  36. 62
      src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
  37. 13
      src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
  38. 8
      src/core/ext/filters/client_channel/lb_policy_factory.cc
  39. 4
      src/core/ext/filters/client_channel/lb_policy_factory.h
  40. 17
      src/core/ext/filters/client_channel/subchannel.cc
  41. 4
      src/core/ext/filters/client_channel/subchannel.h
  42. 65
      src/core/ext/filters/load_reporting/registered_opencensus_objects.h
  43. 54
      src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
  44. 9
      src/core/ext/filters/load_reporting/server_load_reporting_filter.h
  45. 3
      src/core/ext/transport/chttp2/transport/chttp2_transport.cc
  46. 2
      src/core/ext/transport/chttp2/transport/hpack_parser.cc
  47. 46
      src/core/lib/channel/channel_trace.cc
  48. 2
      src/core/lib/channel/channel_trace.h
  49. 151
      src/core/lib/channel/channelz.cc
  50. 62
      src/core/lib/channel/channelz.h
  51. 117
      src/core/lib/channel/channelz_registry.cc
  52. 97
      src/core/lib/channel/channelz_registry.h
  53. 28
      src/core/lib/gpr/string.cc
  54. 10
      src/core/lib/gpr/string.h
  55. 7
      src/core/lib/gprpp/abstract.h
  56. 58
      src/core/lib/gprpp/inlined_vector.h
  57. 4
      src/core/lib/gprpp/memory.h
  58. 5
      src/core/lib/gprpp/ref_counted_ptr.h
  59. 5
      src/core/lib/iomgr/ev_epollex_linux.cc
  60. 336
      src/core/lib/iomgr/executor.cc
  61. 61
      src/core/lib/iomgr/executor.h
  62. 6
      src/core/lib/iomgr/lockfree_event.cc
  63. 5
      src/core/lib/iomgr/tcp_posix.cc
  64. 13
      src/core/lib/json/json.cc
  65. 5
      src/core/lib/json/json.h
  66. 25
      src/core/lib/surface/channel.cc
  67. 4
      src/core/lib/surface/init.cc
  68. 30
      src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc
  69. 10
      src/cpp/server/load_reporter/constants.h
  70. 3
      src/cpp/server/load_reporter/load_data_store.h
  71. 79
      src/cpp/server/load_reporter/load_reporter.cc
  72. 5
      src/cpp/server/load_reporter/load_reporter.h
  73. 370
      src/cpp/server/load_reporter/load_reporter_async_service_impl.cc
  74. 194
      src/cpp/server/load_reporter/load_reporter_async_service_impl.h
  75. 41
      src/cpp/server/load_reporter/load_reporting_service_server_builder_option.cc
  76. 60
      src/cpp/server/load_reporter/load_reporting_service_server_builder_plugin.cc
  77. 62
      src/cpp/server/load_reporter/load_reporting_service_server_builder_plugin.h
  78. 45
      src/cpp/server/load_reporter/util.cc
  79. 26
      src/csharp/Grpc.Core/GrpcEnvironment.cs
  80. 41
      src/php/lib/Grpc/BaseStub.php
  81. 33
      src/php/lib/Grpc/CallInvoker.php
  82. 47
      src/php/lib/Grpc/DefaultCallInvoker.php
  83. 227
      src/php/tests/unit_tests/CallInvokerTest.php
  84. 1
      src/php/tests/unit_tests/InterceptorTest.php
  85. 4
      src/proto/grpc/lb/v1/load_reporter.proto
  86. 1
      src/python/grpcio/grpc_core_dependencies.py
  87. 4
      src/ruby/ext/grpc/rb_grpc_imports.generated.c
  88. 6
      src/ruby/ext/grpc/rb_grpc_imports.generated.h
  89. 2
      templates/tools/dockerfile/bazel.include
  90. 3
      templates/tools/dockerfile/interoptest/grpc_interop_dart/Dockerfile.template
  91. 2
      templates/tools/dockerfile/test/bazel/Dockerfile.template
  92. 14
      test/core/channel/channel_trace_test.cc
  93. 71
      test/core/channel/channelz_registry_test.cc
  94. 100
      test/core/channel/channelz_test.cc
  95. 16
      test/core/end2end/tests/channelz.cc
  96. 1
      test/core/gpr/BUILD
  97. 192
      test/core/gprpp/inlined_vector_test.cc
  98. 1
      test/core/iomgr/BUILD
  99. 2
      test/core/surface/public_headers_must_be_c89.c
  100. 16
      test/cpp/end2end/BUILD
Some files were not shown because too many files have changed in this diff.

58
BUILD

@ -98,10 +98,10 @@ GRPC_PUBLIC_HDRS = [
"include/grpc/grpc.h",
"include/grpc/grpc_posix.h",
"include/grpc/grpc_security_constants.h",
"include/grpc/load_reporting.h",
"include/grpc/slice.h",
"include/grpc/slice_buffer.h",
"include/grpc/status.h",
"include/grpc/load_reporting.h",
"include/grpc/support/workaround_list.h",
]
@ -860,6 +860,7 @@ grpc_cc_library(
"src/core/lib/iomgr/exec_ctx.h",
"src/core/lib/iomgr/executor.h",
"src/core/lib/iomgr/gethostname.h",
"src/core/lib/iomgr/gevent_util.h",
"src/core/lib/iomgr/iocp_windows.h",
"src/core/lib/iomgr/iomgr.h",
"src/core/lib/iomgr/iomgr_custom.h",
@ -1028,6 +1029,7 @@ grpc_cc_library(
"src/core/ext/filters/client_channel/backup_poller.cc",
"src/core/ext/filters/client_channel/channel_connectivity.cc",
"src/core/ext/filters/client_channel/client_channel.cc",
"src/core/ext/filters/client_channel/client_channel_channelz.cc",
"src/core/ext/filters/client_channel/client_channel_factory.cc",
"src/core/ext/filters/client_channel/client_channel_plugin.cc",
"src/core/ext/filters/client_channel/connector.cc",
@ -1050,6 +1052,7 @@ grpc_cc_library(
hdrs = [
"src/core/ext/filters/client_channel/backup_poller.h",
"src/core/ext/filters/client_channel/client_channel.h",
"src/core/ext/filters/client_channel/client_channel_channelz.h",
"src/core/ext/filters/client_channel/client_channel_factory.h",
"src/core/ext/filters/client_channel/connector.h",
"src/core/ext/filters/client_channel/http_connect_handshaker.h",
@ -1198,9 +1201,9 @@ grpc_cc_library(
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc",
"src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc",
"src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c",
"src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/google/protobuf/duration.pb.c",
"src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/google/protobuf/timestamp.pb.c",
"src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c",
],
hdrs = [
"src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h",
@ -1208,9 +1211,9 @@ grpc_cc_library(
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/google/protobuf/duration.pb.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/google/protobuf/timestamp.pb.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h",
],
external_deps = [
"nanopb",
@ -1231,9 +1234,9 @@ grpc_cc_library(
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc",
"src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc",
"src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c",
"src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/google/protobuf/duration.pb.c",
"src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/google/protobuf/timestamp.pb.c",
"src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c",
],
hdrs = [
"src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h",
@ -1241,9 +1244,9 @@ grpc_cc_library(
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/google/protobuf/duration.pb.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/google/protobuf/timestamp.pb.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h",
],
external_deps = [
"nanopb",
@ -1330,6 +1333,51 @@ grpc_cc_library(
],
)
grpc_cc_library(
name = "lb_server_load_reporting_service_server_builder_plugin",
srcs = [
"src/cpp/server/load_reporter/load_reporting_service_server_builder_plugin.cc",
],
hdrs = [
"src/cpp/server/load_reporter/load_reporting_service_server_builder_plugin.h",
],
language = "c++",
deps = [
"lb_load_reporter_service",
],
)
grpc_cc_library(
name = "grpcpp_server_load_reporting",
srcs = [
"src/cpp/server/load_reporter/load_reporting_service_server_builder_option.cc",
"src/cpp/server/load_reporter/util.cc",
],
language = "c++",
public_hdrs = [
"include/grpcpp/ext/server_load_reporting.h",
],
deps = [
"lb_server_load_reporting_filter",
"lb_server_load_reporting_service_server_builder_plugin",
],
alwayslink = 1,
)
grpc_cc_library(
name = "lb_load_reporter_service",
srcs = [
"src/cpp/server/load_reporter/load_reporter_async_service_impl.cc",
],
hdrs = [
"src/cpp/server/load_reporter/load_reporter_async_service_impl.h",
],
language = "c++",
deps = [
"lb_load_reporter",
],
)
grpc_cc_library(
name = "lb_get_cpu_stats",
srcs = [

@ -1187,6 +1187,7 @@ add_library(grpc
src/core/ext/filters/client_channel/backup_poller.cc
src/core/ext/filters/client_channel/channel_connectivity.cc
src/core/ext/filters/client_channel/client_channel.cc
src/core/ext/filters/client_channel/client_channel_channelz.cc
src/core/ext/filters/client_channel/client_channel_factory.cc
src/core/ext/filters/client_channel/client_channel_plugin.cc
src/core/ext/filters/client_channel/connector.cc
@ -1523,6 +1524,7 @@ add_library(grpc_cronet
src/core/ext/filters/client_channel/backup_poller.cc
src/core/ext/filters/client_channel/channel_connectivity.cc
src/core/ext/filters/client_channel/client_channel.cc
src/core/ext/filters/client_channel/client_channel_channelz.cc
src/core/ext/filters/client_channel/client_channel_factory.cc
src/core/ext/filters/client_channel/client_channel_plugin.cc
src/core/ext/filters/client_channel/connector.cc
@ -1881,6 +1883,7 @@ add_library(grpc_test_util
src/core/ext/filters/client_channel/backup_poller.cc
src/core/ext/filters/client_channel/channel_connectivity.cc
src/core/ext/filters/client_channel/client_channel.cc
src/core/ext/filters/client_channel/client_channel_channelz.cc
src/core/ext/filters/client_channel/client_channel_factory.cc
src/core/ext/filters/client_channel/client_channel_plugin.cc
src/core/ext/filters/client_channel/connector.cc
@ -2188,6 +2191,7 @@ add_library(grpc_test_util_unsecure
src/core/ext/filters/client_channel/backup_poller.cc
src/core/ext/filters/client_channel/channel_connectivity.cc
src/core/ext/filters/client_channel/client_channel.cc
src/core/ext/filters/client_channel/client_channel_channelz.cc
src/core/ext/filters/client_channel/client_channel_factory.cc
src/core/ext/filters/client_channel/client_channel_plugin.cc
src/core/ext/filters/client_channel/connector.cc
@ -2508,6 +2512,7 @@ add_library(grpc_unsecure
src/core/ext/filters/client_channel/backup_poller.cc
src/core/ext/filters/client_channel/channel_connectivity.cc
src/core/ext/filters/client_channel/client_channel.cc
src/core/ext/filters/client_channel/client_channel_channelz.cc
src/core/ext/filters/client_channel/client_channel_factory.cc
src/core/ext/filters/client_channel/client_channel_plugin.cc
src/core/ext/filters/client_channel/connector.cc
@ -3315,6 +3320,7 @@ add_library(grpc++_cronet
src/core/ext/filters/client_channel/backup_poller.cc
src/core/ext/filters/client_channel/channel_connectivity.cc
src/core/ext/filters/client_channel/client_channel.cc
src/core/ext/filters/client_channel/client_channel_channelz.cc
src/core/ext/filters/client_channel/client_channel_factory.cc
src/core/ext/filters/client_channel/client_channel_plugin.cc
src/core/ext/filters/client_channel/connector.cc

@ -3633,6 +3633,7 @@ LIBGRPC_SRC = \
src/core/ext/filters/client_channel/backup_poller.cc \
src/core/ext/filters/client_channel/channel_connectivity.cc \
src/core/ext/filters/client_channel/client_channel.cc \
src/core/ext/filters/client_channel/client_channel_channelz.cc \
src/core/ext/filters/client_channel/client_channel_factory.cc \
src/core/ext/filters/client_channel/client_channel_plugin.cc \
src/core/ext/filters/client_channel/connector.cc \
@ -3968,6 +3969,7 @@ LIBGRPC_CRONET_SRC = \
src/core/ext/filters/client_channel/backup_poller.cc \
src/core/ext/filters/client_channel/channel_connectivity.cc \
src/core/ext/filters/client_channel/client_channel.cc \
src/core/ext/filters/client_channel/client_channel_channelz.cc \
src/core/ext/filters/client_channel/client_channel_factory.cc \
src/core/ext/filters/client_channel/client_channel_plugin.cc \
src/core/ext/filters/client_channel/connector.cc \
@ -4324,6 +4326,7 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/ext/filters/client_channel/backup_poller.cc \
src/core/ext/filters/client_channel/channel_connectivity.cc \
src/core/ext/filters/client_channel/client_channel.cc \
src/core/ext/filters/client_channel/client_channel_channelz.cc \
src/core/ext/filters/client_channel/client_channel_factory.cc \
src/core/ext/filters/client_channel/client_channel_plugin.cc \
src/core/ext/filters/client_channel/connector.cc \
@ -4622,6 +4625,7 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \
src/core/ext/filters/client_channel/backup_poller.cc \
src/core/ext/filters/client_channel/channel_connectivity.cc \
src/core/ext/filters/client_channel/client_channel.cc \
src/core/ext/filters/client_channel/client_channel_channelz.cc \
src/core/ext/filters/client_channel/client_channel_factory.cc \
src/core/ext/filters/client_channel/client_channel_plugin.cc \
src/core/ext/filters/client_channel/connector.cc \
@ -4920,6 +4924,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/ext/filters/client_channel/backup_poller.cc \
src/core/ext/filters/client_channel/channel_connectivity.cc \
src/core/ext/filters/client_channel/client_channel.cc \
src/core/ext/filters/client_channel/client_channel_channelz.cc \
src/core/ext/filters/client_channel/client_channel_factory.cc \
src/core/ext/filters/client_channel/client_channel_plugin.cc \
src/core/ext/filters/client_channel/connector.cc \
@ -5715,6 +5720,7 @@ LIBGRPC++_CRONET_SRC = \
src/core/ext/filters/client_channel/backup_poller.cc \
src/core/ext/filters/client_channel/channel_connectivity.cc \
src/core/ext/filters/client_channel/client_channel.cc \
src/core/ext/filters/client_channel/client_channel_channelz.cc \
src/core/ext/filters/client_channel/client_channel_factory.cc \
src/core/ext/filters/client_channel/client_channel_plugin.cc \
src/core/ext/filters/client_channel/connector.cc \

@ -45,6 +45,10 @@ Please read [How to contribute](CONTRIBUTING.md) which will guide you through th
the gRPC codebase.
The document also contains info on how the contributing process works and contains best practices for creating contributions.
# Troubleshooting
Sometimes things go wrong. Please check out the [Troubleshooting guide](TROUBLESHOOTING.md) if you are experiencing issues with gRPC.
# Performance
See [Performance dashboard](http://performance-dot-grpc-testing.appspot.com/explore?dashboard=5636470266134528) for the performance numbers for the latest released version.

@ -0,0 +1,43 @@
# Troubleshooting gRPC
This guide is for troubleshooting gRPC implementations based on the C core library (the sources for most of them live in the `grpc/grpc` repository).
## Enabling extra logging and tracing
Extra logging can be very useful for diagnosing problems. All gRPC implementations based on the C core library support
the `GRPC_VERBOSITY` and `GRPC_TRACE` environment variables, which can be used to increase the amount of information
that gets printed to stderr.
## GRPC_VERBOSITY
`GRPC_VERBOSITY` is used to set the minimum level of log messages printed by gRPC (supported values are `DEBUG`, `INFO` and `ERROR`). If this environment variable is unset, only `ERROR` logs will be printed.
## GRPC_TRACE
`GRPC_TRACE` can be used to enable extra logging for some internal gRPC components. Enabling the right traces can be invaluable
for diagnosing what is going wrong when things aren't working as intended. Possible values for `GRPC_TRACE` are listed in [Environment Variables Overview](doc/environment_variables.md).
Multiple traces can be enabled at once (use a comma as separator).
```
# Enable debug logs for an application
GRPC_VERBOSITY=debug ./helloworld_application_using_grpc
```
```
# Print information about invocations of low-level C core API.
# Note that trace logs of log level DEBUG won't be displayed.
# Also note that most tracers use log level INFO, so without setting
# GRPC_VERBOSITY accordingly, no traces will be printed.
GRPC_VERBOSITY=info GRPC_TRACE=api ./helloworld_application_using_grpc
```
```
# Print info from 3 different tracers, including tracing logs with log level DEBUG
GRPC_VERBOSITY=debug GRPC_TRACE=tcp,http,api ./helloworld_application_using_grpc
```
Known limitations: `GRPC_TRACE=tcp` is currently not implemented for Windows (you won't see any tcp traces).
Please note that the `GRPC_TRACE` environment variable has nothing to do with gRPC's "tracing" feature (i.e., tracing RPCs in a
microservice environment to gain insight into how requests are processed by a deployment); it is merely used to enable printing
of extra logs.
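The same knobs can also be set from code before the library is initialized, which can be handy in a test harness. The sketch below is an illustration only: it assumes a POSIX `setenv` and that no gRPC channel or server has been created yet, since the variables are read when the library initializes.
```cpp
// Illustrative only: set the troubleshooting variables programmatically.
// Assumes POSIX setenv(); the variables must be set before the first
// channel or server is created, because they are read at library init time.
#include <cstdlib>

#include <grpcpp/grpcpp.h>

int main() {
  setenv("GRPC_VERBOSITY", "debug", /*overwrite=*/1);
  setenv("GRPC_TRACE", "api,tcp", /*overwrite=*/1);
  auto channel = grpc::CreateChannel("localhost:50051",
                                     grpc::InsecureChannelCredentials());
  // ... use the channel; debug logs and api/tcp traces go to stderr ...
  return 0;
}
```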

@ -564,6 +564,7 @@ filegroups:
headers:
- src/core/ext/filters/client_channel/backup_poller.h
- src/core/ext/filters/client_channel/client_channel.h
- src/core/ext/filters/client_channel/client_channel_channelz.h
- src/core/ext/filters/client_channel/client_channel_factory.h
- src/core/ext/filters/client_channel/connector.h
- src/core/ext/filters/client_channel/http_connect_handshaker.h
@ -586,6 +587,7 @@ filegroups:
- src/core/ext/filters/client_channel/backup_poller.cc
- src/core/ext/filters/client_channel/channel_connectivity.cc
- src/core/ext/filters/client_channel/client_channel.cc
- src/core/ext/filters/client_channel/client_channel_channelz.cc
- src/core/ext/filters/client_channel/client_channel_factory.cc
- src/core/ext/filters/client_channel/client_channel_plugin.cc
- src/core/ext/filters/client_channel/connector.cc

@ -332,6 +332,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/ext/filters/client_channel/backup_poller.cc \
src/core/ext/filters/client_channel/channel_connectivity.cc \
src/core/ext/filters/client_channel/client_channel.cc \
src/core/ext/filters/client_channel/client_channel_channelz.cc \
src/core/ext/filters/client_channel/client_channel_factory.cc \
src/core/ext/filters/client_channel/client_channel_plugin.cc \
src/core/ext/filters/client_channel/connector.cc \

@ -307,6 +307,7 @@ if (PHP_GRPC != "no") {
"src\\core\\ext\\filters\\client_channel\\backup_poller.cc " +
"src\\core\\ext\\filters\\client_channel\\channel_connectivity.cc " +
"src\\core\\ext\\filters\\client_channel\\client_channel.cc " +
"src\\core\\ext\\filters\\client_channel\\client_channel_channelz.cc " +
"src\\core\\ext\\filters\\client_channel\\client_channel_factory.cc " +
"src\\core\\ext\\filters\\client_channel\\client_channel_plugin.cc " +
"src\\core\\ext\\filters\\client_channel\\connector.cc " +

@ -899,6 +899,25 @@ Status: TODO
This test verifies that a client sending faster than a server can drain sees
pushback (i.e., attempts to send succeed only after appropriate delays).
### Experimental Tests
These tests are not yet standardized, and are not yet implemented in all
languages. Therefore they are not part of our interop matrix.
#### rpc_soak
The client performs many large_unary RPCs in sequence over the same channel.
The number of RPCs is configured by the experimental flag, `soak_iterations`.
#### channel_soak
The client performs many large_unary RPCs in sequence. Before each RPC, it
tears down and rebuilds the channel. The number of RPCs is configured by
the experimental flag, `soak_iterations`.
This test puts stress on several gRPC components: the resolver, the load
balancer, and the RPC hotpath.
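For illustration only, a rough sketch of the two soak patterns described above. `DoLargeUnary` stands in for the interop client's large_unary test and `soak_iterations` mirrors the experimental flag; neither is the actual interop client code.
```cpp
// Sketch of the rpc_soak / channel_soak patterns (not the real interop client).
#include <memory>
#include <string>

#include <grpcpp/grpcpp.h>

void DoLargeUnary(const std::shared_ptr<grpc::Channel>& /*channel*/) {
  // Placeholder for the interop client's large_unary RPC.
}

void RpcSoak(const std::string& target, int soak_iterations) {
  // One channel, reused for every RPC.
  auto channel =
      grpc::CreateChannel(target, grpc::InsecureChannelCredentials());
  for (int i = 0; i < soak_iterations; ++i) {
    DoLargeUnary(channel);
  }
}

void ChannelSoak(const std::string& target, int soak_iterations) {
  for (int i = 0; i < soak_iterations; ++i) {
    // Tear down and rebuild the channel before each RPC, exercising the
    // resolver, the load balancer, and the RPC hotpath.
    auto channel =
        grpc::CreateChannel(target, grpc::InsecureChannelCredentials());
    DoLargeUnary(channel);
  }
}
```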
### TODO Tests
#### High priority:

@ -0,0 +1,50 @@
# Keepalive User Guide for gRPC Core (and dependents)
The keepalive ping is a way to check if a channel is currently working by sending HTTP2 pings over the transport. It is sent periodically, and if the ping is not acknowledged by the peer within a certain timeout period, the transport is disconnected.
This guide documents the knobs within gRPC core to control the current behavior of the keepalive ping.
The keepalive ping is controlled by two important channel arguments -
* **GRPC_ARG_KEEPALIVE_TIME_MS**
* This channel argument controls the period (in milliseconds) after which a keepalive ping is sent on the transport.
* **GRPC_ARG_KEEPALIVE_TIMEOUT_MS**
* This channel argument controls the amount of time (in milliseconds) that the sender of the keepalive ping waits for an acknowledgement. If it does not receive an acknowledgement within this time, it will close the connection.
The above two channel arguments should be sufficient for most users, but the following arguments can also be useful in certain use cases.
* **GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS**
* This channel argument, if set to 1 (0 : false; 1 : true), allows keepalive pings to be sent even if there are no calls in flight.
* **GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA**
* This channel argument controls the maximum number of pings that can be sent when there is no other data (data frame or header frame) to be sent. gRPC Core will not continue sending pings if we run over the limit. Setting it to 0 removes this limit, allowing pings to be sent even when no data is being sent.
* **GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS**
* If there is no data being sent on the transport, this channel argument controls the minimum time (in milliseconds) gRPC Core will wait between successive pings.
* **GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS**
* If there is no data being sent on the transport, this channel argument on the server side controls the minimum time (in milliseconds) that gRPC Core would expect between receiving successive pings. If the time between successive pings is less than this time, then the ping will be considered a bad ping from the peer. Such a ping counts as a ‘ping strike’.
On the client side, this does not have any effect.
* **GRPC_ARG_HTTP2_MAX_PING_STRIKES**
* This arg controls the maximum number of bad pings that the server will tolerate before sending an HTTP2 GOAWAY frame and closing the transport. Setting it to 0 allows the server to accept any number of bad pings.
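As an illustration (not part of the guide itself), these arguments map directly onto `grpc::ChannelArguments` in C++; the values below are arbitrary examples, not recommendations.
```cpp
#include <memory>
#include <string>

#include <grpcpp/grpcpp.h>

std::shared_ptr<grpc::Channel> MakeKeepaliveChannel(const std::string& target) {
  grpc::ChannelArguments args;
  // Send a keepalive ping every 10 seconds...
  args.SetInt(GRPC_ARG_KEEPALIVE_TIME_MS, 10 * 1000);
  // ...and wait up to 5 seconds for the acknowledgement before closing
  // the transport.
  args.SetInt(GRPC_ARG_KEEPALIVE_TIMEOUT_MS, 5 * 1000);
  // Allow keepalive pings even when there are no calls in flight.
  args.SetInt(GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS, 1);
  return grpc::CreateCustomChannel(
      target, grpc::InsecureChannelCredentials(), args);
}
```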
### Default Values
Channel Argument| Client|Server
----------------|-------|------
GRPC_ARG_KEEPALIVE_TIME_MS|INT_MAX (disabled)|7200000 (2 hours)
GRPC_ARG_KEEPALIVE_TIMEOUT_MS|20000 (20 seconds)|20000 (20 seconds)
GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS|0 (false)|0 (false)
GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA|2|2
GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS|300000 (5 minutes)|300000 (5 minutes)
GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS|N/A|300000 (5 minutes)
GRPC_ARG_HTTP2_MAX_PING_STRIKES|N/A|2
### FAQ
* When is the keepalive timer started?
* The keepalive timer is started when a transport is done connecting (after handshake).
* What happens when the keepalive timer fires?
* When the keepalive timer fires, gRPC Core will try to send a keepalive ping on the transport. This ping can be blocked if -
* there is no active call on that transport and GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS is false.
* the number of pings already sent on the transport without any data has exceeded GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA.
* the time elapsed since the previous ping is less than GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS.
* If a keepalive ping is not blocked and is sent on the transport, then the keepalive watchdog timer is started, which will close the transport if the ping is not acknowledged before it fires.
* Why am I receiving a GOAWAY with error code ENHANCE_YOUR_CALM?
* A server sends a GOAWAY with ENHANCE_YOUR_CALM if the client sends too many misbehaving pings. For example -
* if a server has GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS set to false, and the client sends pings without there being any call in flight.
* if the client's GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS setting is lower than the server's GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS.

@ -317,6 +317,7 @@ Pod::Spec.new do |s|
'src/core/ext/transport/chttp2/client/chttp2_connector.h',
'src/core/ext/filters/client_channel/backup_poller.h',
'src/core/ext/filters/client_channel/client_channel.h',
'src/core/ext/filters/client_channel/client_channel_channelz.h',
'src/core/ext/filters/client_channel/client_channel_factory.h',
'src/core/ext/filters/client_channel/connector.h',
'src/core/ext/filters/client_channel/http_connect_handshaker.h',

@ -328,6 +328,7 @@ Pod::Spec.new do |s|
'src/core/ext/transport/chttp2/client/chttp2_connector.h',
'src/core/ext/filters/client_channel/backup_poller.h',
'src/core/ext/filters/client_channel/client_channel.h',
'src/core/ext/filters/client_channel/client_channel_channelz.h',
'src/core/ext/filters/client_channel/client_channel_factory.h',
'src/core/ext/filters/client_channel/connector.h',
'src/core/ext/filters/client_channel/http_connect_handshaker.h',
@ -753,6 +754,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_channelz.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
'src/core/ext/filters/client_channel/client_channel_plugin.cc',
'src/core/ext/filters/client_channel/connector.cc',
@ -916,6 +918,7 @@ Pod::Spec.new do |s|
'src/core/ext/transport/chttp2/client/chttp2_connector.h',
'src/core/ext/filters/client_channel/backup_poller.h',
'src/core/ext/filters/client_channel/client_channel.h',
'src/core/ext/filters/client_channel/client_channel_channelz.h',
'src/core/ext/filters/client_channel/client_channel_factory.h',
'src/core/ext/filters/client_channel/connector.h',
'src/core/ext/filters/client_channel/http_connect_handshaker.h',

@ -69,6 +69,8 @@ EXPORTS
grpc_resource_quota_unref
grpc_resource_quota_resize
grpc_resource_quota_arg_vtable
grpc_channelz_get_top_channels
grpc_channelz_get_channel
grpc_insecure_channel_create_from_fd
grpc_server_add_insecure_channel_from_fd
grpc_use_signal

@ -265,6 +265,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/transport/chttp2/client/chttp2_connector.h )
s.files += %w( src/core/ext/filters/client_channel/backup_poller.h )
s.files += %w( src/core/ext/filters/client_channel/client_channel.h )
s.files += %w( src/core/ext/filters/client_channel/client_channel_channelz.h )
s.files += %w( src/core/ext/filters/client_channel/client_channel_factory.h )
s.files += %w( src/core/ext/filters/client_channel/connector.h )
s.files += %w( src/core/ext/filters/client_channel/http_connect_handshaker.h )
@ -693,6 +694,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/filters/client_channel/backup_poller.cc )
s.files += %w( src/core/ext/filters/client_channel/channel_connectivity.cc )
s.files += %w( src/core/ext/filters/client_channel/client_channel.cc )
s.files += %w( src/core/ext/filters/client_channel/client_channel_channelz.cc )
s.files += %w( src/core/ext/filters/client_channel/client_channel_factory.cc )
s.files += %w( src/core/ext/filters/client_channel/client_channel_plugin.cc )
s.files += %w( src/core/ext/filters/client_channel/connector.cc )

@ -524,6 +524,7 @@
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_channelz.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
'src/core/ext/filters/client_channel/client_channel_plugin.cc',
'src/core/ext/filters/client_channel/connector.cc',
@ -783,6 +784,7 @@
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_channelz.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
'src/core/ext/filters/client_channel/client_channel_plugin.cc',
'src/core/ext/filters/client_channel/connector.cc',
@ -1015,6 +1017,7 @@
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_channelz.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
'src/core/ext/filters/client_channel/client_channel_plugin.cc',
'src/core/ext/filters/client_channel/connector.cc',
@ -1259,6 +1262,7 @@
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_channelz.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
'src/core/ext/filters/client_channel/client_channel_plugin.cc',
'src/core/ext/filters/client_channel/connector.cc',

@ -454,6 +454,29 @@ GRPCAPI void grpc_resource_quota_resize(grpc_resource_quota* resource_quota,
*/
GRPCAPI const grpc_arg_pointer_vtable* grpc_resource_quota_arg_vtable(void);
/************* CHANNELZ API *************/
/** Channelz is under active development. The following APIs will see some
churn as the feature is implemented. This comment will be removed once
channelz is officially supported, and these APIs become stable. For now
you may track the progress by following this github issue:
https://github.com/grpc/grpc/issues/15340
The following APIs return allocated JSON strings that match the response
objects from the channelz proto, found here:
https://github.com/grpc/grpc/blob/master/src/proto/grpc/channelz/channelz.proto.
For easy conversion to protobuf, the JSON is formatted according to:
https://developers.google.com/protocol-buffers/docs/proto3#json. */
/* Gets all root channels (i.e. channels the application has directly
created). This does not include subchannels nor non-top level channels.
The returned string is allocated and must be freed by the application. */
GRPCAPI char* grpc_channelz_get_top_channels(intptr_t start_channel_id);
/* Returns a single Channel, or else a NOT_FOUND code. The returned string
is allocated and must be freed by the application. */
GRPCAPI char* grpc_channelz_get_channel(intptr_t channel_id);
#ifdef __cplusplus
}
#endif
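For illustration, a minimal sketch of calling the new channelz accessors from application code, assuming (as with other core-allocated strings) that the returned JSON is released with `gpr_free`:
```cpp
#include <stdio.h>

#include <grpc/grpc.h>
#include <grpc/support/alloc.h>

int main() {
  grpc_init();
  // Fetch all root channels starting from id 0 as a JSON string.
  char* top_channels = grpc_channelz_get_top_channels(/*start_channel_id=*/0);
  if (top_channels != nullptr) {
    printf("%s\n", top_channels);
    gpr_free(top_channels);  // the caller owns the returned string
  }
  grpc_shutdown();
  return 0;
}
```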

@ -196,8 +196,8 @@ typedef struct {
data frame, Int valued, milliseconds. */
#define GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS \
"grpc.http2.min_time_between_pings_ms"
/** Minimum allowed time between receiving successive ping frames without
sending any data frame. Int valued, milliseconds */
/** Minimum allowed time between a server receiving successive ping frames
without sending any data frame. Int valued, milliseconds */
#define GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS \
"grpc.http2.min_ping_interval_without_data_ms"
/** Channel arg to override the http2 :scheme header */

@ -21,6 +21,8 @@
#include <grpc/support/port_platform.h>
#include <grpc/impl/codegen/gpr_types.h>
#ifdef __cplusplus
extern "C" {
#endif

@ -0,0 +1,53 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPCPP_EXT_SERVER_LOAD_REPORTING_H
#define GRPCPP_EXT_SERVER_LOAD_REPORTING_H
#include <grpc/support/port_platform.h>
#include <grpc/load_reporting.h>
#include <grpcpp/impl/codegen/config.h>
#include <grpcpp/impl/codegen/server_context.h>
#include <grpcpp/impl/server_builder_option.h>
namespace grpc {
namespace load_reporter {
namespace experimental {
// The ServerBuilderOption to enable the server-side load reporting feature. To
// enable the feature, please make sure the binary builds with the
// grpcpp_server_load_reporting library and set this option in the
// ServerBuilder.
class LoadReportingServiceServerBuilderOption : public ServerBuilderOption {
public:
void UpdateArguments(::grpc::ChannelArguments* args) override;
void UpdatePlugins(std::vector<std::unique_ptr<::grpc::ServerBuilderPlugin>>*
plugins) override;
};
// Adds the load reporting cost with \a cost_name and \a cost_value in the
// trailing metadata of the server context.
void AddLoadReportingCost(grpc::ServerContext* ctx,
const grpc::string& cost_name, double cost_value);
} // namespace experimental
} // namespace load_reporter
} // namespace grpc
#endif // GRPCPP_EXT_SERVER_LOAD_REPORTING_H
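A hedged usage sketch of the option declared above, assuming the binary links against the `grpcpp_server_load_reporting` target added in this change; the port and the handler comment are placeholders:
```cpp
#include <memory>

#include <grpcpp/ext/server_load_reporting.h>
#include <grpcpp/grpcpp.h>

int main() {
  grpc::ServerBuilder builder;
  builder.AddListeningPort("0.0.0.0:50051",
                           grpc::InsecureServerCredentials());
  // Enable the load reporting service declared in this header.
  builder.SetOption(std::unique_ptr<grpc::ServerBuilderOption>(
      new grpc::load_reporter::experimental::
          LoadReportingServiceServerBuilderOption()));
  // Inside a service handler, per-call costs would be attached via
  // grpc::load_reporter::experimental::AddLoadReportingCost(ctx, "cost", 1.0);
  std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
  server->Wait();
  return 0;
}
```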

@ -367,7 +367,7 @@ class ServerCompletionQueue : public CompletionQueue {
protected:
/// Default constructor
ServerCompletionQueue() {}
ServerCompletionQueue() : polling_type_(GRPC_CQ_DEFAULT_POLLING) {}
private:
/// \param is_frequently_polled Informs the GRPC library about whether the

@ -113,14 +113,15 @@ class ClientStreamingHandler : public MethodHandler {
return func_(service_, param.server_context, &reader, &rsp);
});
GPR_CODEGEN_ASSERT(!param.server_context->sent_initial_metadata_);
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpServerSendStatus>
ops;
ops.SendInitialMetadata(param.server_context->initial_metadata_,
param.server_context->initial_metadata_flags());
if (param.server_context->compression_level_set()) {
ops.set_compression_level(param.server_context->compression_level());
if (!param.server_context->sent_initial_metadata_) {
ops.SendInitialMetadata(param.server_context->initial_metadata_,
param.server_context->initial_metadata_flags());
if (param.server_context->compression_level_set()) {
ops.set_compression_level(param.server_context->compression_level());
}
}
if (status.ok()) {
status = ops.SendMessage(rsp);

@ -201,7 +201,7 @@ class ServerContext {
/// \param algorithm The compression algorithm used for the server call.
void set_compression_algorithm(grpc_compression_algorithm algorithm);
/// Set the load reporting costs in \a cost_data for the call.
/// Set the serialized load reporting costs in \a cost_data for the call.
void SetLoadReportingCosts(const std::vector<grpc::string>& cost_data);
/// Return the authentication context for this server call.

@ -270,6 +270,7 @@
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/client/chttp2_connector.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/backup_poller.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/client_channel.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/client_channel_channelz.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/client_channel_factory.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/connector.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/http_connect_handshaker.h" role="src" />
@ -698,6 +699,7 @@
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/backup_poller.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/channel_connectivity.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/client_channel.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/client_channel_channelz.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/client_channel_factory.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/client_channel_plugin.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/connector.cc" role="src" />

@ -1,6 +1,6 @@
# GRPC Python setup requirements
coverage>=4.0
cython>=0.27
cython==0.28.3
enum34>=1.0.4
protobuf>=3.5.0.post1
six>=1.10

@ -45,9 +45,10 @@ std::shared_ptr<grpc::testing::InteropClient> GetClient(const char* host,
credentials = grpc::InsecureChannelCredentials();
}
grpc::testing::ChannelCreationFunc channel_creation_func =
std::bind(grpc::CreateChannel, host_port, credentials);
return std::shared_ptr<grpc::testing::InteropClient>(
new grpc::testing::InteropClient(
grpc::CreateChannel(host_port, credentials), true, false));
new grpc::testing::InteropClient(channel_creation_func, true, false));
}
extern "C" JNIEXPORT jboolean JNICALL

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

@ -126,9 +126,9 @@ typedef struct client_channel_channel_data {
/* the following properties are guarded by a mutex since APIs require them
to be instantaneously available */
gpr_mu info_mu;
char* info_lb_policy_name;
grpc_core::UniquePtr<char> info_lb_policy_name;
/** service config in JSON form */
char* info_service_config_json;
grpc_core::UniquePtr<char> info_service_config_json;
} channel_data;
typedef struct {
@ -284,6 +284,78 @@ static void parse_retry_throttle_params(
}
}
// Invoked from the resolver NextLocked() callback when the resolver
// is shutting down.
static void on_resolver_shutdown_locked(channel_data* chand,
grpc_error* error) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p: shutting down", chand);
}
if (chand->lb_policy != nullptr) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p: shutting down lb_policy=%p", chand,
chand->lb_policy.get());
}
grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
chand->interested_parties);
chand->lb_policy.reset();
}
if (chand->resolver != nullptr) {
// This should never happen; it can only be triggered by a resolver
// implementation spontaneously deciding to report shutdown without
// being orphaned. This code is included just to be defensive.
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p: spontaneous shutdown from resolver %p",
chand, chand->resolver.get());
}
chand->resolver.reset();
set_channel_connectivity_state_locked(
chand, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Resolver spontaneous shutdown", &error, 1),
"resolver_spontaneous_shutdown");
}
grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Channel disconnected", &error, 1));
GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "resolver");
grpc_channel_args_destroy(chand->resolver_result);
chand->resolver_result = nullptr;
GRPC_ERROR_UNREF(error);
}
// Returns the LB policy name from the resolver result.
static grpc_core::UniquePtr<char>
get_lb_policy_name_from_resolver_result_locked(channel_data* chand) {
// Find LB policy name in channel args.
const grpc_arg* channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
const char* lb_policy_name = grpc_channel_arg_get_string(channel_arg);
// Special case: If at least one balancer address is present, we use
// the grpclb policy, regardless of what the resolver actually specified.
channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) {
grpc_lb_addresses* addresses =
static_cast<grpc_lb_addresses*>(channel_arg->value.pointer.p);
if (grpc_lb_addresses_contains_balancer_address(*addresses)) {
if (lb_policy_name != nullptr &&
gpr_stricmp(lb_policy_name, "grpclb") != 0) {
gpr_log(GPR_INFO,
"resolver requested LB policy %s but provided at least one "
"balancer address -- forcing use of grpclb LB policy",
lb_policy_name);
}
lb_policy_name = "grpclb";
}
}
// Use pick_first if nothing was specified and we didn't select grpclb
// above.
if (lb_policy_name == nullptr) lb_policy_name = "pick_first";
return grpc_core::UniquePtr<char>(gpr_strdup(lb_policy_name));
}
static void request_reresolution_locked(void* arg, grpc_error* error) {
reresolution_request_args* args =
static_cast<reresolution_request_args*>(arg);
@ -304,234 +376,183 @@ static void request_reresolution_locked(void* arg, grpc_error* error) {
chand->lb_policy->SetReresolutionClosureLocked(&args->closure);
}
// TODO(roth): The logic in this function is very hard to follow. We
// should refactor this so that it's easier to understand, perhaps as
// part of changing the resolver API to more clearly differentiate
// between transient failures and shutdown.
static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
channel_data* chand = static_cast<channel_data*>(arg);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p: got resolver result: resolver_result=%p error=%s", chand,
chand->resolver_result, grpc_error_string(error));
}
// Extract the following fields from the resolver result, if non-nullptr.
bool lb_policy_updated = false;
bool lb_policy_created = false;
char* lb_policy_name_dup = nullptr;
bool lb_policy_name_changed = false;
grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> new_lb_policy;
char* service_config_json = nullptr;
grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
grpc_core::RefCountedPtr<MethodParamsTable> method_params_table;
if (chand->resolver_result != nullptr) {
if (chand->resolver != nullptr) {
// Find LB policy name.
const grpc_arg* channel_arg = grpc_channel_args_find(
chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
const char* lb_policy_name = grpc_channel_arg_get_string(channel_arg);
// Special case: If at least one balancer address is present, we use
// the grpclb policy, regardless of what the resolver actually specified.
channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) {
grpc_lb_addresses* addresses =
static_cast<grpc_lb_addresses*>(channel_arg->value.pointer.p);
bool found_balancer_address = false;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) {
found_balancer_address = true;
break;
}
}
if (found_balancer_address) {
if (lb_policy_name != nullptr &&
strcmp(lb_policy_name, "grpclb") != 0) {
gpr_log(GPR_INFO,
"resolver requested LB policy %s but provided at least one "
"balancer address -- forcing use of grpclb LB policy",
lb_policy_name);
}
lb_policy_name = "grpclb";
}
}
// Use pick_first if nothing was specified and we didn't select grpclb
// above.
if (lb_policy_name == nullptr) lb_policy_name = "pick_first";
// Check to see if we're already using the right LB policy.
// Note: It's safe to use chand->info_lb_policy_name here without
// taking a lock on chand->info_mu, because this function is the
// only thing that modifies its value, and it can only be invoked
// once at any given time.
lb_policy_name_changed =
chand->info_lb_policy_name == nullptr ||
gpr_stricmp(chand->info_lb_policy_name, lb_policy_name) != 0;
if (chand->lb_policy != nullptr && !lb_policy_name_changed) {
// Continue using the same LB policy. Update with new addresses.
lb_policy_updated = true;
chand->lb_policy->UpdateLocked(*chand->resolver_result);
} else {
// Instantiate new LB policy.
grpc_core::LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = chand->combiner;
lb_policy_args.client_channel_factory = chand->client_channel_factory;
lb_policy_args.args = chand->resolver_result;
new_lb_policy =
grpc_core::LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
lb_policy_name, lb_policy_args);
if (GPR_UNLIKELY(new_lb_policy == nullptr)) {
gpr_log(GPR_ERROR, "could not create LB policy \"%s\"",
lb_policy_name);
} else {
lb_policy_created = true;
reresolution_request_args* args =
static_cast<reresolution_request_args*>(
gpr_zalloc(sizeof(*args)));
args->chand = chand;
args->lb_policy = new_lb_policy.get();
GRPC_CLOSURE_INIT(&args->closure, request_reresolution_locked, args,
grpc_combiner_scheduler(chand->combiner));
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "re-resolution");
new_lb_policy->SetReresolutionClosureLocked(&args->closure);
}
}
// Before we clean up, save a copy of lb_policy_name, since it might
// be pointing to data inside chand->resolver_result.
// The copy will be saved in chand->lb_policy_name below.
lb_policy_name_dup = gpr_strdup(lb_policy_name);
// Find service config.
channel_arg = grpc_channel_args_find(chand->resolver_result,
GRPC_ARG_SERVICE_CONFIG);
service_config_json =
gpr_strdup(grpc_channel_arg_get_string(channel_arg));
if (service_config_json != nullptr) {
grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config =
grpc_core::ServiceConfig::Create(service_config_json);
if (service_config != nullptr) {
if (chand->enable_retries) {
channel_arg = grpc_channel_args_find(chand->resolver_result,
GRPC_ARG_SERVER_URI);
const char* server_uri = grpc_channel_arg_get_string(channel_arg);
GPR_ASSERT(server_uri != nullptr);
grpc_uri* uri = grpc_uri_parse(server_uri, true);
GPR_ASSERT(uri->path[0] != '\0');
service_config_parsing_state parsing_state;
memset(&parsing_state, 0, sizeof(parsing_state));
parsing_state.server_name =
uri->path[0] == '/' ? uri->path + 1 : uri->path;
service_config->ParseGlobalParams(parse_retry_throttle_params,
&parsing_state);
grpc_uri_destroy(uri);
retry_throttle_data = std::move(parsing_state.retry_throttle_data);
}
method_params_table = service_config->CreateMethodConfigTable(
ClientChannelMethodParams::CreateFromJson);
}
}
// Creates a new LB policy, replacing any previous one.
// If the new policy is created successfully, sets *connectivity_state and
// *connectivity_error to its initial connectivity state; otherwise,
// leaves them unchanged.
static void create_new_lb_policy_locked(
channel_data* chand, char* lb_policy_name,
grpc_connectivity_state* connectivity_state,
grpc_error** connectivity_error) {
grpc_core::LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = chand->combiner;
lb_policy_args.client_channel_factory = chand->client_channel_factory;
lb_policy_args.args = chand->resolver_result;
grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> new_lb_policy =
grpc_core::LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
lb_policy_name, lb_policy_args);
if (GPR_UNLIKELY(new_lb_policy == nullptr)) {
gpr_log(GPR_ERROR, "could not create LB policy \"%s\"", lb_policy_name);
} else {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p: created new LB policy \"%s\" (%p)", chand,
lb_policy_name, new_lb_policy.get());
}
}
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p: resolver result: lb_policy_name=\"%s\"%s, "
"service_config=\"%s\"",
chand, lb_policy_name_dup,
lb_policy_name_changed ? " (changed)" : "", service_config_json);
}
// Now swap out fields in chand. Note that the new values may still
// be nullptr if (e.g.) the resolver failed to return results or the
// results did not contain the necessary data.
//
// First, swap out the data used by cc_get_channel_info().
gpr_mu_lock(&chand->info_mu);
if (lb_policy_name_dup != nullptr) {
gpr_free(chand->info_lb_policy_name);
chand->info_lb_policy_name = lb_policy_name_dup;
}
if (service_config_json != nullptr) {
gpr_free(chand->info_service_config_json);
chand->info_service_config_json = service_config_json;
}
gpr_mu_unlock(&chand->info_mu);
// Swap out the retry throttle data.
chand->retry_throttle_data = std::move(retry_throttle_data);
// Swap out the method params table.
chand->method_params_table = std::move(method_params_table);
// If we have a new LB policy or are shutting down (in which case
// new_lb_policy will be nullptr), swap out the LB policy, unreffing the
// old one and removing its fds from chand->interested_parties.
// Note that we do NOT do this if either (a) we updated the existing
// LB policy above or (b) we failed to create the new LB policy (in
// which case we want to continue using the most recent one we had).
if (new_lb_policy != nullptr || error != GRPC_ERROR_NONE ||
chand->resolver == nullptr) {
// Swap out the LB policy and update the fds in
// chand->interested_parties.
if (chand->lb_policy != nullptr) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p: unreffing lb_policy=%p", chand,
gpr_log(GPR_INFO, "chand=%p: shutting down lb_policy=%p", chand,
chand->lb_policy.get());
}
grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
chand->interested_parties);
chand->lb_policy->HandOffPendingPicksLocked(new_lb_policy.get());
chand->lb_policy.reset();
}
chand->lb_policy = std::move(new_lb_policy);
grpc_pollset_set_add_pollset_set(chand->lb_policy->interested_parties(),
chand->interested_parties);
// Set up re-resolution callback.
reresolution_request_args* args =
static_cast<reresolution_request_args*>(gpr_zalloc(sizeof(*args)));
args->chand = chand;
args->lb_policy = chand->lb_policy.get();
GRPC_CLOSURE_INIT(&args->closure, request_reresolution_locked, args,
grpc_combiner_scheduler(chand->combiner));
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "re-resolution");
chand->lb_policy->SetReresolutionClosureLocked(&args->closure);
// Get the new LB policy's initial connectivity state and start a
// connectivity watch.
GRPC_ERROR_UNREF(*connectivity_error);
*connectivity_state =
chand->lb_policy->CheckConnectivityLocked(connectivity_error);
if (chand->exit_idle_when_lb_policy_arrives) {
chand->lb_policy->ExitIdleLocked();
chand->exit_idle_when_lb_policy_arrives = false;
}
watch_lb_policy_locked(chand, chand->lb_policy.get(), *connectivity_state);
}
}
// Returns the service config (as a JSON string) from the resolver result.
// Also updates state in chand.
static grpc_core::UniquePtr<char>
get_service_config_from_resolver_result_locked(channel_data* chand) {
const grpc_arg* channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVICE_CONFIG);
const char* service_config_json = grpc_channel_arg_get_string(channel_arg);
if (service_config_json != nullptr) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p: resolver returned service config: \"%s\"",
chand, service_config_json);
}
grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config =
grpc_core::ServiceConfig::Create(service_config_json);
if (service_config != nullptr) {
if (chand->enable_retries) {
channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVER_URI);
const char* server_uri = grpc_channel_arg_get_string(channel_arg);
GPR_ASSERT(server_uri != nullptr);
grpc_uri* uri = grpc_uri_parse(server_uri, true);
GPR_ASSERT(uri->path[0] != '\0');
service_config_parsing_state parsing_state;
memset(&parsing_state, 0, sizeof(parsing_state));
parsing_state.server_name =
uri->path[0] == '/' ? uri->path + 1 : uri->path;
service_config->ParseGlobalParams(parse_retry_throttle_params,
&parsing_state);
grpc_uri_destroy(uri);
chand->retry_throttle_data =
std::move(parsing_state.retry_throttle_data);
}
chand->method_params_table = service_config->CreateMethodConfigTable(
ClientChannelMethodParams::CreateFromJson);
}
}
return grpc_core::UniquePtr<char>(gpr_strdup(service_config_json));
}
// Callback invoked when a resolver result is available.
static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
channel_data* chand = static_cast<channel_data*>(arg);
if (grpc_client_channel_trace.enabled()) {
const char* disposition =
chand->resolver_result != nullptr
? ""
: (error == GRPC_ERROR_NONE ? " (transient error)"
: " (resolver shutdown)");
gpr_log(GPR_INFO,
"chand=%p: got resolver result: resolver_result=%p error=%s%s",
chand, chand->resolver_result, grpc_error_string(error),
disposition);
}
// Now that we've swapped out the relevant fields of chand, check for
// error or shutdown.
// Handle shutdown.
if (error != GRPC_ERROR_NONE || chand->resolver == nullptr) {
on_resolver_shutdown_locked(chand, GRPC_ERROR_REF(error));
return;
}
// Data used to set the channel's connectivity state.
bool set_connectivity_state = true;
grpc_connectivity_state connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
grpc_error* connectivity_error =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
// chand->resolver_result will be null in the case of a transient
// resolution error. In that case, we don't have any new result to
// process, which means that we keep using the previous result (if any).
if (chand->resolver_result == nullptr) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p: shutting down", chand);
gpr_log(GPR_INFO, "chand=%p: resolver transient failure", chand);
}
if (chand->resolver != nullptr) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p: shutting down resolver", chand);
}
chand->resolver.reset();
}
set_channel_connectivity_state_locked(
chand, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Got resolver result after disconnection", &error, 1),
"resolver_gone");
grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Channel disconnected", &error, 1));
GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "resolver");
grpc_channel_args_destroy(chand->resolver_result);
chand->resolver_result = nullptr;
} else { // Not shutting down.
grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
grpc_error* state_error =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
if (lb_policy_created) {
} else {
grpc_core::UniquePtr<char> lb_policy_name =
get_lb_policy_name_from_resolver_result_locked(chand);
// Check to see if we're already using the right LB policy.
// Note: It's safe to use chand->info_lb_policy_name here without
// taking a lock on chand->info_mu, because this function is the
// only thing that modifies its value, and it can only be invoked
// once at any given time.
bool lb_policy_name_changed = chand->info_lb_policy_name == nullptr ||
gpr_stricmp(chand->info_lb_policy_name.get(),
lb_policy_name.get()) != 0;
if (chand->lb_policy != nullptr && !lb_policy_name_changed) {
// Continue using the same LB policy. Update with new addresses.
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p: initializing new LB policy", chand);
gpr_log(GPR_INFO, "chand=%p: updating existing LB policy \"%s\" (%p)",
chand, lb_policy_name.get(), chand->lb_policy.get());
}
GRPC_ERROR_UNREF(state_error);
state = chand->lb_policy->CheckConnectivityLocked(&state_error);
grpc_pollset_set_add_pollset_set(chand->lb_policy->interested_parties(),
chand->interested_parties);
GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
if (chand->exit_idle_when_lb_policy_arrives) {
chand->lb_policy->ExitIdleLocked();
chand->exit_idle_when_lb_policy_arrives = false;
}
watch_lb_policy_locked(chand, chand->lb_policy.get(), state);
} else if (chand->resolver_result == nullptr) {
// Transient failure.
GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
}
if (!lb_policy_updated) {
set_channel_connectivity_state_locked(
chand, state, GRPC_ERROR_REF(state_error), "new_lb+resolver");
}
chand->lb_policy->UpdateLocked(*chand->resolver_result);
// No need to set the channel's connectivity state; the existing
// watch on the LB policy will take care of that.
set_connectivity_state = false;
} else {
// Instantiate new LB policy.
create_new_lb_policy_locked(chand, lb_policy_name.get(),
&connectivity_state, &connectivity_error);
}
// Find service config.
grpc_core::UniquePtr<char> service_config_json =
get_service_config_from_resolver_result_locked(chand);
// Swap out the data used by cc_get_channel_info().
gpr_mu_lock(&chand->info_mu);
chand->info_lb_policy_name = std::move(lb_policy_name);
chand->info_service_config_json = std::move(service_config_json);
gpr_mu_unlock(&chand->info_mu);
// Clean up.
grpc_channel_args_destroy(chand->resolver_result);
chand->resolver_result = nullptr;
chand->resolver->NextLocked(&chand->resolver_result,
&chand->on_resolver_result_changed);
GRPC_ERROR_UNREF(state_error);
}
// Set the channel's connectivity state if needed.
if (set_connectivity_state) {
set_channel_connectivity_state_locked(
chand, connectivity_state, connectivity_error, "resolver_result");
} else {
GRPC_ERROR_UNREF(connectivity_error);
}
// Invoke closures that were waiting for results and renew the watch.
GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
chand->resolver->NextLocked(&chand->resolver_result,
&chand->on_resolver_result_changed);
}
static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
@ -611,15 +632,11 @@ static void cc_get_channel_info(grpc_channel_element* elem,
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
gpr_mu_lock(&chand->info_mu);
if (info->lb_policy_name != nullptr) {
*info->lb_policy_name = chand->info_lb_policy_name == nullptr
? nullptr
: gpr_strdup(chand->info_lb_policy_name);
*info->lb_policy_name = gpr_strdup(chand->info_lb_policy_name.get());
}
if (info->service_config_json != nullptr) {
*info->service_config_json =
chand->info_service_config_json == nullptr
? nullptr
: gpr_strdup(chand->info_service_config_json);
gpr_strdup(chand->info_service_config_json.get());
}
gpr_mu_unlock(&chand->info_mu);
}
@ -699,19 +716,15 @@ static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
return GRPC_ERROR_NONE;
}
static void shutdown_resolver_locked(void* arg, grpc_error* error) {
grpc_core::Resolver* resolver = static_cast<grpc_core::Resolver*>(arg);
resolver->Orphan();
}
/* Destructor for channel_data */
static void cc_destroy_channel_elem(grpc_channel_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
if (chand->resolver != nullptr) {
GRPC_CLOSURE_SCHED(
GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver.release(),
grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
// The only way we can get here is if we never started resolving,
// because we take a ref to the channel stack when we start
// resolving and do not release it until the resolver callback is
// invoked after the resolver shuts down.
chand->resolver.reset();
}
if (chand->client_channel_factory != nullptr) {
grpc_client_channel_factory_unref(chand->client_channel_factory);
@ -721,8 +734,10 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
chand->interested_parties);
chand->lb_policy.reset();
}
gpr_free(chand->info_lb_policy_name);
gpr_free(chand->info_service_config_json);
// TODO(roth): Once we convert the filter API to C++, there will no
// longer be any need to explicitly reset these smart pointer data members.
chand->info_lb_policy_name.reset();
chand->info_service_config_json.reset();
chand->retry_throttle_data.reset();
chand->method_params_table.reset();
grpc_client_channel_stop_backup_polling(chand->interested_parties);
@ -3159,6 +3174,16 @@ static void try_to_connect_locked(void* arg, grpc_error* error_ignored) {
GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "try_to_connect");
}
void grpc_client_channel_populate_child_refs(
grpc_channel_element* elem, grpc_core::ChildRefsList* child_subchannels,
grpc_core::ChildRefsList* child_channels) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
if (chand->lb_policy != nullptr) {
chand->lb_policy->FillChildRefsForChannelz(child_subchannels,
child_channels);
}
}
grpc_connectivity_state grpc_client_channel_check_connectivity_state(
grpc_channel_element* elem, int try_to_connect) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);

@ -21,6 +21,7 @@
#include <grpc/support/port_platform.h>
#include "src/core/ext/filters/client_channel/client_channel_channelz.h"
#include "src/core/ext/filters/client_channel/client_channel_factory.h"
#include "src/core/ext/filters/client_channel/resolver.h"
#include "src/core/lib/channel/channel_stack.h"
@ -39,6 +40,10 @@ extern grpc_core::TraceFlag grpc_client_channel_trace;
extern const grpc_channel_filter grpc_client_channel_filter;
void grpc_client_channel_populate_child_refs(
grpc_channel_element* elem, grpc_core::ChildRefsList* child_subchannels,
grpc_core::ChildRefsList* child_channels);
grpc_connectivity_state grpc_client_channel_check_connectivity_state(
grpc_channel_element* elem, int try_to_connect);

@ -0,0 +1,113 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/port_platform.h>
#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/ext/filters/client_channel/client_channel_channelz.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/connectivity_state.h"
namespace grpc_core {
namespace channelz {
namespace {
void* client_channel_channelz_copy(void* p) { return p; }
void client_channel_channelz_destroy(void* p) {}
int client_channel_channelz_cmp(void* a, void* b) { return GPR_ICMP(a, b); }
} // namespace
static const grpc_arg_pointer_vtable client_channel_channelz_vtable = {
client_channel_channelz_copy, client_channel_channelz_destroy,
client_channel_channelz_cmp};
ClientChannelNode::ClientChannelNode(grpc_channel* channel,
size_t channel_tracer_max_nodes,
bool is_top_level_channel)
: ChannelNode(channel, channel_tracer_max_nodes, is_top_level_channel) {
client_channel_ =
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
GPR_ASSERT(client_channel_->filter == &grpc_client_channel_filter);
}
void ClientChannelNode::PopulateConnectivityState(grpc_json* json) {
grpc_connectivity_state state;
if (ChannelIsDestroyed()) {
state = GRPC_CHANNEL_SHUTDOWN;
} else {
state =
grpc_client_channel_check_connectivity_state(client_channel_, false);
}
json = grpc_json_create_child(nullptr, json, "state", nullptr,
GRPC_JSON_OBJECT, false);
grpc_json_create_child(nullptr, json, "state",
grpc_connectivity_state_name(state), GRPC_JSON_STRING,
false);
}
void ClientChannelNode::PopulateChildRefs(grpc_json* json) {
ChildRefsList child_subchannels;
ChildRefsList child_channels;
grpc_json* json_iterator = nullptr;
grpc_client_channel_populate_child_refs(client_channel_, &child_subchannels,
&child_channels);
if (!child_subchannels.empty()) {
grpc_json* array_parent = grpc_json_create_child(
nullptr, json, "subchannelRef", nullptr, GRPC_JSON_ARRAY, false);
for (size_t i = 0; i < child_subchannels.size(); ++i) {
json_iterator =
grpc_json_create_child(json_iterator, array_parent, nullptr, nullptr,
GRPC_JSON_OBJECT, false);
grpc_json_add_number_string_child(json_iterator, nullptr, "subchannelId",
child_subchannels[i]);
}
}
if (!child_channels.empty()) {
grpc_json* array_parent = grpc_json_create_child(
nullptr, json, "channelRef", nullptr, GRPC_JSON_ARRAY, false);
json_iterator = nullptr;
for (size_t i = 0; i < child_channels.size(); ++i) {
json_iterator =
grpc_json_create_child(json_iterator, array_parent, nullptr, nullptr,
GRPC_JSON_OBJECT, false);
grpc_json_add_number_string_child(json_iterator, nullptr, "channelId",
child_channels[i]);
}
}
}
grpc_arg ClientChannelNode::CreateChannelArg() {
return grpc_channel_arg_pointer_create(
const_cast<char*>(GRPC_ARG_CHANNELZ_CHANNEL_NODE_CREATION_FUNC),
reinterpret_cast<void*>(MakeClientChannelNode),
&client_channel_channelz_vtable);
}
RefCountedPtr<ChannelNode> ClientChannelNode::MakeClientChannelNode(
grpc_channel* channel, size_t channel_tracer_max_nodes,
bool is_top_level_channel) {
return MakePolymorphicRefCounted<ChannelNode, ClientChannelNode>(
channel, channel_tracer_max_nodes, is_top_level_channel);
}
} // namespace channelz
} // namespace grpc_core
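For context, a hedged sketch of how the creation-function arg planted by CreateChannelArg() above might be consumed when a channel is built. The lookup site and function name below are assumptions; only the arg key, the ChannelNodeCreationFunc typedef, and the ChannelNode::MakeChannelNode fallback appear in this patch.
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channelz.h"
// Hypothetical helper: pick the channelz node factory for a new channel.
static grpc_core::channelz::ChannelNodeCreationFunc
channel_node_creation_func(const grpc_channel_args* args) {
  const grpc_arg* arg = grpc_channel_args_find(
      args, GRPC_ARG_CHANNELZ_CHANNEL_NODE_CREATION_FUNC);
  if (arg != nullptr && arg->type == GRPC_ARG_POINTER &&
      arg->value.pointer.p != nullptr) {
    // Client channels plant ClientChannelNode::MakeClientChannelNode here.
    return reinterpret_cast<grpc_core::channelz::ChannelNodeCreationFunc>(
        arg->value.pointer.p);
  }
  // No override present: fall back to the plain ChannelNode factory.
  return grpc_core::channelz::ChannelNode::MakeChannelNode;
}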

@ -0,0 +1,71 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_CHANNELZ_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_CHANNELZ_H
#include <grpc/support/port_platform.h>
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/channel/channelz.h"
#include "src/core/lib/gprpp/inlined_vector.h"
namespace grpc_core {
// TODO(ncteisen): this only contains the uuids of the children for now,
// since that is all that is strictly needed. In a future enhancement we will
// add human readable names as in channelz.proto.
typedef InlinedVector<intptr_t, 10> ChildRefsList;
namespace channelz {
// Subtype of ChannelNode that overrides and provides client_channel specific
// functionality like querying for connectivity_state and subchannel data.
class ClientChannelNode : public ChannelNode {
public:
static RefCountedPtr<ChannelNode> MakeClientChannelNode(
grpc_channel* channel, size_t channel_tracer_max_nodes,
bool is_top_level_channel);
// Override this functionality since client_channels have a notion of
// channel connectivity.
void PopulateConnectivityState(grpc_json* json) override;
// Override this functionality since client_channels have subchannels
void PopulateChildRefs(grpc_json* json) override;
// Helper to create a channel arg to ensure this type of ChannelNode is
// created.
static grpc_arg CreateChannelArg();
protected:
GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE
GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_NEW
ClientChannelNode(grpc_channel* channel, size_t channel_tracer_max_nodes,
bool is_top_level_channel);
virtual ~ClientChannelNode() {}
private:
grpc_channel_element* client_channel_;
};
} // namespace channelz
} // namespace grpc_core
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_CHANNELZ_H */

@ -25,6 +25,7 @@
#include <grpc/support/alloc.h>
#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/ext/filters/client_channel/client_channel_channelz.h"
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
#include "src/core/ext/filters/client_channel/http_proxy.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
@ -35,6 +36,14 @@
#include "src/core/lib/surface/channel_init.h"
static bool append_filter(grpc_channel_stack_builder* builder, void* arg) {
const grpc_channel_args* args =
grpc_channel_stack_builder_get_channel_arguments(builder);
grpc_arg args_to_add[] = {
grpc_core::channelz::ClientChannelNode::CreateChannelArg()};
grpc_channel_args* new_args = grpc_channel_args_copy_and_add(
args, args_to_add, GPR_ARRAY_SIZE(args_to_add));
grpc_channel_stack_builder_set_channel_arguments(builder, new_args);
grpc_channel_args_destroy(new_args);
return grpc_channel_stack_builder_append_filter(
builder, static_cast<const grpc_channel_filter*>(arg), nullptr, nullptr);
}

@ -21,6 +21,7 @@
#include <grpc/support/port_platform.h>
#include "src/core/ext/filters/client_channel/client_channel_channelz.h"
#include "src/core/ext/filters/client_channel/client_channel_factory.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/lib/gprpp/abstract.h"
@ -143,6 +144,14 @@ class LoadBalancingPolicy
/// consider whether this method is still needed.
virtual void ExitIdleLocked() GRPC_ABSTRACT;
/// populates child_subchannels and child_channels with the uuids of this
/// LB policy's referenced children. This is not invoked from the
/// client_channel's combiner. The implementation is responsible for
/// providing its own synchronization.
virtual void FillChildRefsForChannelz(ChildRefsList* child_subchannels,
ChildRefsList* child_channels)
GRPC_ABSTRACT;
void Orphan() override {
// Invoke ShutdownAndUnrefLocked() inside of the combiner.
GRPC_CLOSURE_SCHED(
@ -196,6 +205,12 @@ class LoadBalancingPolicy
grpc_pollset_set* interested_parties_;
/// Callback to force a re-resolution.
grpc_closure* request_reresolution_;
// Dummy classes needed for alignment issues.
// See https://github.com/grpc/grpc/issues/16032 for context.
// TODO(ncteisen): remove this as soon as the issue is resolved.
ChildRefsList dummy_list_foo;
ChildRefsList dummy_list_bar;
};
} // namespace grpc_core

@ -135,6 +135,8 @@ class GrpcLb : public LoadBalancingPolicy {
void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) override;
void PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) override;
void ExitIdleLocked() override;
void FillChildRefsForChannelz(ChildRefsList* child_subchannels,
ChildRefsList* child_channels) override;
private:
/// Linked list of pending pick requests. It stores all information needed to
@ -298,6 +300,9 @@ class GrpcLb : public LoadBalancingPolicy {
// The channel for communicating with the LB server.
grpc_channel* lb_channel_ = nullptr;
// Mutex to protect the channel to the LB server. This is used when
// processing a channelz request.
gpr_mu lb_channel_mu_;
grpc_connectivity_state lb_channel_connectivity_;
grpc_closure lb_channel_on_connectivity_changed_;
// Are we already watching the LB channel's connectivity?
@ -1004,6 +1009,10 @@ grpc_channel_args* BuildBalancerChannelArgs(
// A channel arg indicating the target is a grpclb load balancer.
grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_ADDRESS_IS_GRPCLB_LOAD_BALANCER), 1),
// A channel arg indicating this is an internal channel, i.e. it is
// owned by components in Core, not by the user application.
grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_CHANNELZ_CHANNEL_IS_INTERNAL_CHANNEL), 1),
};
// Construct channel args.
grpc_channel_args* new_args = grpc_channel_args_copy_and_add_and_remove(
@ -1033,6 +1042,7 @@ GrpcLb::GrpcLb(const grpc_lb_addresses* addresses,
.set_max_backoff(GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS *
1000)) {
// Initialization.
gpr_mu_init(&lb_channel_mu_);
grpc_subchannel_index_ref();
GRPC_CLOSURE_INIT(&lb_channel_on_connectivity_changed_,
&GrpcLb::OnBalancerChannelConnectivityChangedLocked, this,
@ -1071,6 +1081,7 @@ GrpcLb::GrpcLb(const grpc_lb_addresses* addresses,
GrpcLb::~GrpcLb() {
GPR_ASSERT(pending_picks_ == nullptr);
GPR_ASSERT(pending_pings_ == nullptr);
gpr_mu_destroy(&lb_channel_mu_);
gpr_free((void*)server_name_);
grpc_channel_args_destroy(args_);
grpc_connectivity_state_destroy(&state_tracker_);
@ -1100,8 +1111,10 @@ void GrpcLb::ShutdownLocked() {
// OnBalancerChannelConnectivityChangedLocked(), and we need to be
// alive when that callback is invoked.
if (lb_channel_ != nullptr) {
gpr_mu_lock(&lb_channel_mu_);
grpc_channel_destroy(lb_channel_);
lb_channel_ = nullptr;
gpr_mu_unlock(&lb_channel_mu_);
}
grpc_connectivity_state_set(&state_tracker_, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "grpclb_shutdown");
@ -1272,6 +1285,20 @@ void GrpcLb::PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) {
}
}
void GrpcLb::FillChildRefsForChannelz(ChildRefsList* child_subchannels,
ChildRefsList* child_channels) {
// Delegate to the RoundRobin policy to fill in the child subchannels.
rr_policy_->FillChildRefsForChannelz(child_subchannels, child_channels);
mu_guard guard(&lb_channel_mu_);
if (lb_channel_ != nullptr) {
grpc_core::channelz::ChannelNode* channel_node =
grpc_channel_get_channelz_node(lb_channel_);
if (channel_node != nullptr) {
child_channels->push_back(channel_node->channel_uuid());
}
}
}
grpc_connectivity_state GrpcLb::CheckConnectivityLocked(
grpc_error** connectivity_error) {
return grpc_connectivity_state_get(&state_tracker_, connectivity_error);
@ -1315,9 +1342,11 @@ void GrpcLb::ProcessChannelArgsLocked(const grpc_channel_args& args) {
if (lb_channel_ == nullptr) {
char* uri_str;
gpr_asprintf(&uri_str, "fake:///%s", server_name_);
gpr_mu_lock(&lb_channel_mu_);
lb_channel_ = grpc_client_channel_factory_create_channel(
client_channel_factory(), uri_str,
GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, lb_channel_args);
gpr_mu_unlock(&lb_channel_mu_);
GPR_ASSERT(lb_channel_ != nullptr);
gpr_free(uri_str);
}

@ -58,6 +58,8 @@ class PickFirst : public LoadBalancingPolicy {
void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) override;
void PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) override;
void ExitIdleLocked() override;
void FillChildRefsForChannelz(ChildRefsList* child_subchannels,
ChildRefsList* ignored) override;
private:
~PickFirst();
@ -103,10 +105,23 @@ class PickFirst : public LoadBalancingPolicy {
}
};
// Helper class to ensure that any function that modifies the child refs
// data structures will update the channelz snapshot data structures before
// returning.
class AutoChildRefsUpdater {
public:
explicit AutoChildRefsUpdater(PickFirst* pf) : pf_(pf) {}
~AutoChildRefsUpdater() { pf_->UpdateChildRefsLocked(); }
private:
PickFirst* pf_;
};
void ShutdownLocked() override;
void StartPickingLocked();
void DestroyUnselectedSubchannelsLocked();
void UpdateChildRefsLocked();
// All our subchannels.
OrphanablePtr<PickFirstSubchannelList> subchannel_list_;
@ -122,10 +137,17 @@ class PickFirst : public LoadBalancingPolicy {
PickState* pending_picks_ = nullptr;
// Our connectivity state tracker.
grpc_connectivity_state_tracker state_tracker_;
/// Lock and data used to capture snapshots of this channel's child
/// channels and subchannels. This data is consumed by channelz.
gpr_mu child_refs_mu_;
ChildRefsList child_subchannels_;
ChildRefsList child_channels_;
};
PickFirst::PickFirst(const Args& args) : LoadBalancingPolicy(args) {
GPR_ASSERT(args.client_channel_factory != nullptr);
gpr_mu_init(&child_refs_mu_);
grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE,
"pick_first");
if (grpc_lb_pick_first_trace.enabled()) {
@ -139,6 +161,7 @@ PickFirst::~PickFirst() {
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO, "Destroying Pick First %p", this);
}
gpr_mu_destroy(&child_refs_mu_);
GPR_ASSERT(subchannel_list_ == nullptr);
GPR_ASSERT(latest_pending_subchannel_list_ == nullptr);
GPR_ASSERT(pending_picks_ == nullptr);
@ -158,6 +181,7 @@ void PickFirst::HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) {
}
void PickFirst::ShutdownLocked() {
AutoChildRefsUpdater guard(this);
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO, "Pick First %p Shutting down", this);
@ -280,7 +304,41 @@ void PickFirst::PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) {
}
}
void PickFirst::FillChildRefsForChannelz(
ChildRefsList* child_subchannels_to_fill, ChildRefsList* ignored) {
mu_guard guard(&child_refs_mu_);
for (size_t i = 0; i < child_subchannels_.size(); ++i) {
// TODO(ncteisen): implement a de-dup loop that is not O(n^2). Might
// have to implement a lightweight set. For now, we don't care about
// performance when channelz requests are made.
bool found = false;
for (size_t j = 0; j < child_subchannels_to_fill->size(); ++j) {
if ((*child_subchannels_to_fill)[j] == child_subchannels_[i]) {
found = true;
break;
}
}
if (!found) {
child_subchannels_to_fill->push_back(child_subchannels_[i]);
}
}
}
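The TODO above flags the quadratic de-dup. A standalone sketch of an O(n log n) alternative, assuming plain std::vector rather than the InlinedVector-backed ChildRefsList used here: merge, sort, then drop adjacent duplicates.
#include <algorithm>
#include <cstdint>
#include <vector>
// Hypothetical helper, not gRPC code: append our refs, then de-dup in place.
void MergeChildRefs(const std::vector<intptr_t>& ours,
                    std::vector<intptr_t>* out) {
  out->insert(out->end(), ours.begin(), ours.end());
  std::sort(out->begin(), out->end());
  out->erase(std::unique(out->begin(), out->end()), out->end());
}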
void PickFirst::UpdateChildRefsLocked() {
ChildRefsList cs;
if (subchannel_list_ != nullptr) {
subchannel_list_->PopulateChildRefsList(&cs);
}
if (latest_pending_subchannel_list_ != nullptr) {
latest_pending_subchannel_list_->PopulateChildRefsList(&cs);
}
// atomically update the data that channelz will actually be looking at.
mu_guard guard(&child_refs_mu_);
child_subchannels_ = std::move(cs);
}
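A distilled, standalone illustration of the pattern UpdateChildRefsLocked() and AutoChildRefsUpdater implement: mutations run on the policy's own thread (the combiner, by analogy), and an RAII guard refreshes a snapshot that channelz readers consult under a separate mutex. All names below are hypothetical.
#include <cstdint>
#include <mutex>
#include <vector>
class SnapshottingPolicy {
 public:
  // RAII helper: mutating methods construct one of these so the snapshot is
  // refreshed before they return.
  class AutoSnapshotUpdater {
   public:
    explicit AutoSnapshotUpdater(SnapshottingPolicy* p) : p_(p) {}
    ~AutoSnapshotUpdater() { p_->UpdateSnapshot(); }
   private:
    SnapshottingPolicy* p_;
  };
  void AddChild(intptr_t uuid) {
    AutoSnapshotUpdater guard(this);
    live_children_.push_back(uuid);  // mutated only by the owning thread
  }
  // Called by channelz from any thread.
  std::vector<intptr_t> GetChildSnapshot() {
    std::lock_guard<std::mutex> lock(snapshot_mu_);
    return snapshot_;
  }
 private:
  void UpdateSnapshot() {
    std::vector<intptr_t> copy = live_children_;
    std::lock_guard<std::mutex> lock(snapshot_mu_);
    snapshot_ = std::move(copy);
  }
  std::vector<intptr_t> live_children_;  // owner-thread state
  std::mutex snapshot_mu_;
  std::vector<intptr_t> snapshot_;  // guarded by snapshot_mu_
};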
void PickFirst::UpdateLocked(const grpc_channel_args& args) {
AutoChildRefsUpdater guard(this);
const grpc_arg* arg = grpc_channel_args_find(&args, GRPC_ARG_LB_ADDRESSES);
if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
if (subchannel_list_ == nullptr) {
@ -388,6 +446,7 @@ void PickFirst::UpdateLocked(const grpc_channel_args& args) {
void PickFirst::PickFirstSubchannelData::ProcessConnectivityChangeLocked(
grpc_connectivity_state connectivity_state, grpc_error* error) {
PickFirst* p = static_cast<PickFirst*>(subchannel_list()->policy());
AutoChildRefsUpdater guard(p);
// The notification must be for a subchannel in either the current or
// latest pending subchannel lists.
GPR_ASSERT(subchannel_list() == p->subchannel_list_.get() ||

@ -69,6 +69,8 @@ class RoundRobin : public LoadBalancingPolicy {
void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) override;
void PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) override;
void ExitIdleLocked() override;
void FillChildRefsForChannelz(ChildRefsList* child_subchannels,
ChildRefsList* ignored) override;
private:
~RoundRobin();
@ -180,11 +182,24 @@ class RoundRobin : public LoadBalancingPolicy {
size_t last_ready_index_ = -1; // Index into list of last pick.
};
// Helper class to ensure that any function that modifies the child refs
// data structures will update the channelz snapshot data structures before
// returning.
class AutoChildRefsUpdater {
public:
explicit AutoChildRefsUpdater(RoundRobin* rr) : rr_(rr) {}
~AutoChildRefsUpdater() { rr_->UpdateChildRefsLocked(); }
private:
RoundRobin* rr_;
};
void ShutdownLocked() override;
void StartPickingLocked();
bool DoPickLocked(PickState* pick);
void DrainPendingPicksLocked();
void UpdateChildRefsLocked();
/** list of subchannels */
OrphanablePtr<RoundRobinSubchannelList> subchannel_list_;
@ -202,10 +217,16 @@ class RoundRobin : public LoadBalancingPolicy {
PickState* pending_picks_ = nullptr;
/** our connectivity state tracker */
grpc_connectivity_state_tracker state_tracker_;
/// Lock and data used to capture snapshots of this channel's child
/// channels and subchannels. This data is consumed by channelz.
gpr_mu child_refs_mu_;
ChildRefsList child_subchannels_;
ChildRefsList child_channels_;
};
RoundRobin::RoundRobin(const Args& args) : LoadBalancingPolicy(args) {
GPR_ASSERT(args.client_channel_factory != nullptr);
gpr_mu_init(&child_refs_mu_);
grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE,
"round_robin");
UpdateLocked(*args.args);
@ -220,6 +241,7 @@ RoundRobin::~RoundRobin() {
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO, "[RR %p] Destroying Round Robin policy", this);
}
gpr_mu_destroy(&child_refs_mu_);
GPR_ASSERT(subchannel_list_ == nullptr);
GPR_ASSERT(latest_pending_subchannel_list_ == nullptr);
GPR_ASSERT(pending_picks_ == nullptr);
@ -239,6 +261,7 @@ void RoundRobin::HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) {
}
void RoundRobin::ShutdownLocked() {
AutoChildRefsUpdater guard(this);
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO, "[RR %p] Shutting down", this);
@ -354,14 +377,47 @@ bool RoundRobin::PickLocked(PickState* pick) {
if (DoPickLocked(pick)) return true;
}
/* no pick currently available. Save for later in list of pending picks */
pick->next = pending_picks_;
pending_picks_ = pick;
if (!started_picking_) {
StartPickingLocked();
}
pick->next = pending_picks_;
pending_picks_ = pick;
return false;
}
void RoundRobin::FillChildRefsForChannelz(
ChildRefsList* child_subchannels_to_fill, ChildRefsList* ignored) {
mu_guard guard(&child_refs_mu_);
for (size_t i = 0; i < child_subchannels_.size(); ++i) {
// TODO(ncteisen): implement a de-dup loop that is not O(n^2). Might
// have to implement a lightweight set. For now, we don't care about
// performance when channelz requests are made.
bool found = false;
for (size_t j = 0; j < child_subchannels_to_fill->size(); ++j) {
if ((*child_subchannels_to_fill)[j] == child_subchannels_[i]) {
found = true;
break;
}
}
if (!found) {
child_subchannels_to_fill->push_back(child_subchannels_[i]);
}
}
}
void RoundRobin::UpdateChildRefsLocked() {
ChildRefsList cs;
if (subchannel_list_ != nullptr) {
subchannel_list_->PopulateChildRefsList(&cs);
}
if (latest_pending_subchannel_list_ != nullptr) {
latest_pending_subchannel_list_->PopulateChildRefsList(&cs);
}
// atomically update the data that channelz will actually be looking at.
mu_guard guard(&child_refs_mu_);
child_subchannels_ = std::move(cs);
}
void RoundRobin::RoundRobinSubchannelList::StartWatchingLocked() {
if (num_subchannels() == 0) return;
// Check current state of each subchannel synchronously, since any
@ -452,6 +508,7 @@ void RoundRobin::RoundRobinSubchannelList::
void RoundRobin::RoundRobinSubchannelList::
UpdateRoundRobinStateFromSubchannelStateCountsLocked() {
RoundRobin* p = static_cast<RoundRobin*>(policy());
AutoChildRefsUpdater guard(p);
if (num_ready_ > 0) {
if (p->subchannel_list_.get() != this) {
// Promote this list to p->subchannel_list_.
@ -608,6 +665,7 @@ void RoundRobin::PingOneLocked(grpc_closure* on_initiate,
void RoundRobin::UpdateLocked(const grpc_channel_args& args) {
const grpc_arg* arg = grpc_channel_args_find(&args, GRPC_ARG_LB_ADDRESSES);
AutoChildRefsUpdater guard(this);
if (GPR_UNLIKELY(arg == nullptr || arg->type != GRPC_ARG_POINTER)) {
gpr_log(GPR_ERROR, "[RR %p] update provided no addresses; ignoring", this);
// If we don't have a current subchannel list, go into TRANSIENT_FAILURE.

@ -189,6 +189,19 @@ class SubchannelList
// Returns true if the subchannel list is shutting down.
bool shutting_down() const { return shutting_down_; }
// Populates refs_list with the uuids of this SubchannelList's subchannels.
void PopulateChildRefsList(ChildRefsList* refs_list) {
for (size_t i = 0; i < subchannels_.size(); ++i) {
if (subchannels_[i].subchannel() != nullptr) {
grpc_core::channelz::SubchannelNode* subchannel_node =
grpc_subchannel_get_channelz_node(subchannels_[i].subchannel());
if (subchannel_node != nullptr) {
refs_list->push_back(subchannel_node->subchannel_uuid());
}
}
}
}
// Accessors.
LoadBalancingPolicy* policy() const { return policy_; }
TraceFlag* tracer() const { return tracer_; }

@ -153,3 +153,11 @@ grpc_lb_addresses* grpc_lb_addresses_find_channel_arg(
return nullptr;
return static_cast<grpc_lb_addresses*>(lb_addresses_arg->value.pointer.p);
}
bool grpc_lb_addresses_contains_balancer_address(
const grpc_lb_addresses& addresses) {
for (size_t i = 0; i < addresses.num_addresses; ++i) {
if (addresses.addresses[i].is_balancer) return true;
}
return false;
}
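A hedged usage sketch for the new helper: roughly how a caller might pick an LB policy name from a resolver result. The function below is illustrative only; the real selection logic lives in client_channel.cc.
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
// Hypothetical helper: any balancer address forces the grpclb policy.
static const char* choose_lb_policy_name(const grpc_lb_addresses& addresses,
                                         const char* requested_name) {
  if (grpc_lb_addresses_contains_balancer_address(addresses)) {
    return "grpclb";
  }
  return requested_name != nullptr ? requested_name : "pick_first";
}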

@ -101,6 +101,10 @@ grpc_arg grpc_lb_addresses_create_channel_arg(
grpc_lb_addresses* grpc_lb_addresses_find_channel_arg(
const grpc_channel_args* channel_args);
// Returns true if addresses contains at least one balancer address.
bool grpc_lb_addresses_contains_balancer_address(
const grpc_lb_addresses& addresses);
//
// LB policy factory
//

@ -134,6 +134,9 @@ struct grpc_subchannel {
bool backoff_begun;
/** our alarm */
grpc_timer alarm;
grpc_core::RefCountedPtr<grpc_core::channelz::SubchannelNode>
channelz_subchannel;
};
struct grpc_subchannel_call {
@ -178,6 +181,7 @@ static void connection_destroy(void* arg, grpc_error* error) {
static void subchannel_destroy(void* arg, grpc_error* error) {
grpc_subchannel* c = static_cast<grpc_subchannel*>(arg);
c->channelz_subchannel.reset();
gpr_free((void*)c->filters);
grpc_channel_args_destroy(c->args);
grpc_connectivity_state_destroy(&c->state_tracker);
@ -374,9 +378,22 @@ grpc_subchannel* grpc_subchannel_create(grpc_connector* connector,
c->backoff.Init(backoff_options);
gpr_mu_init(&c->mu);
const grpc_arg* arg =
grpc_channel_args_find(c->args, GRPC_ARG_ENABLE_CHANNELZ);
bool channelz_enabled = grpc_channel_arg_get_bool(arg, false);
if (channelz_enabled) {
c->channelz_subchannel =
grpc_core::MakeRefCounted<grpc_core::channelz::SubchannelNode>();
}
return grpc_subchannel_index_register(key, c);
}
grpc_core::channelz::SubchannelNode* grpc_subchannel_get_channelz_node(
grpc_subchannel* subchannel) {
return subchannel->channelz_subchannel.get();
}
static void continue_connect_locked(grpc_subchannel* c) {
grpc_connect_in_args args;
args.interested_parties = c->pollset_set;

@ -21,6 +21,7 @@
#include <grpc/support/port_platform.h>
#include "src/core/ext/filters/client_channel/client_channel_channelz.h"
#include "src/core/ext/filters/client_channel/connector.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/gpr/arena.h"
@ -115,6 +116,9 @@ grpc_subchannel_call* grpc_subchannel_call_ref(
void grpc_subchannel_call_unref(
grpc_subchannel_call* call GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
grpc_core::channelz::SubchannelNode* grpc_subchannel_get_channelz_node(
grpc_subchannel* subchannel);
/** Returns a pointer to the parent data associated with \a subchannel_call.
The data will be of the size specified in \a parent_data_size
field of the args passed to \a grpc_connected_subchannel_create_call(). */

@ -28,75 +28,84 @@
namespace grpc {
namespace load_reporter {
// Note that the functions here are specified as inline to share the static
// objects across all translation units that include this header. See
// https://en.cppreference.com/w/cpp/language/inline for details.
// Measures.
::opencensus::stats::MeasureInt64 MeasureStartCount() {
static const ::opencensus::stats::MeasureInt64 start_count =
inline ::opencensus::stats::MeasureInt64 MeasureStartCount() {
static const ::opencensus::stats::MeasureInt64 measure =
::opencensus::stats::MeasureInt64::Register(
kMeasureStartCount, kMeasureStartCount, kMeasureStartCount);
return start_count;
return measure;
}
::opencensus::stats::MeasureInt64 MeasureEndCount() {
static const ::opencensus::stats::MeasureInt64 end_count =
inline ::opencensus::stats::MeasureInt64 MeasureEndCount() {
static const ::opencensus::stats::MeasureInt64 measure =
::opencensus::stats::MeasureInt64::Register(
kMeasureEndCount, kMeasureEndCount, kMeasureEndCount);
return end_count;
return measure;
}
::opencensus::stats::MeasureInt64 MeasureEndBytesSent() {
static const ::opencensus::stats::MeasureInt64 end_bytes_sent =
inline ::opencensus::stats::MeasureInt64 MeasureEndBytesSent() {
static const ::opencensus::stats::MeasureInt64 measure =
::opencensus::stats::MeasureInt64::Register(
kMeasureEndBytesSent, kMeasureEndBytesSent, kMeasureEndBytesSent);
return end_bytes_sent;
return measure;
}
::opencensus::stats::MeasureInt64 MeasureEndBytesReceived() {
static const ::opencensus::stats::MeasureInt64 end_bytes_received =
inline ::opencensus::stats::MeasureInt64 MeasureEndBytesReceived() {
static const ::opencensus::stats::MeasureInt64 measure =
::opencensus::stats::MeasureInt64::Register(kMeasureEndBytesReceived,
kMeasureEndBytesReceived,
kMeasureEndBytesReceived);
return end_bytes_received;
return measure;
}
::opencensus::stats::MeasureInt64 MeasureEndLatencyMs() {
static const ::opencensus::stats::MeasureInt64 end_latency_ms =
inline ::opencensus::stats::MeasureInt64 MeasureEndLatencyMs() {
static const ::opencensus::stats::MeasureInt64 measure =
::opencensus::stats::MeasureInt64::Register(
kMeasureEndLatencyMs, kMeasureEndLatencyMs, kMeasureEndLatencyMs);
return end_latency_ms;
return measure;
}
::opencensus::stats::MeasureDouble MeasureOtherCallMetric() {
static const ::opencensus::stats::MeasureDouble other_call_metric =
inline ::opencensus::stats::MeasureDouble MeasureOtherCallMetric() {
static const ::opencensus::stats::MeasureDouble measure =
::opencensus::stats::MeasureDouble::Register(kMeasureOtherCallMetric,
kMeasureOtherCallMetric,
kMeasureOtherCallMetric);
return other_call_metric;
return measure;
}
// Tags.
opencensus::stats::TagKey TagKeyToken() {
static const auto token = opencensus::stats::TagKey::Register(kTagKeyToken);
inline ::opencensus::stats::TagKey TagKeyToken() {
static const ::opencensus::stats::TagKey token =
opencensus::stats::TagKey::Register(kTagKeyToken);
return token;
}
opencensus::stats::TagKey TagKeyHost() {
static const auto token = opencensus::stats::TagKey::Register(kTagKeyHost);
inline ::opencensus::stats::TagKey TagKeyHost() {
static const ::opencensus::stats::TagKey token =
opencensus::stats::TagKey::Register(kTagKeyHost);
return token;
}
opencensus::stats::TagKey TagKeyUserId() {
static const auto token = opencensus::stats::TagKey::Register(kTagKeyUserId);
inline ::opencensus::stats::TagKey TagKeyUserId() {
static const ::opencensus::stats::TagKey token =
opencensus::stats::TagKey::Register(kTagKeyUserId);
return token;
}
opencensus::stats::TagKey TagKeyStatus() {
static const auto token = opencensus::stats::TagKey::Register(kTagKeyStatus);
inline ::opencensus::stats::TagKey TagKeyStatus() {
static const ::opencensus::stats::TagKey token =
opencensus::stats::TagKey::Register(kTagKeyStatus);
return token;
}
opencensus::stats::TagKey TagKeyMetricName() {
static const auto token =
inline ::opencensus::stats::TagKey TagKeyMetricName() {
static const ::opencensus::stats::TagKey token =
opencensus::stats::TagKey::Register(kTagKeyMetricName);
return token;
}
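A minimal standalone illustration of why the accessors above are marked inline with function-local statics: every translation unit that includes the header observes the same single object, so registration runs exactly once per process. Nothing below is gRPC or OpenCensus code.
#include <iostream>
#include <string>
// Pretend registration with a visible side effect.
inline int RegisterOnce(const std::string& name) {
  std::cout << "registering " << name << std::endl;
  return 42;
}
// Because the function is inline and the local is static, all translation
// units share one 'id', and "registering foo" prints exactly once.
inline int MeasureFooId() {
  static const int id = RegisterOnce("foo");
  return id;
}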

@ -19,7 +19,6 @@
#include <grpc/support/port_platform.h>
#include <grpc/grpc_security.h>
#include <grpc/load_reporting.h>
#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@ -31,18 +30,20 @@
#include "src/core/ext/filters/load_reporting/server_load_reporting_filter.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/context.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/sockaddr_posix.h"
#include "src/core/lib/iomgr/socket_utils.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/security/context/security_context.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/transport/static_metadata.h"
namespace grpc {
constexpr char kEncodedIpv4AddressLengthString[] = "08";
constexpr char kEncodedIpv6AddressLengthString[] = "32";
constexpr char kEmptyAddressLengthString[] = "00";
constexpr size_t kLengthPrefixSize = 2;
grpc_error* ServerLoadReportingChannelData::Init(
grpc_channel_element* /* elem */, grpc_channel_element_args* args) {
GPR_ASSERT(!args->is_last);
@ -90,7 +91,9 @@ void ServerLoadReportingCallData::Destroy(
{chand->peer_identity(), chand->peer_identity_len()}},
{::grpc::load_reporter::TagKeyStatus(),
GetStatusTagForStatus(final_info->final_status)}});
gpr_free(client_ip_and_lr_token_);
}
gpr_free(target_host_);
grpc_slice_unref_internal(service_method_);
}
@ -100,7 +103,8 @@ void ServerLoadReportingCallData::StartTransportStreamOpBatch(
if (op->recv_initial_metadata() != nullptr) {
// Save some fields to use when initial metadata is ready.
peer_string_ = op->get_peer_string();
recv_initial_metadata_ = op->recv_initial_metadata();
recv_initial_metadata_ =
op->op()->payload->recv_initial_metadata.recv_initial_metadata;
original_recv_initial_metadata_ready_ = op->recv_initial_metadata_ready();
// Substitute the original closure for the wrapper closure.
op->set_recv_initial_metadata_ready(&recv_initial_metadata_ready_);
@ -157,10 +161,10 @@ void ServerLoadReportingCallData::GetCensusSafeClientIpString(
*size = 8;
} else if (addr->sa_family == GRPC_AF_INET6) {
grpc_sockaddr_in6* addr6 = reinterpret_cast<grpc_sockaddr_in6*>(addr);
*client_ip_string = static_cast<char*>(gpr_malloc(32));
*client_ip_string = static_cast<char*>(gpr_malloc(32 + 1));
for (size_t i = 0; i < 16; ++i) {
sprintf(*client_ip_string + i, "%02x",
addr6->sin6_addr.__in6_u.__u6_addr8[i]);
snprintf(*client_ip_string + i * 2, 2 + 1, "%02x",
addr6->sin6_addr.__in6_u.__u6_addr8[i]);
}
*size = 32;
} else {
@ -241,7 +245,7 @@ void ServerLoadReportingCallData::RecvInitialMetadataReady(void* arg,
if (err == GRPC_ERROR_NONE) {
GRPC_LOG_IF_ERROR(
"server_load_reporting_filter",
grpc_metadata_batch_filter(calld->recv_initial_metadata_->batch(),
grpc_metadata_batch_filter(calld->recv_initial_metadata_,
RecvInitialMetadataFilter, elem,
"recv_initial_metadata filtering error"));
// If the LB token was not found in the recv_initial_metadata, only the
@ -277,7 +281,6 @@ grpc_filtered_mdelem ServerLoadReportingCallData::SendTrailingMetadataFilter(
reinterpret_cast<ServerLoadReportingCallData*>(elem->call_data);
ServerLoadReportingChannelData* chand =
reinterpret_cast<ServerLoadReportingChannelData*>(elem->channel_data);
// TODO(juanlishen): GRPC_MDSTR_LB_COST_BIN meaning?
if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_LB_COST_BIN)) {
const grpc_slice value = GRPC_MDVALUE(md);
const size_t cost_entry_size = GRPC_SLICE_LENGTH(value);
@ -325,4 +328,35 @@ const char* ServerLoadReportingCallData::GetStatusTagForStatus(
}
}
namespace {
bool MaybeAddServerLoadReportingFilter(const grpc_channel_args& args) {
return grpc_channel_arg_get_bool(
grpc_channel_args_find(&args, GRPC_ARG_ENABLE_LOAD_REPORTING), false);
}
} // namespace
// TODO(juanlishen): We should register the filter during grpc initialization
// time once OpenCensus is compatible with our build system. For now, we force
// registration of the server load reporting filter at static initialization
// time if we build with the filter target.
struct ServerLoadReportingFilterStaticRegistrar {
ServerLoadReportingFilterStaticRegistrar() {
static std::atomic_bool registered{false};
if (registered) return;
RegisterChannelFilter<ServerLoadReportingChannelData,
ServerLoadReportingCallData>(
"server_load_reporting", GRPC_SERVER_CHANNEL, INT_MAX,
MaybeAddServerLoadReportingFilter);
// Access measures to ensure they are initialized. Otherwise, we can't
// create any valid view before the first RPC.
::grpc::load_reporter::MeasureStartCount();
::grpc::load_reporter::MeasureEndCount();
::grpc::load_reporter::MeasureEndBytesSent();
::grpc::load_reporter::MeasureEndBytesReceived();
::grpc::load_reporter::MeasureEndLatencyMs();
::grpc::load_reporter::MeasureOtherCallMetric();
registered = true;
}
} server_load_reporting_filter_static_registrar;
} // namespace grpc
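The registrar above relies on a namespace-scope object whose constructor runs during static initialization. A standalone sketch of that idiom with hypothetical names; the function-local static sidesteps the static initialization order problem.
#include <functional>
#include <utility>
#include <vector>
std::vector<std::function<void()>>& PendingRegistrations() {
  static std::vector<std::function<void()>> regs;  // constructed on first use
  return regs;
}
struct StaticRegistrar {
  explicit StaticRegistrar(std::function<void()> fn) {
    PendingRegistrations().push_back(std::move(fn));  // runs before main()
  }
};
// Linking this translation unit is enough to enqueue the registration.
static StaticRegistrar register_example_filter([] { /* register filter */ });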

@ -86,8 +86,8 @@ class ServerLoadReportingCallData : public CallData {
// The received initial metadata (a member of the recv_initial_metadata op).
// When it is ready, we will extract some data from it via
// recv_initial_metadata_ready_ closure, before the original
// recv_initial_metadata_ready closure,
MetadataBatch* recv_initial_metadata_;
// recv_initial_metadata_ready closure.
grpc_metadata_batch* recv_initial_metadata_;
// The original recv_initial_metadata closure, which is wrapped by our own
// closure (recv_initial_metadata_ready_) to capture the incoming initial
@ -112,11 +112,6 @@ class ServerLoadReportingCallData : public CallData {
// token.
char* client_ip_and_lr_token_;
size_t client_ip_and_lr_token_len_;
static constexpr char kEncodedIpv4AddressLengthString[] = "08";
static constexpr char kEncodedIpv6AddressLengthString[] = "32";
static constexpr char kEmptyAddressLengthString[] = "00";
static constexpr size_t kLengthPrefixSize = 2;
};
} // namespace grpc

@ -2663,6 +2663,9 @@ static void init_keepalive_ping_locked(void* arg, grpc_error* error) {
static void start_keepalive_ping_locked(void* arg, grpc_error* error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg);
if (error != GRPC_ERROR_NONE) {
return;
}
GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive watchdog");
grpc_timer_init(&t->keepalive_watchdog_timer,
grpc_core::ExecCtx::Get()->Now() + t->keepalive_timeout,

@ -1622,7 +1622,7 @@ grpc_error* grpc_chttp2_header_parser_parse(void* hpack_parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last) {
GPR_TIMER_SCOPE("grpc_chttp2_hpack_parser_parse", 0);
GPR_TIMER_SCOPE("grpc_chttp2_header_parser_parse", 0);
grpc_chttp2_hpack_parser* parser =
static_cast<grpc_chttp2_hpack_parser*>(hpack_parser);
if (s != nullptr) {

@ -131,38 +131,6 @@ void ChannelTrace::AddTraceEventReferencingSubchannel(
namespace {
// returns an allocated string that represents tm according to RFC-3339, and,
// more specifically, follows:
// https://developers.google.com/protocol-buffers/docs/proto3#json
//
// "Uses RFC 3339, where generated output will always be Z-normalized and uses
// 0, 3, 6 or 9 fractional digits."
char* fmt_time(gpr_timespec tm) {
char time_buffer[35];
char ns_buffer[11]; // '.' + 9 digits of precision
struct tm* tm_info = localtime((const time_t*)&tm.tv_sec);
strftime(time_buffer, sizeof(time_buffer), "%Y-%m-%dT%H:%M:%S", tm_info);
snprintf(ns_buffer, 11, ".%09d", tm.tv_nsec);
// This loop trims off trailing zeros by inserting a null character at the
// right point. We iterate in chunks of three because we want 0, 3, 6, or 9
// fractional digits.
for (int i = 7; i >= 1; i -= 3) {
if (ns_buffer[i] == '0' && ns_buffer[i + 1] == '0' &&
ns_buffer[i + 2] == '0') {
ns_buffer[i] = '\0';
// Edge case in which all fractional digits were 0.
if (i == 1) {
ns_buffer[0] = '\0';
}
} else {
break;
}
}
char* full_time_str;
gpr_asprintf(&full_time_str, "%s%sZ", time_buffer, ns_buffer);
return full_time_str;
}
const char* severity_string(ChannelTrace::Severity severity) {
switch (severity) {
case ChannelTrace::Severity::Info:
@ -186,9 +154,9 @@ void ChannelTrace::TraceEvent::RenderTraceEvent(grpc_json* json) const {
json_iterator = grpc_json_create_child(json_iterator, json, "severity",
severity_string(severity_),
GRPC_JSON_STRING, false);
json_iterator =
grpc_json_create_child(json_iterator, json, "timestamp",
fmt_time(timestamp_), GRPC_JSON_STRING, true);
json_iterator = grpc_json_create_child(json_iterator, json, "timestamp",
gpr_format_timespec(timestamp_),
GRPC_JSON_STRING, true);
if (referenced_channel_ != nullptr) {
char* uuid_str;
gpr_asprintf(&uuid_str, "%" PRIdPTR, referenced_channel_->channel_uuid());
@ -206,7 +174,7 @@ void ChannelTrace::TraceEvent::RenderTraceEvent(grpc_json* json) const {
}
}
grpc_json* ChannelTrace::RenderJSON() const {
grpc_json* ChannelTrace::RenderJson() const {
if (!max_list_size_)
return nullptr; // tracing is disabled if max_events == 0
grpc_json* json = grpc_json_create(GRPC_JSON_OBJECT);
@ -216,9 +184,9 @@ grpc_json* ChannelTrace::RenderJSON() const {
json_iterator =
grpc_json_create_child(json_iterator, json, "numEventsLogged",
num_events_logged_str, GRPC_JSON_STRING, true);
json_iterator =
grpc_json_create_child(json_iterator, json, "creationTimestamp",
fmt_time(time_created_), GRPC_JSON_STRING, true);
json_iterator = grpc_json_create_child(
json_iterator, json, "creationTimestamp",
gpr_format_timespec(time_created_), GRPC_JSON_STRING, true);
grpc_json* events = grpc_json_create_child(json_iterator, json, "events",
nullptr, GRPC_JSON_ARRAY, false);
json_iterator = nullptr;

@ -71,7 +71,7 @@ class ChannelTrace {
// Creates and returns the raw grpc_json object, so a parent channelz
// object may incorporate the json before rendering.
grpc_json* RenderJSON() const;
grpc_json* RenderJson() const;
private:
// Types of objects that can be references by trace events.

@ -36,71 +36,27 @@
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/error_utils.h"
namespace grpc_core {
namespace channelz {
namespace {
// TODO(ncteisen): move this function to a common helper location.
//
// returns an allocated string that represents tm according to RFC-3339, and,
// more specifically, follows:
// https://developers.google.com/protocol-buffers/docs/proto3#json
//
// "Uses RFC 3339, where generated output will always be Z-normalized and uses
// 0, 3, 6 or 9 fractional digits."
char* fmt_time(gpr_timespec tm) {
char time_buffer[35];
char ns_buffer[11]; // '.' + 9 digits of precision
struct tm* tm_info = localtime((const time_t*)&tm.tv_sec);
strftime(time_buffer, sizeof(time_buffer), "%Y-%m-%dT%H:%M:%S", tm_info);
snprintf(ns_buffer, 11, ".%09d", tm.tv_nsec);
// This loop trims off trailing zeros by inserting a null character at the
// right point. We iterate in chunks of three because we want 0, 3, 6, or 9
// fractional digits.
for (int i = 7; i >= 1; i -= 3) {
if (ns_buffer[i] == '0' && ns_buffer[i + 1] == '0' &&
ns_buffer[i + 2] == '0') {
ns_buffer[i] = '\0';
// Edge case in which all fractional digits were 0.
if (i == 1) {
ns_buffer[0] = '\0';
}
} else {
break;
}
}
char* full_time_str;
gpr_asprintf(&full_time_str, "%s%sZ", time_buffer, ns_buffer);
return full_time_str;
}
// TODO(ncteisen); move this to json library
grpc_json* add_num_str(grpc_json* parent, grpc_json* it, const char* name,
int64_t num) {
char* num_str;
gpr_asprintf(&num_str, "%" PRId64, num);
return grpc_json_create_child(it, parent, name, num_str, GRPC_JSON_STRING,
true);
}
} // namespace
ChannelNode::ChannelNode(grpc_channel* channel, size_t channel_tracer_max_nodes)
: channel_(channel), target_(nullptr), channel_uuid_(-1) {
ChannelNode::ChannelNode(grpc_channel* channel, size_t channel_tracer_max_nodes,
bool is_top_level_channel)
: channel_(channel),
target_(nullptr),
channel_uuid_(-1),
is_top_level_channel_(is_top_level_channel) {
trace_.Init(channel_tracer_max_nodes);
target_ = UniquePtr<char>(grpc_channel_get_target(channel_));
channel_uuid_ = ChannelzRegistry::Register(this);
channel_uuid_ = ChannelzRegistry::RegisterChannelNode(this);
gpr_atm_no_barrier_store(&last_call_started_millis_,
(gpr_atm)ExecCtx::Get()->Now());
}
ChannelNode::~ChannelNode() {
trace_.Destroy();
ChannelzRegistry::Unregister(channel_uuid_);
ChannelzRegistry::UnregisterChannelNode(channel_uuid_);
}
void ChannelNode::RecordCallStarted() {
@ -109,18 +65,11 @@ void ChannelNode::RecordCallStarted() {
(gpr_atm)ExecCtx::Get()->Now());
}
grpc_connectivity_state ChannelNode::GetConnectivityState() {
if (channel_ == nullptr) {
return GRPC_CHANNEL_SHUTDOWN;
} else {
// TODO(ncteisen): re-enable this once we have cleaned up all of the
// internal dependency issues.
// return grpc_channel_check_connectivity_state(channel_, false);
return GRPC_CHANNEL_IDLE;
}
}
void ChannelNode::PopulateConnectivityState(grpc_json* json) {}
char* ChannelNode::RenderJSON() {
void ChannelNode::PopulateChildRefs(grpc_json* json) {}
grpc_json* ChannelNode::RenderJson() {
// We need to track these three json objects to build our object
grpc_json* top_level_json = grpc_json_create(GRPC_JSON_OBJECT);
grpc_json* json = top_level_json;
@ -130,7 +79,8 @@ char* ChannelNode::RenderJSON() {
GRPC_JSON_OBJECT, false);
json = json_iterator;
json_iterator = nullptr;
json_iterator = add_num_str(json, json_iterator, "channelId", channel_uuid_);
json_iterator = grpc_json_add_number_string_child(json, json_iterator,
"channelId", channel_uuid_);
// reset json iterators to top level object
json = top_level_json;
json_iterator = nullptr;
@ -139,50 +89,65 @@ char* ChannelNode::RenderJSON() {
GRPC_JSON_OBJECT, false);
json = data;
json_iterator = nullptr;
// create and fill the connectivity state child.
grpc_connectivity_state connectivity_state = GetConnectivityState();
json_iterator = grpc_json_create_child(json_iterator, json, "state", nullptr,
GRPC_JSON_OBJECT, false);
json = json_iterator;
grpc_json_create_child(nullptr, json, "state",
grpc_connectivity_state_name(connectivity_state),
GRPC_JSON_STRING, false);
// reset the parent to be the data object.
json = data;
PopulateConnectivityState(json);
GPR_ASSERT(target_.get() != nullptr);
json_iterator = grpc_json_create_child(
json_iterator, json, "target", target_.get(), GRPC_JSON_STRING, false);
// fill in the channel trace if applicable
grpc_json* trace = trace_->RenderJSON();
grpc_json* trace = trace_->RenderJson();
if (trace != nullptr) {
// we manuall link up and fill the child since it was created for us in
// ChannelTrace::RenderJSON
// we manually link up and fill the child since it was created for us in
// ChannelTrace::RenderJson
trace->key = "trace"; // this object is named trace in channelz.proto
json_iterator = grpc_json_link_child(json, trace, json_iterator);
trace->parent = json;
trace->value = nullptr;
trace->key = "trace";
trace->owns_value = false;
}
// reset the parent to be the data object.
json = data;
json_iterator = nullptr;
// We use -1 as a sentinel value since the proto default value for integers is
// zero, and that confuses the parser into thinking the values weren't present.
json_iterator =
add_num_str(json, json_iterator, "callsStarted", calls_started_);
json_iterator =
add_num_str(json, json_iterator, "callsSucceeded", calls_succeeded_);
json_iterator =
add_num_str(json, json_iterator, "callsFailed", calls_failed_);
if (calls_started_ != 0) {
json_iterator = grpc_json_add_number_string_child(
json, json_iterator, "callsStarted", calls_started_);
}
if (calls_succeeded_ != 0) {
json_iterator = grpc_json_add_number_string_child(
json, json_iterator, "callsSucceeded", calls_succeeded_);
}
if (calls_failed_) {
json_iterator = grpc_json_add_number_string_child(
json, json_iterator, "callsFailed", calls_failed_);
}
gpr_timespec ts =
grpc_millis_to_timespec(last_call_started_millis_, GPR_CLOCK_REALTIME);
json_iterator =
grpc_json_create_child(json_iterator, json, "lastCallStartedTimestamp",
fmt_time(ts), GRPC_JSON_STRING, true);
// render and return the over json object
char* json_str = grpc_json_dump_to_string(top_level_json, 0);
grpc_json_destroy(top_level_json);
gpr_format_timespec(ts), GRPC_JSON_STRING, true);
json = top_level_json;
json_iterator = nullptr;
PopulateChildRefs(json);
return top_level_json;
}
char* ChannelNode::RenderJsonString() {
grpc_json* json = RenderJson();
char* json_str = grpc_json_dump_to_string(json, 0);
grpc_json_destroy(json);
return json_str;
}
RefCountedPtr<ChannelNode> ChannelNode::MakeChannelNode(
grpc_channel* channel, size_t channel_tracer_max_nodes,
bool is_top_level_channel) {
return MakeRefCounted<grpc_core::channelz::ChannelNode>(
channel, channel_tracer_max_nodes, is_top_level_channel);
}
SubchannelNode::SubchannelNode() {
subchannel_uuid_ = ChannelzRegistry::RegisterSubchannelNode(this);
}
SubchannelNode::~SubchannelNode() {
ChannelzRegistry::UnregisterSubchannelNode(subchannel_uuid_);
}
} // namespace channelz
} // namespace grpc_core

@ -31,6 +31,14 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/json/json.h"
// Channel arg key for the channelz node creation function.
#define GRPC_ARG_CHANNELZ_CHANNEL_NODE_CREATION_FUNC \
"grpc.channelz_channel_node_creation_func"
// Channel arg key to signal that the channel is an internal channel.
#define GRPC_ARG_CHANNELZ_CHANNEL_IS_INTERNAL_CHANNEL \
"grpc.channelz_channel_is_internal_channel"
namespace grpc_core {
namespace channelz {
@ -40,8 +48,9 @@ class ChannelNodePeer;
class ChannelNode : public RefCounted<ChannelNode> {
public:
ChannelNode(grpc_channel* channel, size_t channel_tracer_max_nodes);
~ChannelNode();
static RefCountedPtr<ChannelNode> MakeChannelNode(
grpc_channel* channel, size_t channel_tracer_max_nodes,
bool is_top_level_channel);
void RecordCallStarted();
void RecordCallFailed() {
@ -51,24 +60,39 @@ class ChannelNode : public RefCounted<ChannelNode> {
gpr_atm_no_barrier_fetch_add(&calls_succeeded_, (gpr_atm(1)));
}
char* RenderJSON();
grpc_json* RenderJson();
char* RenderJsonString();
// helper for getting and populating connectivity state. It is virtual
// because it allows the client_channel specific code to live in ext/
// instead of lib/
virtual void PopulateConnectivityState(grpc_json* json);
virtual void PopulateChildRefs(grpc_json* json);
ChannelTrace* trace() { return trace_.get(); }
void set_channel_destroyed() {
void MarkChannelDestroyed() {
GPR_ASSERT(channel_ != nullptr);
channel_ = nullptr;
}
bool ChannelIsDestroyed() { return channel_ == nullptr; }
intptr_t channel_uuid() { return channel_uuid_; }
bool is_top_level_channel() { return is_top_level_channel_; }
protected:
GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE
GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_NEW
ChannelNode(grpc_channel* channel, size_t channel_tracer_max_nodes,
bool is_top_level_channel);
virtual ~ChannelNode();
private:
// testing peer friend.
friend class testing::ChannelNodePeer;
// helper for getting connectivity state.
grpc_connectivity_state GetConnectivityState();
grpc_channel* channel_ = nullptr;
UniquePtr<char> target_;
gpr_atm calls_started_ = 0;
@ -76,9 +100,33 @@ class ChannelNode : public RefCounted<ChannelNode> {
gpr_atm calls_failed_ = 0;
gpr_atm last_call_started_millis_ = 0;
intptr_t channel_uuid_;
bool is_top_level_channel_ = true;
ManualConstructor<ChannelTrace> trace_;
};
// Placeholder channelz class for subchannels. All this can do for now is
// track its uuid (this information is needed by the parent channelz class).
// TODO(ncteisen): build this out to support the GetSubchannel channelz request.
class SubchannelNode : public RefCounted<SubchannelNode> {
public:
SubchannelNode();
virtual ~SubchannelNode();
intptr_t subchannel_uuid() { return subchannel_uuid_; }
protected:
GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE
GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_NEW
private:
intptr_t subchannel_uuid_;
};
// Creation functions
typedef RefCountedPtr<ChannelNode> (*ChannelNodeCreationFunc)(grpc_channel*,
size_t, bool);
} // namespace channelz
} // namespace grpc_core

@ -19,34 +19,24 @@
#include <grpc/impl/codegen/port_platform.h>
#include "src/core/lib/channel/channel_trace.h"
#include "src/core/lib/channel/channelz.h"
#include "src/core/lib/channel/channelz_registry.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/memory.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <cstring>
namespace grpc_core {
namespace channelz {
namespace {
// singleton instance of the registry.
ChannelzRegistry* g_channelz_registry = nullptr;
// avl vtable for uuid (intptr_t) -> channelz_obj (void*)
// this table is only looking, it does not own anything.
void destroy_intptr(void* not_used, void* user_data) {}
void* copy_intptr(void* key, void* user_data) { return key; }
long compare_intptr(void* key1, void* key2, void* user_data) {
return GPR_ICMP(key1, key2);
}
void destroy_channelz_obj(void* channelz_obj, void* user_data) {}
void* copy_channelz_obj(void* channelz_obj, void* user_data) {
return channelz_obj;
}
const grpc_avl_vtable avl_vtable = {destroy_intptr, copy_intptr, compare_intptr,
destroy_channelz_obj, copy_channelz_obj};
} // anonymous namespace
void ChannelzRegistry::Init() { g_channelz_registry = New<ChannelzRegistry>(); }
@ -58,20 +48,97 @@ ChannelzRegistry* ChannelzRegistry::Default() {
return g_channelz_registry;
}
ChannelzRegistry::ChannelzRegistry() : uuid_(1) {
gpr_mu_init(&mu_);
avl_ = grpc_avl_create(&avl_vtable);
ChannelzRegistry::ChannelzRegistry() { gpr_mu_init(&mu_); }
ChannelzRegistry::~ChannelzRegistry() { gpr_mu_destroy(&mu_); }
intptr_t ChannelzRegistry::InternalRegisterEntry(const RegistryEntry& entry) {
mu_guard guard(&mu_);
entities_.push_back(entry);
intptr_t uuid = entities_.size();
return uuid;
}
ChannelzRegistry::~ChannelzRegistry() {
grpc_avl_unref(avl_, nullptr);
gpr_mu_destroy(&mu_);
void ChannelzRegistry::InternalUnregisterEntry(intptr_t uuid, EntityType type) {
GPR_ASSERT(uuid >= 1);
mu_guard guard(&mu_);
GPR_ASSERT(static_cast<size_t>(uuid) <= entities_.size());
GPR_ASSERT(entities_[uuid - 1].type == type);
entities_[uuid - 1].object = nullptr;
entities_[uuid - 1].type = EntityType::kUnset;
}
void ChannelzRegistry::InternalUnregister(intptr_t uuid) {
gpr_mu_lock(&mu_);
avl_ = grpc_avl_remove(avl_, (void*)uuid, nullptr);
gpr_mu_unlock(&mu_);
void* ChannelzRegistry::InternalGetEntry(intptr_t uuid, EntityType type) {
mu_guard guard(&mu_);
if (uuid < 1 || uuid > static_cast<intptr_t>(entities_.size())) {
return nullptr;
}
if (entities_[uuid - 1].type == type) {
return entities_[uuid - 1].object;
} else {
return nullptr;
}
}
char* ChannelzRegistry::InternalGetTopChannels(intptr_t start_channel_id) {
grpc_json* top_level_json = grpc_json_create(GRPC_JSON_OBJECT);
grpc_json* json = top_level_json;
grpc_json* json_iterator = nullptr;
InlinedVector<ChannelNode*, 10> top_level_channels;
// uuids index into entities off by one (idx 0 is really uuid 1, since 0 is
// reserved). However, we want to support requests coming in with
// start_channel_id=0, which signifies "give me everything." Hence the
// funky-looking line below.
size_t start_idx = start_channel_id == 0 ? 0 : start_channel_id - 1;
for (size_t i = start_idx; i < entities_.size(); ++i) {
if (entities_[i].type == EntityType::kChannelNode) {
ChannelNode* channel_node =
static_cast<ChannelNode*>(entities_[i].object);
if (channel_node->is_top_level_channel()) {
top_level_channels.push_back(channel_node);
}
}
}
if (top_level_channels.size() > 0) {
// create list of channels
grpc_json* array_parent = grpc_json_create_child(
nullptr, json, "channel", nullptr, GRPC_JSON_ARRAY, false);
for (size_t i = 0; i < top_level_channels.size(); ++i) {
grpc_json* channel_json = top_level_channels[i]->RenderJson();
json_iterator =
grpc_json_link_child(array_parent, channel_json, json_iterator);
}
}
// For now we do not have any pagination rules. In the future we could
// pick a constant for max_channels_sent for a GetTopChannels request.
// Tracking: https://github.com/grpc/grpc/issues/16019.
json_iterator = grpc_json_create_child(nullptr, json, "end", nullptr,
GRPC_JSON_TRUE, false);
char* json_str = grpc_json_dump_to_string(top_level_json, 0);
grpc_json_destroy(top_level_json);
return json_str;
}
} // namespace channelz
} // namespace grpc_core
char* grpc_channelz_get_top_channels(intptr_t start_channel_id) {
return grpc_core::channelz::ChannelzRegistry::GetTopChannels(
start_channel_id);
}
char* grpc_channelz_get_channel(intptr_t channel_id) {
grpc_core::channelz::ChannelNode* channel_node =
grpc_core::channelz::ChannelzRegistry::GetChannelNode(channel_id);
if (channel_node == nullptr) {
return nullptr;
}
grpc_json* top_level_json = grpc_json_create(GRPC_JSON_OBJECT);
grpc_json* json = top_level_json;
grpc_json* channel_json = channel_node->RenderJson();
channel_json->key = "channel";
grpc_json_link_child(json, channel_json, nullptr);
char* json_str = grpc_json_dump_to_string(top_level_json, 0);
grpc_json_destroy(top_level_json);
return json_str;
}
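// Illustrative only, not part of this change: a minimal sketch of exercising
// the C surface above once channelz has been enabled on at least one channel.
// The returned JSON string is heap allocated and owned by the caller.
static void log_top_channels_sketch() {
  char* json = grpc_channelz_get_top_channels(0 /* start_channel_id: all */);
  if (json != nullptr) {
    gpr_log(GPR_INFO, "channelz top channels: %s", json);
    gpr_free(json);
  }
}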

@ -21,12 +21,14 @@
#include <grpc/impl/codegen/port_platform.h>
#include "src/core/lib/avl/avl.h"
#include "src/core/lib/channel/channel_trace.h"
#include "src/core/lib/channel/channelz.h"
#include "src/core/lib/gprpp/inlined_vector.h"
#include <stdint.h>
namespace grpc_core {
namespace channelz {
// singleton registry object to track all objects that are needed to support
// channelz bookkeeping. All objects share globally distributed uuids.
@ -35,26 +37,56 @@ class ChannelzRegistry {
// To be called in grpc_init()
static void Init();
// To be callen in grpc_shutdown();
// To be called in grpc_shutdown();
static void Shutdown();
// globally registers a channelz Object. Returns its unique uuid
template <typename Object>
static intptr_t Register(Object* object) {
return Default()->InternalRegister(object);
// Register/Unregister/Get for ChannelNode
static intptr_t RegisterChannelNode(ChannelNode* channel_node) {
RegistryEntry entry(channel_node, EntityType::kChannelNode);
return Default()->InternalRegisterEntry(entry);
}
static void UnregisterChannelNode(intptr_t uuid) {
Default()->InternalUnregisterEntry(uuid, EntityType::kChannelNode);
}
static ChannelNode* GetChannelNode(intptr_t uuid) {
void* gotten = Default()->InternalGetEntry(uuid, EntityType::kChannelNode);
return gotten == nullptr ? nullptr : static_cast<ChannelNode*>(gotten);
}
// globally unregisters the object that is associated to uuid.
static void Unregister(intptr_t uuid) { Default()->InternalUnregister(uuid); }
// Register/Unregister/Get for SubchannelNode
static intptr_t RegisterSubchannelNode(SubchannelNode* channel_node) {
RegistryEntry entry(channel_node, EntityType::kSubchannelNode);
return Default()->InternalRegisterEntry(entry);
}
static void UnregisterSubchannelNode(intptr_t uuid) {
Default()->InternalUnregisterEntry(uuid, EntityType::kSubchannelNode);
}
static SubchannelNode* GetSubchannelNode(intptr_t uuid) {
void* gotten =
Default()->InternalGetEntry(uuid, EntityType::kSubchannelNode);
return gotten == nullptr ? nullptr : static_cast<SubchannelNode*>(gotten);
}
// if object with uuid has previously been registered, returns the
// Object associated with that uuid. Else returns nullptr.
template <typename Object>
static Object* Get(intptr_t uuid) {
return Default()->InternalGet<Object>(uuid);
// Returns the allocated JSON string that represents the proto
// GetTopChannelsResponse as per channelz.proto.
static char* GetTopChannels(intptr_t start_channel_id) {
return Default()->InternalGetTopChannels(start_channel_id);
}
private:
enum class EntityType {
kChannelNode,
kSubchannelNode,
kUnset,
};
struct RegistryEntry {
RegistryEntry(void* object_in, EntityType type_in)
: object(object_in), type(type_in) {}
void* object;
EntityType type;
};
GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_NEW
GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE
@ -64,36 +96,25 @@ class ChannelzRegistry {
// Returns the singleton instance of ChannelzRegistry.
static ChannelzRegistry* Default();
// globally registers a channelz Object. Returns its unique uuid
template <typename Object>
intptr_t InternalRegister(Object* object) {
intptr_t prior = gpr_atm_no_barrier_fetch_add(&uuid_, 1);
gpr_mu_lock(&mu_);
avl_ = grpc_avl_add(avl_, (void*)prior, object, nullptr);
gpr_mu_unlock(&mu_);
return prior;
}
// globally registers an Entry. Returns its unique uuid
intptr_t InternalRegisterEntry(const RegistryEntry& entry);
// globally unregisters the object that is associated to uuid.
void InternalUnregister(intptr_t uuid);
// if object with uuid has previously been registered, returns the
// Object associated with that uuid. Else returns nullptr.
template <typename Object>
Object* InternalGet(intptr_t uuid) {
gpr_mu_lock(&mu_);
Object* ret =
static_cast<Object*>(grpc_avl_get(avl_, (void*)uuid, nullptr));
gpr_mu_unlock(&mu_);
return ret;
}
// globally unregisters the object associated with uuid. Also sanity-checks
// that an object doesn't try to unregister the wrong type.
void InternalUnregisterEntry(intptr_t uuid, EntityType type);
// if object with uuid has previously been registered as the correct type,
// returns the void* associated with that uuid. Else returns nullptr.
void* InternalGetEntry(intptr_t uuid, EntityType type);
char* InternalGetTopChannels(intptr_t start_channel_id);
// private members
// protects entities_
gpr_mu mu_;
grpc_avl avl_;
gpr_atm uuid_;
InlinedVector<RegistryEntry, 20> entities_;
};
} // namespace channelz
} // namespace grpc_core
#endif /* GRPC_CORE_LIB_CHANNEL_CHANNELZ_REGISTRY_H */

@ -23,8 +23,10 @@
#include <ctype.h>
#include <limits.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@ -54,6 +56,32 @@ typedef struct {
char* data;
} dump_out;
char* gpr_format_timespec(gpr_timespec tm) {
char time_buffer[35];
char ns_buffer[11]; // '.' + 9 digits of precision
struct tm* tm_info = localtime((const time_t*)&tm.tv_sec);
strftime(time_buffer, sizeof(time_buffer), "%Y-%m-%dT%H:%M:%S", tm_info);
snprintf(ns_buffer, 11, ".%09d", tm.tv_nsec);
// This loop trims off trailing zeros by inserting a null character at the
// right point. We iterate in chunks of three because we want 0, 3, 6, or 9
// fractional digits.
for (int i = 7; i >= 1; i -= 3) {
if (ns_buffer[i] == '0' && ns_buffer[i + 1] == '0' &&
ns_buffer[i + 2] == '0') {
ns_buffer[i] = '\0';
// Edge case in which all fractional digits were 0.
if (i == 1) {
ns_buffer[0] = '\0';
}
} else {
break;
}
}
char* full_time_str;
gpr_asprintf(&full_time_str, "%s%sZ", time_buffer, ns_buffer);
return full_time_str;
}
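// Illustrative only, not part of this change: a minimal usage sketch of the
// formatter above (assumes <grpc/support/time.h> for gpr_now). Formatting the
// realtime clock yields something like "2018-07-25T17:03:12.250Z", and the
// caller owns the returned string.
static void format_timespec_sketch(void) {
  gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
  char* formatted = gpr_format_timespec(now);
  gpr_log(GPR_INFO, "now: %s", formatted);
  gpr_free(formatted);
}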
static dump_out dump_out_create(void) {
dump_out r = {0, 0, nullptr};
return r;

@ -21,6 +21,8 @@
#include <grpc/support/port_platform.h>
#include <grpc/impl/codegen/gpr_types.h>
#include <stdbool.h>
#include <stddef.h>
@ -81,6 +83,14 @@ char* gpr_strjoin_sep(const char** strs, size_t nstrs, const char* sep,
void gpr_string_split(const char* input, const char* sep, char*** strs,
size_t* nstrs);
/* Returns an allocated string that represents tm according to RFC-3339, and,
more specifically, follows:
https://developers.google.com/protocol-buffers/docs/proto3#json
Uses RFC 3339, where generated output will always be Z-normalized and uses
0, 3, 6 or 9 fractional digits. */
char* gpr_format_timespec(gpr_timespec);
/* A vector of strings... for building up a final string one piece at a time */
typedef struct {
char** strs;

@ -28,7 +28,10 @@
// gRPC currently can't depend on libstdc++, so we can't use "= 0" for
// pure virtual methods. Instead, we use this macro.
#define GRPC_ABSTRACT \
{ GPR_ASSERT(false); }
#define GRPC_ABSTRACT \
{ \
gpr_log(GPR_ERROR, "Function marked GRPC_ABSTRACT was not implemented"); \
GPR_ASSERT(false); \
}
#endif /* GRPC_CORE_LIB_GPRPP_ABSTRACT_H */
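// Illustrative only, not part of this change: a hypothetical base class using
// the macro. GRPC_ABSTRACT stands in for "= 0"; if a subclass forgets to
// override the method, calling it now logs an error before asserting.
class PickerInterface {
 public:
  virtual ~PickerInterface() {}
  virtual void Pick() GRPC_ABSTRACT;
};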

@ -22,6 +22,7 @@
#include <grpc/support/port_platform.h>
#include <cassert>
#include <cstring>
#include "src/core/lib/gprpp/memory.h"
@ -50,9 +51,33 @@ class InlinedVector {
InlinedVector() { init_data(); }
~InlinedVector() { destroy_elements(); }
// For now, we do not support copying.
InlinedVector(const InlinedVector&) = delete;
InlinedVector& operator=(const InlinedVector&) = delete;
// copy constructor
InlinedVector(const InlinedVector& v) {
init_data();
copy_from(v);
}
InlinedVector& operator=(const InlinedVector& v) {
if (this != &v) {
clear();
copy_from(v);
}
return *this;
}
// move constructor
InlinedVector(InlinedVector&& v) {
init_data();
move_from(v);
}
InlinedVector& operator=(InlinedVector&& v) {
if (this != &v) {
clear();
move_from(v);
}
return *this;
}
T* data() {
return dynamic_ != nullptr ? dynamic_ : reinterpret_cast<T*>(inline_);
@ -98,6 +123,33 @@ class InlinedVector {
void push_back(T&& value) { emplace_back(std::move(value)); }
void copy_from(const InlinedVector& v) {
// if v is allocated, copy over the buffer.
if (v.dynamic_ != nullptr) {
reserve(v.capacity_);
memcpy(dynamic_, v.dynamic_, v.size_ * sizeof(T));
} else {
memcpy(inline_, v.inline_, v.size_ * sizeof(T));
}
// copy over metadata
size_ = v.size_;
capacity_ = v.capacity_;
}
void move_from(InlinedVector& v) {
// if v is allocated, then we steal its buffer, else we copy it.
if (v.dynamic_ != nullptr) {
dynamic_ = v.dynamic_;
} else {
memcpy(inline_, v.inline_, v.size_ * sizeof(T));
}
// copy over metadata
size_ = v.size_;
capacity_ = v.capacity_;
// null out the original
v.init_data();
}
size_t size() const { return size_; }
bool empty() const { return size_ == 0; }
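// Illustrative only, not part of this change: with the constructors added
// above, a copy leaves the source untouched while a move steals any heap
// buffer and resets the source to empty. Sketch assumes a trivially copyable
// T plus <utility> and <grpc/support/log.h> for std::move and GPR_ASSERT.
inline void inlined_vector_copy_move_sketch() {
  grpc_core::InlinedVector<int, 4> src;
  src.push_back(1);
  src.push_back(2);
  grpc_core::InlinedVector<int, 4> copied(src);            // src keeps its 2 elements
  grpc_core::InlinedVector<int, 4> moved(std::move(src));  // src is left empty
  GPR_ASSERT(copied.size() == 2 && moved.size() == 2 && src.empty());
}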

@ -31,12 +31,12 @@
// protected destructor.
#define GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE \
template <typename T> \
friend void Delete(T*);
friend void grpc_core::Delete(T*);
// Add this to a class that wants to use New(), but has a private or
// protected constructor.
#define GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_NEW \
template <typename T, typename... Args> \
friend T* New(Args&&...);
friend T* grpc_core::New(Args&&...);
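// Illustrative only, not part of this change: a hypothetical class (it would
// live in a .cc that includes this header) whose constructor and destructor
// stay non-public but which can still be heap-managed through grpc_core::New()
// and grpc_core::Delete(), thanks to the friend declarations above.
//
// class OnlyHeapConstructible {
//  public:
//   static OnlyHeapConstructible* Make() {
//     return grpc_core::New<OnlyHeapConstructible>();
//   }
//   void Destroy() { grpc_core::Delete(this); }
//
//  private:
//   GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_NEW
//   GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE
//   OnlyHeapConstructible() {}
//   ~OnlyHeapConstructible() {}
// };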
namespace grpc_core {

@ -107,6 +107,11 @@ inline RefCountedPtr<T> MakeRefCounted(Args&&... args) {
return RefCountedPtr<T>(New<T>(std::forward<Args>(args)...));
}
template <typename Parent, typename Child, typename... Args>
inline RefCountedPtr<Parent> MakePolymorphicRefCounted(Args&&... args) {
return RefCountedPtr<Parent>(New<Child>(std::forward<Args>(args)...));
}
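// Illustrative only, not part of this change: a sketch of the intended pattern
// with hypothetical types (assumes "src/core/lib/gprpp/ref_counted.h" for
// RefCounted). The Parent needs a virtual destructor so that releasing the
// last RefCountedPtr<Parent> destroys the Child correctly.
//
//   class BaseNode : public RefCounted<BaseNode> {
//    public:
//     virtual ~BaseNode() {}
//   };
//   class SpecializedNode : public BaseNode {};
//
//   RefCountedPtr<BaseNode> node =
//       MakePolymorphicRefCounted<BaseNode, SpecializedNode>();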
} // namespace grpc_core
#endif /* GRPC_CORE_LIB_GPRPP_REF_COUNTED_PTR_H */

@ -494,8 +494,9 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
is_fd_closed = true;
}
// TODO(sreek): handle fd removal (where is_fd_closed=false)
if (!is_fd_closed) {
gpr_log(GPR_DEBUG, "TODO: handle fd removal?");
GRPC_FD_TRACE("epoll_fd %p (%d) was orphaned but not closed.", fd, fd->fd);
}
/* Remove the active status but keep referenced. We want this grpc_fd struct
@ -1564,7 +1565,7 @@ static void pollset_set_add_pollset_set(grpc_pollset_set* a,
gpr_mu_unlock(b_mu);
}
// try to do the least copying possible
// TODO(ctiller): there's probably a better heuristic here
// TODO(sreek): there's probably a better heuristic here
const size_t a_size = a->fd_count + a->pollset_count;
const size_t b_size = b->fd_count + b->pollset_count;
if (b_size > a_size) {

@ -28,52 +28,43 @@
#include <grpc/support/sync.h>
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/gpr/spinlock.h"
#include "src/core/lib/gpr/tls.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/thd.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#define MAX_DEPTH 2
typedef struct {
gpr_mu mu;
gpr_cv cv;
grpc_closure_list elems;
size_t depth;
bool shutdown;
bool queued_long_job;
grpc_core::Thread thd;
} thread_state;
static thread_state* g_thread_state;
static size_t g_max_threads;
static gpr_atm g_cur_threads;
static gpr_spinlock g_adding_thread_lock = GPR_SPINLOCK_STATIC_INITIALIZER;
#define EXECUTOR_TRACE(format, ...) \
if (executor_trace.enabled()) { \
gpr_log(GPR_INFO, "EXECUTOR " format, __VA_ARGS__); \
}
grpc_core::TraceFlag executor_trace(false, "executor");
GPR_TLS_DECL(g_this_thread_state);
grpc_core::TraceFlag executor_trace(false, "executor");
GrpcExecutor::GrpcExecutor(const char* executor_name) : name_(executor_name) {
adding_thread_lock_ = GPR_SPINLOCK_STATIC_INITIALIZER;
gpr_atm_no_barrier_store(&num_threads_, 0);
max_threads_ = GPR_MAX(1, 2 * gpr_cpu_num_cores());
}
static void executor_thread(void* arg);
void GrpcExecutor::Init() { SetThreading(true); }
static size_t run_closures(grpc_closure_list list) {
size_t GrpcExecutor::RunClosures(grpc_closure_list list) {
size_t n = 0;
grpc_closure* c = list.head;
while (c != nullptr) {
grpc_closure* next = c->next_data.next;
grpc_error* error = c->error_data.error;
if (executor_trace.enabled()) {
#ifndef NDEBUG
gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c,
c->file_created, c->line_created);
#else
gpr_log(GPR_INFO, "EXECUTOR: run %p", c);
#endif
}
#ifndef NDEBUG
EXECUTOR_TRACE("run %p [created by %s:%d]", c, c->file_created,
c->line_created);
c->scheduled = false;
#else
EXECUTOR_TRACE("run %p", c);
#endif
c->cb(c->cb_arg, error);
GRPC_ERROR_UNREF(error);
@ -85,217 +76,282 @@ static size_t run_closures(grpc_closure_list list) {
return n;
}
bool grpc_executor_is_threaded() {
return gpr_atm_no_barrier_load(&g_cur_threads) > 0;
bool GrpcExecutor::IsThreaded() const {
return gpr_atm_no_barrier_load(&num_threads_) > 0;
}
void grpc_executor_set_threading(bool threading) {
gpr_atm cur_threads = gpr_atm_no_barrier_load(&g_cur_threads);
void GrpcExecutor::SetThreading(bool threading) {
gpr_atm curr_num_threads = gpr_atm_no_barrier_load(&num_threads_);
if (threading) {
if (cur_threads > 0) return;
g_max_threads = GPR_MAX(1, 2 * gpr_cpu_num_cores());
gpr_atm_no_barrier_store(&g_cur_threads, 1);
if (curr_num_threads > 0) return;
GPR_ASSERT(num_threads_ == 0);
gpr_atm_no_barrier_store(&num_threads_, 1);
gpr_tls_init(&g_this_thread_state);
g_thread_state = static_cast<thread_state*>(
gpr_zalloc(sizeof(thread_state) * g_max_threads));
for (size_t i = 0; i < g_max_threads; i++) {
gpr_mu_init(&g_thread_state[i].mu);
gpr_cv_init(&g_thread_state[i].cv);
g_thread_state[i].thd = grpc_core::Thread();
g_thread_state[i].elems = GRPC_CLOSURE_LIST_INIT;
thd_state_ = static_cast<ThreadState*>(
gpr_zalloc(sizeof(ThreadState) * max_threads_));
for (size_t i = 0; i < max_threads_; i++) {
gpr_mu_init(&thd_state_[i].mu);
gpr_cv_init(&thd_state_[i].cv);
thd_state_[i].id = i;
thd_state_[i].thd = grpc_core::Thread();
thd_state_[i].elems = GRPC_CLOSURE_LIST_INIT;
}
g_thread_state[0].thd =
grpc_core::Thread("grpc_executor", executor_thread, &g_thread_state[0]);
g_thread_state[0].thd.Start();
} else {
if (cur_threads == 0) return;
for (size_t i = 0; i < g_max_threads; i++) {
gpr_mu_lock(&g_thread_state[i].mu);
g_thread_state[i].shutdown = true;
gpr_cv_signal(&g_thread_state[i].cv);
gpr_mu_unlock(&g_thread_state[i].mu);
thd_state_[0].thd =
grpc_core::Thread(name_, &GrpcExecutor::ThreadMain, &thd_state_[0]);
thd_state_[0].thd.Start();
} else { // !threading
if (curr_num_threads == 0) return;
for (size_t i = 0; i < max_threads_; i++) {
gpr_mu_lock(&thd_state_[i].mu);
thd_state_[i].shutdown = true;
gpr_cv_signal(&thd_state_[i].cv);
gpr_mu_unlock(&thd_state_[i].mu);
}
/* ensure no thread is adding a new thread... once this is past, then
no thread will try to add a new one either (since shutdown is true) */
gpr_spinlock_lock(&g_adding_thread_lock);
gpr_spinlock_unlock(&g_adding_thread_lock);
for (gpr_atm i = 0; i < g_cur_threads; i++) {
g_thread_state[i].thd.Join();
/* Ensure no thread is adding a new thread. Once this is past, then no
* thread will try to add a new one either (since shutdown is true) */
gpr_spinlock_lock(&adding_thread_lock_);
gpr_spinlock_unlock(&adding_thread_lock_);
curr_num_threads = gpr_atm_no_barrier_load(&num_threads_);
for (gpr_atm i = 0; i < curr_num_threads; i++) {
thd_state_[i].thd.Join();
EXECUTOR_TRACE(" Thread %" PRIdPTR " of %" PRIdPTR " joined", i,
curr_num_threads);
}
gpr_atm_no_barrier_store(&g_cur_threads, 0);
for (size_t i = 0; i < g_max_threads; i++) {
gpr_mu_destroy(&g_thread_state[i].mu);
gpr_cv_destroy(&g_thread_state[i].cv);
run_closures(g_thread_state[i].elems);
gpr_atm_no_barrier_store(&num_threads_, 0);
for (size_t i = 0; i < max_threads_; i++) {
gpr_mu_destroy(&thd_state_[i].mu);
gpr_cv_destroy(&thd_state_[i].cv);
RunClosures(thd_state_[i].elems);
}
gpr_free(g_thread_state);
gpr_free(thd_state_);
gpr_tls_destroy(&g_this_thread_state);
}
}
void grpc_executor_init() {
gpr_atm_no_barrier_store(&g_cur_threads, 0);
grpc_executor_set_threading(true);
}
void GrpcExecutor::Shutdown() { SetThreading(false); }
void grpc_executor_shutdown() { grpc_executor_set_threading(false); }
static void executor_thread(void* arg) {
thread_state* ts = static_cast<thread_state*>(arg);
gpr_tls_set(&g_this_thread_state, (intptr_t)ts);
void GrpcExecutor::ThreadMain(void* arg) {
ThreadState* ts = static_cast<ThreadState*>(arg);
gpr_tls_set(&g_this_thread_state, reinterpret_cast<intptr_t>(ts));
grpc_core::ExecCtx exec_ctx(GRPC_EXEC_CTX_FLAG_IS_INTERNAL_THREAD);
size_t subtract_depth = 0;
for (;;) {
if (executor_trace.enabled()) {
gpr_log(GPR_INFO, "EXECUTOR[%d]: step (sub_depth=%" PRIdPTR ")",
static_cast<int>(ts - g_thread_state), subtract_depth);
}
EXECUTOR_TRACE("[%" PRIdPTR "]: step (sub_depth=%" PRIdPTR ")", ts->id,
subtract_depth);
gpr_mu_lock(&ts->mu);
ts->depth -= subtract_depth;
// Wait for closures to be enqueued or for the executor to be shut down
while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
ts->queued_long_job = false;
gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
}
if (ts->shutdown) {
if (executor_trace.enabled()) {
gpr_log(GPR_INFO, "EXECUTOR[%d]: shutdown",
static_cast<int>(ts - g_thread_state));
}
EXECUTOR_TRACE("[%" PRIdPTR "]: shutdown", ts->id);
gpr_mu_unlock(&ts->mu);
break;
}
GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED();
grpc_closure_list exec = ts->elems;
grpc_closure_list closures = ts->elems;
ts->elems = GRPC_CLOSURE_LIST_INIT;
gpr_mu_unlock(&ts->mu);
if (executor_trace.enabled()) {
gpr_log(GPR_INFO, "EXECUTOR[%d]: execute",
static_cast<int>(ts - g_thread_state));
}
EXECUTOR_TRACE("[%" PRIdPTR "]: execute", ts->id);
grpc_core::ExecCtx::Get()->InvalidateNow();
subtract_depth = run_closures(exec);
subtract_depth = RunClosures(closures);
}
}
static void executor_push(grpc_closure* closure, grpc_error* error,
bool is_short) {
void GrpcExecutor::Enqueue(grpc_closure* closure, grpc_error* error,
bool is_short) {
bool retry_push;
if (is_short) {
GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS();
} else {
GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS();
}
do {
retry_push = false;
size_t cur_thread_count =
static_cast<size_t>(gpr_atm_no_barrier_load(&g_cur_threads));
static_cast<size_t>(gpr_atm_no_barrier_load(&num_threads_));
// If the number of threads is zero (i.e. either the executor is not threaded
// or already shut down), then queue the closure on the exec context itself
if (cur_thread_count == 0) {
if (executor_trace.enabled()) {
#ifndef NDEBUG
gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p (created %s:%d) inline",
closure, closure->file_created, closure->line_created);
EXECUTOR_TRACE("schedule %p (created %s:%d) inline", closure,
closure->file_created, closure->line_created);
#else
gpr_log(GPR_INFO, "EXECUTOR: schedule %p inline", closure);
EXECUTOR_TRACE("schedule %p inline", closure);
#endif
}
grpc_closure_list_append(grpc_core::ExecCtx::Get()->closure_list(),
closure, error);
return;
}
thread_state* ts = (thread_state*)gpr_tls_get(&g_this_thread_state);
ThreadState* ts = (ThreadState*)gpr_tls_get(&g_this_thread_state);
if (ts == nullptr) {
ts = &g_thread_state[GPR_HASH_POINTER(grpc_core::ExecCtx::Get(),
cur_thread_count)];
ts = &thd_state_[GPR_HASH_POINTER(grpc_core::ExecCtx::Get(),
cur_thread_count)];
} else {
GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF();
}
thread_state* orig_ts = ts;
bool try_new_thread;
ThreadState* orig_ts = ts;
bool try_new_thread = false;
for (;;) {
if (executor_trace.enabled()) {
#ifndef NDEBUG
gpr_log(
GPR_DEBUG,
"EXECUTOR: try to schedule %p (%s) (created %s:%d) to thread %d",
closure, is_short ? "short" : "long", closure->file_created,
closure->line_created, static_cast<int>(ts - g_thread_state));
EXECUTOR_TRACE(
"try to schedule %p (%s) (created %s:%d) to thread "
"%" PRIdPTR,
closure, is_short ? "short" : "long", closure->file_created,
closure->line_created, ts->id);
#else
gpr_log(GPR_INFO, "EXECUTOR: try to schedule %p (%s) to thread %d",
closure, is_short ? "short" : "long",
(int)(ts - g_thread_state));
EXECUTOR_TRACE("try to schedule %p (%s) to thread %" PRIdPTR, closure,
is_short ? "short" : "long", ts->id);
#endif
}
gpr_mu_lock(&ts->mu);
if (ts->queued_long_job) {
// if there's a long job queued, we never queue anything else to this
// queue (since long jobs can take 'infinite' time and we need to
// guarantee no starvation)
// ... spin through queues and try again
// guarantee no starvation). Spin through queues and try again
gpr_mu_unlock(&ts->mu);
size_t idx = static_cast<size_t>(ts - g_thread_state);
ts = &g_thread_state[(idx + 1) % cur_thread_count];
size_t idx = ts->id;
ts = &thd_state_[(idx + 1) % cur_thread_count];
if (ts == orig_ts) {
// We cycled through all the threads. Retry enqueue again (by creating
// a new thread)
retry_push = true;
// TODO(sreek): What if the executor is shut down OR if
// cur_thread_count is already equal to max_threads? (Currently, as
// of July 2018, we do not run into this issue because there is only
// one instance of a long job in gRPC. This has to be fixed soon.)
try_new_thread = true;
break;
}
continue;
}
// == Found the thread state (i.e thread) to enqueue this closure! ==
// Also, if this thread has been waiting for closures, wake it up.
// - If grpc_closure_list_empty() is true and the Executor is not
// shut down, it means that the thread must be waiting in ThreadMain().
// - Note that gpr_cv_signal() won't immediately wake up the thread. That
// happens after we release the mutex &ts->mu a few lines below.
if (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED();
gpr_cv_signal(&ts->cv);
}
grpc_closure_list_append(&ts->elems, closure, error);
// If we already queued more than MAX_DEPTH number of closures on this
// thread, use this as a hint to create more threads
ts->depth++;
try_new_thread = ts->depth > MAX_DEPTH &&
cur_thread_count < g_max_threads && !ts->shutdown;
if (!is_short) ts->queued_long_job = true;
cur_thread_count < max_threads_ && !ts->shutdown;
ts->queued_long_job = !is_short;
gpr_mu_unlock(&ts->mu);
break;
}
if (try_new_thread && gpr_spinlock_trylock(&g_adding_thread_lock)) {
if (try_new_thread && gpr_spinlock_trylock(&adding_thread_lock_)) {
cur_thread_count =
static_cast<size_t>(gpr_atm_no_barrier_load(&g_cur_threads));
if (cur_thread_count < g_max_threads) {
gpr_atm_no_barrier_store(&g_cur_threads, cur_thread_count + 1);
g_thread_state[cur_thread_count].thd =
grpc_core::Thread("grpc_executor", executor_thread,
&g_thread_state[cur_thread_count]);
g_thread_state[cur_thread_count].thd.Start();
static_cast<size_t>(gpr_atm_no_barrier_load(&num_threads_));
if (cur_thread_count < max_threads_) {
// Increment num_threads (Safe to do a no_barrier_store instead of a
// cas because we always increment num_threads under the
// 'adding_thread_lock')
gpr_atm_no_barrier_store(&num_threads_, cur_thread_count + 1);
thd_state_[cur_thread_count].thd = grpc_core::Thread(
name_, &GrpcExecutor::ThreadMain, &thd_state_[cur_thread_count]);
thd_state_[cur_thread_count].thd.Start();
}
gpr_spinlock_unlock(&g_adding_thread_lock);
gpr_spinlock_unlock(&adding_thread_lock_);
}
if (retry_push) {
GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES();
}
} while (retry_push);
}
static void executor_push_short(grpc_closure* closure, grpc_error* error) {
executor_push(closure, error, true);
static GrpcExecutor* global_executor;
void enqueue_long(grpc_closure* closure, grpc_error* error) {
global_executor->Enqueue(closure, error, false /* is_short */);
}
void enqueue_short(grpc_closure* closure, grpc_error* error) {
global_executor->Enqueue(closure, error, true /* is_short */);
}
// Short-Job executor scheduler
static const grpc_closure_scheduler_vtable global_executor_vtable_short = {
enqueue_short, enqueue_short, "executor-short"};
static grpc_closure_scheduler global_scheduler_short = {
&global_executor_vtable_short};
// Long-job executor scheduler
static const grpc_closure_scheduler_vtable global_executor_vtable_long = {
enqueue_long, enqueue_long, "executor-long"};
static grpc_closure_scheduler global_scheduler_long = {
&global_executor_vtable_long};
// grpc_executor_init() and grpc_executor_shutdown() are called in the
// grpc_init() and grpc_shutdown() code paths, which are protected by a
// global mutex. So it is okay to assume that these functions are thread-safe.
void grpc_executor_init() {
if (global_executor != nullptr) {
// grpc_executor_init() already called once (and grpc_executor_shutdown()
// wasn't called)
return;
}
global_executor = grpc_core::New<GrpcExecutor>("global-executor");
global_executor->Init();
}
static void executor_push_long(grpc_closure* closure, grpc_error* error) {
executor_push(closure, error, false);
void grpc_executor_shutdown() {
// Shutdown already called
if (global_executor == nullptr) {
return;
}
global_executor->Shutdown();
grpc_core::Delete<GrpcExecutor>(global_executor);
global_executor = nullptr;
}
static const grpc_closure_scheduler_vtable executor_vtable_short = {
executor_push_short, executor_push_short, "executor"};
static grpc_closure_scheduler executor_scheduler_short = {
&executor_vtable_short};
bool grpc_executor_is_threaded() { return global_executor->IsThreaded(); }
static const grpc_closure_scheduler_vtable executor_vtable_long = {
executor_push_long, executor_push_long, "executor"};
static grpc_closure_scheduler executor_scheduler_long = {&executor_vtable_long};
void grpc_executor_set_threading(bool enable) {
global_executor->SetThreading(enable);
}
grpc_closure_scheduler* grpc_executor_scheduler(
grpc_executor_job_length length) {
return length == GRPC_EXECUTOR_SHORT ? &executor_scheduler_short
: &executor_scheduler_long;
grpc_closure_scheduler* grpc_executor_scheduler(GrpcExecutorJobType job_type) {
return job_type == GRPC_EXECUTOR_SHORT ? &global_scheduler_short
: &global_scheduler_long;
}

@ -21,30 +21,63 @@
#include <grpc/support/port_platform.h>
#include "src/core/lib/gpr/spinlock.h"
#include "src/core/lib/gprpp/thd.h"
#include "src/core/lib/iomgr/closure.h"
typedef enum {
GRPC_EXECUTOR_SHORT,
GRPC_EXECUTOR_LONG
} grpc_executor_job_length;
typedef struct {
gpr_mu mu;
size_t id; // For debugging purposes
gpr_cv cv;
grpc_closure_list elems;
size_t depth; // Number of closures in the closure list
bool shutdown;
bool queued_long_job;
grpc_core::Thread thd;
} ThreadState;
typedef enum { GRPC_EXECUTOR_SHORT, GRPC_EXECUTOR_LONG } GrpcExecutorJobType;
class GrpcExecutor {
public:
GrpcExecutor(const char* executor_name);
void Init();
/** Is the executor multi-threaded? */
bool IsThreaded() const;
/* Enable/disable threading - must be called after Init() and before Shutdown() */
void SetThreading(bool threading);
/** Shutdown the executor, running all pending work as part of the call */
void Shutdown();
/** Enqueue the closure onto the executor. is_short is true if the closure is
* a short job (i.e. expected not to block and to complete quickly) */
void Enqueue(grpc_closure* closure, grpc_error* error, bool is_short);
private:
static size_t RunClosures(grpc_closure_list list);
static void ThreadMain(void* arg);
const char* name_;
ThreadState* thd_state_;
size_t max_threads_;
gpr_atm num_threads_;
gpr_spinlock adding_thread_lock_;
};
// == Global executor functions ==
/** Initialize the global executor.
*
* This mechanism is meant to outsource work (grpc_closure instances) to a
* thread, for those cases where blocking isn't an option but there isn't a
* non-blocking solution available. */
void grpc_executor_init();
grpc_closure_scheduler* grpc_executor_scheduler(grpc_executor_job_length);
grpc_closure_scheduler* grpc_executor_scheduler(GrpcExecutorJobType job_type);
/** Shutdown the executor, running all pending work as part of the call */
void grpc_executor_shutdown();
/** Is the executor multi-threaded? */
bool grpc_executor_is_threaded();
/* enable/disable threading - must be called after grpc_executor_init and before
grpc_executor_shutdown */
void grpc_executor_set_threading(bool enable);
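/* Illustrative only, not part of this change: a minimal sketch of offloading a
   closure to the global executor. It assumes the call site already holds a
   grpc_core::ExecCtx and that the GRPC_CLOSURE_INIT / GRPC_CLOSURE_SCHED
   macros from closure.h are in scope. */
static void on_offloaded_work(void* arg, grpc_error* error) {
  /* runs on one of the executor's threads */
}
static void offload_work_sketch() {
  static grpc_closure closure;
  GRPC_CLOSURE_INIT(&closure, on_offloaded_work, nullptr,
                    grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
  GRPC_CLOSURE_SCHED(&closure, GRPC_ERROR_NONE);
}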
#endif /* GRPC_CORE_LIB_IOMGR_EXECUTOR_H */

@ -89,7 +89,11 @@ void LockfreeEvent::DestroyEvent() {
void LockfreeEvent::NotifyOn(grpc_closure* closure) {
while (true) {
gpr_atm curr = gpr_atm_no_barrier_load(&state_);
/* This load needs to be an acquire load because this can be a shutdown
* error that we might need to reference. Adding acquire semantics makes
* sure that the shutdown error has been initialized properly before we
* reference it. */
gpr_atm curr = gpr_atm_acq_load(&state_);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, "LockfreeEvent::NotifyOn: %p curr=%p closure=%p", this,
(void*)curr, closure);

@ -26,6 +26,7 @@
#include "src/core/lib/iomgr/tcp_posix.h"
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
@ -513,7 +514,11 @@ static void tcp_read(grpc_endpoint* ep, grpc_slice_buffer* incoming_buffer,
}
/* returns true if done, false if pending; if returning true, *error is set */
#if defined(IOV_MAX) && IOV_MAX < 1000
#define MAX_WRITE_IOVEC IOV_MAX
#else
#define MAX_WRITE_IOVEC 1000
#endif
static bool tcp_flush(grpc_tcp* tcp, grpc_error** error) {
struct msghdr msg;
struct iovec iov[MAX_WRITE_IOVEC];

@ -18,10 +18,12 @@
#include <grpc/support/port_platform.h>
#include <inttypes.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/json/json.h"
@ -56,6 +58,8 @@ void grpc_json_destroy(grpc_json* json) {
grpc_json* grpc_json_link_child(grpc_json* parent, grpc_json* child,
grpc_json* sibling) {
// link child up to parent
child->parent = parent;
// first child case.
if (parent->child == nullptr) {
GPR_ASSERT(sibling == nullptr);
@ -79,8 +83,15 @@ grpc_json* grpc_json_create_child(grpc_json* sibling, grpc_json* parent,
grpc_json* child = grpc_json_create(type);
grpc_json_link_child(parent, child, sibling);
child->owns_value = owns_value;
child->parent = parent;
child->value = value;
child->key = key;
return child;
}
grpc_json* grpc_json_add_number_string_child(grpc_json* parent, grpc_json* it,
const char* name, int64_t num) {
char* num_str;
gpr_asprintf(&num_str, "%" PRId64, num);
return grpc_json_create_child(it, parent, name, num_str, GRPC_JSON_STRING,
true);
}

@ -91,4 +91,9 @@ grpc_json* grpc_json_create_child(grpc_json* sibling, grpc_json* parent,
const char* key, const char* value,
grpc_json_type type, bool owns_value);
/* Creates a child json string object from the integer num, then links the
json object into the parent's json tree */
grpc_json* grpc_json_add_number_string_child(grpc_json* parent, grpc_json* it,
const char* name, int64_t num);
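/* Illustrative only, not part of this change: a sketch of the helper above,
   which covers the proto3 JSON convention of rendering int64 values as
   strings. The caller releases the tree with grpc_json_destroy(). */
static grpc_json* render_counters_sketch() {
  grpc_json* json = grpc_json_create(GRPC_JSON_OBJECT);
  grpc_json* it = nullptr;
  it = grpc_json_add_number_string_child(json, it, "callsStarted", 42);
  it = grpc_json_add_number_string_child(json, it, "callsSucceeded", 40);
  return json;
}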
#endif /* GRPC_CORE_LIB_JSON_JSON_H */

@ -105,6 +105,11 @@ grpc_channel* grpc_channel_create_with_builder(
channel->is_client = grpc_channel_stack_type_is_client(channel_stack_type);
size_t channel_tracer_max_nodes = 0; // default to off
bool channelz_enabled = false;
bool internal_channel = false;
// this creates the default ChannelNode. Different types of channels may
// override this to ensure a correct ChannelNode is created.
grpc_core::channelz::ChannelNodeCreationFunc channel_node_create_func =
grpc_core::channelz::ChannelNode::MakeChannelNode;
gpr_mu_init(&channel->registered_call_mu);
channel->registered_calls = nullptr;
@ -144,15 +149,27 @@ grpc_channel* grpc_channel_create_with_builder(
channel_tracer_max_nodes =
(size_t)grpc_channel_arg_get_integer(&args->args[i], options);
} else if (0 == strcmp(args->args[i].key, GRPC_ARG_ENABLE_CHANNELZ)) {
// channelz will not be enabled by default until all concerns in
// https://github.com/grpc/grpc/issues/15986 are addressed.
channelz_enabled = grpc_channel_arg_get_bool(&args->args[i], false);
} else if (0 == strcmp(args->args[i].key,
GRPC_ARG_CHANNELZ_CHANNEL_NODE_CREATION_FUNC)) {
GPR_ASSERT(args->args[i].type == GRPC_ARG_POINTER);
GPR_ASSERT(args->args[i].value.pointer.p != nullptr);
channel_node_create_func =
reinterpret_cast<grpc_core::channelz::ChannelNodeCreationFunc>(
args->args[i].value.pointer.p);
} else if (0 == strcmp(args->args[i].key,
GRPC_ARG_CHANNELZ_CHANNEL_IS_INTERNAL_CHANNEL)) {
internal_channel = grpc_channel_arg_get_bool(&args->args[i], false);
}
}
grpc_channel_args_destroy(args);
if (channelz_enabled) {
channel->channelz_channel =
grpc_core::MakeRefCounted<grpc_core::channelz::ChannelNode>(
channel, channel_tracer_max_nodes);
bool is_top_level_channel = channel->is_client && !internal_channel;
channel->channelz_channel = channel_node_create_func(
channel, channel_tracer_max_nodes, is_top_level_channel);
channel->channelz_channel->trace()->AddTraceEvent(
grpc_core::channelz::ChannelTrace::Severity::Info,
grpc_slice_from_static_string("Channel created"));
@ -400,7 +417,7 @@ void grpc_channel_internal_unref(grpc_channel* c REF_ARG) {
static void destroy_channel(void* arg, grpc_error* error) {
grpc_channel* channel = static_cast<grpc_channel*>(arg);
if (channel->channelz_channel != nullptr) {
channel->channelz_channel->set_channel_destroyed();
channel->channelz_channel->MarkChannelDestroyed();
channel->channelz_channel.reset();
}
grpc_channel_stack_destroy(CHANNEL_STACK_FROM_CHANNEL(channel));
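// Illustrative only, not part of this change: a hypothetical call site that
// opts a client channel into channelz through channel args, using the
// GRPC_ARG_ENABLE_CHANNELZ key handled above.
static grpc_channel* create_channel_with_channelz_sketch(const char* target) {
  grpc_arg arg;
  arg.type = GRPC_ARG_INTEGER;
  arg.key = const_cast<char*>(GRPC_ARG_ENABLE_CHANNELZ);
  arg.value.integer = 1;
  grpc_channel_args args = {1, &arg};
  return grpc_insecure_channel_create(target, &args, nullptr);
}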

@ -127,7 +127,7 @@ void grpc_init(void) {
grpc_slice_intern_init();
grpc_mdctx_global_init();
grpc_channel_init_init();
grpc_core::ChannelzRegistry::Init();
grpc_core::channelz::ChannelzRegistry::Init();
grpc_security_pre_init();
grpc_core::ExecCtx::GlobalInit();
grpc_iomgr_init();
@ -176,7 +176,7 @@ void grpc_shutdown(void) {
grpc_mdctx_global_shutdown();
grpc_handshaker_factory_registry_shutdown();
grpc_slice_intern_shutdown();
grpc_core::ChannelzRegistry::Shutdown();
grpc_core::channelz::ChannelzRegistry::Shutdown();
grpc_stats_shutdown();
grpc_core::Fork::GlobalShutdown();
}

@ -42,34 +42,34 @@ typedef struct alts_grpc_integrity_only_record_protocol {
static tsi_result alts_grpc_integrity_only_extra_copy_protect(
alts_grpc_record_protocol* rp, grpc_slice_buffer* unprotected_slices,
grpc_slice_buffer* protected_slices) {
/* Allocates memory for protected frame. */
/* Allocates memory for protected frame and copies data. */
size_t data_length = unprotected_slices->length;
size_t protected_frame_size =
unprotected_slices->length + rp->header_length + rp->tag_length;
grpc_slice protected_slice = GRPC_SLICE_MALLOC(protected_frame_size);
uint8_t* data = GRPC_SLICE_START_PTR(protected_slice) + rp->header_length;
for (size_t i = 0; i < unprotected_slices->count; i++) {
memcpy(data, GRPC_SLICE_START_PTR(unprotected_slices->slices[i]),
GRPC_SLICE_LENGTH(unprotected_slices->slices[i]));
data += GRPC_SLICE_LENGTH(unprotected_slices->slices[i]);
}
/* Calls alts_iovec_record_protocol protect. */
char* error_details = nullptr;
iovec_t header_iovec = {GRPC_SLICE_START_PTR(protected_slice),
rp->header_length};
iovec_t tag_iovec = {GRPC_SLICE_START_PTR(protected_slice) +
rp->header_length + unprotected_slices->length,
rp->tag_length};
alts_grpc_record_protocol_convert_slice_buffer_to_iovec(rp,
unprotected_slices);
iovec_t tag_iovec = {
GRPC_SLICE_START_PTR(protected_slice) + rp->header_length + data_length,
rp->tag_length};
rp->iovec_buf[0].iov_base =
GRPC_SLICE_START_PTR(protected_slice) + rp->header_length;
rp->iovec_buf[0].iov_len = data_length;
grpc_status_code status = alts_iovec_record_protocol_integrity_only_protect(
rp->iovec_rp, rp->iovec_buf, unprotected_slices->count, header_iovec,
tag_iovec, &error_details);
rp->iovec_rp, rp->iovec_buf, 1, header_iovec, tag_iovec, &error_details);
if (status != GRPC_STATUS_OK) {
gpr_log(GPR_ERROR, "Failed to protect, %s", error_details);
gpr_free(error_details);
return TSI_INTERNAL_ERROR;
}
/* Copies data from unprotected_slices to protected_slice. */
uint8_t* data = GRPC_SLICE_START_PTR(protected_slice) + rp->header_length;
for (size_t i = 0; i < unprotected_slices->count; i++) {
memcpy(data, GRPC_SLICE_START_PTR(unprotected_slices->slices[i]),
GRPC_SLICE_LENGTH(unprotected_slices->slices[i]));
data += GRPC_SLICE_LENGTH(unprotected_slices->slices[i]);
}
grpc_slice_buffer_add(protected_slices, protected_slice);
grpc_slice_buffer_reset_and_unref_internal(unprotected_slices);
return TSI_OK;

@ -24,6 +24,16 @@
namespace grpc {
namespace load_reporter {
// TODO(juanlishen): Update the version number with the PR number every time
// there is any change to the server load reporter.
constexpr uint32_t kVersion = 15853;
// TODO(juanlishen): This window size is from the internal spec for the load
// reporter. Need to ask the gRPC LB team whether we should make this and the
// fetching interval configurable.
constexpr uint32_t kFeedbackSampleWindowSeconds = 10;
constexpr uint32_t kFetchAndSampleIntervalSeconds = 1;
constexpr size_t kLbIdLength = 8;
constexpr size_t kIpv4AddressLength = 8;
constexpr size_t kIpv6AddressLength = 32;

@ -160,7 +160,8 @@ class LoadRecordValue {
", error_count_=" + grpc::to_string(error_count_) +
", bytes_sent_=" + grpc::to_string(bytes_sent_) +
", bytes_recv_=" + grpc::to_string(bytes_recv_) +
", latency_ms_=" + grpc::to_string(latency_ms_) + "]";
", latency_ms_=" + grpc::to_string(latency_ms_) + ", " +
grpc::to_string(call_metrics_.size()) + " other call metric(s)]";
}
bool InsertCallMetric(const grpc::string& metric_name,

@ -22,6 +22,7 @@
#include <stdio.h>
#include <chrono>
#include <ctime>
#include <iterator>
#include "src/cpp/server/load_reporter/constants.h"
#include "src/cpp/server/load_reporter/get_cpu_stats.h"
@ -65,8 +66,8 @@ CensusViewProvider::CensusViewProvider()
// measurements instead of setting the data values directly.
auto vd_end_count =
::opencensus::stats::ViewDescriptor()
.set_name((kViewEndCount))
.set_measure((kMeasureEndCount))
.set_name(kViewEndCount)
.set_measure(kMeasureEndCount)
.set_aggregation(::opencensus::stats::Aggregation::Sum())
.add_column(tag_key_token_)
.add_column(tag_key_host_)
@ -80,8 +81,8 @@ CensusViewProvider::CensusViewProvider()
view_descriptor_map_.emplace(kViewEndCount, vd_end_count);
auto vd_end_bytes_sent =
::opencensus::stats::ViewDescriptor()
.set_name((kViewEndBytesSent))
.set_measure((kMeasureEndBytesSent))
.set_name(kViewEndBytesSent)
.set_measure(kMeasureEndBytesSent)
.set_aggregation(::opencensus::stats::Aggregation::Sum())
.add_column(tag_key_token_)
.add_column(tag_key_host_)
@ -95,8 +96,8 @@ CensusViewProvider::CensusViewProvider()
view_descriptor_map_.emplace(kViewEndBytesSent, vd_end_bytes_sent);
auto vd_end_bytes_received =
::opencensus::stats::ViewDescriptor()
.set_name((kViewEndBytesReceived))
.set_measure((kMeasureEndBytesReceived))
.set_name(kViewEndBytesReceived)
.set_measure(kMeasureEndBytesReceived)
.set_aggregation(::opencensus::stats::Aggregation::Sum())
.add_column(tag_key_token_)
.add_column(tag_key_host_)
@ -110,8 +111,8 @@ CensusViewProvider::CensusViewProvider()
view_descriptor_map_.emplace(kViewEndBytesReceived, vd_end_bytes_received);
auto vd_end_latency_ms =
::opencensus::stats::ViewDescriptor()
.set_name((kViewEndLatencyMs))
.set_measure((kMeasureEndLatencyMs))
.set_name(kViewEndLatencyMs)
.set_measure(kMeasureEndLatencyMs)
.set_aggregation(::opencensus::stats::Aggregation::Sum())
.add_column(tag_key_token_)
.add_column(tag_key_host_)
@ -126,8 +127,8 @@ CensusViewProvider::CensusViewProvider()
// Two views related to other call metrics.
auto vd_metric_call_count =
::opencensus::stats::ViewDescriptor()
.set_name((kViewOtherCallMetricCount))
.set_measure((kMeasureOtherCallMetric))
.set_name(kViewOtherCallMetricCount)
.set_measure(kMeasureOtherCallMetric)
.set_aggregation(::opencensus::stats::Aggregation::Count())
.add_column(tag_key_token_)
.add_column(tag_key_host_)
@ -141,8 +142,8 @@ CensusViewProvider::CensusViewProvider()
view_descriptor_map_.emplace(kViewOtherCallMetricCount, vd_metric_call_count);
auto vd_metric_value =
::opencensus::stats::ViewDescriptor()
.set_name((kViewOtherCallMetricValue))
.set_measure((kMeasureOtherCallMetric))
.set_name(kViewOtherCallMetricValue)
.set_measure(kMeasureOtherCallMetric)
.set_aggregation(::opencensus::stats::Aggregation::Sum())
.add_column(tag_key_token_)
.add_column(tag_key_host_)
@ -161,11 +162,26 @@ double CensusViewProvider::GetRelatedViewDataRowDouble(
size_t view_name_len, const std::vector<grpc::string>& tag_values) {
auto it_vd = view_data_map.find(grpc::string(view_name, view_name_len));
GPR_ASSERT(it_vd != view_data_map.end());
GPR_ASSERT(it_vd->second.type() ==
::opencensus::stats::ViewData::Type::kDouble);
auto it_row = it_vd->second.double_data().find(tag_values);
GPR_ASSERT(it_row != it_vd->second.double_data().end());
return it_row->second;
}
uint64_t CensusViewProvider::GetRelatedViewDataRowInt(
const ViewDataMap& view_data_map, const char* view_name,
size_t view_name_len, const std::vector<grpc::string>& tag_values) {
auto it_vd = view_data_map.find(grpc::string(view_name, view_name_len));
GPR_ASSERT(it_vd != view_data_map.end());
GPR_ASSERT(it_vd->second.type() ==
::opencensus::stats::ViewData::Type::kInt64);
auto it_row = it_vd->second.int_data().find(tag_values);
GPR_ASSERT(it_row != it_vd->second.int_data().end());
GPR_ASSERT(it_row->second >= 0);
return it_row->second;
}
CensusViewProviderDefaultImpl::CensusViewProviderDefaultImpl() {
for (const auto& p : view_descriptor_map()) {
const grpc::string& view_name = p.first;
@ -235,23 +251,23 @@ LoadReporter::GenerateLoadBalancingFeedback() {
return ::grpc::lb::v1::LoadBalancingFeedback::default_instance();
}
// Find the longest range with valid ends.
LoadBalancingFeedbackRecord* oldest = &feedback_records_[0];
LoadBalancingFeedbackRecord* newest =
&feedback_records_[feedback_records_.size() - 1];
while (newest > oldest &&
auto oldest = feedback_records_.begin();
auto newest = feedback_records_.end() - 1;
while (std::distance(oldest, newest) > 0 &&
(newest->cpu_limit == 0 || oldest->cpu_limit == 0)) {
// A zero limit means that reading the system info failed, so these
// records can't be used to calculate CPU utilization.
if (newest->cpu_limit == 0) --newest;
if (oldest->cpu_limit == 0) ++oldest;
}
if (newest - oldest < 1 || oldest->end_time == newest->end_time ||
if (std::distance(oldest, newest) < 1 ||
oldest->end_time == newest->end_time ||
newest->cpu_limit == oldest->cpu_limit) {
return ::grpc::lb::v1::LoadBalancingFeedback::default_instance();
}
uint64_t rpcs = 0;
uint64_t errors = 0;
for (LoadBalancingFeedbackRecord* p = newest; p != oldest; --p) {
for (auto p = newest; p != oldest; --p) {
// Because these two numbers are counters, the oldest record shouldn't be
// included.
rpcs += p->rpcs;
@ -338,7 +354,8 @@ void LoadReporter::AttachOrphanLoadId(
if (per_balancer_store.lb_id() == kInvalidLbId) {
load->set_load_key_unknown(true);
} else {
load->set_load_key_unknown(false);
// We shouldn't set load_key_unknown to any value in this case because
// load_key_unknown and orphaned_load_identifier are in the same oneof.
load->mutable_orphaned_load_identifier()->set_load_key(
per_balancer_store.load_key());
load->mutable_orphaned_load_identifier()->set_load_balancer_id(
@ -381,9 +398,7 @@ void LoadReporter::ProcessViewDataCallStart(
const CensusViewProvider::ViewDataMap& view_data_map) {
auto it = view_data_map.find(kViewStartCount);
if (it != view_data_map.end()) {
// Note that the data type for any Sum view is double, whatever the data
// type of the original measure.
for (const auto& p : it->second.double_data()) {
for (const auto& p : it->second.int_data()) {
const std::vector<grpc::string>& tag_values = p.first;
const uint64_t start_count = static_cast<uint64_t>(p.second);
const grpc::string& client_ip_and_token = tag_values[0];
@ -405,9 +420,7 @@ void LoadReporter::ProcessViewDataCallEnd(
uint64_t total_error_count = 0;
auto it = view_data_map.find(kViewEndCount);
if (it != view_data_map.end()) {
// Note that the data type for any Sum view is double, whatever the data
// type of the original measure.
for (const auto& p : it->second.double_data()) {
for (const auto& p : it->second.int_data()) {
const std::vector<grpc::string>& tag_values = p.first;
const uint64_t end_count = static_cast<uint64_t>(p.second);
const grpc::string& client_ip_and_token = tag_values[0];
@ -424,18 +437,16 @@ void LoadReporter::ProcessViewDataCallEnd(
continue;
}
LoadRecordKey key(client_ip_and_token, user_id);
const uint64_t bytes_sent =
CensusViewProvider::GetRelatedViewDataRowDouble(
view_data_map, kViewEndBytesSent, sizeof(kViewEndBytesSent) - 1,
tag_values);
const uint64_t bytes_sent = CensusViewProvider::GetRelatedViewDataRowInt(
view_data_map, kViewEndBytesSent, sizeof(kViewEndBytesSent) - 1,
tag_values);
const uint64_t bytes_received =
CensusViewProvider::GetRelatedViewDataRowDouble(
CensusViewProvider::GetRelatedViewDataRowInt(
view_data_map, kViewEndBytesReceived,
sizeof(kViewEndBytesReceived) - 1, tag_values);
const uint64_t latency_ms =
CensusViewProvider::GetRelatedViewDataRowDouble(
view_data_map, kViewEndLatencyMs, sizeof(kViewEndLatencyMs) - 1,
tag_values);
const uint64_t latency_ms = CensusViewProvider::GetRelatedViewDataRowInt(
view_data_map, kViewEndLatencyMs, sizeof(kViewEndLatencyMs) - 1,
tag_values);
uint64_t ok_count = 0;
uint64_t error_count = 0;
total_end_count += end_count;

@ -59,7 +59,10 @@ class CensusViewProvider {
// with the same tag values in a related view data. Several ViewData's are
// considered related if their views are based on the measures that are always
// recorded at the same time.
double static GetRelatedViewDataRowDouble(
static double GetRelatedViewDataRowDouble(
const ViewDataMap& view_data_map, const char* view_name,
size_t view_name_len, const std::vector<grpc::string>& tag_values);
static uint64_t GetRelatedViewDataRowInt(
const ViewDataMap& view_data_map, const char* view_name,
size_t view_name_len, const std::vector<grpc::string>& tag_values);

@ -0,0 +1,370 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/port_platform.h>
#include "src/cpp/server/load_reporter/load_reporter_async_service_impl.h"
namespace grpc {
namespace load_reporter {
void LoadReporterAsyncServiceImpl::CallableTag::Run(bool ok) {
GPR_ASSERT(handler_function_ != nullptr);
GPR_ASSERT(handler_ != nullptr);
handler_function_(std::move(handler_), ok);
}
LoadReporterAsyncServiceImpl::LoadReporterAsyncServiceImpl(
std::unique_ptr<ServerCompletionQueue> cq)
: cq_(std::move(cq)) {
thread_ = std::unique_ptr<::grpc_core::Thread>(
new ::grpc_core::Thread("server_load_reporting", Work, this));
std::unique_ptr<CpuStatsProvider> cpu_stats_provider = nullptr;
#if defined(GPR_LINUX) || defined(GPR_WINDOWS) || defined(GPR_APPLE)
cpu_stats_provider.reset(new CpuStatsProviderDefaultImpl());
#endif
load_reporter_ = std::unique_ptr<LoadReporter>(new LoadReporter(
kFeedbackSampleWindowSeconds,
std::unique_ptr<CensusViewProvider>(new CensusViewProviderDefaultImpl()),
std::move(cpu_stats_provider)));
}
LoadReporterAsyncServiceImpl::~LoadReporterAsyncServiceImpl() {
// We will reach here after the server starts shutting down.
shutdown_ = true;
{
std::unique_lock<std::mutex> lock(cq_shutdown_mu_);
cq_->Shutdown();
}
if (next_fetch_and_sample_alarm_ != nullptr)
next_fetch_and_sample_alarm_->Cancel();
thread_->Join();
}
void LoadReporterAsyncServiceImpl::ScheduleNextFetchAndSample() {
auto next_fetch_and_sample_time =
gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
gpr_time_from_millis(kFetchAndSampleIntervalSeconds * 1000,
GPR_TIMESPAN));
{
std::unique_lock<std::mutex> lock(cq_shutdown_mu_);
if (shutdown_) return;
// TODO(juanlishen): Improve the Alarm implementation to reuse a single
// instance for multiple events.
next_fetch_and_sample_alarm_.reset(new Alarm);
next_fetch_and_sample_alarm_->Set(cq_.get(), next_fetch_and_sample_time,
this);
}
gpr_log(GPR_DEBUG, "[LRS %p] Next fetch-and-sample scheduled.", this);
}
void LoadReporterAsyncServiceImpl::FetchAndSample(bool ok) {
if (!ok) {
gpr_log(GPR_INFO, "[LRS %p] Fetch-and-sample is stopped.", this);
return;
}
gpr_log(GPR_DEBUG, "[LRS %p] Starting a fetch-and-sample...", this);
load_reporter_->FetchAndSample();
ScheduleNextFetchAndSample();
}
void LoadReporterAsyncServiceImpl::Work(void* arg) {
LoadReporterAsyncServiceImpl* service =
reinterpret_cast<LoadReporterAsyncServiceImpl*>(arg);
service->FetchAndSample(true /* ok */);
// TODO(juanlishen): This is a workaround to wait for the cq to be ready. Need
// to figure out why the cq is not ready after the service starts.
gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
gpr_time_from_seconds(1, GPR_TIMESPAN)));
ReportLoadHandler::CreateAndStart(service->cq_.get(), service,
service->load_reporter_.get());
void* tag;
bool ok;
while (true) {
if (!service->cq_->Next(&tag, &ok)) {
// The completion queue is shutting down.
GPR_ASSERT(service->shutdown_);
break;
}
if (tag == service) {
service->FetchAndSample(ok);
} else {
auto* next_step = static_cast<CallableTag*>(tag);
next_step->Run(ok);
}
}
}
void LoadReporterAsyncServiceImpl::StartThread() { thread_->Start(); }
void LoadReporterAsyncServiceImpl::ReportLoadHandler::CreateAndStart(
ServerCompletionQueue* cq, LoadReporterAsyncServiceImpl* service,
LoadReporter* load_reporter) {
std::shared_ptr<ReportLoadHandler> handler =
std::make_shared<ReportLoadHandler>(cq, service, load_reporter);
ReportLoadHandler* p = handler.get();
{
std::unique_lock<std::mutex> lock(service->cq_shutdown_mu_);
if (service->shutdown_) return;
p->on_done_notified_ =
CallableTag(std::bind(&ReportLoadHandler::OnDoneNotified, p,
std::placeholders::_1, std::placeholders::_2),
handler);
p->next_inbound_ =
CallableTag(std::bind(&ReportLoadHandler::OnRequestDelivered, p,
std::placeholders::_1, std::placeholders::_2),
std::move(handler));
p->ctx_.AsyncNotifyWhenDone(&p->on_done_notified_);
service->RequestReportLoad(&p->ctx_, &p->stream_, cq, cq,
&p->next_inbound_);
}
}
LoadReporterAsyncServiceImpl::ReportLoadHandler::ReportLoadHandler(
ServerCompletionQueue* cq, LoadReporterAsyncServiceImpl* service,
LoadReporter* load_reporter)
: cq_(cq),
service_(service),
load_reporter_(load_reporter),
stream_(&ctx_),
call_status_(WAITING_FOR_DELIVERY) {}
void LoadReporterAsyncServiceImpl::ReportLoadHandler::OnRequestDelivered(
std::shared_ptr<ReportLoadHandler> self, bool ok) {
if (ok) {
call_status_ = DELIVERED;
} else {
// AsyncNotifyWhenDone() needs to be called before the call starts, but the
// tag will not pop out if the call never starts (
// https://github.com/grpc/grpc/issues/10136). So we need to manually
// release the ownership of the handler in this case.
GPR_ASSERT(on_done_notified_.ReleaseHandler() != nullptr);
}
if (!ok || shutdown_) {
// The value of ok being false means that the server is shutting down.
Shutdown(std::move(self), "OnRequestDelivered");
return;
}
// Spawn a new handler instance to serve the next new client. Every handler
// instance will deallocate itself when it's done.
CreateAndStart(cq_, service_, load_reporter_);
{
std::unique_lock<std::mutex> lock(service_->cq_shutdown_mu_);
if (service_->shutdown_) {
lock.release()->unlock();
Shutdown(std::move(self), "OnRequestDelivered");
return;
}
next_inbound_ =
CallableTag(std::bind(&ReportLoadHandler::OnReadDone, this,
std::placeholders::_1, std::placeholders::_2),
std::move(self));
stream_.Read(&request_, &next_inbound_);
}
// LB ID is unique for each load reporting stream.
lb_id_ = load_reporter_->GenerateLbId();
gpr_log(GPR_INFO,
"[LRS %p] Call request delivered (lb_id_: %s, handler: %p). "
"Start reading the initial request...",
service_, lb_id_.c_str(), this);
}
void LoadReporterAsyncServiceImpl::ReportLoadHandler::OnReadDone(
std::shared_ptr<ReportLoadHandler> self, bool ok) {
if (!ok || shutdown_) {
if (!ok && call_status_ < INITIAL_REQUEST_RECEIVED) {
// The client may have half-closed the stream or the stream is broken.
gpr_log(GPR_INFO,
"[LRS %p] Failed reading the initial request from the stream "
"(lb_id_: %s, handler: %p, done_notified: %d, is_cancelled: %d).",
service_, lb_id_.c_str(), this, static_cast<int>(done_notified_),
static_cast<int>(is_cancelled_));
}
Shutdown(std::move(self), "OnReadDone");
return;
}
// We only receive one request, which is the initial request.
if (call_status_ < INITIAL_REQUEST_RECEIVED) {
if (!request_.has_initial_request()) {
Shutdown(std::move(self), "OnReadDone+initial_request_not_found");
} else {
call_status_ = INITIAL_REQUEST_RECEIVED;
const auto& initial_request = request_.initial_request();
load_balanced_hostname_ = initial_request.load_balanced_hostname();
load_key_ = initial_request.load_key();
load_reporter_->ReportStreamCreated(load_balanced_hostname_, lb_id_,
load_key_);
const auto& load_report_interval = initial_request.load_report_interval();
load_report_interval_ms_ =
static_cast<uint64_t>(load_report_interval.seconds() * 1000 +
load_report_interval.nanos() / 1000);
gpr_log(
GPR_INFO,
"[LRS %p] Initial request received. Start load reporting (load "
"balanced host: %s, interval: %lu ms, lb_id_: %s, handler: %p)...",
service_, load_balanced_hostname_.c_str(), load_report_interval_ms_,
lb_id_.c_str(), this);
SendReport(self, true /* ok */);
// Expect this read to fail.
{
std::unique_lock<std::mutex> lock(service_->cq_shutdown_mu_);
if (service_->shutdown_) {
lock.release()->unlock();
Shutdown(std::move(self), "OnReadDone");
return;
}
next_inbound_ =
CallableTag(std::bind(&ReportLoadHandler::OnReadDone, this,
std::placeholders::_1, std::placeholders::_2),
std::move(self));
stream_.Read(&request_, &next_inbound_);
}
}
} else {
// Another request received! This violates the spec.
gpr_log(GPR_ERROR,
"[LRS %p] Another request received (lb_id_: %s, handler: %p).",
service_, lb_id_.c_str(), this);
Shutdown(std::move(self), "OnReadDone+second_request");
}
}
void LoadReporterAsyncServiceImpl::ReportLoadHandler::ScheduleNextReport(
std::shared_ptr<ReportLoadHandler> self, bool ok) {
if (!ok || shutdown_) {
Shutdown(std::move(self), "ScheduleNextReport");
return;
}
auto next_report_time = gpr_time_add(
gpr_now(GPR_CLOCK_MONOTONIC),
gpr_time_from_millis(load_report_interval_ms_, GPR_TIMESPAN));
{
std::unique_lock<std::mutex> lock(service_->cq_shutdown_mu_);
if (service_->shutdown_) {
lock.release()->unlock();
Shutdown(std::move(self), "ScheduleNextReport");
return;
}
next_outbound_ =
CallableTag(std::bind(&ReportLoadHandler::SendReport, this,
std::placeholders::_1, std::placeholders::_2),
std::move(self));
// TODO(juanlishen): Improve the Alarm implementation to reuse a single
// instance for multiple events.
next_report_alarm_.reset(new Alarm);
next_report_alarm_->Set(cq_, next_report_time, &next_outbound_);
}
gpr_log(GPR_DEBUG,
"[LRS %p] Next load report scheduled (lb_id_: %s, handler: %p).",
service_, lb_id_.c_str(), this);
}
void LoadReporterAsyncServiceImpl::ReportLoadHandler::SendReport(
std::shared_ptr<ReportLoadHandler> self, bool ok) {
if (!ok || shutdown_) {
Shutdown(std::move(self), "SendReport");
return;
}
::grpc::lb::v1::LoadReportResponse response;
auto loads = load_reporter_->GenerateLoads(load_balanced_hostname_, lb_id_);
response.mutable_load()->Swap(&loads);
auto feedback = load_reporter_->GenerateLoadBalancingFeedback();
response.mutable_load_balancing_feedback()->Swap(&feedback);
if (call_status_ < INITIAL_RESPONSE_SENT) {
auto initial_response = response.mutable_initial_response();
initial_response->set_load_balancer_id(lb_id_);
initial_response->set_implementation_id(
::grpc::lb::v1::InitialLoadReportResponse::CPP);
initial_response->set_server_version(kVersion);
call_status_ = INITIAL_RESPONSE_SENT;
}
{
std::unique_lock<std::mutex> lock(service_->cq_shutdown_mu_);
if (service_->shutdown_) {
lock.release()->unlock();
Shutdown(std::move(self), "SendReport");
return;
}
next_outbound_ =
CallableTag(std::bind(&ReportLoadHandler::ScheduleNextReport, this,
std::placeholders::_1, std::placeholders::_2),
std::move(self));
stream_.Write(response, &next_outbound_);
gpr_log(GPR_INFO,
"[LRS %p] Sending load report (lb_id_: %s, handler: %p, loads "
"count: %d)...",
service_, lb_id_.c_str(), this, response.load().size());
}
}
void LoadReporterAsyncServiceImpl::ReportLoadHandler::OnDoneNotified(
std::shared_ptr<ReportLoadHandler> self, bool ok) {
GPR_ASSERT(ok);
done_notified_ = true;
if (ctx_.IsCancelled()) {
is_cancelled_ = true;
}
gpr_log(GPR_INFO,
"[LRS %p] Load reporting call is notified done (handler: %p, "
"is_cancelled: %d).",
service_, this, static_cast<int>(is_cancelled_));
Shutdown(std::move(self), "OnDoneNotified");
}
void LoadReporterAsyncServiceImpl::ReportLoadHandler::Shutdown(
std::shared_ptr<ReportLoadHandler> self, const char* reason) {
if (!shutdown_) {
gpr_log(GPR_INFO,
"[LRS %p] Shutting down the handler (lb_id_: %s, handler: %p, "
"reason: %s).",
service_, lb_id_.c_str(), this, reason);
shutdown_ = true;
if (call_status_ >= INITIAL_REQUEST_RECEIVED) {
load_reporter_->ReportStreamClosed(load_balanced_hostname_, lb_id_);
next_report_alarm_->Cancel();
}
}
// OnRequestDelivered() may be called after OnDoneNotified(), so we need to
// try to Finish() every time we are in Shutdown().
if (call_status_ >= DELIVERED && call_status_ < FINISH_CALLED) {
std::unique_lock<std::mutex> lock(service_->cq_shutdown_mu_);
if (!service_->shutdown_) {
on_finish_done_ =
CallableTag(std::bind(&ReportLoadHandler::OnFinishDone, this,
std::placeholders::_1, std::placeholders::_2),
std::move(self));
// TODO(juanlishen): Maybe add a message proto for the client to
// explicitly cancel the stream so that we can return OK status in such
// cases.
stream_.Finish(Status::CANCELLED, &on_finish_done_);
call_status_ = FINISH_CALLED;
}
}
}
void LoadReporterAsyncServiceImpl::ReportLoadHandler::OnFinishDone(
std::shared_ptr<ReportLoadHandler> self, bool ok) {
if (ok) {
gpr_log(GPR_INFO,
"[LRS %p] Load reporting finished (lb_id_: %s, handler: %p).",
service_, lb_id_.c_str(), this);
}
}
} // namespace load_reporter
} // namespace grpc

@ -0,0 +1,194 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_SRC_CPP_SERVER_LOAD_REPORTER_ASYNC_SERVICE_IMPL_H
#define GRPC_SRC_CPP_SERVER_LOAD_REPORTER_ASYNC_SERVICE_IMPL_H
#include <grpc/support/port_platform.h>
#include <grpc/support/log.h>
#include <grpcpp/alarm.h>
#include <grpcpp/grpcpp.h>
#include "src/core/lib/gprpp/thd.h"
#include "src/cpp/server/load_reporter/load_reporter.h"
namespace grpc {
namespace load_reporter {
// Async load reporting service. It's mainly responsible for controlling the
// procedure of incoming requests. The real business logic is handed off to the
// LoadReporter. There should be at most one instance of this service on a
// server to avoid spreading the load data into multiple places.
class LoadReporterAsyncServiceImpl
: public grpc::lb::v1::LoadReporter::AsyncService {
public:
explicit LoadReporterAsyncServiceImpl(
std::unique_ptr<ServerCompletionQueue> cq);
~LoadReporterAsyncServiceImpl();
// Starts the working thread.
void StartThread();
// Not copyable nor movable.
LoadReporterAsyncServiceImpl(const LoadReporterAsyncServiceImpl&) = delete;
LoadReporterAsyncServiceImpl& operator=(const LoadReporterAsyncServiceImpl&) =
delete;
private:
class ReportLoadHandler;
// A tag that can be called with a bool argument. It's tailored for
// ReportLoadHandler's use. Before being used, it should be constructed with a
// method of ReportLoadHandler and a shared pointer to the handler. The shared
// pointer is moved into the invoked function, and the function can only be
// invoked once. That makes reference counting of the handler easier, because
// the shared pointer is not bound into the function object and can be released
// as soon as the invoked function returns (if it is no longer used).
class CallableTag {
public:
using HandlerFunction =
std::function<void(std::shared_ptr<ReportLoadHandler>, bool)>;
CallableTag() {}
CallableTag(HandlerFunction func,
std::shared_ptr<ReportLoadHandler> handler)
: handler_function_(std::move(func)), handler_(std::move(handler)) {
GPR_ASSERT(handler_function_ != nullptr);
GPR_ASSERT(handler_ != nullptr);
}
// Runs the tag. This should be called only once. The handler is no longer
// owned by this tag after this method is invoked.
void Run(bool ok);
// Releases and returns the shared pointer to the handler.
std::shared_ptr<ReportLoadHandler> ReleaseHandler() {
return std::move(handler_);
}
private:
HandlerFunction handler_function_ = nullptr;
std::shared_ptr<ReportLoadHandler> handler_;
};
// Each handler takes care of one load reporting stream. It contains
// per-stream data and it will access the members of the parent class (i.e.,
// LoadReporterAsyncServiceImpl) for service-wide data (e.g., the load data).
class ReportLoadHandler {
public:
// Instantiates a ReportLoadHandler and requests the next load reporting
// call. The handler object manages its own lifetime, so no further action
// regarding that object is needed from the caller.
static void CreateAndStart(ServerCompletionQueue* cq,
LoadReporterAsyncServiceImpl* service,
LoadReporter* load_reporter);
// This ctor is public because we want to use std::make_shared<> in
// CreateAndStart(). This ctor shouldn't be used elsewhere.
ReportLoadHandler(ServerCompletionQueue* cq,
LoadReporterAsyncServiceImpl* service,
LoadReporter* load_reporter);
private:
// After the handler has a call request delivered, it starts reading the
// initial request. Also, a new handler is spawned so that we can keep
// servicing future calls.
void OnRequestDelivered(std::shared_ptr<ReportLoadHandler> self, bool ok);
// The first Read() is expected to succeed, after which the handler starts
// sending load reports back to the balancer. The second Read() is
// expected to fail, which happens when the balancer half-closes the
// stream to signal that it's no longer interested in the load reports. For
// the latter case, the handler will then close the stream.
void OnReadDone(std::shared_ptr<ReportLoadHandler> self, bool ok);
// The report-sending operations run sequentially: send report -> send done,
// schedule the next send -> wait for the alarm to fire -> alarm fires, send
// report -> ...
void SendReport(std::shared_ptr<ReportLoadHandler> self, bool ok);
void ScheduleNextReport(std::shared_ptr<ReportLoadHandler> self, bool ok);
// Called when Finish() is done.
void OnFinishDone(std::shared_ptr<ReportLoadHandler> self, bool ok);
// Called when AsyncNotifyWhenDone() notifies us.
void OnDoneNotified(std::shared_ptr<ReportLoadHandler> self, bool ok);
void Shutdown(std::shared_ptr<ReportLoadHandler> self, const char* reason);
// The key fields of the stream.
grpc::string lb_id_;
grpc::string load_balanced_hostname_;
grpc::string load_key_;
uint64_t load_report_interval_ms_;
// The data for RPC communication with the load reportee.
ServerContext ctx_;
::grpc::lb::v1::LoadReportRequest request_;
// The members passed down from LoadReporterAsyncServiceImpl.
ServerCompletionQueue* cq_;
LoadReporterAsyncServiceImpl* service_;
LoadReporter* load_reporter_;
ServerAsyncReaderWriter<::grpc::lb::v1::LoadReportResponse,
::grpc::lb::v1::LoadReportRequest>
stream_;
// The status of the RPC progress.
enum CallStatus {
WAITING_FOR_DELIVERY,
DELIVERED,
INITIAL_REQUEST_RECEIVED,
INITIAL_RESPONSE_SENT,
FINISH_CALLED
} call_status_;
bool shutdown_{false};
bool done_notified_{false};
bool is_cancelled_{false};
CallableTag on_done_notified_;
CallableTag on_finish_done_;
CallableTag next_inbound_;
CallableTag next_outbound_;
std::unique_ptr<Alarm> next_report_alarm_;
};
// Handles the incoming requests and drives the completion queue in a loop.
static void Work(void* arg);
// Schedules the next data fetching from Census and LB feedback sampling.
void ScheduleNextFetchAndSample();
// Fetches data from Census and samples LB feedback.
void FetchAndSample(bool ok);
std::unique_ptr<ServerCompletionQueue> cq_;
// To synchronize the operations related to shutdown state of cq_, so that we
// don't enqueue new tags into cq_ after it is already shut down.
std::mutex cq_shutdown_mu_;
std::atomic_bool shutdown_{false};
std::unique_ptr<::grpc_core::Thread> thread_;
std::unique_ptr<LoadReporter> load_reporter_;
std::unique_ptr<Alarm> next_fetch_and_sample_alarm_;
};
} // namespace load_reporter
} // namespace grpc
#endif // GRPC_SRC_CPP_SERVER_LOAD_REPORTER_ASYNC_SERVICE_IMPL_H
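For orientation only: the Work() method declared above is documented to drive the completion queue in a loop and invoke one-shot tags like CallableTag. Below is a minimal, hypothetical sketch of that pattern (OneShotTag and DrainQueue are illustrative names, not part of this change), assuming every tag enqueued on the queue carries ownership of its handler.
#include <functional>
#include <memory>

#include <grpcpp/grpcpp.h>

// Stripped-down analogue of CallableTag: it owns a shared_ptr to a handler and
// hands that ownership to the bound function exactly once, when Run() is called.
template <class Handler>
class OneShotTag {
 public:
  using Fn = std::function<void(std::shared_ptr<Handler>, bool)>;
  OneShotTag(Fn fn, std::shared_ptr<Handler> handler)
      : fn_(std::move(fn)), handler_(std::move(handler)) {}
  void Run(bool ok) { fn_(std::move(handler_), ok); }

 private:
  Fn fn_;
  std::shared_ptr<Handler> handler_;
};

// Sketch of the polling loop: Next() blocks until an event is available and
// returns false once the queue has been shut down and drained; each returned
// tag is run exactly once, which releases the handler it was keeping alive.
template <class Handler>
void DrainQueue(grpc::ServerCompletionQueue* cq) {
  void* tag;
  bool ok;
  while (cq->Next(&tag, &ok)) {
    static_cast<OneShotTag<Handler>*>(tag)->Run(ok);
  }
}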

@ -0,0 +1,41 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/port_platform.h>
#include <grpcpp/ext/server_load_reporting.h>
#include "src/cpp/server/load_reporter/load_reporting_service_server_builder_plugin.h"
namespace grpc {
namespace load_reporter {
namespace experimental {
void LoadReportingServiceServerBuilderOption::UpdateArguments(
::grpc::ChannelArguments* args) {
args->SetInt(GRPC_ARG_ENABLE_LOAD_REPORTING, true);
}
void LoadReportingServiceServerBuilderOption::UpdatePlugins(
std::vector<std::unique_ptr<::grpc::ServerBuilderPlugin>>* plugins) {
plugins->emplace_back(new LoadReportingServiceServerBuilderPlugin());
}
} // namespace experimental
} // namespace load_reporter
} // namespace grpc
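For context, a server opts into this via ServerBuilder::SetOption(); the following is a hypothetical wiring sketch (the listening address and the service pointer are placeholders, not part of this change).
#include <memory>

#include <grpcpp/ext/server_load_reporting.h>
#include <grpcpp/grpcpp.h>

// Hypothetical server setup showing how the builder option above is applied.
void RunServerWithLoadReporting(grpc::Service* my_service) {
  grpc::ServerBuilder builder;
  builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
  builder.RegisterService(my_service);
  // UpdateArguments() turns on GRPC_ARG_ENABLE_LOAD_REPORTING and
  // UpdatePlugins() registers the LoadReportingServiceServerBuilderPlugin.
  builder.SetOption(std::unique_ptr<grpc::ServerBuilderOption>(
      new grpc::load_reporter::experimental::
          LoadReportingServiceServerBuilderOption()));
  std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
  server->Wait();
}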

@ -0,0 +1,60 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/port_platform.h>
#include "src/cpp/server/load_reporter/load_reporting_service_server_builder_plugin.h"
#include <grpcpp/impl/server_initializer.h>
namespace grpc {
namespace load_reporter {
bool LoadReportingServiceServerBuilderPlugin::has_sync_methods() const {
if (service_ != nullptr) {
return service_->has_synchronous_methods();
}
return false;
}
bool LoadReportingServiceServerBuilderPlugin::has_async_methods() const {
if (service_ != nullptr) {
return service_->has_async_methods();
}
return false;
}
void LoadReportingServiceServerBuilderPlugin::UpdateServerBuilder(
grpc::ServerBuilder* builder) {
auto cq = builder->AddCompletionQueue();
service_ = std::make_shared<LoadReporterAsyncServiceImpl>(std::move(cq));
}
void LoadReportingServiceServerBuilderPlugin::InitServer(
grpc::ServerInitializer* si) {
si->RegisterService(service_);
}
void LoadReportingServiceServerBuilderPlugin::Finish(
grpc::ServerInitializer* si) {
service_->StartThread();
service_.reset();
}
} // namespace load_reporter
} // namespace grpc

@ -0,0 +1,62 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_SRC_CPP_LOAD_REPORTING_SERVICE_SERVER_BUILDER_PLUGIN_H
#define GRPC_SRC_CPP_LOAD_REPORTING_SERVICE_SERVER_BUILDER_PLUGIN_H
#include <grpc/support/port_platform.h>
#include <grpcpp/impl/server_builder_plugin.h>
#include "src/cpp/server/load_reporter/load_reporter_async_service_impl.h"
namespace grpc {
namespace load_reporter {
// The plugin that registers and starts load reporting service when starting a
// server.
class LoadReportingServiceServerBuilderPlugin : public ServerBuilderPlugin {
public:
~LoadReportingServiceServerBuilderPlugin() override = default;
grpc::string name() override { return "load_reporting_service"; }
// Creates a load reporting service.
void UpdateServerBuilder(grpc::ServerBuilder* builder) override;
// Registers the load reporter service.
void InitServer(grpc::ServerInitializer* si) override;
// Starts the load reporter service.
void Finish(grpc::ServerInitializer* si) override;
void ChangeArguments(const grpc::string& name, void* value) override {}
void UpdateChannelArguments(grpc::ChannelArguments* args) override {}
bool has_sync_methods() const override;
bool has_async_methods() const override;
private:
std::shared_ptr<LoadReporterAsyncServiceImpl> service_;
};
std::unique_ptr<grpc::ServerBuilderPlugin>
CreateLoadReportingServiceServerBuilderPlugin();
} // namespace load_reporter
} // namespace grpc
#endif // GRPC_SRC_CPP_LOAD_REPORTING_SERVICE_SERVER_BUILDER_PLUGIN_H

@ -0,0 +1,45 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/impl/codegen/port_platform.h>
#include <grpcpp/ext/server_load_reporting.h>
#include <grpc/support/log.h>
namespace grpc {
namespace load_reporter {
namespace experimental {
void AddLoadReportingCost(grpc::ServerContext* ctx,
const grpc::string& cost_name, double cost_value) {
if (std::isnormal(cost_value)) {
grpc::string buf;
buf.resize(sizeof(cost_value) + cost_name.size());
memcpy(&(*buf.begin()), &cost_value, sizeof(cost_value));
memcpy(&(*buf.begin()) + sizeof(cost_value), cost_name.data(),
cost_name.size());
ctx->AddTrailingMetadata(GRPC_LB_COST_MD_KEY, buf);
} else {
gpr_log(GPR_ERROR, "Call metric value is not normal.");
}
}
} // namespace experimental
} // namespace load_reporter
} // namespace grpc
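A minimal usage sketch of the helper above, as it might be called from inside a service handler (the cost name and value are placeholders).
#include <grpcpp/ext/server_load_reporting.h>
#include <grpcpp/grpcpp.h>

// Attaches a per-call cost metric to the given call; the value is packed into
// the call's trailing metadata under GRPC_LB_COST_MD_KEY, where the load
// reporting filter picks it up.
void TagCallWithCost(grpc::ServerContext* context) {
  grpc::load_reporter::experimental::AddLoadReportingCost(context, "cpu_usage",
                                                          0.25);
}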

@ -422,10 +422,32 @@ namespace Grpc.Core
{
if (!hooksRegistered)
{
// Under normal circumstances, the user is expected to shut down all
// the gRPC channels and servers before the application exits. The following
// hooks provide some extra handling for cases when this is not done,
// in an effort to achieve reasonable behavior on shutdown.
#if NETSTANDARD1_5
// FIXME couldn't get around a "cannot resolve type" runtime exception on Xamarin.Android
//System.Runtime.Loader.AssemblyLoadContext.Default.Unloading += (assemblyLoadContext) => { HandleShutdown(); };
// No action required at shutdown on .NET Core
// - In-progress P/Invoke calls (such as grpc_completion_queue_next) don't seem
// to prevent a .NET core application from terminating, so no special handling
// is needed.
// - .NET core doesn't run finalizers on shutdown, so there's no risk of
// a crash caused by the grpc_*_destroy methods for native objects being
// invoked in the wrong order.
// TODO(jtattermusch): Verify that the shutdown hooks are still not needed
// once we add support for new platforms using netstandard (e.g. Xamarin).
#else
// On desktop .NET framework and Mono, we need to register for a shutdown
// event to explicitly shut down the GrpcEnvironment.
// - On desktop .NET framework, we need to do a proper shutdown to prevent a crash
// when the framework attempts to run the finalizers for the SafeHandle objects representing the
// native grpc objects. The finalizers call the native grpc_*_destroy methods (e.g. grpc_server_destroy)
// in a random order, which is not supported by gRPC.
// - On Mono, the process would hang because the GrpcThreadPool threads are sleeping
// in the grpc_completion_queue_next P/Invoke invocation and Mono won't let the
// process shut down until the P/Invoke calls return. We avoid that by shutting down
// the completion queue(s) associated with the GrpcThreadPool, which causes
// the grpc_completion_queue_next calls to return immediately.
AppDomain.CurrentDomain.ProcessExit += (sender, eventArgs) => { HandleShutdown(); };
AppDomain.CurrentDomain.DomainUnload += (sender, eventArgs) => { HandleShutdown(); };
#endif

@ -28,6 +28,7 @@ class BaseStub
private $hostname;
private $hostname_override;
private $channel;
private $call_invoker;
// a callback function
private $update_metadata;
@ -58,6 +59,15 @@ class BaseStub
if (!empty($opts['grpc.ssl_target_name_override'])) {
$this->hostname_override = $opts['grpc.ssl_target_name_override'];
}
if (isset($opts['grpc_call_invoker'])) {
$this->call_invoker = $opts['grpc_call_invoker'];
unset($opts['grpc_call_invoker']);
$channel_opts = $this->updateOpts($opts);
// If the grpc_call_invoker is defined, use the channel created by the call invoker.
$this->channel = $this->call_invoker->createChannelFactory($hostname, $channel_opts);
return;
}
$this->call_invoker = new DefaultCallInvoker();
if ($channel) {
if (!is_a($channel, 'Grpc\Channel') &&
!is_a($channel, 'Grpc\Internal\InterceptorChannel')) {
@ -72,15 +82,7 @@ class BaseStub
$this->channel = static::getDefaultChannel($hostname, $opts);
}
/**
* Creates and returns the default Channel
*
* @param array $opts Channel constructor options
*
* @return Channel The channel
*/
public static function getDefaultChannel($hostname, array $opts)
{
private static function updateOpts($opts) {
$package_config = json_decode(
file_get_contents(dirname(__FILE__).'/../../composer.json'),
true
@ -97,6 +99,19 @@ class BaseStub
'required. Please see one of the '.
'ChannelCredentials::create methods');
}
return $opts;
}
/**
* Creates and returns the default Channel
*
* @param array $opts Channel constructor options
*
* @return Channel The channel
*/
public static function getDefaultChannel($hostname, array $opts)
{
$channel_opts = self::updateOpts($opts);
return new Channel($hostname, $channel_opts);
}
@ -239,7 +254,7 @@ class BaseStub
$deserialize,
array $metadata = [],
array $options = []) use ($channel) {
$call = new UnaryCall(
$call = $this->call_invoker->UnaryCall(
$channel,
$method,
$deserialize,
@ -275,7 +290,7 @@ class BaseStub
$deserialize,
array $metadata = [],
array $options = []) use ($channel) {
$call = new ClientStreamingCall(
$call = $this->call_invoker->ClientStreamingCall(
$channel,
$method,
$deserialize,
@ -312,7 +327,7 @@ class BaseStub
$deserialize,
array $metadata = [],
array $options = []) use ($channel) {
$call = new ServerStreamingCall(
$call = $this->call_invoker->ServerStreamingCall(
$channel,
$method,
$deserialize,
@ -348,7 +363,7 @@ class BaseStub
$deserialize,
array $metadata = [],
array $options = []) use ($channel) {
$call = new BidiStreamingCall(
$call = $this->call_invoker->BidiStreamingCall(
$channel,
$method,
$deserialize,

@ -0,0 +1,33 @@
<?php
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
namespace Grpc;
/**
* CallInvoker is used to pass a self-defined channel into the stub and to
* intercept each RPC while keeping the channel accessible.
* THIS IS AN EXPERIMENTAL API.
*/
interface CallInvoker
{
public function createChannelFactory($hostname, $opts);
public function UnaryCall($channel, $method, $deserialize, $options);
public function ClientStreamingCall($channel, $method, $deserialize, $options);
public function ServerStreamingCall($channel, $method, $deserialize, $options);
public function BidiStreamingCall($channel, $method, $deserialize, $options);
}

@ -0,0 +1,47 @@
<?php
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
namespace Grpc;
/**
* Default call invoker in the gRPC stub.
* THIS IS AN EXPERIMENTAL API.
*/
class DefaultCallInvoker implements CallInvoker
{
public function createChannelFactory($hostname, $opts) {
return new Channel($hostname, $opts);
}
public function UnaryCall($channel, $method, $deserialize, $options) {
return new UnaryCall($channel, $method, $deserialize, $options);
}
public function ClientStreamingCall($channel, $method, $deserialize, $options) {
return new ClientStreamingCall($channel, $method, $deserialize, $options);
}
public function ServerStreamingCall($channel, $method, $deserialize, $options) {
return new ServerStreamingCall($channel, $method, $deserialize, $options);
}
public function BidiStreamingCall($channel, $method, $deserialize, $options) {
return new BidiStreamingCall($channel, $method, $deserialize, $options);
}
}

@ -0,0 +1,227 @@
<?php
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Interface exported by the server.
*/
require_once(dirname(__FILE__).'/../../lib/Grpc/BaseStub.php');
require_once(dirname(__FILE__).'/../../lib/Grpc/AbstractCall.php');
require_once(dirname(__FILE__).'/../../lib/Grpc/UnaryCall.php');
require_once(dirname(__FILE__).'/../../lib/Grpc/ClientStreamingCall.php');
require_once(dirname(__FILE__).'/../../lib/Grpc/Interceptor.php');
require_once(dirname(__FILE__).'/../../lib/Grpc/CallInvoker.php');
require_once(dirname(__FILE__).'/../../lib/Grpc/DefaultCallInvoker.php');
require_once(dirname(__FILE__).'/../../lib/Grpc/Internal/InterceptorChannel.php');
class CallInvokerSimpleRequest
{
private $data;
public function __construct($data)
{
$this->data = $data;
}
public function setData($data)
{
$this->data = $data;
}
public function serializeToString()
{
return $this->data;
}
}
class CallInvokerClient extends Grpc\BaseStub
{
/**
* @param string $hostname hostname
* @param array $opts channel options
* @param Channel|InterceptorChannel $channel (optional) re-use channel object
*/
public function __construct($hostname, $opts, $channel = null)
{
parent::__construct($hostname, $opts, $channel);
}
/**
* A simple RPC.
* @param SimpleRequest $argument input argument
* @param array $metadata metadata
* @param array $options call options
*/
public function UnaryCall(
CallInvokerSimpleRequest $argument,
$metadata = [],
$options = []
) {
return $this->_simpleRequest(
'/dummy_method',
$argument,
[],
$metadata,
$options
);
}
}
class CallInvokerUpdateChannel implements \Grpc\CallInvoker
{
private $channel;
public function getChannel() {
return $this->channel;
}
public function createChannelFactory($hostname, $opts) {
$this->channel = new \Grpc\Channel('localhost:50050', $opts);
return $this->channel;
}
public function UnaryCall($channel, $method, $deserialize, $options) {
return new UnaryCall($channel, $method, $deserialize, $options);
}
public function ClientStreamingCall($channel, $method, $deserialize, $options) {
return new ClientStreamingCall($channel, $method, $deserialize, $options);
}
public function ServerStreamingCall($channel, $method, $deserialize, $options) {
return new ServerStreamingCall($channel, $method, $deserialize, $options);
}
public function BidiStreamingCall($channel, $method, $deserialize, $options) {
return new BidiStreamingCall($channel, $method, $deserialize, $options);
}
}
class CallInvokerChangeRequest implements \Grpc\CallInvoker
{
private $channel;
public function getChannel() {
return $this->channel;
}
public function createChannelFactory($hostname, $opts) {
$this->channel = new \Grpc\Channel($hostname, $opts);
return $this->channel;
}
public function UnaryCall($channel, $method, $deserialize, $options) {
return new CallInvokerChangeRequestCall($channel, $method, $deserialize, $options);
}
public function ClientStreamingCall($channel, $method, $deserialize, $options) {
return new ClientStreamingCall($channel, $method, $deserialize, $options);
}
public function ServerStreamingCall($channel, $method, $deserialize, $options) {
return new ServerStreamingCall($channel, $method, $deserialize, $options);
}
public function BidiStreamingCall($channel, $method, $deserialize, $options) {
return new BidiStreamingCall($channel, $method, $deserialize, $options);
}
}
class CallInvokerChangeRequestCall
{
private $call;
public function __construct($channel, $method, $deserialize, $options)
{
$this->call = new \Grpc\UnaryCall($channel, $method, $deserialize, $options);
}
public function start($argument, $metadata, $options) {
$argument->setData('intercepted_unary_request');
$this->call->start($argument, $metadata, $options);
}
public function wait()
{
return $this->call->wait();
}
}
class CallInvokerTest extends PHPUnit_Framework_TestCase
{
public function setUp()
{
$this->server = new Grpc\Server([]);
$this->port = $this->server->addHttp2Port('0.0.0.0:0');
$this->server->start();
}
public function tearDown()
{
unset($this->server);
}
public function testCreateDefaultCallInvoker()
{
$call_invoker = new \Grpc\DefaultCallInvoker();
}
public function testCreateCallInvoker()
{
$call_invoker = new CallInvokerUpdateChannel();
}
public function testCallInvokerAccessChannel()
{
$call_invoker = new CallInvokerUpdateChannel();
$stub = new \Grpc\BaseStub('localhost:50051',
['credentials' => \Grpc\ChannelCredentials::createInsecure(),
'grpc_call_invoker' => $call_invoker]);
$this->assertEquals($call_invoker->getChannel()->getTarget(), 'localhost:50050');
$call_invoker->getChannel()->close();
}
public function testClientChangeRequestCallInvoker()
{
$req_text = 'client_request';
$call_invoker = new CallInvokerChangeRequest();
$client = new CallInvokerClient('localhost:'.$this->port, [
'force_new' => true,
'credentials' => Grpc\ChannelCredentials::createInsecure(),
'grpc_call_invoker' => $call_invoker,
]);
$req = new CallInvokerSimpleRequest($req_text);
$unary_call = $client->UnaryCall($req);
$event = $this->server->requestCall();
$this->assertSame('/dummy_method', $event->method);
$server_call = $event->call;
$event = $server_call->startBatch([
Grpc\OP_SEND_INITIAL_METADATA => [],
Grpc\OP_SEND_STATUS_FROM_SERVER => [
'metadata' => [],
'code' => Grpc\STATUS_OK,
'details' => '',
],
Grpc\OP_RECV_MESSAGE => true,
Grpc\OP_RECV_CLOSE_ON_SERVER => true,
]);
$this->assertSame('intercepted_unary_request', $event->message);
$call_invoker->getChannel()->close();
unset($unary_call);
unset($server_call);
}
}

@ -24,6 +24,7 @@ require_once(dirname(__FILE__).'/../../lib/Grpc/AbstractCall.php');
require_once(dirname(__FILE__).'/../../lib/Grpc/UnaryCall.php');
require_once(dirname(__FILE__).'/../../lib/Grpc/ClientStreamingCall.php');
require_once(dirname(__FILE__).'/../../lib/Grpc/Interceptor.php');
require_once(dirname(__FILE__).'/../../lib/Grpc/CallInvoker.php');
require_once(dirname(__FILE__).'/../../lib/Grpc/Internal/InterceptorChannel.php');
class SimpleRequest

@ -114,6 +114,10 @@ message Load {
// num_calls_in_progress is the only valid entry. If in_progress_report is not
// set, num_calls_in_progress will be ignored. If in_progress_report is set,
// fields other than num_calls_in_progress and orphaned_load will be ignored.
// TODO(juanlishen): A Load is either an in_progress_report or not. We should
// make this explicit in the hierarchy. From the log, I see in_progress_report_
// has a random num_calls_in_progress_ when not set, which might lead to a bug
// when the balancer processes the load report.
oneof in_progress_report {
// The number of calls in progress (instantaneously) per load balancer id.
int64 num_calls_in_progress = 5;

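Sketch of the balancer-side check the TODO above is asking for, assuming the standard C++ protobuf codegen for this message (the generated header path is an assumption): only read num_calls_in_progress when the in_progress_report oneof is actually set.
#include <cstdint>

#include "src/proto/grpc/lb/v1/load_reporter.pb.h"  // assumed generated header

// Returns the in-progress call count only when the oneof is set; otherwise the
// raw field value is meaningless and must not be trusted.
int64_t InProgressCallsOrZero(const ::grpc::lb::v1::Load& load) {
  if (load.in_progress_report_case() ==
      ::grpc::lb::v1::Load::kNumCallsInProgress) {
    return load.num_calls_in_progress();
  }
  return 0;
}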
@ -306,6 +306,7 @@ CORE_SOURCE_FILES = [
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_channelz.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
'src/core/ext/filters/client_channel/client_channel_plugin.cc',
'src/core/ext/filters/client_channel/connector.cc',

@ -92,6 +92,8 @@ grpc_resource_quota_ref_type grpc_resource_quota_ref_import;
grpc_resource_quota_unref_type grpc_resource_quota_unref_import;
grpc_resource_quota_resize_type grpc_resource_quota_resize_import;
grpc_resource_quota_arg_vtable_type grpc_resource_quota_arg_vtable_import;
grpc_channelz_get_top_channels_type grpc_channelz_get_top_channels_import;
grpc_channelz_get_channel_type grpc_channelz_get_channel_import;
grpc_insecure_channel_create_from_fd_type grpc_insecure_channel_create_from_fd_import;
grpc_server_add_insecure_channel_from_fd_type grpc_server_add_insecure_channel_from_fd_import;
grpc_use_signal_type grpc_use_signal_import;
@ -340,6 +342,8 @@ void grpc_rb_load_imports(HMODULE library) {
grpc_resource_quota_unref_import = (grpc_resource_quota_unref_type) GetProcAddress(library, "grpc_resource_quota_unref");
grpc_resource_quota_resize_import = (grpc_resource_quota_resize_type) GetProcAddress(library, "grpc_resource_quota_resize");
grpc_resource_quota_arg_vtable_import = (grpc_resource_quota_arg_vtable_type) GetProcAddress(library, "grpc_resource_quota_arg_vtable");
grpc_channelz_get_top_channels_import = (grpc_channelz_get_top_channels_type) GetProcAddress(library, "grpc_channelz_get_top_channels");
grpc_channelz_get_channel_import = (grpc_channelz_get_channel_type) GetProcAddress(library, "grpc_channelz_get_channel");
grpc_insecure_channel_create_from_fd_import = (grpc_insecure_channel_create_from_fd_type) GetProcAddress(library, "grpc_insecure_channel_create_from_fd");
grpc_server_add_insecure_channel_from_fd_import = (grpc_server_add_insecure_channel_from_fd_type) GetProcAddress(library, "grpc_server_add_insecure_channel_from_fd");
grpc_use_signal_import = (grpc_use_signal_type) GetProcAddress(library, "grpc_use_signal");

@ -251,6 +251,12 @@ extern grpc_resource_quota_resize_type grpc_resource_quota_resize_import;
typedef const grpc_arg_pointer_vtable*(*grpc_resource_quota_arg_vtable_type)(void);
extern grpc_resource_quota_arg_vtable_type grpc_resource_quota_arg_vtable_import;
#define grpc_resource_quota_arg_vtable grpc_resource_quota_arg_vtable_import
typedef char*(*grpc_channelz_get_top_channels_type)(intptr_t start_channel_id);
extern grpc_channelz_get_top_channels_type grpc_channelz_get_top_channels_import;
#define grpc_channelz_get_top_channels grpc_channelz_get_top_channels_import
typedef char*(*grpc_channelz_get_channel_type)(intptr_t channel_id);
extern grpc_channelz_get_channel_type grpc_channelz_get_channel_import;
#define grpc_channelz_get_channel grpc_channelz_get_channel_import
typedef grpc_channel*(*grpc_insecure_channel_create_from_fd_type)(const char* target, int fd, const grpc_channel_args* args);
extern grpc_insecure_channel_create_from_fd_type grpc_insecure_channel_create_from_fd_import;
#define grpc_insecure_channel_create_from_fd grpc_insecure_channel_create_from_fd_import
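For reference, the channelz C core API imported above returns heap-allocated JSON strings; a minimal sketch of calling it directly (the function and variable names here are illustrative only).
#include <stdio.h>

#include <grpc/grpc.h>
#include <grpc/support/alloc.h>

// Dumps the channelz view of all top-level channels. Both channelz getters
// return a char* JSON string that the caller must release with gpr_free().
void DumpTopChannels() {
  char* json = grpc_channelz_get_top_channels(/*start_channel_id=*/0);
  printf("%s\n", json);
  gpr_free(json);
}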

@ -2,4 +2,4 @@
# Bazel installation
RUN echo "deb [arch=amd64] http://storage.googleapis.com/bazel-apt stable jdk1.8" > /etc/apt/sources.list.d/bazel.list
RUN curl https://bazel.build/bazel-release.pub.gpg | apt-key add -
RUN apt-get -y update && apt-get -y install bazel=0.13.1 && apt-get clean
RUN apt-get -y update && apt-get -y install bazel=0.15.0 && apt-get clean

@ -16,5 +16,8 @@
FROM google/dart:latest
# Upgrade Dart to version 2.
RUN apt-get update && apt-get upgrade -y dart
# Define the default command.
CMD ["bash"]

@ -28,6 +28,8 @@
openjdk-8-jdk ${'\\'}
vim
<%include file="../../python_deps.include"/>
<%include file="../../bazel.include"/>
RUN mkdir -p /var/local/jenkins

@ -34,8 +34,6 @@
#include "test/core/util/test_config.h"
#include "test/cpp/util/channel_trace_proto_helper.h"
// remove me
#include <grpc/support/string_util.h>
#include <stdlib.h>
#include <string.h>
@ -88,7 +86,7 @@ void AddSimpleTrace(ChannelTrace* tracer) {
void ValidateChannelTrace(ChannelTrace* tracer,
size_t expected_num_event_logged, size_t max_nodes) {
if (!max_nodes) return;
grpc_json* json = tracer->RenderJSON();
grpc_json* json = tracer->RenderJson();
EXPECT_NE(json, nullptr);
char* json_str = grpc_json_dump_to_string(json, 0);
grpc_json_destroy(json);
@ -157,7 +155,7 @@ TEST_P(ChannelTracerTest, ComplexTest) {
AddSimpleTrace(&tracer);
ChannelFixture channel1(GetParam());
RefCountedPtr<ChannelNode> sc1 =
MakeRefCounted<ChannelNode>(channel1.channel(), GetParam());
MakeRefCounted<ChannelNode>(channel1.channel(), GetParam(), true);
tracer.AddTraceEventReferencingSubchannel(
ChannelTrace::Severity::Info,
grpc_slice_from_static_string("subchannel one created"), sc1);
@ -175,7 +173,7 @@ TEST_P(ChannelTracerTest, ComplexTest) {
ValidateChannelTrace(&tracer, 5, GetParam());
ChannelFixture channel2(GetParam());
RefCountedPtr<ChannelNode> sc2 =
MakeRefCounted<ChannelNode>(channel2.channel(), GetParam());
MakeRefCounted<ChannelNode>(channel2.channel(), GetParam(), true);
tracer.AddTraceEventReferencingChannel(
ChannelTrace::Severity::Info,
grpc_slice_from_static_string("LB channel two created"), sc2);
@ -204,7 +202,7 @@ TEST_P(ChannelTracerTest, TestNesting) {
ValidateChannelTrace(&tracer, 2, GetParam());
ChannelFixture channel1(GetParam());
RefCountedPtr<ChannelNode> sc1 =
MakeRefCounted<ChannelNode>(channel1.channel(), GetParam());
MakeRefCounted<ChannelNode>(channel1.channel(), GetParam(), true);
tracer.AddTraceEventReferencingChannel(
ChannelTrace::Severity::Info,
grpc_slice_from_static_string("subchannel one created"), sc1);
@ -212,7 +210,7 @@ TEST_P(ChannelTracerTest, TestNesting) {
AddSimpleTrace(sc1->trace());
ChannelFixture channel2(GetParam());
RefCountedPtr<ChannelNode> conn1 =
MakeRefCounted<ChannelNode>(channel2.channel(), GetParam());
MakeRefCounted<ChannelNode>(channel2.channel(), GetParam(), true);
// nesting one level deeper.
sc1->trace()->AddTraceEventReferencingSubchannel(
ChannelTrace::Severity::Info,
@ -225,7 +223,7 @@ TEST_P(ChannelTracerTest, TestNesting) {
ValidateChannelTrace(conn1->trace(), 1, GetParam());
ChannelFixture channel3(GetParam());
RefCountedPtr<ChannelNode> sc2 =
MakeRefCounted<ChannelNode>(channel3.channel(), GetParam());
MakeRefCounted<ChannelNode>(channel3.channel(), GetParam(), true);
tracer.AddTraceEventReferencingSubchannel(
ChannelTrace::Severity::Info,
grpc_slice_from_static_string("subchannel two created"), sc2);

@ -19,17 +19,20 @@
#include <stdlib.h>
#include <string.h>
#include <grpc/grpc.h>
#include <gtest/gtest.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/lib/channel/channel_trace.h"
#include "src/core/lib/channel/channelz.h"
#include "src/core/lib/channel/channelz_registry.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/json/json.h"
#include "src/core/lib/surface/channel.h"
#include "test/core/util/test_config.h"
@ -37,27 +40,26 @@
#include <string.h>
namespace grpc_core {
namespace channelz {
namespace testing {
// Tests basic ChannelTrace functionality like construction, adding trace, and
// lookups by uuid.
TEST(ChannelzRegistryTest, UuidStartsAboveZeroTest) {
int object_to_register;
intptr_t uuid = ChannelzRegistry::Register(&object_to_register);
ChannelNode* channelz_channel = nullptr;
intptr_t uuid = ChannelzRegistry::RegisterChannelNode(channelz_channel);
EXPECT_GT(uuid, 0) << "First uuid chosen must be greater than zero. Zero is "
"reserved according to "
"https://github.com/grpc/proposal/blob/master/"
"A14-channelz.md";
ChannelzRegistry::Unregister(uuid);
ChannelzRegistry::UnregisterChannelNode(uuid);
}
TEST(ChannelzRegistryTest, UuidsAreIncreasing) {
int object_to_register;
ChannelNode* channelz_channel = nullptr;
std::vector<intptr_t> uuids;
uuids.reserve(10);
for (int i = 0; i < 10; ++i) {
// reregister the same object. It's ok since we are just testing uuids
uuids.push_back(ChannelzRegistry::Register(&object_to_register));
uuids.push_back(ChannelzRegistry::RegisterChannelNode(channelz_channel));
}
for (size_t i = 1; i < uuids.size(); ++i) {
EXPECT_LT(uuids[i - 1], uuids[i]) << "Uuids must always be increasing";
@ -65,51 +67,36 @@ TEST(ChannelzRegistryTest, UuidsAreIncreasing) {
}
TEST(ChannelzRegistryTest, RegisterGetTest) {
int object_to_register = 42;
intptr_t uuid = ChannelzRegistry::Register(&object_to_register);
int* retrieved = ChannelzRegistry::Get<int>(uuid);
EXPECT_EQ(&object_to_register, retrieved);
// we hackily jam an intptr_t into this pointer to check for equality later
ChannelNode* channelz_channel = (ChannelNode*)42;
intptr_t uuid = ChannelzRegistry::RegisterChannelNode(channelz_channel);
ChannelNode* retrieved = ChannelzRegistry::GetChannelNode(uuid);
EXPECT_EQ(channelz_channel, retrieved);
}
TEST(ChannelzRegistryTest, MultipleTypeTest) {
int int_to_register = 42;
intptr_t int_uuid = ChannelzRegistry::Register(&int_to_register);
std::string str_to_register = "hello world";
intptr_t str_uuid = ChannelzRegistry::Register(&str_to_register);
int* retrieved_int = ChannelzRegistry::Get<int>(int_uuid);
std::string* retrieved_str = ChannelzRegistry::Get<std::string>(str_uuid);
EXPECT_EQ(&int_to_register, retrieved_int);
EXPECT_EQ(&str_to_register, retrieved_str);
}
namespace {
class Foo {
public:
int bar;
};
} // namespace
TEST(ChannelzRegistryTest, CustomObjectTest) {
Foo* foo = New<Foo>();
foo->bar = 1024;
intptr_t uuid = ChannelzRegistry::Register(foo);
Foo* retrieved = ChannelzRegistry::Get<Foo>(uuid);
EXPECT_EQ(foo, retrieved);
Delete(foo);
TEST(ChannelzRegistryTest, RegisterManyItems) {
// we hackily jam an intptr_t into this pointer to check for equality later
ChannelNode* channelz_channel = (ChannelNode*)42;
for (int i = 0; i < 100; i++) {
intptr_t uuid = ChannelzRegistry::RegisterChannelNode(channelz_channel);
ChannelNode* retrieved = ChannelzRegistry::GetChannelNode(uuid);
EXPECT_EQ(channelz_channel, retrieved);
}
}
TEST(ChannelzRegistryTest, NullIfNotPresentTest) {
int object_to_register = 42;
intptr_t uuid = ChannelzRegistry::Register(&object_to_register);
// we hackily jam an intptr_t into this pointer to check for equality later
ChannelNode* channelz_channel = (ChannelNode*)42;
intptr_t uuid = ChannelzRegistry::RegisterChannelNode(channelz_channel);
// try to pull out a uuid that does not exist.
int* nonexistant = ChannelzRegistry::Get<int>(uuid + 1);
ChannelNode* nonexistant = ChannelzRegistry::GetChannelNode(uuid + 1);
EXPECT_EQ(nonexistant, nullptr);
int* retrieved = ChannelzRegistry::Get<int>(uuid);
EXPECT_EQ(object_to_register, *retrieved);
EXPECT_EQ(&object_to_register, retrieved);
ChannelNode* retrieved = ChannelzRegistry::GetChannelNode(uuid);
EXPECT_EQ(channelz_channel, retrieved);
}
} // namespace testing
} // namespace channelz
} // namespace grpc_core
int main(int argc, char** argv) {

@ -67,17 +67,50 @@ grpc_json* GetJsonChild(grpc_json* parent, const char* key) {
return nullptr;
}
void ValidateJsonArraySize(grpc_json* json, const char* key,
size_t expected_size) {
grpc_json* arr = GetJsonChild(json, key);
if (expected_size == 0) {
ASSERT_EQ(arr, nullptr);
return;
}
ASSERT_NE(arr, nullptr);
ASSERT_EQ(arr->type, GRPC_JSON_ARRAY);
size_t count = 0;
for (grpc_json* child = arr->child; child != nullptr; child = child->next) {
++count;
}
EXPECT_EQ(count, expected_size);
}
void ValidateGetTopChannels(size_t expected_channels) {
char* json_str = ChannelzRegistry::GetTopChannels(0);
grpc::testing::ValidateGetTopChannelsResponseProtoJsonTranslation(json_str);
grpc_json* parsed_json = grpc_json_parse_string(json_str);
// This check will naturally have to change when we support pagination.
// tracked: https://github.com/grpc/grpc/issues/16019.
ValidateJsonArraySize(parsed_json, "channel", expected_channels);
grpc_json* end = GetJsonChild(parsed_json, "end");
ASSERT_NE(end, nullptr);
EXPECT_EQ(end->type, GRPC_JSON_TRUE);
grpc_json_destroy(parsed_json);
gpr_free(json_str);
// also check that the core API formats this correctly
char* core_api_json_str = grpc_channelz_get_top_channels(0);
grpc::testing::ValidateGetTopChannelsResponseProtoJsonTranslation(
core_api_json_str);
gpr_free(core_api_json_str);
}
class ChannelFixture {
public:
ChannelFixture(int max_trace_nodes) {
ChannelFixture(int max_trace_nodes = 0) {
grpc_arg client_a[2];
client_a[0].type = GRPC_ARG_INTEGER;
client_a[0].key =
const_cast<char*>(GRPC_ARG_MAX_CHANNEL_TRACE_EVENTS_PER_NODE);
client_a[0].value.integer = max_trace_nodes;
client_a[1].type = GRPC_ARG_INTEGER;
client_a[1].key = const_cast<char*>(GRPC_ARG_ENABLE_CHANNELZ);
client_a[1].value.integer = true;
client_a[0] = grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_MAX_CHANNEL_TRACE_EVENTS_PER_NODE),
max_trace_nodes);
client_a[1] = grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_ENABLE_CHANNELZ), true);
grpc_channel_args client_args = {GPR_ARRAY_SIZE(client_a), client_a};
channel_ =
grpc_insecure_channel_create("fake_target", &client_args, nullptr);
@ -99,6 +132,10 @@ struct validate_channel_data_args {
void ValidateChildInteger(grpc_json* json, int64_t expect, const char* key) {
grpc_json* gotten_json = GetJsonChild(json, key);
if (expect == 0) {
ASSERT_EQ(gotten_json, nullptr);
return;
}
ASSERT_NE(gotten_json, nullptr);
int64_t gotten_number = (int64_t)strtol(gotten_json->value, nullptr, 0);
EXPECT_EQ(gotten_number, expect);
@ -115,10 +152,15 @@ void ValidateCounters(char* json_str, validate_channel_data_args args) {
}
void ValidateChannel(ChannelNode* channel, validate_channel_data_args args) {
char* json_str = channel->RenderJSON();
char* json_str = channel->RenderJsonString();
grpc::testing::ValidateChannelProtoJsonTranslation(json_str);
ValidateCounters(json_str, args);
gpr_free(json_str);
// also check that the core API formats this the correct way
char* core_api_json_str = grpc_channelz_get_channel(channel->channel_uuid());
grpc::testing::ValidateGetChannelResponseProtoJsonTranslation(
core_api_json_str);
gpr_free(core_api_json_str);
}
grpc_millis GetLastCallStartedMillis(ChannelNode* channel) {
@ -141,9 +183,7 @@ TEST_P(ChannelzChannelTest, BasicChannel) {
ChannelFixture channel(GetParam());
ChannelNode* channelz_channel =
grpc_channel_get_channelz_node(channel.channel());
char* json_str = channelz_channel->RenderJSON();
ValidateCounters(json_str, {0, 0, 0});
gpr_free(json_str);
ValidateChannel(channelz_channel, {0, 0, 0});
}
TEST(ChannelzChannelTest, ChannelzDisabled) {
@ -199,6 +239,42 @@ TEST_P(ChannelzChannelTest, LastCallStartedMillis) {
EXPECT_NE(millis1, millis4);
}
TEST(ChannelzGetTopChannelsTest, BasicTest) {
grpc_core::ExecCtx exec_ctx;
ChannelFixture channel;
ValidateGetTopChannels(1);
}
TEST(ChannelzGetTopChannelsTest, NoChannelsTest) {
grpc_core::ExecCtx exec_ctx;
ValidateGetTopChannels(0);
}
TEST(ChannelzGetTopChannelsTest, ManyChannelsTest) {
grpc_core::ExecCtx exec_ctx;
ChannelFixture channels[10];
(void)channels; // suppress unused variable error
ValidateGetTopChannels(10);
}
TEST(ChannelzGetTopChannelsTest, InternalChannelTest) {
grpc_core::ExecCtx exec_ctx;
ChannelFixture channels[10];
(void)channels; // suppress unused variable error
// create an internal channel
grpc_arg client_a[2];
client_a[0] = grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_CHANNELZ_CHANNEL_IS_INTERNAL_CHANNEL), true);
client_a[1] = grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_ENABLE_CHANNELZ), true);
grpc_channel_args client_args = {GPR_ARRAY_SIZE(client_a), client_a};
grpc_channel* internal_channel =
grpc_insecure_channel_create("fake_target", &client_args, nullptr);
// The internal channel should not be returned from the request
ValidateGetTopChannels(10);
grpc_channel_destroy(internal_channel);
}
INSTANTIATE_TEST_CASE_P(ChannelzChannelTestSweep, ChannelzChannelTest,
::testing::Values(0, 1, 2, 6, 10, 15));

@ -209,27 +209,27 @@ static void test_channelz(grpc_end2end_test_config config) {
grpc_channel_get_channelz_node(f.client);
GPR_ASSERT(channelz_channel != nullptr);
char* json = channelz_channel->RenderJSON();
char* json = channelz_channel->RenderJsonString();
GPR_ASSERT(json != nullptr);
GPR_ASSERT(nullptr != strstr(json, "\"callsStarted\":\"0\""));
GPR_ASSERT(nullptr != strstr(json, "\"callsFailed\":\"0\""));
GPR_ASSERT(nullptr != strstr(json, "\"callsSucceeded\":\"0\""));
// nothing is present yet
GPR_ASSERT(nullptr == strstr(json, "\"callsStarted\""));
GPR_ASSERT(nullptr == strstr(json, "\"callsFailed\""));
GPR_ASSERT(nullptr == strstr(json, "\"callsSucceeded\""));
gpr_free(json);
// one successful request
run_one_request(config, f, true);
json = channelz_channel->RenderJSON();
json = channelz_channel->RenderJsonString();
GPR_ASSERT(json != nullptr);
GPR_ASSERT(nullptr != strstr(json, "\"callsStarted\":\"1\""));
GPR_ASSERT(nullptr != strstr(json, "\"callsFailed\":\"0\""));
GPR_ASSERT(nullptr != strstr(json, "\"callsSucceeded\":\"1\""));
gpr_free(json);
// one failed request
run_one_request(config, f, false);
json = channelz_channel->RenderJSON();
json = channelz_channel->RenderJsonString();
GPR_ASSERT(json != nullptr);
gpr_log(GPR_INFO, "%s", json);
GPR_ASSERT(nullptr != strstr(json, "\"callsStarted\":\"2\""));
@ -264,7 +264,7 @@ static void test_channelz_with_channel_trace(grpc_end2end_test_config config) {
grpc_channel_get_channelz_node(f.client);
GPR_ASSERT(channelz_channel != nullptr);
char* json = channelz_channel->RenderJSON();
char* json = channelz_channel->RenderJsonString();
GPR_ASSERT(json != nullptr);
gpr_log(GPR_INFO, "%s", json);
GPR_ASSERT(nullptr != strstr(json, "\"trace\""));

@ -86,6 +86,7 @@ grpc_cc_test(
"//:gpr",
"//test/core/util:gpr_test_util",
],
data = ["//third_party/toolchains:RBE_USE_MACHINE_TYPE_LARGE"],
)
grpc_cc_test(

@ -17,20 +17,32 @@
*/
#include "src/core/lib/gprpp/inlined_vector.h"
#include <grpc/support/log.h>
#include <gtest/gtest.h>
#include "src/core/lib/gprpp/memory.h"
#include "test/core/util/test_config.h"
namespace grpc_core {
namespace testing {
namespace {
template <typename Vector>
static void FillVector(Vector* v, int len, int start = 0) {
for (int i = 0; i < len; i++) {
v->push_back(i + start);
EXPECT_EQ(i + 1UL, v->size());
}
EXPECT_EQ(static_cast<size_t>(len), v->size());
EXPECT_LE(static_cast<size_t>(len), v->capacity());
}
} // namespace
TEST(InlinedVectorTest, CreateAndIterate) {
const int kNumElements = 9;
InlinedVector<int, 2> v;
EXPECT_TRUE(v.empty());
for (int i = 0; i < kNumElements; ++i) {
v.push_back(i);
}
FillVector(&v, kNumElements);
EXPECT_EQ(static_cast<size_t>(kNumElements), v.size());
EXPECT_FALSE(v.empty());
for (int i = 0; i < kNumElements; ++i) {
@ -42,9 +54,7 @@ TEST(InlinedVectorTest, CreateAndIterate) {
TEST(InlinedVectorTest, ValuesAreInlined) {
const int kNumElements = 5;
InlinedVector<int, 10> v;
for (int i = 0; i < kNumElements; ++i) {
v.push_back(i);
}
FillVector(&v, kNumElements);
EXPECT_EQ(static_cast<size_t>(kNumElements), v.size());
for (int i = 0; i < kNumElements; ++i) {
EXPECT_EQ(i, v[i]);
@ -71,19 +81,13 @@ TEST(InlinedVectorTest, ClearAndRepopulate) {
const int kNumElements = 10;
InlinedVector<int, 5> v;
EXPECT_EQ(0UL, v.size());
for (int i = 0; i < kNumElements; ++i) {
v.push_back(i);
EXPECT_EQ(i + 1UL, v.size());
}
FillVector(&v, kNumElements);
for (int i = 0; i < kNumElements; ++i) {
EXPECT_EQ(i, v[i]);
}
v.clear();
EXPECT_EQ(0UL, v.size());
for (int i = 0; i < kNumElements; ++i) {
v.push_back(kNumElements + i);
EXPECT_EQ(i + 1UL, v.size());
}
FillVector(&v, kNumElements, kNumElements);
for (int i = 0; i < kNumElements; ++i) {
EXPECT_EQ(kNumElements + i, v[i]);
}
@ -93,10 +97,7 @@ TEST(InlinedVectorTest, ConstIndexOperator) {
constexpr int kNumElements = 10;
InlinedVector<int, 5> v;
EXPECT_EQ(0UL, v.size());
for (int i = 0; i < kNumElements; ++i) {
v.push_back(i);
EXPECT_EQ(i + 1UL, v.size());
}
FillVector(&v, kNumElements);
// The following lambda function is exceptionally allowed to use an anonymous
// capture due to the erroneous behavior of the MSVC compiler, which refuses to
// capture the kNumElements constexpr, something allowed by the standard.
@ -108,6 +109,161 @@ TEST(InlinedVectorTest, ConstIndexOperator) {
const_func(v);
}
// the following constants and typedefs are used for copy/move
// construction/assignment
const size_t kInlinedLength = 8;
typedef InlinedVector<int, kInlinedLength> IntVec8;
const size_t kInlinedFillSize = kInlinedLength - 1;
const size_t kAllocatedFillSize = kInlinedLength + 1;
TEST(InlinedVectorTest, CopyConstructerInlined) {
IntVec8 original;
FillVector(&original, kInlinedFillSize);
IntVec8 copy_constructed(original);
for (size_t i = 0; i < original.size(); ++i) {
EXPECT_EQ(original[i], copy_constructed[i]);
}
}
TEST(InlinedVectorTest, CopyConstructerAllocated) {
IntVec8 original;
FillVector(&original, kAllocatedFillSize);
IntVec8 copy_constructed(original);
for (size_t i = 0; i < original.size(); ++i) {
EXPECT_EQ(original[i], copy_constructed[i]);
}
}
TEST(InlinedVectorTest, CopyAssignementInlinedInlined) {
IntVec8 original;
FillVector(&original, kInlinedFillSize);
IntVec8 copy_assigned;
FillVector(&copy_assigned, kInlinedFillSize, 99);
copy_assigned = original;
for (size_t i = 0; i < original.size(); ++i) {
EXPECT_EQ(original[i], copy_assigned[i]);
}
}
TEST(InlinedVectorTest, CopyAssignementInlinedAllocated) {
IntVec8 original;
FillVector(&original, kInlinedFillSize);
IntVec8 copy_assigned;
FillVector(&copy_assigned, kAllocatedFillSize, 99);
copy_assigned = original;
for (size_t i = 0; i < original.size(); ++i) {
EXPECT_EQ(original[i], copy_assigned[i]);
}
}
TEST(InlinedVectorTest, CopyAssignementAllocatedInlined) {
IntVec8 original;
FillVector(&original, kAllocatedFillSize);
IntVec8 copy_assigned;
FillVector(&copy_assigned, kInlinedFillSize, 99);
copy_assigned = original;
for (size_t i = 0; i < original.size(); ++i) {
EXPECT_EQ(original[i], copy_assigned[i]);
}
}
TEST(InlinedVectorTest, CopyAssignementAllocatedAllocated) {
IntVec8 original;
FillVector(&original, kAllocatedFillSize);
IntVec8 copy_assigned;
FillVector(&copy_assigned, kAllocatedFillSize, 99);
copy_assigned = original;
for (size_t i = 0; i < original.size(); ++i) {
EXPECT_EQ(original[i], copy_assigned[i]);
}
}
TEST(InlinedVectorTest, MoveConstructorInlined) {
IntVec8 original;
FillVector(&original, kInlinedFillSize);
IntVec8 tmp(original);
auto* old_data = tmp.data();
IntVec8 move_constructed(std::move(tmp));
for (size_t i = 0; i < original.size(); ++i) {
EXPECT_EQ(original[i], move_constructed[i]);
}
// original data was inlined so it should have been copied, not moved.
EXPECT_NE(move_constructed.data(), old_data);
}
TEST(InlinedVectorTest, MoveConstructorAllocated) {
IntVec8 original;
FillVector(&original, kAllocatedFillSize);
IntVec8 tmp(original);
auto* old_data = tmp.data();
IntVec8 move_constructed(std::move(tmp));
for (size_t i = 0; i < original.size(); ++i) {
EXPECT_EQ(original[i], move_constructed[i]);
}
// original data was allocated, so it should have been moved, not copied
EXPECT_EQ(move_constructed.data(), old_data);
}
TEST(InlinedVectorTest, MoveAssignmentInlinedInlined) {
IntVec8 original;
FillVector(&original, kInlinedFillSize);
IntVec8 move_assigned;
FillVector(&move_assigned, kInlinedFillSize, 99); // Add dummy elements
IntVec8 tmp(original);
auto* old_data = tmp.data();
move_assigned = std::move(tmp);
for (size_t i = 0; i < original.size(); ++i) {
EXPECT_EQ(original[i], move_assigned[i]);
}
// original data was inlined so it should have been copied, not moved.
EXPECT_NE(move_assigned.data(), old_data);
}
TEST(InlinedVectorTest, MoveAssignmentInlinedAllocated) {
IntVec8 original;
FillVector(&original, kInlinedFillSize);
IntVec8 move_assigned;
FillVector(&move_assigned, kAllocatedFillSize, 99); // Add dummy elements
IntVec8 tmp(original);
auto* old_data = tmp.data();
move_assigned = std::move(tmp);
for (size_t i = 0; i < original.size(); ++i) {
EXPECT_EQ(original[i], move_assigned[i]);
}
// original data was inlined so it should have been copied, not moved.
EXPECT_NE(move_assigned.data(), old_data);
}
TEST(InlinedVectorTest, MoveAssignmentAllocatedInlined) {
IntVec8 original;
FillVector(&original, kAllocatedFillSize);
IntVec8 move_assigned;
FillVector(&move_assigned, kInlinedFillSize, 99); // Add dummy elements
IntVec8 tmp(original);
auto* old_data = tmp.data();
move_assigned = std::move(tmp);
for (size_t i = 0; i < original.size(); ++i) {
EXPECT_EQ(original[i], move_assigned[i]);
}
// original data was allocated so it should have been moved, not copied.
EXPECT_EQ(move_assigned.data(), old_data);
}
TEST(InlinedVectorTest, MoveAssignmentAllocatedAllocated) {
IntVec8 original;
FillVector(&original, kAllocatedFillSize);
IntVec8 move_assigned;
FillVector(&move_assigned, kAllocatedFillSize, 99); // Add dummy elements
IntVec8 tmp(original);
auto* old_data = tmp.data();
move_assigned = std::move(tmp);
for (size_t i = 0; i < original.size(); ++i) {
EXPECT_EQ(original[i], move_assigned[i]);
}
// original data was allocated so it should have been moved, not copied.
EXPECT_EQ(move_assigned.data(), old_data);
}
} // namespace testing
} // namespace grpc_core

@ -47,6 +47,7 @@ grpc_cc_test(
"//test/core/util:gpr_test_util",
"//test/core/util:grpc_test_util",
],
data = ["//third_party/toolchains:RBE_USE_MACHINE_TYPE_LARGE"],
)
grpc_cc_test(

@ -131,6 +131,8 @@ int main(int argc, char **argv) {
printf("%lx", (unsigned long) grpc_resource_quota_unref);
printf("%lx", (unsigned long) grpc_resource_quota_resize);
printf("%lx", (unsigned long) grpc_resource_quota_arg_vtable);
printf("%lx", (unsigned long) grpc_channelz_get_top_channels);
printf("%lx", (unsigned long) grpc_channelz_get_channel);
printf("%lx", (unsigned long) grpc_auth_property_iterator_next);
printf("%lx", (unsigned long) grpc_auth_context_property_iterator);
printf("%lx", (unsigned long) grpc_auth_context_peer_identity);

@ -14,7 +14,7 @@
licenses(["notice"]) # Apache v2
load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_package", "grpc_cc_binary")
load("//bazel:grpc_build_system.bzl", "grpc_cc_binary", "grpc_cc_library", "grpc_cc_test", "grpc_package")
grpc_package(
name = "test/cpp/end2end",
@ -429,6 +429,20 @@ grpc_cc_binary(
],
)
grpc_cc_test(
name = "server_load_reporting_end2end_test",
srcs = ["server_load_reporting_end2end_test.cc"],
external_deps = [
"gtest",
"gmock",
],
deps = [
"//:grpcpp_server_load_reporting",
"//src/proto/grpc/testing:echo_proto",
"//test/cpp/util:test_util",
],
)
grpc_cc_test(
name = "shutdown_test",
srcs = ["shutdown_test.cc"],
