Merge github.com:grpc/grpc into serve_fries

pull/10897/head
Craig Tiller 8 years ago
commit 54773cfed8
Changed files (lines changed in parentheses):

  1. .github/ISSUE_TEMPLATE.md (31)
  2. BUILD (116)
  3. CMakeLists.txt (171)
  4. INSTALL.md (9)
  5. Makefile (199)
  6. Rakefile (2)
  7. binding.gyp (50)
  8. build.yaml (73)
  9. composer.json (2)
  10. config.m4 (12)
  11. gRPC-Core.podspec (38)
  12. grpc.gemspec (25)
  13. include/grpc++/impl/codegen/call.h (37)
  14. include/grpc++/impl/codegen/config_protobuf.h (2)
  15. include/grpc++/impl/codegen/core_codegen.h (6)
  16. include/grpc++/impl/codegen/core_codegen_interface.h (7)
  17. include/grpc++/impl/codegen/proto_utils.h (111)
  18. include/grpc/impl/codegen/grpc_types.h (3)
  19. include/grpc/impl/codegen/port_platform.h (2)
  20. package.json (3)
  21. package.xml (25)
  22. setup.py (10)
  23. src/boringssl/gen_build_yaml.py (2)
  24. src/compiler/cpp_generator.cc (6)
  25. src/compiler/python_generator.cc (12)
  26. src/core/ext/filters/client_channel/channel_connectivity.c (2)
  27. src/core/ext/filters/client_channel/client_channel.c (132)
  28. src/core/ext/filters/client_channel/lb_policy.c (3)
  29. src/core/ext/filters/client_channel/lb_policy.h (11)
  30. src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c (153)
  31. src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h (42)
  32. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c (406)
  33. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c (133)
  34. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h (65)
  35. src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c (50)
  36. src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h (6)
  37. src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c (3)
  38. src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c (25)
  39. src/core/ext/filters/client_channel/subchannel.c (2)
  40. src/core/ext/filters/client_channel/subchannel.h (1)
  41. src/core/ext/filters/http/http_filters_plugin.c (1)
  42. src/core/ext/filters/http/message_compress/message_compress_filter.c (5)
  43. src/core/ext/filters/http/message_compress/message_compress_filter.h (2)
  44. src/core/ext/filters/http/server/http_server_filter.c (2)
  45. src/core/ext/transport/chttp2/client/insecure/channel_create.c (2)
  46. src/core/ext/transport/chttp2/transport/chttp2_transport.c (72)
  47. src/core/ext/transport/chttp2/transport/chttp2_transport.h (5)
  48. src/core/ext/transport/chttp2/transport/frame_settings.c (6)
  49. src/core/ext/transport/chttp2/transport/hpack_encoder.c (6)
  50. src/core/ext/transport/chttp2/transport/hpack_parser.c (6)
  51. src/core/ext/transport/chttp2/transport/hpack_table.c (7)
  52. src/core/ext/transport/chttp2/transport/internal.h (31)
  53. src/core/ext/transport/chttp2/transport/parsing.c (16)
  54. src/core/ext/transport/chttp2/transport/writing.c (45)
  55. src/core/ext/transport/cronet/transport/cronet_transport.c (4)
  56. src/core/lib/channel/channel_stack.c (2)
  57. src/core/lib/channel/channel_stack.h (4)
  58. src/core/lib/channel/channel_stack_builder.c (3)
  59. src/core/lib/channel/channel_stack_builder.h (2)
  60. src/core/lib/channel/context.h (3)
  61. src/core/lib/debug/trace.c (19)
  62. src/core/lib/debug/trace.h (28)
  63. src/core/lib/http/httpcli.c (2)
  64. src/core/lib/http/parser.c (4)
  65. src/core/lib/http/parser.h (3)
  66. src/core/lib/iomgr/combiner.c (12)
  67. src/core/lib/iomgr/combiner.h (3)
  68. src/core/lib/iomgr/ev_epoll1_linux.c (984)
  69. src/core/lib/iomgr/ev_epoll1_linux.h (44)
  70. src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c (2146)
  71. src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h (43)
  72. src/core/lib/iomgr/ev_epoll_thread_pool_linux.c (1337)
  73. src/core/lib/iomgr/ev_epoll_thread_pool_linux.h (43)
  74. src/core/lib/iomgr/ev_epollex_linux.c (1511)
  75. src/core/lib/iomgr/ev_epollex_linux.h (43)
  76. src/core/lib/iomgr/ev_epollsig_linux.c (55)
  77. src/core/lib/iomgr/ev_epollsig_linux.h (8)
  78. src/core/lib/iomgr/ev_poll_posix.c (39)
  79. src/core/lib/iomgr/ev_poll_posix.h (4)
  80. src/core/lib/iomgr/ev_posix.c (30)
  81. src/core/lib/iomgr/ev_posix.h (7)
  82. src/core/lib/iomgr/exec_ctx.c (5)
  83. src/core/lib/iomgr/exec_ctx.h (2)
  84. src/core/lib/iomgr/iomgr.c (4)
  85. src/core/lib/iomgr/iomgr.h (3)
  86. src/core/lib/iomgr/is_epollexclusive_available.c (116)
  87. src/core/lib/iomgr/is_epollexclusive_available.h (21)
  88. src/core/lib/iomgr/lockfree_event.c (16)
  89. src/core/lib/iomgr/pollset.h (7)
  90. src/core/lib/iomgr/pollset_uv.c (2)
  91. src/core/lib/iomgr/pollset_windows.c (6)
  92. src/core/lib/iomgr/port.h (1)
  93. src/core/lib/iomgr/resource_quota.c (17)
  94. src/core/lib/iomgr/resource_quota.h (3)
  95. src/core/lib/iomgr/sys_epoll_wrapper.h (43)
  96. src/core/lib/iomgr/tcp_client_posix.c (12)
  97. src/core/lib/iomgr/tcp_client_uv.c (6)
  98. src/core/lib/iomgr/tcp_posix.c (14)
  99. src/core/lib/iomgr/tcp_posix.h (3)
  100. src/core/lib/iomgr/tcp_server_posix.c (2)

Some files were not shown because too many files have changed in this diff.

@ -0,0 +1,31 @@
Please answer these questions before submitting your issue.
### Should this be an issue in the gRPC issue tracker?
Create new issues for bugs and feature requests. An issue needs to be actionable. General gRPC discussions and usage questions belong to:
- [grpc.io mailing list](https://groups.google.com/forum/#!forum/grpc-io)
- [StackOverflow, with `grpc` tag](http://stackoverflow.com/questions/tagged/grpc)
*Please don't double-post your questions in multiple locations; we are monitoring both channels, and the time spent de-duplicating questions is better spent answering more user questions.*
### What version of gRPC and what language are you using?
### What operating system (Linux, Windows, …) and version?
### What runtime / compiler are you using (e.g. python version or version of gcc)?
### What did you do?
If possible, provide a recipe for reproducing the error. Try being specific and include code snippets if helpful.
### What did you expect to see?
### What did you see instead?
Make sure you include information that can help us debug (full error message, exception listing, stack trace, logs).
### Anything else we should know about your project / environment?

BUILD (116)

@ -35,8 +35,12 @@ exports_files(["LICENSE"])
package(default_visibility = ["//visibility:public"])
load("//bazel:grpc_build_system.bzl", "grpc_cc_library",
"grpc_proto_plugin", "grpc_cc_libraries")
load(
"//bazel:grpc_build_system.bzl",
"grpc_cc_library",
"grpc_proto_plugin",
"grpc_cc_libraries",
)
# This should be updated along with build.yaml
g_stands_for = "gregarious"
@ -55,10 +59,19 @@ grpc_cc_library(
)
grpc_cc_libraries(
name_list = ["grpc", "grpc_unsecure",],
srcs = [
"src/core/lib/surface/init.c",
],
additional_dep_list = [
[
"grpc_secure",
"grpc_resolver_dns_ares",
"grpc_lb_policy_grpclb_secure",
"grpc_transport_chttp2_client_secure",
"grpc_transport_chttp2_server_secure",
],
[],
],
additional_src_list = [
[
"src/core/plugin_registry/grpc_plugin_registry.c",
@ -69,30 +82,24 @@ grpc_cc_libraries(
],
],
language = "c",
name_list = [
"grpc",
"grpc_unsecure",
],
standalone = True,
deps = [
"census",
"grpc_base",
"grpc_deadline_filter",
"grpc_lb_policy_pick_first",
"grpc_lb_policy_round_robin",
"grpc_load_reporting",
"grpc_max_age_filter",
"grpc_message_size_filter",
"grpc_resolver_dns_native",
"grpc_resolver_sockaddr",
"grpc_transport_chttp2_client_insecure",
"grpc_transport_chttp2_server_insecure",
"grpc_message_size_filter",
"grpc_deadline_filter",
],
additional_dep_list = [
[
"grpc_secure",
"grpc_resolver_dns_ares",
"grpc_lb_policy_grpclb_secure",
"grpc_transport_chttp2_client_secure",
"grpc_transport_chttp2_server_secure",
],
[],
],
)
@ -105,9 +112,9 @@ grpc_cc_library(
language = "c",
deps = [
"grpc_base",
"grpc_http_filters",
"grpc_transport_chttp2_client_secure",
"grpc_transport_cronet_client_secure",
"grpc_http_filters",
],
)
@ -373,13 +380,13 @@ grpc_cc_library(
hdrs = [
"src/core/lib/profiling/timers.h",
"src/core/lib/support/arena.h",
"src/core/lib/support/atomic.h",
"src/core/lib/support/atomic_with_atm.h",
"src/core/lib/support/atomic_with_std.h",
"src/core/lib/support/backoff.h",
"src/core/lib/support/block_annotate.h",
"src/core/lib/support/env.h",
"src/core/lib/support/memory.h",
"src/core/lib/support/atomic.h",
"src/core/lib/support/atomic_with_atm.h",
"src/core/lib/support/atomic_with_std.h",
"src/core/lib/support/mpscq.h",
"src/core/lib/support/murmur_hash.h",
"src/core/lib/support/spinlock.h",
@ -442,6 +449,13 @@ grpc_cc_library(
],
)
grpc_cc_library(
name = "grpc_trace",
srcs = ["src/core/lib/debug/trace.c"],
hdrs = ["src/core/lib/debug/trace.h"],
deps = [":gpr"],
)
grpc_cc_library(
name = "grpc_base",
srcs = [
@ -454,7 +468,6 @@ grpc_cc_library(
"src/core/lib/channel/handshaker_registry.c",
"src/core/lib/compression/compression.c",
"src/core/lib/compression/message_compress.c",
"src/core/lib/debug/trace.c",
"src/core/lib/http/format_request.c",
"src/core/lib/http/httpcli.c",
"src/core/lib/http/parser.c",
@ -465,7 +478,12 @@ grpc_cc_library(
"src/core/lib/iomgr/endpoint_pair_uv.c",
"src/core/lib/iomgr/endpoint_pair_windows.c",
"src/core/lib/iomgr/error.c",
"src/core/lib/iomgr/ev_epoll_linux.c",
"src/core/lib/iomgr/ev_epoll1_linux.c",
"src/core/lib/iomgr/ev_epollsig_linux.c",
"src/core/lib/iomgr/ev_epollex_linux.c",
"src/core/lib/iomgr/is_epollexclusive_available.c",
"src/core/lib/iomgr/ev_epoll_thread_pool_linux.c",
"src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c",
"src/core/lib/iomgr/ev_poll_posix.c",
"src/core/lib/iomgr/ev_posix.c",
"src/core/lib/iomgr/exec_ctx.c",
@ -510,6 +528,7 @@ grpc_cc_library(
"src/core/lib/iomgr/tcp_windows.c",
"src/core/lib/iomgr/time_averaged_stats.c",
"src/core/lib/iomgr/timer_generic.c",
"src/core/lib/iomgr/timer_manager.c",
"src/core/lib/iomgr/timer_heap.c",
"src/core/lib/iomgr/timer_uv.c",
"src/core/lib/iomgr/udp_server.c",
@ -577,7 +596,6 @@ grpc_cc_library(
"src/core/lib/channel/handshaker_registry.h",
"src/core/lib/compression/algorithm_metadata.h",
"src/core/lib/compression/message_compress.h",
"src/core/lib/debug/trace.h",
"src/core/lib/http/format_request.h",
"src/core/lib/http/httpcli.h",
"src/core/lib/http/parser.h",
@ -587,7 +605,13 @@ grpc_cc_library(
"src/core/lib/iomgr/endpoint_pair.h",
"src/core/lib/iomgr/error.h",
"src/core/lib/iomgr/error_internal.h",
"src/core/lib/iomgr/ev_epoll_linux.h",
"src/core/lib/iomgr/ev_epoll1_linux.h",
"src/core/lib/iomgr/ev_epollsig_linux.h",
"src/core/lib/iomgr/ev_epollex_linux.h",
"src/core/lib/iomgr/is_epollexclusive_available.h",
"src/core/lib/iomgr/sys_epoll_wrapper.h",
"src/core/lib/iomgr/ev_epoll_thread_pool_linux.h",
"src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h",
"src/core/lib/iomgr/ev_poll_posix.h",
"src/core/lib/iomgr/ev_posix.h",
"src/core/lib/iomgr/exec_ctx.h",
@ -627,6 +651,7 @@ grpc_cc_library(
"src/core/lib/iomgr/time_averaged_stats.h",
"src/core/lib/iomgr/timer.h",
"src/core/lib/iomgr/timer_generic.h",
"src/core/lib/iomgr/timer_manager.h",
"src/core/lib/iomgr/timer_heap.h",
"src/core/lib/iomgr/timer_uv.h",
"src/core/lib/iomgr/udp_server.h",
@ -693,6 +718,7 @@ grpc_cc_library(
deps = [
"gpr_base",
"grpc_codegen",
"grpc_trace",
],
)
@ -791,16 +817,16 @@ grpc_cc_library(
grpc_cc_library(
name = "grpc_http_filters",
hdrs = [
"src/core/ext/filters/http/message_compress/message_compress_filter.h",
"src/core/ext/filters/http/client/http_client_filter.h",
"src/core/ext/filters/http/server/http_server_filter.h",
],
srcs = [
"src/core/ext/filters/http/message_compress/message_compress_filter.c",
"src/core/ext/filters/http/client/http_client_filter.c",
"src/core/ext/filters/http/http_filters_plugin.c",
"src/core/ext/filters/http/message_compress/message_compress_filter.c",
"src/core/ext/filters/http/server/http_server_filter.c",
"src/core/ext/filters/http/http_filters_plugin.c"
],
hdrs = [
"src/core/ext/filters/http/client/http_client_filter.h",
"src/core/ext/filters/http/message_compress/message_compress_filter.h",
"src/core/ext/filters/http/server/http_server_filter.h",
],
language = "c",
deps = [
@ -829,14 +855,18 @@ grpc_cc_library(
grpc_cc_library(
name = "grpc_lb_policy_grpclb",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.c",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c",
"src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c",
"src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c",
],
hdrs = [
"src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h",
],
@ -853,14 +883,18 @@ grpc_cc_library(
grpc_cc_library(
name = "grpc_lb_policy_grpclb_secure",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.c",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c",
"src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c",
"src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c",
],
hdrs = [
"src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h",
],
@ -1069,8 +1103,8 @@ grpc_cc_library(
language = "c",
deps = [
"grpc_base",
"grpc_transport_chttp2_alpn",
"grpc_http_filters",
"grpc_transport_chttp2_alpn",
],
)
@ -1222,15 +1256,11 @@ grpc_cc_library(
language = "c",
deps = [
"gpr",
"grpc_trace",
],
)
grpc_cc_libraries(
name_list = ["grpc++_base", "grpc++_base_unsecure"],
additional_dep_list = [
["grpc", ],
["grpc_unsecure", ],
],
srcs = [
"src/cpp/client/channel_cc.cc",
"src/cpp/client/client_context.cc",
@ -1265,7 +1295,7 @@ grpc_cc_libraries(
"src/cpp/util/status.cc",
"src/cpp/util/string_ref.cc",
"src/cpp/util/time_cc.cc",
],
],
hdrs = [
"src/cpp/client/create_channel_internal.h",
"src/cpp/common/channel_filter.h",
@ -1274,8 +1304,16 @@ grpc_cc_libraries(
"src/cpp/server/health/health.pb.h",
"src/cpp/server/thread_pool_interface.h",
"src/cpp/thread_manager/thread_manager.h",
],
],
additional_dep_list = [
["grpc"],
["grpc_unsecure"],
],
language = "c++",
name_list = [
"grpc++_base",
"grpc++_base_unsecure",
],
public_hdrs = [
"include/grpc++/alarm.h",
"include/grpc++/channel.h",
@ -1324,7 +1362,7 @@ grpc_cc_libraries(
"include/grpc++/support/stub_options.h",
"include/grpc++/support/sync_stream.h",
"include/grpc++/support/time.h",
],
],
deps = [
"grpc++_codegen_base",
],

@ -347,6 +347,7 @@ add_custom_target(plugins
add_custom_target(tools_c
DEPENDS
check_epollexclusive
gen_hpack_tables
gen_legal_metadata_characters
gen_percent_encoding_tables
@ -392,7 +393,7 @@ endif()
add_dependencies(buildtests_c endpoint_pair_test)
add_dependencies(buildtests_c error_test)
if(_gRPC_PLATFORM_LINUX)
add_dependencies(buildtests_c ev_epoll_linux_test)
add_dependencies(buildtests_c ev_epollsig_linux_test)
endif()
add_dependencies(buildtests_c fake_resolver_test)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
@ -676,6 +677,7 @@ add_dependencies(buildtests_cxx golden_file_test)
add_dependencies(buildtests_cxx grpc_cli)
add_dependencies(buildtests_cxx grpc_tool_test)
add_dependencies(buildtests_cxx grpclb_api_test)
add_dependencies(buildtests_cxx grpclb_end2end_test)
add_dependencies(buildtests_cxx grpclb_test)
add_dependencies(buildtests_cxx health_service_end2end_test)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
@ -926,7 +928,6 @@ add_library(grpc
src/core/lib/channel/handshaker_registry.c
src/core/lib/compression/compression.c
src/core/lib/compression/message_compress.c
src/core/lib/debug/trace.c
src/core/lib/http/format_request.c
src/core/lib/http/httpcli.c
src/core/lib/http/parser.c
@ -937,7 +938,11 @@ add_library(grpc
src/core/lib/iomgr/endpoint_pair_uv.c
src/core/lib/iomgr/endpoint_pair_windows.c
src/core/lib/iomgr/error.c
src/core/lib/iomgr/ev_epoll_linux.c
src/core/lib/iomgr/ev_epoll1_linux.c
src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c
src/core/lib/iomgr/ev_epoll_thread_pool_linux.c
src/core/lib/iomgr/ev_epollex_linux.c
src/core/lib/iomgr/ev_epollsig_linux.c
src/core/lib/iomgr/ev_poll_posix.c
src/core/lib/iomgr/ev_posix.c
src/core/lib/iomgr/exec_ctx.c
@ -947,6 +952,7 @@ add_library(grpc
src/core/lib/iomgr/iomgr_posix.c
src/core/lib/iomgr/iomgr_uv.c
src/core/lib/iomgr/iomgr_windows.c
src/core/lib/iomgr/is_epollexclusive_available.c
src/core/lib/iomgr/load_file.c
src/core/lib/iomgr/lockfree_event.c
src/core/lib/iomgr/network_status_tracker.c
@ -983,6 +989,7 @@ add_library(grpc
src/core/lib/iomgr/time_averaged_stats.c
src/core/lib/iomgr/timer_generic.c
src/core/lib/iomgr/timer_heap.c
src/core/lib/iomgr/timer_manager.c
src/core/lib/iomgr/timer_uv.c
src/core/lib/iomgr/udp_server.c
src/core/lib/iomgr/unix_sockets_posix.c
@ -1037,6 +1044,7 @@ add_library(grpc
src/core/lib/transport/timeout_encoding.c
src/core/lib/transport/transport.c
src/core/lib/transport/transport_op_string.c
src/core/lib/debug/trace.c
src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
src/core/ext/transport/chttp2/transport/bin_decoder.c
src/core/ext/transport/chttp2/transport/bin_encoder.c
@ -1120,8 +1128,10 @@ add_library(grpc
src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c
src/core/ext/transport/chttp2/client/insecure/channel_create.c
src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c
src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.c
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c
src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c
third_party/nanopb/pb_common.c
@ -1252,7 +1262,6 @@ add_library(grpc_cronet
src/core/lib/channel/handshaker_registry.c
src/core/lib/compression/compression.c
src/core/lib/compression/message_compress.c
src/core/lib/debug/trace.c
src/core/lib/http/format_request.c
src/core/lib/http/httpcli.c
src/core/lib/http/parser.c
@ -1263,7 +1272,11 @@ add_library(grpc_cronet
src/core/lib/iomgr/endpoint_pair_uv.c
src/core/lib/iomgr/endpoint_pair_windows.c
src/core/lib/iomgr/error.c
src/core/lib/iomgr/ev_epoll_linux.c
src/core/lib/iomgr/ev_epoll1_linux.c
src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c
src/core/lib/iomgr/ev_epoll_thread_pool_linux.c
src/core/lib/iomgr/ev_epollex_linux.c
src/core/lib/iomgr/ev_epollsig_linux.c
src/core/lib/iomgr/ev_poll_posix.c
src/core/lib/iomgr/ev_posix.c
src/core/lib/iomgr/exec_ctx.c
@ -1273,6 +1286,7 @@ add_library(grpc_cronet
src/core/lib/iomgr/iomgr_posix.c
src/core/lib/iomgr/iomgr_uv.c
src/core/lib/iomgr/iomgr_windows.c
src/core/lib/iomgr/is_epollexclusive_available.c
src/core/lib/iomgr/load_file.c
src/core/lib/iomgr/lockfree_event.c
src/core/lib/iomgr/network_status_tracker.c
@ -1309,6 +1323,7 @@ add_library(grpc_cronet
src/core/lib/iomgr/time_averaged_stats.c
src/core/lib/iomgr/timer_generic.c
src/core/lib/iomgr/timer_heap.c
src/core/lib/iomgr/timer_manager.c
src/core/lib/iomgr/timer_uv.c
src/core/lib/iomgr/udp_server.c
src/core/lib/iomgr/unix_sockets_posix.c
@ -1363,6 +1378,7 @@ add_library(grpc_cronet
src/core/lib/transport/timeout_encoding.c
src/core/lib/transport/transport.c
src/core/lib/transport/transport_op_string.c
src/core/lib/debug/trace.c
src/core/ext/transport/cronet/client/secure/cronet_channel_create.c
src/core/ext/transport/cronet/transport/cronet_api_dummy.c
src/core/ext/transport/cronet/transport/cronet_transport.c
@ -1563,7 +1579,6 @@ add_library(grpc_test_util
src/core/lib/channel/handshaker_registry.c
src/core/lib/compression/compression.c
src/core/lib/compression/message_compress.c
src/core/lib/debug/trace.c
src/core/lib/http/format_request.c
src/core/lib/http/httpcli.c
src/core/lib/http/parser.c
@ -1574,7 +1589,11 @@ add_library(grpc_test_util
src/core/lib/iomgr/endpoint_pair_uv.c
src/core/lib/iomgr/endpoint_pair_windows.c
src/core/lib/iomgr/error.c
src/core/lib/iomgr/ev_epoll_linux.c
src/core/lib/iomgr/ev_epoll1_linux.c
src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c
src/core/lib/iomgr/ev_epoll_thread_pool_linux.c
src/core/lib/iomgr/ev_epollex_linux.c
src/core/lib/iomgr/ev_epollsig_linux.c
src/core/lib/iomgr/ev_poll_posix.c
src/core/lib/iomgr/ev_posix.c
src/core/lib/iomgr/exec_ctx.c
@ -1584,6 +1603,7 @@ add_library(grpc_test_util
src/core/lib/iomgr/iomgr_posix.c
src/core/lib/iomgr/iomgr_uv.c
src/core/lib/iomgr/iomgr_windows.c
src/core/lib/iomgr/is_epollexclusive_available.c
src/core/lib/iomgr/load_file.c
src/core/lib/iomgr/lockfree_event.c
src/core/lib/iomgr/network_status_tracker.c
@ -1620,6 +1640,7 @@ add_library(grpc_test_util
src/core/lib/iomgr/time_averaged_stats.c
src/core/lib/iomgr/timer_generic.c
src/core/lib/iomgr/timer_heap.c
src/core/lib/iomgr/timer_manager.c
src/core/lib/iomgr/timer_uv.c
src/core/lib/iomgr/udp_server.c
src/core/lib/iomgr/unix_sockets_posix.c
@ -1674,6 +1695,7 @@ add_library(grpc_test_util
src/core/lib/transport/timeout_encoding.c
src/core/lib/transport/transport.c
src/core/lib/transport/transport_op_string.c
src/core/lib/debug/trace.c
)
if(WIN32 AND MSVC)
@ -1819,7 +1841,6 @@ add_library(grpc_unsecure
src/core/lib/channel/handshaker_registry.c
src/core/lib/compression/compression.c
src/core/lib/compression/message_compress.c
src/core/lib/debug/trace.c
src/core/lib/http/format_request.c
src/core/lib/http/httpcli.c
src/core/lib/http/parser.c
@ -1830,7 +1851,11 @@ add_library(grpc_unsecure
src/core/lib/iomgr/endpoint_pair_uv.c
src/core/lib/iomgr/endpoint_pair_windows.c
src/core/lib/iomgr/error.c
src/core/lib/iomgr/ev_epoll_linux.c
src/core/lib/iomgr/ev_epoll1_linux.c
src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c
src/core/lib/iomgr/ev_epoll_thread_pool_linux.c
src/core/lib/iomgr/ev_epollex_linux.c
src/core/lib/iomgr/ev_epollsig_linux.c
src/core/lib/iomgr/ev_poll_posix.c
src/core/lib/iomgr/ev_posix.c
src/core/lib/iomgr/exec_ctx.c
@ -1840,6 +1865,7 @@ add_library(grpc_unsecure
src/core/lib/iomgr/iomgr_posix.c
src/core/lib/iomgr/iomgr_uv.c
src/core/lib/iomgr/iomgr_windows.c
src/core/lib/iomgr/is_epollexclusive_available.c
src/core/lib/iomgr/load_file.c
src/core/lib/iomgr/lockfree_event.c
src/core/lib/iomgr/network_status_tracker.c
@ -1876,6 +1902,7 @@ add_library(grpc_unsecure
src/core/lib/iomgr/time_averaged_stats.c
src/core/lib/iomgr/timer_generic.c
src/core/lib/iomgr/timer_heap.c
src/core/lib/iomgr/timer_manager.c
src/core/lib/iomgr/timer_uv.c
src/core/lib/iomgr/udp_server.c
src/core/lib/iomgr/unix_sockets_posix.c
@ -1930,6 +1957,7 @@ add_library(grpc_unsecure
src/core/lib/transport/timeout_encoding.c
src/core/lib/transport/transport.c
src/core/lib/transport/transport_op_string.c
src/core/lib/debug/trace.c
src/core/ext/transport/chttp2/server/insecure/server_chttp2.c
src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c
src/core/ext/transport/chttp2/transport/bin_decoder.c
@ -1990,8 +2018,10 @@ add_library(grpc_unsecure
src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c
src/core/ext/filters/load_reporting/load_reporting.c
src/core/ext/filters/load_reporting/load_reporting_filter.c
src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.c
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c
src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c
third_party/nanopb/pb_common.c
@ -2238,7 +2268,6 @@ add_library(grpc++
src/core/lib/channel/handshaker_registry.c
src/core/lib/compression/compression.c
src/core/lib/compression/message_compress.c
src/core/lib/debug/trace.c
src/core/lib/http/format_request.c
src/core/lib/http/httpcli.c
src/core/lib/http/parser.c
@ -2249,7 +2278,11 @@ add_library(grpc++
src/core/lib/iomgr/endpoint_pair_uv.c
src/core/lib/iomgr/endpoint_pair_windows.c
src/core/lib/iomgr/error.c
src/core/lib/iomgr/ev_epoll_linux.c
src/core/lib/iomgr/ev_epoll1_linux.c
src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c
src/core/lib/iomgr/ev_epoll_thread_pool_linux.c
src/core/lib/iomgr/ev_epollex_linux.c
src/core/lib/iomgr/ev_epollsig_linux.c
src/core/lib/iomgr/ev_poll_posix.c
src/core/lib/iomgr/ev_posix.c
src/core/lib/iomgr/exec_ctx.c
@ -2259,6 +2292,7 @@ add_library(grpc++
src/core/lib/iomgr/iomgr_posix.c
src/core/lib/iomgr/iomgr_uv.c
src/core/lib/iomgr/iomgr_windows.c
src/core/lib/iomgr/is_epollexclusive_available.c
src/core/lib/iomgr/load_file.c
src/core/lib/iomgr/lockfree_event.c
src/core/lib/iomgr/network_status_tracker.c
@ -2295,6 +2329,7 @@ add_library(grpc++
src/core/lib/iomgr/time_averaged_stats.c
src/core/lib/iomgr/timer_generic.c
src/core/lib/iomgr/timer_heap.c
src/core/lib/iomgr/timer_manager.c
src/core/lib/iomgr/timer_uv.c
src/core/lib/iomgr/udp_server.c
src/core/lib/iomgr/unix_sockets_posix.c
@ -2349,6 +2384,7 @@ add_library(grpc++
src/core/lib/transport/timeout_encoding.c
src/core/lib/transport/transport.c
src/core/lib/transport/transport_op_string.c
src/core/lib/debug/trace.c
third_party/nanopb/pb_common.c
third_party/nanopb/pb_decode.c
third_party/nanopb/pb_encode.c
@ -2563,7 +2599,6 @@ add_library(grpc++_cronet
src/core/lib/channel/handshaker_registry.c
src/core/lib/compression/compression.c
src/core/lib/compression/message_compress.c
src/core/lib/debug/trace.c
src/core/lib/http/format_request.c
src/core/lib/http/httpcli.c
src/core/lib/http/parser.c
@ -2574,7 +2609,11 @@ add_library(grpc++_cronet
src/core/lib/iomgr/endpoint_pair_uv.c
src/core/lib/iomgr/endpoint_pair_windows.c
src/core/lib/iomgr/error.c
src/core/lib/iomgr/ev_epoll_linux.c
src/core/lib/iomgr/ev_epoll1_linux.c
src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c
src/core/lib/iomgr/ev_epoll_thread_pool_linux.c
src/core/lib/iomgr/ev_epollex_linux.c
src/core/lib/iomgr/ev_epollsig_linux.c
src/core/lib/iomgr/ev_poll_posix.c
src/core/lib/iomgr/ev_posix.c
src/core/lib/iomgr/exec_ctx.c
@ -2584,6 +2623,7 @@ add_library(grpc++_cronet
src/core/lib/iomgr/iomgr_posix.c
src/core/lib/iomgr/iomgr_uv.c
src/core/lib/iomgr/iomgr_windows.c
src/core/lib/iomgr/is_epollexclusive_available.c
src/core/lib/iomgr/load_file.c
src/core/lib/iomgr/lockfree_event.c
src/core/lib/iomgr/network_status_tracker.c
@ -2620,6 +2660,7 @@ add_library(grpc++_cronet
src/core/lib/iomgr/time_averaged_stats.c
src/core/lib/iomgr/timer_generic.c
src/core/lib/iomgr/timer_heap.c
src/core/lib/iomgr/timer_manager.c
src/core/lib/iomgr/timer_uv.c
src/core/lib/iomgr/udp_server.c
src/core/lib/iomgr/unix_sockets_posix.c
@ -2674,6 +2715,7 @@ add_library(grpc++_cronet
src/core/lib/transport/timeout_encoding.c
src/core/lib/transport/transport.c
src/core/lib/transport/transport_op_string.c
src/core/lib/debug/trace.c
third_party/nanopb/pb_common.c
third_party/nanopb/pb_decode.c
third_party/nanopb/pb_encode.c
@ -3332,7 +3374,6 @@ add_library(grpc++_unsecure
src/core/lib/channel/handshaker_registry.c
src/core/lib/compression/compression.c
src/core/lib/compression/message_compress.c
src/core/lib/debug/trace.c
src/core/lib/http/format_request.c
src/core/lib/http/httpcli.c
src/core/lib/http/parser.c
@ -3343,7 +3384,11 @@ add_library(grpc++_unsecure
src/core/lib/iomgr/endpoint_pair_uv.c
src/core/lib/iomgr/endpoint_pair_windows.c
src/core/lib/iomgr/error.c
src/core/lib/iomgr/ev_epoll_linux.c
src/core/lib/iomgr/ev_epoll1_linux.c
src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c
src/core/lib/iomgr/ev_epoll_thread_pool_linux.c
src/core/lib/iomgr/ev_epollex_linux.c
src/core/lib/iomgr/ev_epollsig_linux.c
src/core/lib/iomgr/ev_poll_posix.c
src/core/lib/iomgr/ev_posix.c
src/core/lib/iomgr/exec_ctx.c
@ -3353,6 +3398,7 @@ add_library(grpc++_unsecure
src/core/lib/iomgr/iomgr_posix.c
src/core/lib/iomgr/iomgr_uv.c
src/core/lib/iomgr/iomgr_windows.c
src/core/lib/iomgr/is_epollexclusive_available.c
src/core/lib/iomgr/load_file.c
src/core/lib/iomgr/lockfree_event.c
src/core/lib/iomgr/network_status_tracker.c
@ -3389,6 +3435,7 @@ add_library(grpc++_unsecure
src/core/lib/iomgr/time_averaged_stats.c
src/core/lib/iomgr/timer_generic.c
src/core/lib/iomgr/timer_heap.c
src/core/lib/iomgr/timer_manager.c
src/core/lib/iomgr/timer_uv.c
src/core/lib/iomgr/udp_server.c
src/core/lib/iomgr/unix_sockets_posix.c
@ -3443,6 +3490,7 @@ add_library(grpc++_unsecure
src/core/lib/transport/timeout_encoding.c
src/core/lib/transport/transport.c
src/core/lib/transport/transport_op_string.c
src/core/lib/debug/trace.c
third_party/nanopb/pb_common.c
third_party/nanopb/pb_decode.c
third_party/nanopb/pb_encode.c
@ -5065,6 +5113,42 @@ target_link_libraries(channel_create_test
)
endif (gRPC_BUILD_TESTS)
add_executable(check_epollexclusive
test/build/check_epollexclusive.c
)
target_include_directories(check_epollexclusive
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CARES_BUILD_INCLUDE_DIR}
PRIVATE ${CARES_INCLUDE_DIR}
PRIVATE ${CARES_PLATFORM_INCLUDE_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
)
target_link_libraries(check_epollexclusive
${_gRPC_ALLTARGETS_LIBRARIES}
grpc
gpr
)
if (gRPC_INSTALL)
install(TARGETS check_epollexclusive EXPORT gRPCTargets
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
)
endif()
if (gRPC_BUILD_TESTS)
add_executable(chttp2_hpack_encoder_test
@ -5442,12 +5526,12 @@ endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX)
add_executable(ev_epoll_linux_test
test/core/iomgr/ev_epoll_linux_test.c
add_executable(ev_epollsig_linux_test
test/core/iomgr/ev_epollsig_linux_test.c
)
target_include_directories(ev_epoll_linux_test
target_include_directories(ev_epollsig_linux_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
@ -5462,7 +5546,7 @@ target_include_directories(ev_epoll_linux_test
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
)
target_link_libraries(ev_epoll_linux_test
target_link_libraries(ev_epollsig_linux_test
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_test_util
grpc
@ -10746,6 +10830,55 @@ target_link_libraries(grpclb_api_test
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(grpclb_end2end_test
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v1/load_balancer.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v1/load_balancer.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v1/load_balancer.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v1/load_balancer.grpc.pb.h
test/cpp/end2end/grpclb_end2end_test.cc
third_party/googletest/googletest/src/gtest-all.cc
third_party/googletest/googlemock/src/gmock-all.cc
)
protobuf_generate_grpc_cpp(
src/proto/grpc/lb/v1/load_balancer.proto
)
target_include_directories(grpclb_end2end_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CARES_BUILD_INCLUDE_DIR}
PRIVATE ${CARES_INCLUDE_DIR}
PRIVATE ${CARES_PLATFORM_INCLUDE_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
PRIVATE third_party/googletest/googletest/include
PRIVATE third_party/googletest/googletest
PRIVATE third_party/googletest/googlemock/include
PRIVATE third_party/googletest/googlemock
PRIVATE ${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(grpclb_end2end_test
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
grpc++_test_util
grpc_test_util
grpc++
grpc
gpr_test_util
gpr
${_gRPC_GFLAGS_LIBRARIES}
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(grpclb_test
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v1/load_balancer.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v1/load_balancer.grpc.pb.cc

@ -51,6 +51,15 @@ If you plan to build from source and run tests, install the following as well:
$ brew install gflags
```
*Tip*: when building,
you *may* want to explicitly set the `LIBTOOL` and `LIBTOOLIZE`
environment variables when running `make` to ensure the version
installed by `brew` is being used:
```sh
$ LIBTOOL=glibtool LIBTOOLIZE=glibtoolize make
```
## Protoc
By default gRPC uses [protocol buffers](https://github.com/google/protobuf),

@ -308,10 +308,6 @@ else
TMPOUT = `mktemp /tmp/test-out-XXXXXX`
endif
# Detect if we can use C++11
CXX11_CHECK_CMD = $(CXX) -std=c++11 -o $(TMPOUT) -c test/build/c++11.cc
HAS_CXX11 = $(shell $(CXX11_CHECK_CMD) 2> /dev/null && echo true || echo false)
CHECK_SHADOW_WORKS_CMD = $(CC) -std=c99 -Werror -Wshadow -o $(TMPOUT) -c test/build/shadow.c
HAS_WORKING_SHADOW = $(shell $(CHECK_SHADOW_WORKS_CMD) 2> /dev/null && echo true || echo false)
ifeq ($(HAS_WORKING_SHADOW),true)
@ -342,11 +338,7 @@ HOST_LD ?= $(LD)
HOST_LDXX ?= $(LDXX)
CFLAGS += -std=c99 -Wsign-conversion -Wconversion $(W_SHADOW) $(W_EXTRA_SEMI)
ifeq ($(HAS_CXX11),true)
CXXFLAGS += -std=c++11
else
CXXFLAGS += -std=c++0x
endif
CPPFLAGS += -g -Wall -Wextra -Werror -Wno-long-long -Wno-unused-parameter -DOSATOMIC_USE_INLINED=1
LDFLAGS += -g
@ -978,6 +970,7 @@ census_context_test: $(BINDIR)/$(CONFIG)/census_context_test
census_resource_test: $(BINDIR)/$(CONFIG)/census_resource_test
census_trace_context_test: $(BINDIR)/$(CONFIG)/census_trace_context_test
channel_create_test: $(BINDIR)/$(CONFIG)/channel_create_test
check_epollexclusive: $(BINDIR)/$(CONFIG)/check_epollexclusive
chttp2_hpack_encoder_test: $(BINDIR)/$(CONFIG)/chttp2_hpack_encoder_test
chttp2_stream_map_test: $(BINDIR)/$(CONFIG)/chttp2_stream_map_test
chttp2_varint_test: $(BINDIR)/$(CONFIG)/chttp2_varint_test
@ -991,7 +984,7 @@ dns_resolver_test: $(BINDIR)/$(CONFIG)/dns_resolver_test
dualstack_socket_test: $(BINDIR)/$(CONFIG)/dualstack_socket_test
endpoint_pair_test: $(BINDIR)/$(CONFIG)/endpoint_pair_test
error_test: $(BINDIR)/$(CONFIG)/error_test
ev_epoll_linux_test: $(BINDIR)/$(CONFIG)/ev_epoll_linux_test
ev_epollsig_linux_test: $(BINDIR)/$(CONFIG)/ev_epollsig_linux_test
fake_resolver_test: $(BINDIR)/$(CONFIG)/fake_resolver_test
fd_conservation_posix_test: $(BINDIR)/$(CONFIG)/fd_conservation_posix_test
fd_posix_test: $(BINDIR)/$(CONFIG)/fd_posix_test
@ -1154,6 +1147,7 @@ grpc_python_plugin: $(BINDIR)/$(CONFIG)/grpc_python_plugin
grpc_ruby_plugin: $(BINDIR)/$(CONFIG)/grpc_ruby_plugin
grpc_tool_test: $(BINDIR)/$(CONFIG)/grpc_tool_test
grpclb_api_test: $(BINDIR)/$(CONFIG)/grpclb_api_test
grpclb_end2end_test: $(BINDIR)/$(CONFIG)/grpclb_end2end_test
grpclb_test: $(BINDIR)/$(CONFIG)/grpclb_test
health_service_end2end_test: $(BINDIR)/$(CONFIG)/health_service_end2end_test
http2_client: $(BINDIR)/$(CONFIG)/http2_client
@ -1379,7 +1373,7 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/dualstack_socket_test \
$(BINDIR)/$(CONFIG)/endpoint_pair_test \
$(BINDIR)/$(CONFIG)/error_test \
$(BINDIR)/$(CONFIG)/ev_epoll_linux_test \
$(BINDIR)/$(CONFIG)/ev_epollsig_linux_test \
$(BINDIR)/$(CONFIG)/fake_resolver_test \
$(BINDIR)/$(CONFIG)/fd_conservation_posix_test \
$(BINDIR)/$(CONFIG)/fd_posix_test \
@ -1578,6 +1572,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/grpc_cli \
$(BINDIR)/$(CONFIG)/grpc_tool_test \
$(BINDIR)/$(CONFIG)/grpclb_api_test \
$(BINDIR)/$(CONFIG)/grpclb_end2end_test \
$(BINDIR)/$(CONFIG)/grpclb_test \
$(BINDIR)/$(CONFIG)/health_service_end2end_test \
$(BINDIR)/$(CONFIG)/http2_client \
@ -1699,6 +1694,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/grpc_cli \
$(BINDIR)/$(CONFIG)/grpc_tool_test \
$(BINDIR)/$(CONFIG)/grpclb_api_test \
$(BINDIR)/$(CONFIG)/grpclb_end2end_test \
$(BINDIR)/$(CONFIG)/grpclb_test \
$(BINDIR)/$(CONFIG)/health_service_end2end_test \
$(BINDIR)/$(CONFIG)/http2_client \
@ -1793,8 +1789,8 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/endpoint_pair_test || ( echo test endpoint_pair_test failed ; exit 1 )
$(E) "[RUN] Testing error_test"
$(Q) $(BINDIR)/$(CONFIG)/error_test || ( echo test error_test failed ; exit 1 )
$(E) "[RUN] Testing ev_epoll_linux_test"
$(Q) $(BINDIR)/$(CONFIG)/ev_epoll_linux_test || ( echo test ev_epoll_linux_test failed ; exit 1 )
$(E) "[RUN] Testing ev_epollsig_linux_test"
$(Q) $(BINDIR)/$(CONFIG)/ev_epollsig_linux_test || ( echo test ev_epollsig_linux_test failed ; exit 1 )
$(E) "[RUN] Testing fake_resolver_test"
$(Q) $(BINDIR)/$(CONFIG)/fake_resolver_test || ( echo test fake_resolver_test failed ; exit 1 )
$(E) "[RUN] Testing fd_conservation_posix_test"
@ -2081,6 +2077,8 @@ test_cxx: buildtests_cxx
$(Q) $(BINDIR)/$(CONFIG)/grpc_tool_test || ( echo test grpc_tool_test failed ; exit 1 )
$(E) "[RUN] Testing grpclb_api_test"
$(Q) $(BINDIR)/$(CONFIG)/grpclb_api_test || ( echo test grpclb_api_test failed ; exit 1 )
$(E) "[RUN] Testing grpclb_end2end_test"
$(Q) $(BINDIR)/$(CONFIG)/grpclb_end2end_test || ( echo test grpclb_end2end_test failed ; exit 1 )
$(E) "[RUN] Testing grpclb_test"
$(Q) $(BINDIR)/$(CONFIG)/grpclb_test || ( echo test grpclb_test failed ; exit 1 )
$(E) "[RUN] Testing health_service_end2end_test"
@ -2140,7 +2138,7 @@ test_python: static_c
tools: tools_c tools_cxx
tools_c: privatelibs_c $(BINDIR)/$(CONFIG)/gen_hpack_tables $(BINDIR)/$(CONFIG)/gen_legal_metadata_characters $(BINDIR)/$(CONFIG)/gen_percent_encoding_tables $(BINDIR)/$(CONFIG)/grpc_create_jwt $(BINDIR)/$(CONFIG)/grpc_print_google_default_creds_token $(BINDIR)/$(CONFIG)/grpc_verify_jwt
tools_c: privatelibs_c $(BINDIR)/$(CONFIG)/check_epollexclusive $(BINDIR)/$(CONFIG)/gen_hpack_tables $(BINDIR)/$(CONFIG)/gen_legal_metadata_characters $(BINDIR)/$(CONFIG)/gen_percent_encoding_tables $(BINDIR)/$(CONFIG)/grpc_create_jwt $(BINDIR)/$(CONFIG)/grpc_print_google_default_creds_token $(BINDIR)/$(CONFIG)/grpc_verify_jwt
tools_cxx: privatelibs_cxx
@ -2905,7 +2903,6 @@ LIBGRPC_SRC = \
src/core/lib/channel/handshaker_registry.c \
src/core/lib/compression/compression.c \
src/core/lib/compression/message_compress.c \
src/core/lib/debug/trace.c \
src/core/lib/http/format_request.c \
src/core/lib/http/httpcli.c \
src/core/lib/http/parser.c \
@ -2916,7 +2913,11 @@ LIBGRPC_SRC = \
src/core/lib/iomgr/endpoint_pair_uv.c \
src/core/lib/iomgr/endpoint_pair_windows.c \
src/core/lib/iomgr/error.c \
src/core/lib/iomgr/ev_epoll_linux.c \
src/core/lib/iomgr/ev_epoll1_linux.c \
src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c \
src/core/lib/iomgr/ev_epoll_thread_pool_linux.c \
src/core/lib/iomgr/ev_epollex_linux.c \
src/core/lib/iomgr/ev_epollsig_linux.c \
src/core/lib/iomgr/ev_poll_posix.c \
src/core/lib/iomgr/ev_posix.c \
src/core/lib/iomgr/exec_ctx.c \
@ -2926,6 +2927,7 @@ LIBGRPC_SRC = \
src/core/lib/iomgr/iomgr_posix.c \
src/core/lib/iomgr/iomgr_uv.c \
src/core/lib/iomgr/iomgr_windows.c \
src/core/lib/iomgr/is_epollexclusive_available.c \
src/core/lib/iomgr/load_file.c \
src/core/lib/iomgr/lockfree_event.c \
src/core/lib/iomgr/network_status_tracker.c \
@ -2962,6 +2964,7 @@ LIBGRPC_SRC = \
src/core/lib/iomgr/time_averaged_stats.c \
src/core/lib/iomgr/timer_generic.c \
src/core/lib/iomgr/timer_heap.c \
src/core/lib/iomgr/timer_manager.c \
src/core/lib/iomgr/timer_uv.c \
src/core/lib/iomgr/udp_server.c \
src/core/lib/iomgr/unix_sockets_posix.c \
@ -3016,6 +3019,7 @@ LIBGRPC_SRC = \
src/core/lib/transport/timeout_encoding.c \
src/core/lib/transport/transport.c \
src/core/lib/transport/transport_op_string.c \
src/core/lib/debug/trace.c \
src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c \
src/core/ext/transport/chttp2/transport/bin_decoder.c \
src/core/ext/transport/chttp2/transport/bin_encoder.c \
@ -3099,8 +3103,10 @@ LIBGRPC_SRC = \
src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c \
src/core/ext/transport/chttp2/client/insecure/channel_create.c \
src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c \
src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.c \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c \
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c \
src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c \
third_party/nanopb/pb_common.c \
@ -3229,7 +3235,6 @@ LIBGRPC_CRONET_SRC = \
src/core/lib/channel/handshaker_registry.c \
src/core/lib/compression/compression.c \
src/core/lib/compression/message_compress.c \
src/core/lib/debug/trace.c \
src/core/lib/http/format_request.c \
src/core/lib/http/httpcli.c \
src/core/lib/http/parser.c \
@ -3240,7 +3245,11 @@ LIBGRPC_CRONET_SRC = \
src/core/lib/iomgr/endpoint_pair_uv.c \
src/core/lib/iomgr/endpoint_pair_windows.c \
src/core/lib/iomgr/error.c \
src/core/lib/iomgr/ev_epoll_linux.c \
src/core/lib/iomgr/ev_epoll1_linux.c \
src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c \
src/core/lib/iomgr/ev_epoll_thread_pool_linux.c \
src/core/lib/iomgr/ev_epollex_linux.c \
src/core/lib/iomgr/ev_epollsig_linux.c \
src/core/lib/iomgr/ev_poll_posix.c \
src/core/lib/iomgr/ev_posix.c \
src/core/lib/iomgr/exec_ctx.c \
@ -3250,6 +3259,7 @@ LIBGRPC_CRONET_SRC = \
src/core/lib/iomgr/iomgr_posix.c \
src/core/lib/iomgr/iomgr_uv.c \
src/core/lib/iomgr/iomgr_windows.c \
src/core/lib/iomgr/is_epollexclusive_available.c \
src/core/lib/iomgr/load_file.c \
src/core/lib/iomgr/lockfree_event.c \
src/core/lib/iomgr/network_status_tracker.c \
@ -3286,6 +3296,7 @@ LIBGRPC_CRONET_SRC = \
src/core/lib/iomgr/time_averaged_stats.c \
src/core/lib/iomgr/timer_generic.c \
src/core/lib/iomgr/timer_heap.c \
src/core/lib/iomgr/timer_manager.c \
src/core/lib/iomgr/timer_uv.c \
src/core/lib/iomgr/udp_server.c \
src/core/lib/iomgr/unix_sockets_posix.c \
@ -3340,6 +3351,7 @@ LIBGRPC_CRONET_SRC = \
src/core/lib/transport/timeout_encoding.c \
src/core/lib/transport/transport.c \
src/core/lib/transport/transport_op_string.c \
src/core/lib/debug/trace.c \
src/core/ext/transport/cronet/client/secure/cronet_channel_create.c \
src/core/ext/transport/cronet/transport/cronet_api_dummy.c \
src/core/ext/transport/cronet/transport/cronet_transport.c \
@ -3539,7 +3551,6 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/lib/channel/handshaker_registry.c \
src/core/lib/compression/compression.c \
src/core/lib/compression/message_compress.c \
src/core/lib/debug/trace.c \
src/core/lib/http/format_request.c \
src/core/lib/http/httpcli.c \
src/core/lib/http/parser.c \
@ -3550,7 +3561,11 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/lib/iomgr/endpoint_pair_uv.c \
src/core/lib/iomgr/endpoint_pair_windows.c \
src/core/lib/iomgr/error.c \
src/core/lib/iomgr/ev_epoll_linux.c \
src/core/lib/iomgr/ev_epoll1_linux.c \
src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c \
src/core/lib/iomgr/ev_epoll_thread_pool_linux.c \
src/core/lib/iomgr/ev_epollex_linux.c \
src/core/lib/iomgr/ev_epollsig_linux.c \
src/core/lib/iomgr/ev_poll_posix.c \
src/core/lib/iomgr/ev_posix.c \
src/core/lib/iomgr/exec_ctx.c \
@ -3560,6 +3575,7 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/lib/iomgr/iomgr_posix.c \
src/core/lib/iomgr/iomgr_uv.c \
src/core/lib/iomgr/iomgr_windows.c \
src/core/lib/iomgr/is_epollexclusive_available.c \
src/core/lib/iomgr/load_file.c \
src/core/lib/iomgr/lockfree_event.c \
src/core/lib/iomgr/network_status_tracker.c \
@ -3596,6 +3612,7 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/lib/iomgr/time_averaged_stats.c \
src/core/lib/iomgr/timer_generic.c \
src/core/lib/iomgr/timer_heap.c \
src/core/lib/iomgr/timer_manager.c \
src/core/lib/iomgr/timer_uv.c \
src/core/lib/iomgr/udp_server.c \
src/core/lib/iomgr/unix_sockets_posix.c \
@ -3650,6 +3667,7 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/lib/transport/timeout_encoding.c \
src/core/lib/transport/transport.c \
src/core/lib/transport/transport_op_string.c \
src/core/lib/debug/trace.c \
PUBLIC_HEADERS_C += \
include/grpc/byte_buffer.h \
@ -3767,7 +3785,6 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/channel/handshaker_registry.c \
src/core/lib/compression/compression.c \
src/core/lib/compression/message_compress.c \
src/core/lib/debug/trace.c \
src/core/lib/http/format_request.c \
src/core/lib/http/httpcli.c \
src/core/lib/http/parser.c \
@ -3778,7 +3795,11 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/iomgr/endpoint_pair_uv.c \
src/core/lib/iomgr/endpoint_pair_windows.c \
src/core/lib/iomgr/error.c \
src/core/lib/iomgr/ev_epoll_linux.c \
src/core/lib/iomgr/ev_epoll1_linux.c \
src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c \
src/core/lib/iomgr/ev_epoll_thread_pool_linux.c \
src/core/lib/iomgr/ev_epollex_linux.c \
src/core/lib/iomgr/ev_epollsig_linux.c \
src/core/lib/iomgr/ev_poll_posix.c \
src/core/lib/iomgr/ev_posix.c \
src/core/lib/iomgr/exec_ctx.c \
@ -3788,6 +3809,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/iomgr/iomgr_posix.c \
src/core/lib/iomgr/iomgr_uv.c \
src/core/lib/iomgr/iomgr_windows.c \
src/core/lib/iomgr/is_epollexclusive_available.c \
src/core/lib/iomgr/load_file.c \
src/core/lib/iomgr/lockfree_event.c \
src/core/lib/iomgr/network_status_tracker.c \
@ -3824,6 +3846,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/iomgr/time_averaged_stats.c \
src/core/lib/iomgr/timer_generic.c \
src/core/lib/iomgr/timer_heap.c \
src/core/lib/iomgr/timer_manager.c \
src/core/lib/iomgr/timer_uv.c \
src/core/lib/iomgr/udp_server.c \
src/core/lib/iomgr/unix_sockets_posix.c \
@ -3878,6 +3901,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/transport/timeout_encoding.c \
src/core/lib/transport/transport.c \
src/core/lib/transport/transport_op_string.c \
src/core/lib/debug/trace.c \
src/core/ext/transport/chttp2/server/insecure/server_chttp2.c \
src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c \
src/core/ext/transport/chttp2/transport/bin_decoder.c \
@ -3938,8 +3962,10 @@ LIBGRPC_UNSECURE_SRC = \
src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c \
src/core/ext/filters/load_reporting/load_reporting.c \
src/core/ext/filters/load_reporting/load_reporting_filter.c \
src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.c \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c \
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c \
src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c \
third_party/nanopb/pb_common.c \
@ -4163,7 +4189,6 @@ LIBGRPC++_SRC = \
src/core/lib/channel/handshaker_registry.c \
src/core/lib/compression/compression.c \
src/core/lib/compression/message_compress.c \
src/core/lib/debug/trace.c \
src/core/lib/http/format_request.c \
src/core/lib/http/httpcli.c \
src/core/lib/http/parser.c \
@ -4174,7 +4199,11 @@ LIBGRPC++_SRC = \
src/core/lib/iomgr/endpoint_pair_uv.c \
src/core/lib/iomgr/endpoint_pair_windows.c \
src/core/lib/iomgr/error.c \
src/core/lib/iomgr/ev_epoll_linux.c \
src/core/lib/iomgr/ev_epoll1_linux.c \
src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c \
src/core/lib/iomgr/ev_epoll_thread_pool_linux.c \
src/core/lib/iomgr/ev_epollex_linux.c \
src/core/lib/iomgr/ev_epollsig_linux.c \
src/core/lib/iomgr/ev_poll_posix.c \
src/core/lib/iomgr/ev_posix.c \
src/core/lib/iomgr/exec_ctx.c \
@ -4184,6 +4213,7 @@ LIBGRPC++_SRC = \
src/core/lib/iomgr/iomgr_posix.c \
src/core/lib/iomgr/iomgr_uv.c \
src/core/lib/iomgr/iomgr_windows.c \
src/core/lib/iomgr/is_epollexclusive_available.c \
src/core/lib/iomgr/load_file.c \
src/core/lib/iomgr/lockfree_event.c \
src/core/lib/iomgr/network_status_tracker.c \
@ -4220,6 +4250,7 @@ LIBGRPC++_SRC = \
src/core/lib/iomgr/time_averaged_stats.c \
src/core/lib/iomgr/timer_generic.c \
src/core/lib/iomgr/timer_heap.c \
src/core/lib/iomgr/timer_manager.c \
src/core/lib/iomgr/timer_uv.c \
src/core/lib/iomgr/udp_server.c \
src/core/lib/iomgr/unix_sockets_posix.c \
@ -4274,6 +4305,7 @@ LIBGRPC++_SRC = \
src/core/lib/transport/timeout_encoding.c \
src/core/lib/transport/transport.c \
src/core/lib/transport/transport_op_string.c \
src/core/lib/debug/trace.c \
third_party/nanopb/pb_common.c \
third_party/nanopb/pb_decode.c \
third_party/nanopb/pb_encode.c \
@ -4496,7 +4528,6 @@ LIBGRPC++_CRONET_SRC = \
src/core/lib/channel/handshaker_registry.c \
src/core/lib/compression/compression.c \
src/core/lib/compression/message_compress.c \
src/core/lib/debug/trace.c \
src/core/lib/http/format_request.c \
src/core/lib/http/httpcli.c \
src/core/lib/http/parser.c \
@ -4507,7 +4538,11 @@ LIBGRPC++_CRONET_SRC = \
src/core/lib/iomgr/endpoint_pair_uv.c \
src/core/lib/iomgr/endpoint_pair_windows.c \
src/core/lib/iomgr/error.c \
src/core/lib/iomgr/ev_epoll_linux.c \
src/core/lib/iomgr/ev_epoll1_linux.c \
src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c \
src/core/lib/iomgr/ev_epoll_thread_pool_linux.c \
src/core/lib/iomgr/ev_epollex_linux.c \
src/core/lib/iomgr/ev_epollsig_linux.c \
src/core/lib/iomgr/ev_poll_posix.c \
src/core/lib/iomgr/ev_posix.c \
src/core/lib/iomgr/exec_ctx.c \
@ -4517,6 +4552,7 @@ LIBGRPC++_CRONET_SRC = \
src/core/lib/iomgr/iomgr_posix.c \
src/core/lib/iomgr/iomgr_uv.c \
src/core/lib/iomgr/iomgr_windows.c \
src/core/lib/iomgr/is_epollexclusive_available.c \
src/core/lib/iomgr/load_file.c \
src/core/lib/iomgr/lockfree_event.c \
src/core/lib/iomgr/network_status_tracker.c \
@ -4553,6 +4589,7 @@ LIBGRPC++_CRONET_SRC = \
src/core/lib/iomgr/time_averaged_stats.c \
src/core/lib/iomgr/timer_generic.c \
src/core/lib/iomgr/timer_heap.c \
src/core/lib/iomgr/timer_manager.c \
src/core/lib/iomgr/timer_uv.c \
src/core/lib/iomgr/udp_server.c \
src/core/lib/iomgr/unix_sockets_posix.c \
@ -4607,6 +4644,7 @@ LIBGRPC++_CRONET_SRC = \
src/core/lib/transport/timeout_encoding.c \
src/core/lib/transport/transport.c \
src/core/lib/transport/transport_op_string.c \
src/core/lib/debug/trace.c \
third_party/nanopb/pb_common.c \
third_party/nanopb/pb_decode.c \
third_party/nanopb/pb_encode.c \
@ -5255,7 +5293,6 @@ LIBGRPC++_UNSECURE_SRC = \
src/core/lib/channel/handshaker_registry.c \
src/core/lib/compression/compression.c \
src/core/lib/compression/message_compress.c \
src/core/lib/debug/trace.c \
src/core/lib/http/format_request.c \
src/core/lib/http/httpcli.c \
src/core/lib/http/parser.c \
@ -5266,7 +5303,11 @@ LIBGRPC++_UNSECURE_SRC = \
src/core/lib/iomgr/endpoint_pair_uv.c \
src/core/lib/iomgr/endpoint_pair_windows.c \
src/core/lib/iomgr/error.c \
src/core/lib/iomgr/ev_epoll_linux.c \
src/core/lib/iomgr/ev_epoll1_linux.c \
src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c \
src/core/lib/iomgr/ev_epoll_thread_pool_linux.c \
src/core/lib/iomgr/ev_epollex_linux.c \
src/core/lib/iomgr/ev_epollsig_linux.c \
src/core/lib/iomgr/ev_poll_posix.c \
src/core/lib/iomgr/ev_posix.c \
src/core/lib/iomgr/exec_ctx.c \
@ -5276,6 +5317,7 @@ LIBGRPC++_UNSECURE_SRC = \
src/core/lib/iomgr/iomgr_posix.c \
src/core/lib/iomgr/iomgr_uv.c \
src/core/lib/iomgr/iomgr_windows.c \
src/core/lib/iomgr/is_epollexclusive_available.c \
src/core/lib/iomgr/load_file.c \
src/core/lib/iomgr/lockfree_event.c \
src/core/lib/iomgr/network_status_tracker.c \
@ -5312,6 +5354,7 @@ LIBGRPC++_UNSECURE_SRC = \
src/core/lib/iomgr/time_averaged_stats.c \
src/core/lib/iomgr/timer_generic.c \
src/core/lib/iomgr/timer_heap.c \
src/core/lib/iomgr/timer_manager.c \
src/core/lib/iomgr/timer_uv.c \
src/core/lib/iomgr/udp_server.c \
src/core/lib/iomgr/unix_sockets_posix.c \
@ -5366,6 +5409,7 @@ LIBGRPC++_UNSECURE_SRC = \
src/core/lib/transport/timeout_encoding.c \
src/core/lib/transport/transport.c \
src/core/lib/transport/transport_op_string.c \
src/core/lib/debug/trace.c \
third_party/nanopb/pb_common.c \
third_party/nanopb/pb_decode.c \
third_party/nanopb/pb_encode.c \
@ -8270,8 +8314,8 @@ PUBLIC_HEADERS_C += \
LIBARES_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBARES_SRC))))
$(LIBARES_OBJS): CPPFLAGS += -Ithird_party/cares -Ithird_party/cares/cares $(if $(subst Linux,,$(SYSTEM)),,-Ithird_party/cares/config_linux) $(if $(subst Darwin,,$(SYSTEM)),,-Ithird_party/cares/config_darwin) -fvisibility=hidden -D_GNU_SOURCE -DWIN32_LEAN_AND_MEAN -D_HAS_EXCEPTIONS=0 -DNOMINMAX -DHAVE_CONFIG_H
$(LIBARES_OBJS): CFLAGS += -Wno-sign-conversion -Wno-invalid-source-encoding
$(LIBARES_OBJS): CPPFLAGS += -Ithird_party/cares -Ithird_party/cares/cares $(if $(subst Linux,,$(SYSTEM)),,-Ithird_party/cares/config_linux) $(if $(subst Darwin,,$(SYSTEM)),,-Ithird_party/cares/config_darwin) -fvisibility=hidden -D_GNU_SOURCE -DWIN32_LEAN_AND_MEAN -D_HAS_EXCEPTIONS=0 -DNOMINMAX $(if $(subst MINGW32,,$(SYSTEM)),-DHAVE_CONFIG_H,)
$(LIBARES_OBJS): CFLAGS += -Wno-sign-conversion $(if $(subst MINGW32,,$(SYSTEM)),-Wno-invalid-source-encoding,)
$(LIBDIR)/$(CONFIG)/libares.a: $(ZLIB_DEP) $(LIBARES_OBJS)
$(E) "[AR] Creating $@"
@ -8991,6 +9035,38 @@ endif
endif
CHECK_EPOLLEXCLUSIVE_SRC = \
test/build/check_epollexclusive.c \
CHECK_EPOLLEXCLUSIVE_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(CHECK_EPOLLEXCLUSIVE_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/check_epollexclusive: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/check_epollexclusive: $(CHECK_EPOLLEXCLUSIVE_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(CHECK_EPOLLEXCLUSIVE_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/check_epollexclusive
endif
$(OBJDIR)/$(CONFIG)/test/build/check_epollexclusive.o: $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_check_epollexclusive: $(CHECK_EPOLLEXCLUSIVE_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(CHECK_EPOLLEXCLUSIVE_OBJS:.o=.dep)
endif
endif
CHTTP2_HPACK_ENCODER_TEST_SRC = \
test/core/transport/chttp2/hpack_encoder_test.c \
@ -9407,34 +9483,34 @@ endif
endif
EV_EPOLL_LINUX_TEST_SRC = \
test/core/iomgr/ev_epoll_linux_test.c \
EV_EPOLLSIG_LINUX_TEST_SRC = \
test/core/iomgr/ev_epollsig_linux_test.c \
EV_EPOLL_LINUX_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(EV_EPOLL_LINUX_TEST_SRC))))
EV_EPOLLSIG_LINUX_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(EV_EPOLLSIG_LINUX_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/ev_epoll_linux_test: openssl_dep_error
$(BINDIR)/$(CONFIG)/ev_epollsig_linux_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/ev_epoll_linux_test: $(EV_EPOLL_LINUX_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(BINDIR)/$(CONFIG)/ev_epollsig_linux_test: $(EV_EPOLLSIG_LINUX_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(EV_EPOLL_LINUX_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/ev_epoll_linux_test
$(Q) $(LD) $(LDFLAGS) $(EV_EPOLLSIG_LINUX_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/ev_epollsig_linux_test
endif
$(OBJDIR)/$(CONFIG)/test/core/iomgr/ev_epoll_linux_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/test/core/iomgr/ev_epollsig_linux_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_ev_epoll_linux_test: $(EV_EPOLL_LINUX_TEST_OBJS:.o=.dep)
deps_ev_epollsig_linux_test: $(EV_EPOLLSIG_LINUX_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(EV_EPOLL_LINUX_TEST_OBJS:.o=.dep)
-include $(EV_EPOLLSIG_LINUX_TEST_OBJS:.o=.dep)
endif
endif
@ -15092,6 +15168,53 @@ endif
$(OBJDIR)/$(CONFIG)/test/cpp/grpclb/grpclb_api_test.o: $(GENDIR)/src/proto/grpc/lb/v1/load_balancer.pb.cc $(GENDIR)/src/proto/grpc/lb/v1/load_balancer.grpc.pb.cc
GRPCLB_END2END_TEST_SRC = \
$(GENDIR)/src/proto/grpc/lb/v1/load_balancer.pb.cc $(GENDIR)/src/proto/grpc/lb/v1/load_balancer.grpc.pb.cc \
test/cpp/end2end/grpclb_end2end_test.cc \
GRPCLB_END2END_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GRPCLB_END2END_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/grpclb_end2end_test: openssl_dep_error
else
ifeq ($(NO_PROTOBUF),true)
# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
$(BINDIR)/$(CONFIG)/grpclb_end2end_test: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/grpclb_end2end_test: $(PROTOBUF_DEP) $(GRPCLB_END2END_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(GRPCLB_END2END_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/grpclb_end2end_test
endif
endif
$(OBJDIR)/$(CONFIG)/src/proto/grpc/lb/v1/load_balancer.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/test/cpp/end2end/grpclb_end2end_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_grpclb_end2end_test: $(GRPCLB_END2END_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(GRPCLB_END2END_TEST_OBJS:.o=.dep)
endif
endif
$(OBJDIR)/$(CONFIG)/test/cpp/end2end/grpclb_end2end_test.o: $(GENDIR)/src/proto/grpc/lb/v1/load_balancer.pb.cc $(GENDIR)/src/proto/grpc/lb/v1/load_balancer.grpc.pb.cc
GRPCLB_TEST_SRC = \
$(GENDIR)/src/proto/grpc/lb/v1/load_balancer.pb.cc $(GENDIR)/src/proto/grpc/lb/v1/load_balancer.grpc.pb.cc \
test/cpp/grpclb/grpclb_test.cc \

@ -80,7 +80,7 @@ task 'dlls' do
grpc_config = ENV['GRPC_CONFIG'] || 'opt'
verbose = ENV['V'] || '0'
env = 'CPPFLAGS="-D_WIN32_WINNT=0x600 -DUNICODE -D_UNICODE -Wno-unused-variable -Wno-unused-result" '
env = 'CPPFLAGS="-D_WIN32_WINNT=0x600 -DUNICODE -D_UNICODE -Wno-unused-variable -Wno-unused-result -DCARES_STATICLIB" '
env += 'LDFLAGS=-static '
env += 'SYSTEM=MINGW32 '
env += 'EMBED_ZLIB=true '

@ -39,15 +39,16 @@
{
'variables': {
'runtime%': 'node',
# UV integration in C core is enabled by default. It can be disabled
# by setting this argument to anything else.
'grpc_uv%': 'true',
# Some Node installations use the system installation of OpenSSL, and on
# some systems, the system OpenSSL still does not have ALPN support. This
# will let users recompile gRPC to work without ALPN.
'grpc_alpn%': 'true',
# Indicates that the library should be built with gcov.
'grpc_gcov%': 'false'
'grpc_gcov%': 'false',
# Indicates that the library should be built with compatibility for musl
# libc, so that it can run on Alpine Linux. This is only necessary if not
# building on Alpine Linux.
'grpc_alpine%': 'false'
},
'target_defaults': {
'configurations': {
@ -86,17 +87,11 @@
'include'
],
'defines': [
'GPR_BACKWARDS_COMPATIBILITY_MODE'
'GPR_BACKWARDS_COMPATIBILITY_MODE',
'GRPC_ARES=0',
'GRPC_UV'
],
'conditions': [
['grpc_uv=="true"', {
'defines': [
'GRPC_ARES=0',
# Disabling this while bugs are ironed out. Uncomment this to
# re-enable libuv integration in C core.
'GRPC_UV'
]
}],
['grpc_gcov=="true"', {
'cflags': [
'-O0',
@ -115,6 +110,11 @@
'-rdynamic',
],
}],
['grpc_alpine=="true"', {
'defines': [
'GPR_MUSL_LIBC_COMPAT'
]
}],
['OS!="win" and runtime=="electron"', {
"defines": [
'OPENSSL_NO_THREADS'
@ -535,6 +535,10 @@
}
]
},
]
}],
['OS == "win"', {
'targets': [
# Only want to compile zlib under Windows
{
'cflags': [
@ -569,7 +573,6 @@
}]
],
'targets': [
{
'cflags': [
'-std=c99',
@ -648,7 +651,6 @@
'type': 'static_library',
'dependencies': [
'gpr',
'node_modules/cares/deps/cares/cares.gyp:cares',
],
'sources': [
'src/core/lib/surface/init.c',
@ -661,7 +663,6 @@
'src/core/lib/channel/handshaker_registry.c',
'src/core/lib/compression/compression.c',
'src/core/lib/compression/message_compress.c',
'src/core/lib/debug/trace.c',
'src/core/lib/http/format_request.c',
'src/core/lib/http/httpcli.c',
'src/core/lib/http/parser.c',
@ -672,7 +673,11 @@
'src/core/lib/iomgr/endpoint_pair_uv.c',
'src/core/lib/iomgr/endpoint_pair_windows.c',
'src/core/lib/iomgr/error.c',
'src/core/lib/iomgr/ev_epoll_linux.c',
'src/core/lib/iomgr/ev_epoll1_linux.c',
'src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c',
'src/core/lib/iomgr/ev_epoll_thread_pool_linux.c',
'src/core/lib/iomgr/ev_epollex_linux.c',
'src/core/lib/iomgr/ev_epollsig_linux.c',
'src/core/lib/iomgr/ev_poll_posix.c',
'src/core/lib/iomgr/ev_posix.c',
'src/core/lib/iomgr/exec_ctx.c',
@ -682,6 +687,7 @@
'src/core/lib/iomgr/iomgr_posix.c',
'src/core/lib/iomgr/iomgr_uv.c',
'src/core/lib/iomgr/iomgr_windows.c',
'src/core/lib/iomgr/is_epollexclusive_available.c',
'src/core/lib/iomgr/load_file.c',
'src/core/lib/iomgr/lockfree_event.c',
'src/core/lib/iomgr/network_status_tracker.c',
@ -718,6 +724,7 @@
'src/core/lib/iomgr/time_averaged_stats.c',
'src/core/lib/iomgr/timer_generic.c',
'src/core/lib/iomgr/timer_heap.c',
'src/core/lib/iomgr/timer_manager.c',
'src/core/lib/iomgr/timer_uv.c',
'src/core/lib/iomgr/udp_server.c',
'src/core/lib/iomgr/unix_sockets_posix.c',
@ -772,6 +779,7 @@
'src/core/lib/transport/timeout_encoding.c',
'src/core/lib/transport/transport.c',
'src/core/lib/transport/transport_op_string.c',
'src/core/lib/debug/trace.c',
'src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c',
'src/core/ext/transport/chttp2/transport/bin_decoder.c',
'src/core/ext/transport/chttp2/transport/bin_encoder.c',
@ -855,8 +863,10 @@
'src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c',
'src/core/ext/transport/chttp2/client/insecure/channel_create.c',
'src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.c',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c',
'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c',
'third_party/nanopb/pb_common.c',
@ -940,20 +950,16 @@
"src/node/ext/call_credentials.cc",
"src/node/ext/channel.cc",
"src/node/ext/channel_credentials.cc",
"src/node/ext/completion_queue_threadpool.cc",
"src/node/ext/completion_queue_uv.cc",
"src/node/ext/completion_queue.cc",
"src/node/ext/node_grpc.cc",
"src/node/ext/server.cc",
"src/node/ext/server_credentials.cc",
"src/node/ext/server_generic.cc",
"src/node/ext/server_uv.cc",
"src/node/ext/slice.cc",
"src/node/ext/timeval.cc",
],
"dependencies": [
"grpc",
"gpr",
"node_modules/cares/deps/cares/cares.gyp:cares",
]
},
{

@ -187,7 +187,6 @@ filegroups:
- src/core/lib/channel/handshaker_registry.h
- src/core/lib/compression/algorithm_metadata.h
- src/core/lib/compression/message_compress.h
- src/core/lib/debug/trace.h
- src/core/lib/http/format_request.h
- src/core/lib/http/httpcli.h
- src/core/lib/http/parser.h
@ -197,7 +196,11 @@ filegroups:
- src/core/lib/iomgr/endpoint_pair.h
- src/core/lib/iomgr/error.h
- src/core/lib/iomgr/error_internal.h
- src/core/lib/iomgr/ev_epoll_linux.h
- src/core/lib/iomgr/ev_epoll1_linux.h
- src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h
- src/core/lib/iomgr/ev_epoll_thread_pool_linux.h
- src/core/lib/iomgr/ev_epollex_linux.h
- src/core/lib/iomgr/ev_epollsig_linux.h
- src/core/lib/iomgr/ev_poll_posix.h
- src/core/lib/iomgr/ev_posix.h
- src/core/lib/iomgr/exec_ctx.h
@ -206,6 +209,7 @@ filegroups:
- src/core/lib/iomgr/iomgr.h
- src/core/lib/iomgr/iomgr_internal.h
- src/core/lib/iomgr/iomgr_posix.h
- src/core/lib/iomgr/is_epollexclusive_available.h
- src/core/lib/iomgr/load_file.h
- src/core/lib/iomgr/lockfree_event.h
- src/core/lib/iomgr/network_status_tracker.h
@ -227,6 +231,7 @@ filegroups:
- src/core/lib/iomgr/socket_utils.h
- src/core/lib/iomgr/socket_utils_posix.h
- src/core/lib/iomgr/socket_windows.h
- src/core/lib/iomgr/sys_epoll_wrapper.h
- src/core/lib/iomgr/tcp_client.h
- src/core/lib/iomgr/tcp_client_posix.h
- src/core/lib/iomgr/tcp_posix.h
@ -238,6 +243,7 @@ filegroups:
- src/core/lib/iomgr/timer.h
- src/core/lib/iomgr/timer_generic.h
- src/core/lib/iomgr/timer_heap.h
- src/core/lib/iomgr/timer_manager.h
- src/core/lib/iomgr/timer_uv.h
- src/core/lib/iomgr/udp_server.h
- src/core/lib/iomgr/unix_sockets_posix.h
@ -293,7 +299,6 @@ filegroups:
- src/core/lib/channel/handshaker_registry.c
- src/core/lib/compression/compression.c
- src/core/lib/compression/message_compress.c
- src/core/lib/debug/trace.c
- src/core/lib/http/format_request.c
- src/core/lib/http/httpcli.c
- src/core/lib/http/parser.c
@ -304,7 +309,11 @@ filegroups:
- src/core/lib/iomgr/endpoint_pair_uv.c
- src/core/lib/iomgr/endpoint_pair_windows.c
- src/core/lib/iomgr/error.c
- src/core/lib/iomgr/ev_epoll_linux.c
- src/core/lib/iomgr/ev_epoll1_linux.c
- src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c
- src/core/lib/iomgr/ev_epoll_thread_pool_linux.c
- src/core/lib/iomgr/ev_epollex_linux.c
- src/core/lib/iomgr/ev_epollsig_linux.c
- src/core/lib/iomgr/ev_poll_posix.c
- src/core/lib/iomgr/ev_posix.c
- src/core/lib/iomgr/exec_ctx.c
@ -314,6 +323,7 @@ filegroups:
- src/core/lib/iomgr/iomgr_posix.c
- src/core/lib/iomgr/iomgr_uv.c
- src/core/lib/iomgr/iomgr_windows.c
- src/core/lib/iomgr/is_epollexclusive_available.c
- src/core/lib/iomgr/load_file.c
- src/core/lib/iomgr/lockfree_event.c
- src/core/lib/iomgr/network_status_tracker.c
@ -350,6 +360,7 @@ filegroups:
- src/core/lib/iomgr/time_averaged_stats.c
- src/core/lib/iomgr/timer_generic.c
- src/core/lib/iomgr/timer_heap.c
- src/core/lib/iomgr/timer_manager.c
- src/core/lib/iomgr/timer_uv.c
- src/core/lib/iomgr/udp_server.c
- src/core/lib/iomgr/unix_sockets_posix.c
@ -408,6 +419,7 @@ filegroups:
- gpr
uses:
- grpc_codegen
- grpc_trace
- name: grpc_client_channel
headers:
- src/core/ext/filters/client_channel/client_channel.h
@ -488,13 +500,17 @@ filegroups:
- grpc_base
- name: grpc_lb_policy_grpclb
headers:
- src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
src:
- src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.c
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c
- src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c
- src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c
plugin: grpc_lb_policy_grpclb
@ -504,13 +520,17 @@ filegroups:
- nanopb
- name: grpc_lb_policy_grpclb_secure
headers:
- src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
src:
- src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.c
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c
- src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c
- src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c
plugin: grpc_lb_policy_grpclb
@ -675,6 +695,13 @@ filegroups:
deps:
- grpc
- gpr_test_util
- name: grpc_trace
headers:
- src/core/lib/debug/trace.h
src:
- src/core/lib/debug/trace.c
deps:
- gpr
- name: grpc_transport_chttp2
headers:
- src/core/ext/transport/chttp2/transport/bin_decoder.h
@ -823,6 +850,8 @@ filegroups:
deps:
- gpr
secure: true
uses:
- grpc_trace
- name: grpc++_base
language: c++
public_headers:
@ -1654,6 +1683,14 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: check_epollexclusive
build: tool
language: c
src:
- test/build/check_epollexclusive.c
deps:
- grpc
- gpr
- name: chttp2_hpack_encoder_test
build: test
language: c
@ -1806,12 +1843,12 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: ev_epoll_linux_test
- name: ev_epollsig_linux_test
cpu_cost: 3
build: test
language: c
src:
- test/core/iomgr/ev_epoll_linux_test.c
- test/core/iomgr/ev_epollsig_linux_test.c
deps:
- grpc_test_util
- grpc
@ -3804,6 +3841,20 @@ targets:
- grpc_test_util
- grpc++
- grpc
- name: grpclb_end2end_test
gtest: true
build: test
language: c++
src:
- src/proto/grpc/lb/v1/load_balancer.proto
- test/cpp/end2end/grpclb_end2end_test.cc
deps:
- grpc++_test_util
- grpc_test_util
- grpc++
- grpc
- gpr_test_util
- gpr
- name: grpclb_test
gtest: false
build: test
@ -4490,10 +4541,11 @@ configs:
UBSAN_OPTIONS: halt_on_error=1:print_stacktrace=1:suppressions=tools/ubsan_suppressions.txt
defaults:
ares:
CFLAGS: -Wno-sign-conversion -Wno-invalid-source-encoding
CFLAGS: -Wno-sign-conversion $(if $(subst MINGW32,,$(SYSTEM)),-Wno-invalid-source-encoding,)
CPPFLAGS: -Ithird_party/cares -Ithird_party/cares/cares $(if $(subst Linux,,$(SYSTEM)),,-Ithird_party/cares/config_linux)
$(if $(subst Darwin,,$(SYSTEM)),,-Ithird_party/cares/config_darwin) -fvisibility=hidden
-D_GNU_SOURCE -DWIN32_LEAN_AND_MEAN -D_HAS_EXCEPTIONS=0 -DNOMINMAX -DHAVE_CONFIG_H
-D_GNU_SOURCE -DWIN32_LEAN_AND_MEAN -D_HAS_EXCEPTIONS=0 -DNOMINMAX $(if $(subst
MINGW32,,$(SYSTEM)),-DHAVE_CONFIG_H,)
benchmark:
CPPFLAGS: -Ithird_party/benchmark/include -DHAVE_POSIX_REGEX
boringssl:
@ -4539,13 +4591,10 @@ node_modules:
- src/node/ext/call_credentials.cc
- src/node/ext/channel.cc
- src/node/ext/channel_credentials.cc
- src/node/ext/completion_queue_threadpool.cc
- src/node/ext/completion_queue_uv.cc
- src/node/ext/completion_queue.cc
- src/node/ext/node_grpc.cc
- src/node/ext/server.cc
- src/node/ext/server_credentials.cc
- src/node/ext/server_generic.cc
- src/node/ext/server_uv.cc
- src/node/ext/slice.cc
- src/node/ext/timeval.cc
openssl_fallback:

@ -7,7 +7,7 @@
"license": "BSD-3-Clause",
"require": {
"php": ">=5.5.0",
"google/protobuf": "^v3.1.0"
"google/protobuf": "^v3.3.0"
},
"require-dev": {
"google/auth": "v0.9"

@ -97,7 +97,6 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/channel/handshaker_registry.c \
src/core/lib/compression/compression.c \
src/core/lib/compression/message_compress.c \
src/core/lib/debug/trace.c \
src/core/lib/http/format_request.c \
src/core/lib/http/httpcli.c \
src/core/lib/http/parser.c \
@ -108,7 +107,11 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/iomgr/endpoint_pair_uv.c \
src/core/lib/iomgr/endpoint_pair_windows.c \
src/core/lib/iomgr/error.c \
src/core/lib/iomgr/ev_epoll_linux.c \
src/core/lib/iomgr/ev_epoll1_linux.c \
src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c \
src/core/lib/iomgr/ev_epoll_thread_pool_linux.c \
src/core/lib/iomgr/ev_epollex_linux.c \
src/core/lib/iomgr/ev_epollsig_linux.c \
src/core/lib/iomgr/ev_poll_posix.c \
src/core/lib/iomgr/ev_posix.c \
src/core/lib/iomgr/exec_ctx.c \
@ -118,6 +121,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/iomgr/iomgr_posix.c \
src/core/lib/iomgr/iomgr_uv.c \
src/core/lib/iomgr/iomgr_windows.c \
src/core/lib/iomgr/is_epollexclusive_available.c \
src/core/lib/iomgr/load_file.c \
src/core/lib/iomgr/lockfree_event.c \
src/core/lib/iomgr/network_status_tracker.c \
@ -154,6 +158,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/iomgr/time_averaged_stats.c \
src/core/lib/iomgr/timer_generic.c \
src/core/lib/iomgr/timer_heap.c \
src/core/lib/iomgr/timer_manager.c \
src/core/lib/iomgr/timer_uv.c \
src/core/lib/iomgr/udp_server.c \
src/core/lib/iomgr/unix_sockets_posix.c \
@ -208,6 +213,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/transport/timeout_encoding.c \
src/core/lib/transport/transport.c \
src/core/lib/transport/transport_op_string.c \
src/core/lib/debug/trace.c \
src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c \
src/core/ext/transport/chttp2/transport/bin_decoder.c \
src/core/ext/transport/chttp2/transport/bin_encoder.c \
@ -291,8 +297,10 @@ if test "$PHP_GRPC" != "no"; then
src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c \
src/core/ext/transport/chttp2/client/insecure/channel_create.c \
src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c \
src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.c \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c \
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c \
src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c \
third_party/nanopb/pb_common.c \

@ -269,7 +269,6 @@ Pod::Spec.new do |s|
'src/core/lib/channel/handshaker_registry.h',
'src/core/lib/compression/algorithm_metadata.h',
'src/core/lib/compression/message_compress.h',
'src/core/lib/debug/trace.h',
'src/core/lib/http/format_request.h',
'src/core/lib/http/httpcli.h',
'src/core/lib/http/parser.h',
@ -279,7 +278,11 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/endpoint_pair.h',
'src/core/lib/iomgr/error.h',
'src/core/lib/iomgr/error_internal.h',
'src/core/lib/iomgr/ev_epoll_linux.h',
'src/core/lib/iomgr/ev_epoll1_linux.h',
'src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h',
'src/core/lib/iomgr/ev_epoll_thread_pool_linux.h',
'src/core/lib/iomgr/ev_epollex_linux.h',
'src/core/lib/iomgr/ev_epollsig_linux.h',
'src/core/lib/iomgr/ev_poll_posix.h',
'src/core/lib/iomgr/ev_posix.h',
'src/core/lib/iomgr/exec_ctx.h',
@ -288,6 +291,7 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/iomgr.h',
'src/core/lib/iomgr/iomgr_internal.h',
'src/core/lib/iomgr/iomgr_posix.h',
'src/core/lib/iomgr/is_epollexclusive_available.h',
'src/core/lib/iomgr/load_file.h',
'src/core/lib/iomgr/lockfree_event.h',
'src/core/lib/iomgr/network_status_tracker.h',
@ -309,6 +313,7 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/socket_utils.h',
'src/core/lib/iomgr/socket_utils_posix.h',
'src/core/lib/iomgr/socket_windows.h',
'src/core/lib/iomgr/sys_epoll_wrapper.h',
'src/core/lib/iomgr/tcp_client.h',
'src/core/lib/iomgr/tcp_client_posix.h',
'src/core/lib/iomgr/tcp_posix.h',
@ -320,6 +325,7 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/timer.h',
'src/core/lib/iomgr/timer_generic.h',
'src/core/lib/iomgr/timer_heap.h',
'src/core/lib/iomgr/timer_manager.h',
'src/core/lib/iomgr/timer_uv.h',
'src/core/lib/iomgr/udp_server.h',
'src/core/lib/iomgr/unix_sockets_posix.h',
@ -365,6 +371,7 @@ Pod::Spec.new do |s|
'src/core/lib/transport/timeout_encoding.h',
'src/core/lib/transport/transport.h',
'src/core/lib/transport/transport_impl.h',
'src/core/lib/debug/trace.h',
'src/core/ext/transport/chttp2/transport/bin_decoder.h',
'src/core/ext/transport/chttp2/transport/bin_encoder.h',
'src/core/ext/transport/chttp2/transport/chttp2_transport.h',
@ -434,8 +441,10 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/uri_parser.h',
'src/core/ext/filters/deadline/deadline_filter.h',
'src/core/ext/transport/chttp2/client/chttp2_connector.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h',
'third_party/nanopb/pb.h',
@ -474,7 +483,6 @@ Pod::Spec.new do |s|
'src/core/lib/channel/handshaker_registry.c',
'src/core/lib/compression/compression.c',
'src/core/lib/compression/message_compress.c',
'src/core/lib/debug/trace.c',
'src/core/lib/http/format_request.c',
'src/core/lib/http/httpcli.c',
'src/core/lib/http/parser.c',
@ -485,7 +493,11 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/endpoint_pair_uv.c',
'src/core/lib/iomgr/endpoint_pair_windows.c',
'src/core/lib/iomgr/error.c',
'src/core/lib/iomgr/ev_epoll_linux.c',
'src/core/lib/iomgr/ev_epoll1_linux.c',
'src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c',
'src/core/lib/iomgr/ev_epoll_thread_pool_linux.c',
'src/core/lib/iomgr/ev_epollex_linux.c',
'src/core/lib/iomgr/ev_epollsig_linux.c',
'src/core/lib/iomgr/ev_poll_posix.c',
'src/core/lib/iomgr/ev_posix.c',
'src/core/lib/iomgr/exec_ctx.c',
@ -495,6 +507,7 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/iomgr_posix.c',
'src/core/lib/iomgr/iomgr_uv.c',
'src/core/lib/iomgr/iomgr_windows.c',
'src/core/lib/iomgr/is_epollexclusive_available.c',
'src/core/lib/iomgr/load_file.c',
'src/core/lib/iomgr/lockfree_event.c',
'src/core/lib/iomgr/network_status_tracker.c',
@ -531,6 +544,7 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/time_averaged_stats.c',
'src/core/lib/iomgr/timer_generic.c',
'src/core/lib/iomgr/timer_heap.c',
'src/core/lib/iomgr/timer_manager.c',
'src/core/lib/iomgr/timer_uv.c',
'src/core/lib/iomgr/udp_server.c',
'src/core/lib/iomgr/unix_sockets_posix.c',
@ -585,6 +599,7 @@ Pod::Spec.new do |s|
'src/core/lib/transport/timeout_encoding.c',
'src/core/lib/transport/transport.c',
'src/core/lib/transport/transport_op_string.c',
'src/core/lib/debug/trace.c',
'src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c',
'src/core/ext/transport/chttp2/transport/bin_decoder.c',
'src/core/ext/transport/chttp2/transport/bin_encoder.c',
@ -668,8 +683,10 @@ Pod::Spec.new do |s|
'src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c',
'src/core/ext/transport/chttp2/client/insecure/channel_create.c',
'src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.c',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c',
'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c',
'third_party/nanopb/pb_common.c',
@ -730,7 +747,6 @@ Pod::Spec.new do |s|
'src/core/lib/channel/handshaker_registry.h',
'src/core/lib/compression/algorithm_metadata.h',
'src/core/lib/compression/message_compress.h',
'src/core/lib/debug/trace.h',
'src/core/lib/http/format_request.h',
'src/core/lib/http/httpcli.h',
'src/core/lib/http/parser.h',
@ -740,7 +756,11 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/endpoint_pair.h',
'src/core/lib/iomgr/error.h',
'src/core/lib/iomgr/error_internal.h',
'src/core/lib/iomgr/ev_epoll_linux.h',
'src/core/lib/iomgr/ev_epoll1_linux.h',
'src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h',
'src/core/lib/iomgr/ev_epoll_thread_pool_linux.h',
'src/core/lib/iomgr/ev_epollex_linux.h',
'src/core/lib/iomgr/ev_epollsig_linux.h',
'src/core/lib/iomgr/ev_poll_posix.h',
'src/core/lib/iomgr/ev_posix.h',
'src/core/lib/iomgr/exec_ctx.h',
@ -749,6 +769,7 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/iomgr.h',
'src/core/lib/iomgr/iomgr_internal.h',
'src/core/lib/iomgr/iomgr_posix.h',
'src/core/lib/iomgr/is_epollexclusive_available.h',
'src/core/lib/iomgr/load_file.h',
'src/core/lib/iomgr/lockfree_event.h',
'src/core/lib/iomgr/network_status_tracker.h',
@ -770,6 +791,7 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/socket_utils.h',
'src/core/lib/iomgr/socket_utils_posix.h',
'src/core/lib/iomgr/socket_windows.h',
'src/core/lib/iomgr/sys_epoll_wrapper.h',
'src/core/lib/iomgr/tcp_client.h',
'src/core/lib/iomgr/tcp_client_posix.h',
'src/core/lib/iomgr/tcp_posix.h',
@ -781,6 +803,7 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/timer.h',
'src/core/lib/iomgr/timer_generic.h',
'src/core/lib/iomgr/timer_heap.h',
'src/core/lib/iomgr/timer_manager.h',
'src/core/lib/iomgr/timer_uv.h',
'src/core/lib/iomgr/udp_server.h',
'src/core/lib/iomgr/unix_sockets_posix.h',
@ -826,6 +849,7 @@ Pod::Spec.new do |s|
'src/core/lib/transport/timeout_encoding.h',
'src/core/lib/transport/transport.h',
'src/core/lib/transport/transport_impl.h',
'src/core/lib/debug/trace.h',
'src/core/ext/transport/chttp2/transport/bin_decoder.h',
'src/core/ext/transport/chttp2/transport/bin_encoder.h',
'src/core/ext/transport/chttp2/transport/chttp2_transport.h',
@ -895,8 +919,10 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/uri_parser.h',
'src/core/ext/filters/deadline/deadline_filter.h',
'src/core/ext/transport/chttp2/client/chttp2_connector.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h',
'third_party/nanopb/pb.h',

@ -185,7 +185,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/channel/handshaker_registry.h )
s.files += %w( src/core/lib/compression/algorithm_metadata.h )
s.files += %w( src/core/lib/compression/message_compress.h )
s.files += %w( src/core/lib/debug/trace.h )
s.files += %w( src/core/lib/http/format_request.h )
s.files += %w( src/core/lib/http/httpcli.h )
s.files += %w( src/core/lib/http/parser.h )
@ -195,7 +194,11 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/endpoint_pair.h )
s.files += %w( src/core/lib/iomgr/error.h )
s.files += %w( src/core/lib/iomgr/error_internal.h )
s.files += %w( src/core/lib/iomgr/ev_epoll_linux.h )
s.files += %w( src/core/lib/iomgr/ev_epoll1_linux.h )
s.files += %w( src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h )
s.files += %w( src/core/lib/iomgr/ev_epoll_thread_pool_linux.h )
s.files += %w( src/core/lib/iomgr/ev_epollex_linux.h )
s.files += %w( src/core/lib/iomgr/ev_epollsig_linux.h )
s.files += %w( src/core/lib/iomgr/ev_poll_posix.h )
s.files += %w( src/core/lib/iomgr/ev_posix.h )
s.files += %w( src/core/lib/iomgr/exec_ctx.h )
@ -204,6 +207,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/iomgr.h )
s.files += %w( src/core/lib/iomgr/iomgr_internal.h )
s.files += %w( src/core/lib/iomgr/iomgr_posix.h )
s.files += %w( src/core/lib/iomgr/is_epollexclusive_available.h )
s.files += %w( src/core/lib/iomgr/load_file.h )
s.files += %w( src/core/lib/iomgr/lockfree_event.h )
s.files += %w( src/core/lib/iomgr/network_status_tracker.h )
@ -225,6 +229,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/socket_utils.h )
s.files += %w( src/core/lib/iomgr/socket_utils_posix.h )
s.files += %w( src/core/lib/iomgr/socket_windows.h )
s.files += %w( src/core/lib/iomgr/sys_epoll_wrapper.h )
s.files += %w( src/core/lib/iomgr/tcp_client.h )
s.files += %w( src/core/lib/iomgr/tcp_client_posix.h )
s.files += %w( src/core/lib/iomgr/tcp_posix.h )
@ -236,6 +241,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/timer.h )
s.files += %w( src/core/lib/iomgr/timer_generic.h )
s.files += %w( src/core/lib/iomgr/timer_heap.h )
s.files += %w( src/core/lib/iomgr/timer_manager.h )
s.files += %w( src/core/lib/iomgr/timer_uv.h )
s.files += %w( src/core/lib/iomgr/udp_server.h )
s.files += %w( src/core/lib/iomgr/unix_sockets_posix.h )
@ -281,6 +287,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/transport/timeout_encoding.h )
s.files += %w( src/core/lib/transport/transport.h )
s.files += %w( src/core/lib/transport/transport_impl.h )
s.files += %w( src/core/lib/debug/trace.h )
s.files += %w( src/core/ext/transport/chttp2/transport/bin_decoder.h )
s.files += %w( src/core/ext/transport/chttp2/transport/bin_encoder.h )
s.files += %w( src/core/ext/transport/chttp2/transport/chttp2_transport.h )
@ -350,8 +357,10 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/filters/client_channel/uri_parser.h )
s.files += %w( src/core/ext/filters/deadline/deadline_filter.h )
s.files += %w( src/core/ext/transport/chttp2/client/chttp2_connector.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h )
s.files += %w( third_party/nanopb/pb.h )
@ -390,7 +399,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/channel/handshaker_registry.c )
s.files += %w( src/core/lib/compression/compression.c )
s.files += %w( src/core/lib/compression/message_compress.c )
s.files += %w( src/core/lib/debug/trace.c )
s.files += %w( src/core/lib/http/format_request.c )
s.files += %w( src/core/lib/http/httpcli.c )
s.files += %w( src/core/lib/http/parser.c )
@ -401,7 +409,11 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/endpoint_pair_uv.c )
s.files += %w( src/core/lib/iomgr/endpoint_pair_windows.c )
s.files += %w( src/core/lib/iomgr/error.c )
s.files += %w( src/core/lib/iomgr/ev_epoll_linux.c )
s.files += %w( src/core/lib/iomgr/ev_epoll1_linux.c )
s.files += %w( src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c )
s.files += %w( src/core/lib/iomgr/ev_epoll_thread_pool_linux.c )
s.files += %w( src/core/lib/iomgr/ev_epollex_linux.c )
s.files += %w( src/core/lib/iomgr/ev_epollsig_linux.c )
s.files += %w( src/core/lib/iomgr/ev_poll_posix.c )
s.files += %w( src/core/lib/iomgr/ev_posix.c )
s.files += %w( src/core/lib/iomgr/exec_ctx.c )
@ -411,6 +423,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/iomgr_posix.c )
s.files += %w( src/core/lib/iomgr/iomgr_uv.c )
s.files += %w( src/core/lib/iomgr/iomgr_windows.c )
s.files += %w( src/core/lib/iomgr/is_epollexclusive_available.c )
s.files += %w( src/core/lib/iomgr/load_file.c )
s.files += %w( src/core/lib/iomgr/lockfree_event.c )
s.files += %w( src/core/lib/iomgr/network_status_tracker.c )
@ -447,6 +460,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/time_averaged_stats.c )
s.files += %w( src/core/lib/iomgr/timer_generic.c )
s.files += %w( src/core/lib/iomgr/timer_heap.c )
s.files += %w( src/core/lib/iomgr/timer_manager.c )
s.files += %w( src/core/lib/iomgr/timer_uv.c )
s.files += %w( src/core/lib/iomgr/udp_server.c )
s.files += %w( src/core/lib/iomgr/unix_sockets_posix.c )
@ -501,6 +515,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/transport/timeout_encoding.c )
s.files += %w( src/core/lib/transport/transport.c )
s.files += %w( src/core/lib/transport/transport_op_string.c )
s.files += %w( src/core/lib/debug/trace.c )
s.files += %w( src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c )
s.files += %w( src/core/ext/transport/chttp2/transport/bin_decoder.c )
s.files += %w( src/core/ext/transport/chttp2/transport/bin_encoder.c )
@ -584,8 +599,10 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c )
s.files += %w( src/core/ext/transport/chttp2/client/insecure/channel_create.c )
s.files += %w( src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.c )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c )
s.files += %w( third_party/nanopb/pb_common.c )

@ -364,28 +364,6 @@ class CallOpRecvMessage {
bool allow_not_getting_message_;
};
namespace CallOpGenericRecvMessageHelper {
class DeserializeFunc {
public:
virtual Status Deserialize(grpc_byte_buffer* buf) = 0;
virtual ~DeserializeFunc() {}
};
template <class R>
class DeserializeFuncType final : public DeserializeFunc {
public:
DeserializeFuncType(R* message) : message_(message) {}
Status Deserialize(grpc_byte_buffer* buf) override {
return SerializationTraits<R>::Deserialize(buf, message_);
}
~DeserializeFuncType() override {}
private:
R* message_; // Not a managed pointer because management is external to this
};
} // namespace CallOpGenericRecvMessageHelper
class CallOpGenericRecvMessage {
public:
CallOpGenericRecvMessage()
@ -393,11 +371,9 @@ class CallOpGenericRecvMessage {
template <class R>
void RecvMessage(R* message) {
// Use an explicit base class pointer to avoid resolution error in the
// following unique_ptr::reset for some old implementations.
CallOpGenericRecvMessageHelper::DeserializeFunc* func =
new CallOpGenericRecvMessageHelper::DeserializeFuncType<R>(message);
deserialize_.reset(func);
deserialize_ = [message](grpc_byte_buffer* buf) -> Status {
return SerializationTraits<R>::Deserialize(buf, message);
};
}
// Do not change status if no message is received.
@ -420,7 +396,7 @@ class CallOpGenericRecvMessage {
if (recv_buf_) {
if (*status) {
got_message = true;
*status = deserialize_->Deserialize(recv_buf_).ok();
*status = deserialize_(recv_buf_).ok();
} else {
got_message = false;
g_core_codegen_interface->grpc_byte_buffer_destroy(recv_buf_);
@ -431,11 +407,12 @@ class CallOpGenericRecvMessage {
*status = false;
}
}
deserialize_.reset();
deserialize_ = DeserializeFunc();
}
private:
std::unique_ptr<CallOpGenericRecvMessageHelper::DeserializeFunc> deserialize_;
typedef std::function<Status(grpc_byte_buffer*)> DeserializeFunc;
DeserializeFunc deserialize_;
grpc_byte_buffer* recv_buf_;
bool allow_not_getting_message_;
};

@ -34,6 +34,8 @@
#ifndef GRPCXX_IMPL_CODEGEN_CONFIG_PROTOBUF_H
#define GRPCXX_IMPL_CODEGEN_CONFIG_PROTOBUF_H
#define GRPC_OPEN_SOURCE_PROTO
#ifndef GRPC_CUSTOM_PROTOBUF_INT64
#include <google/protobuf/stubs/common.h>
#define GRPC_CUSTOM_PROTOBUF_INT64 ::google::protobuf::int64

@ -90,11 +90,15 @@ class CoreCodegen final : public CoreCodegenInterface {
grpc_byte_buffer* grpc_raw_byte_buffer_create(grpc_slice* slice,
size_t nslices) override;
grpc_slice grpc_slice_new_with_user_data(void* p, size_t len,
void (*destroy)(void*),
void* user_data) override;
grpc_slice grpc_empty_slice() override;
grpc_slice grpc_slice_malloc(size_t length) override;
void grpc_slice_unref(grpc_slice slice) override;
grpc_slice grpc_slice_ref(grpc_slice slice) override;
grpc_slice grpc_slice_split_tail(grpc_slice* s, size_t split) override;
grpc_slice grpc_slice_split_head(grpc_slice* s, size_t split) override;
void grpc_slice_buffer_add(grpc_slice_buffer* sb, grpc_slice slice) override;
void grpc_slice_buffer_pop(grpc_slice_buffer* sb) override;
grpc_slice grpc_slice_from_static_buffer(const void* buffer,

@ -101,15 +101,18 @@ class CoreCodegenInterface {
virtual grpc_byte_buffer* grpc_raw_byte_buffer_create(grpc_slice* slice,
size_t nslices) = 0;
virtual grpc_slice grpc_slice_new_with_user_data(void* p, size_t len,
void (*destroy)(void*),
void* user_data) = 0;
virtual void grpc_call_ref(grpc_call* call) = 0;
virtual void grpc_call_unref(grpc_call* call) = 0;
virtual void* grpc_call_arena_alloc(grpc_call* call, size_t length) = 0;
virtual grpc_slice grpc_empty_slice() = 0;
virtual grpc_slice grpc_slice_malloc(size_t length) = 0;
virtual void grpc_slice_unref(grpc_slice slice) = 0;
virtual grpc_slice grpc_slice_ref(grpc_slice slice) = 0;
virtual grpc_slice grpc_slice_split_tail(grpc_slice* s, size_t split) = 0;
virtual grpc_slice grpc_slice_split_head(grpc_slice* s, size_t split) = 0;
virtual void grpc_slice_buffer_add(grpc_slice_buffer* sb,
grpc_slice slice) = 0;
virtual void grpc_slice_buffer_pop(grpc_slice_buffer* sb) = 0;

@ -54,8 +54,7 @@ class GrpcBufferWriterPeer;
const int kGrpcBufferWriterMaxBufferLength = 1024 * 1024;
class GrpcBufferWriter final
: public ::grpc::protobuf::io::ZeroCopyOutputStream {
class GrpcBufferWriter : public ::grpc::protobuf::io::ZeroCopyOutputStream {
public:
explicit GrpcBufferWriter(grpc_byte_buffer** bp, int block_size)
: block_size_(block_size), byte_count_(0), have_backup_(false) {
@ -103,6 +102,8 @@ class GrpcBufferWriter final
grpc::protobuf::int64 ByteCount() const override { return byte_count_; }
grpc_slice_buffer* SliceBuffer() { return slice_buffer_; }
private:
friend class GrpcBufferWriterPeer;
const int block_size_;
@ -113,8 +114,7 @@ class GrpcBufferWriter final
grpc_slice slice_;
};
class GrpcBufferReader final
: public ::grpc::protobuf::io::ZeroCopyInputStream {
class GrpcBufferReader : public ::grpc::protobuf::io::ZeroCopyInputStream {
public:
explicit GrpcBufferReader(grpc_byte_buffer* buffer)
: byte_count_(0), backup_count_(0), status_() {
@ -175,64 +175,91 @@ class GrpcBufferReader final
return byte_count_ - backup_count_;
}
private:
protected:
int64_t byte_count_;
int64_t backup_count_;
grpc_byte_buffer_reader reader_;
grpc_slice slice_;
Status status_;
};
template <class BufferWriter, class T>
Status GenericSerialize(const grpc::protobuf::Message& msg,
grpc_byte_buffer** bp, bool* own_buffer) {
static_assert(
std::is_base_of<protobuf::io::ZeroCopyOutputStream, BufferWriter>::value,
"BufferWriter must be a subclass of io::ZeroCopyOutputStream");
*own_buffer = true;
int byte_size = msg.ByteSize();
if (byte_size <= internal::kGrpcBufferWriterMaxBufferLength) {
grpc_slice slice = g_core_codegen_interface->grpc_slice_malloc(byte_size);
GPR_CODEGEN_ASSERT(
GRPC_SLICE_END_PTR(slice) ==
msg.SerializeWithCachedSizesToArray(GRPC_SLICE_START_PTR(slice)));
*bp = g_core_codegen_interface->grpc_raw_byte_buffer_create(&slice, 1);
g_core_codegen_interface->grpc_slice_unref(slice);
return g_core_codegen_interface->ok();
} else {
BufferWriter writer(bp, internal::kGrpcBufferWriterMaxBufferLength);
return msg.SerializeToZeroCopyStream(&writer)
? g_core_codegen_interface->ok()
: Status(StatusCode::INTERNAL, "Failed to serialize message");
}
}
template <class BufferReader, class T>
Status GenericDeserialize(grpc_byte_buffer* buffer,
grpc::protobuf::Message* msg) {
static_assert(
std::is_base_of<protobuf::io::ZeroCopyInputStream, BufferReader>::value,
"BufferReader must be a subclass of io::ZeroCopyInputStream");
if (buffer == nullptr) {
return Status(StatusCode::INTERNAL, "No payload");
}
Status result = g_core_codegen_interface->ok();
{
BufferReader reader(buffer);
if (!reader.status().ok()) {
return reader.status();
}
::grpc::protobuf::io::CodedInputStream decoder(&reader);
decoder.SetTotalBytesLimit(INT_MAX, INT_MAX);
if (!msg->ParseFromCodedStream(&decoder)) {
result = Status(StatusCode::INTERNAL, msg->InitializationErrorString());
}
if (!decoder.ConsumedEntireMessage()) {
result = Status(StatusCode::INTERNAL, "Did not read entire message");
}
}
g_core_codegen_interface->grpc_byte_buffer_destroy(buffer);
return result;
}
} // namespace internal
// This is needed so the following class does not conflict with protobuf
// serializers that utilize internal-only tools.
#ifdef GRPC_OPEN_SOURCE_PROTO
// This class provides a protobuf serializer. It translates between protobuf
// objects and grpc_byte_buffers. More information about SerializationTraits can
// be found in include/grpc++/impl/codegen/serialization_traits.h.
template <class T>
class SerializationTraits<T, typename std::enable_if<std::is_base_of<
grpc::protobuf::Message, T>::value>::type> {
public:
static Status Serialize(const grpc::protobuf::Message& msg,
grpc_byte_buffer** bp, bool* own_buffer) {
*own_buffer = true;
int byte_size = msg.ByteSize();
if (byte_size <= internal::kGrpcBufferWriterMaxBufferLength) {
grpc_slice slice = g_core_codegen_interface->grpc_slice_malloc(byte_size);
GPR_CODEGEN_ASSERT(
GRPC_SLICE_END_PTR(slice) ==
msg.SerializeWithCachedSizesToArray(GRPC_SLICE_START_PTR(slice)));
*bp = g_core_codegen_interface->grpc_raw_byte_buffer_create(&slice, 1);
g_core_codegen_interface->grpc_slice_unref(slice);
return g_core_codegen_interface->ok();
} else {
internal::GrpcBufferWriter writer(
bp, internal::kGrpcBufferWriterMaxBufferLength);
return msg.SerializeToZeroCopyStream(&writer)
? g_core_codegen_interface->ok()
: Status(StatusCode::INTERNAL, "Failed to serialize message");
}
return internal::GenericSerialize<internal::GrpcBufferWriter, T>(
msg, bp, own_buffer);
}
static Status Deserialize(grpc_byte_buffer* buffer,
grpc::protobuf::Message* msg) {
if (buffer == nullptr) {
return Status(StatusCode::INTERNAL, "No payload");
}
Status result = g_core_codegen_interface->ok();
{
internal::GrpcBufferReader reader(buffer);
if (!reader.status().ok()) {
return reader.status();
}
::grpc::protobuf::io::CodedInputStream decoder(&reader);
decoder.SetTotalBytesLimit(INT_MAX, INT_MAX);
if (!msg->ParseFromCodedStream(&decoder)) {
result = Status(StatusCode::INTERNAL, msg->InitializationErrorString());
}
if (!decoder.ConsumedEntireMessage()) {
result = Status(StatusCode::INTERNAL, "Did not read entire message");
}
}
g_core_codegen_interface->grpc_byte_buffer_destroy(buffer);
return result;
return internal::GenericDeserialize<internal::GrpcBufferReader, T>(buffer,
msg);
}
};
#endif
} // namespace grpc
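To make the refactor above concrete, here is a minimal usage sketch; HelloRequest stands in for any protoc-generated message type (it is not part of this change), and the round trip simply exercises the Serialize/Deserialize entry points, which now forward to the GenericSerialize/GenericDeserialize templates:
#include <grpc++/impl/codegen/proto_utils.h>
#include "helloworld.pb.h"  // hypothetical generated message HelloRequest
bool RoundTrip() {
  HelloRequest req;
  req.set_name("world");
  grpc_byte_buffer* buf = nullptr;
  bool own_buffer = false;
  grpc::Status s =
      grpc::SerializationTraits<HelloRequest>::Serialize(req, &buf, &own_buffer);
  if (!s.ok()) return false;
  // Deserialize takes ownership of (and destroys) the byte buffer, as the
  // GenericDeserialize implementation above shows.
  HelloRequest decoded;
  s = grpc::SerializationTraits<HelloRequest>::Deserialize(buf, &decoded);
  return s.ok() && decoded.name() == "world";
}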

@ -293,6 +293,9 @@ each time recvmsg (or equivalent) is called */
"grpc.experimental.tcp_min_read_chunk_size"
#define GRPC_ARG_TCP_MAX_READ_CHUNK_SIZE \
"grpc.experimental.tcp_max_read_chunk_size"
/* Timeout in milliseconds to use for calls to the grpclb load balancer.
If 0 or unset, the balancer calls will have no deadline. */
#define GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS "grpc.grpclb_timeout_ms"
/** \} */
/** Result of a grpc call. If the caller satisfies the prerequisites of a

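For context, a minimal sketch of how a client could set the new GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS argument; the target, the credentials, and the 3000 ms value are illustrative only, and the ChannelArguments/CreateCustomChannel calls are the existing public C++ API, not something added by this patch:
#include <grpc++/grpc++.h>
#include <grpc/grpc.h>
#include <memory>
// Hypothetical helper: build a channel whose grpclb balancer calls get a
// 3 second deadline instead of the default "no deadline" behavior.
std::shared_ptr<grpc::Channel> MakeChannelWithBalancerDeadline() {
  grpc::ChannelArguments args;
  args.SetInt(GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS, 3000);  // 0/unset = no deadline
  return grpc::CreateCustomChannel("dns:///service.example.com",
                                   grpc::InsecureChannelCredentials(), args);
}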
@ -189,7 +189,7 @@
#ifdef __GLIBC__
#define GPR_POSIX_CRASH_HANDLER 1
#else /* musl libc */
#define GRPC_MSG_IOVLEN_TYPE int
#define GPR_MUSL_LIBC_COMPAT 1
#endif
#elif defined(__APPLE__)
#include <Availability.h>

@ -34,8 +34,7 @@
"lodash": "^4.15.0",
"nan": "^2.0.0",
"node-pre-gyp": "^0.6.0",
"protobufjs": "^6.7.0",
"cares": "^1.1.5"
"protobufjs": "^6.7.0"
},
"devDependencies": {
"async": "^2.0.1",

@ -194,7 +194,6 @@
<file baseinstalldir="/" name="src/core/lib/channel/handshaker_registry.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/compression/algorithm_metadata.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/compression/message_compress.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/debug/trace.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/http/format_request.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/http/httpcli.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/http/parser.h" role="src" />
@ -204,7 +203,11 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/endpoint_pair.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/error.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/error_internal.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_epoll_linux.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_epoll1_linux.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_epoll_thread_pool_linux.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_epollex_linux.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_epollsig_linux.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_poll_posix.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_posix.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/exec_ctx.h" role="src" />
@ -213,6 +216,7 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/iomgr.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/iomgr_internal.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/iomgr_posix.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/is_epollexclusive_available.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/load_file.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/lockfree_event.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/network_status_tracker.h" role="src" />
@ -234,6 +238,7 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/socket_utils.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/socket_utils_posix.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/socket_windows.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/sys_epoll_wrapper.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/tcp_client.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/tcp_client_posix.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/tcp_posix.h" role="src" />
@ -245,6 +250,7 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/timer.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/timer_generic.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/timer_heap.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/timer_manager.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/timer_uv.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/udp_server.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/unix_sockets_posix.h" role="src" />
@ -290,6 +296,7 @@
<file baseinstalldir="/" name="src/core/lib/transport/timeout_encoding.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/transport.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/transport_impl.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/debug/trace.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/transport/bin_decoder.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/transport/bin_encoder.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/transport/chttp2_transport.h" role="src" />
@ -359,8 +366,10 @@
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/uri_parser.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/deadline/deadline_filter.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/client/chttp2_connector.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h" role="src" />
<file baseinstalldir="/" name="third_party/nanopb/pb.h" role="src" />
@ -399,7 +408,6 @@
<file baseinstalldir="/" name="src/core/lib/channel/handshaker_registry.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/compression/compression.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/compression/message_compress.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/debug/trace.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/http/format_request.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/http/httpcli.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/http/parser.c" role="src" />
@ -410,7 +418,11 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/endpoint_pair_uv.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/endpoint_pair_windows.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/error.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_epoll_linux.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_epoll1_linux.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_epoll_thread_pool_linux.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_epollex_linux.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_epollsig_linux.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_poll_posix.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_posix.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/exec_ctx.c" role="src" />
@ -420,6 +432,7 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/iomgr_posix.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/iomgr_uv.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/iomgr_windows.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/is_epollexclusive_available.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/load_file.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/lockfree_event.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/network_status_tracker.c" role="src" />
@ -456,6 +469,7 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/time_averaged_stats.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/timer_generic.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/timer_heap.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/timer_manager.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/timer_uv.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/udp_server.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/unix_sockets_posix.c" role="src" />
@ -510,6 +524,7 @@
<file baseinstalldir="/" name="src/core/lib/transport/timeout_encoding.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/transport.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/transport_op_string.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/debug/trace.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/transport/bin_decoder.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/transport/bin_encoder.c" role="src" />
@ -593,8 +608,10 @@
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/client/insecure/channel_create.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c" role="src" />
<file baseinstalldir="/" name="third_party/nanopb/pb_common.c" role="src" />

@ -116,7 +116,7 @@ if EXTRA_ENV_COMPILE_ARGS is None:
elif 'win32' in sys.platform:
EXTRA_ENV_COMPILE_ARGS += ' -D_PYTHON_MSVC'
elif "linux" in sys.platform:
EXTRA_ENV_COMPILE_ARGS += ' -std=c++11 -fvisibility=hidden -fno-wrapv'
EXTRA_ENV_COMPILE_ARGS += ' -std=c++11 -std=gnu99 -fvisibility=hidden -fno-wrapv'
elif "darwin" in sys.platform:
EXTRA_ENV_COMPILE_ARGS += ' -fvisibility=hidden -fno-wrapv'
@ -144,6 +144,8 @@ CYTHON_EXTENSION_MODULE_NAMES = ('grpc._cython.cygrpc',)
CYTHON_HELPER_C_FILES = ()
CORE_C_FILES = tuple(grpc_core_dependencies.CORE_SOURCE_FILES)
if "win32" in sys.platform and "64bit" in platform.architecture()[0]:
CORE_C_FILES = filter(lambda x: 'third_party/cares' not in x, CORE_C_FILES)
EXTENSION_INCLUDE_DIRECTORIES = (
(PYTHON_STEM,) + CORE_INCLUDE + BORINGSSL_INCLUDE + ZLIB_INCLUDE +
@ -163,7 +165,9 @@ DEFINE_MACROS = (
if "win32" in sys.platform:
DEFINE_MACROS += (('WIN32_LEAN_AND_MEAN', 1), ('CARES_STATICLIB', 1),)
if '64bit' in platform.architecture()[0]:
DEFINE_MACROS += (('MS_WIN64', 1),)
# TODO(zyc): Re-enable c-ares on x64 Windows after fixing the
# ares_library_init compilation issue
DEFINE_MACROS += (('MS_WIN64', 1), ('GRPC_ARES', 0),)
elif sys.version_info >= (3, 5):
# For some reason, this is needed to get access to inet_pton/inet_ntop
# on msvc, but only for 32 bits
@ -233,7 +237,7 @@ INSTALL_REQUIRES = (
'six>=1.5.2',
# TODO(atash): eventually split the grpcio package into a metapackage
# depending on protobuf and the runtime component (independent of protobuf)
'protobuf>=3.2.0',
'protobuf>=3.3.0',
)
if not PY3:

@ -138,7 +138,7 @@ class Grpc(object):
{
'name': 'boringssl_%s' % os.path.basename(test[0]),
'args': [map_testarg(arg) for arg in test[1:]],
'exclude_configs': ['asan'],
'exclude_configs': ['asan', 'ubsan'],
'ci_platforms': ['linux', 'mac', 'posix', 'windows'],
'platforms': ['linux', 'mac', 'posix', 'windows'],
'flaky': False,

@ -804,6 +804,12 @@ void PrintHeaderService(grpc_generator::Printer *printer,
" public:\n");
printer->Indent();
// Service metadata
printer->Print(*vars,
"static constexpr char const* service_full_name() {\n"
" return \"$Package$$Service$\";\n"
"}\n");
// Client side
printer->Print(
"class StubInterface {\n"

@ -622,9 +622,17 @@ bool PrivateGenerator::PrintPreamble(grpc_generator::Printer* out) {
for (StringPairSet::iterator it = imports_set.begin();
it != imports_set.end(); ++it) {
var["ModuleName"] = std::get<0>(*it);
auto module_name = std::get<0>(*it);
var["ModuleAlias"] = std::get<1>(*it);
out->Print(var, "import $ModuleName$ as $ModuleAlias$\n");
const size_t last_dot_pos = module_name.rfind('.');
if (last_dot_pos == grpc::string::npos) {
var["ImportStatement"] = "import " + module_name;
} else {
var["ImportStatement"] = "from " + module_name.substr(0, last_dot_pos) +
" import " +
module_name.substr(last_dot_pos + 1);
}
out->Print(var, "$ImportStatement$ as $ModuleAlias$\n");
}
}
return true;

@ -132,7 +132,7 @@ static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w,
gpr_mu_lock(&w->mu);
if (due_to_completion) {
if (grpc_trace_operation_failures) {
if (GRPC_TRACER_ON(grpc_trace_operation_failures)) {
GRPC_LOG_IF_ERROR("watch_completion_error", GRPC_ERROR_REF(error));
}
GRPC_ERROR_UNREF(error);

@ -760,12 +760,6 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
#define CANCELLED_CALL ((grpc_subchannel_call *)1)
typedef enum {
/* zero so that it can be default-initialized */
GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING = 0,
GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL
} subchannel_creation_phase;
/** Call data. Holds a pointer to grpc_subchannel_call and the
associated machinery to create such a pointer.
Handles queueing of stream ops until a call object is ready, waiting
@ -793,8 +787,9 @@ typedef struct client_channel_call_data {
gpr_atm subchannel_call;
gpr_arena *arena;
subchannel_creation_phase creation_phase;
bool pick_pending;
grpc_connected_subchannel *connected_subchannel;
grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
grpc_polling_entity *pollent;
grpc_transport_stream_op_batch **waiting_ops;
@ -914,11 +909,10 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_call_element *elem = arg;
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
GPR_ASSERT(calld->creation_phase ==
GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
GPR_ASSERT(calld->pick_pending);
calld->pick_pending = false;
grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
chand->interested_parties);
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
if (calld->connected_subchannel == NULL) {
gpr_atm_no_barrier_store(&calld->subchannel_call, 1);
fail_locked(exec_ctx, calld,
@ -944,7 +938,8 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg,
.path = calld->path,
.start_time = calld->call_start_time,
.deadline = calld->deadline,
.arena = calld->arena};
.arena = calld->arena,
.context = calld->subchannel_call_context};
grpc_error *new_error = grpc_connected_subchannel_create_call(
exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
gpr_atm_rel_store(&calld->subchannel_call,
@ -973,6 +968,7 @@ typedef struct {
grpc_metadata_batch *initial_metadata;
uint32_t initial_metadata_flags;
grpc_connected_subchannel **connected_subchannel;
grpc_call_context_element *subchannel_call_context;
grpc_closure *on_ready;
grpc_call_element *elem;
grpc_closure closure;
@ -984,8 +980,8 @@ typedef struct {
static bool pick_subchannel_locked(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_metadata_batch *initial_metadata, uint32_t initial_metadata_flags,
grpc_connected_subchannel **connected_subchannel, grpc_closure *on_ready,
grpc_error *error);
grpc_connected_subchannel **connected_subchannel,
grpc_call_context_element *subchannel_call_context, grpc_closure *on_ready);
static void continue_picking_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
@ -997,49 +993,49 @@ static void continue_picking_locked(grpc_exec_ctx *exec_ctx, void *arg,
} else {
if (pick_subchannel_locked(exec_ctx, cpa->elem, cpa->initial_metadata,
cpa->initial_metadata_flags,
cpa->connected_subchannel, cpa->on_ready,
GRPC_ERROR_NONE)) {
cpa->connected_subchannel,
cpa->subchannel_call_context, cpa->on_ready)) {
grpc_closure_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE);
}
}
gpr_free(cpa);
}
static void cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_error *error) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
if (chand->lb_policy != NULL) {
grpc_lb_policy_cancel_pick_locked(exec_ctx, chand->lb_policy,
&calld->connected_subchannel,
GRPC_ERROR_REF(error));
}
for (grpc_closure *closure = chand->waiting_for_config_closures.head;
closure != NULL; closure = closure->next_data.next) {
continue_picking_args *cpa = closure->cb_arg;
if (cpa->connected_subchannel == &calld->connected_subchannel) {
cpa->connected_subchannel = NULL;
grpc_closure_sched(exec_ctx, cpa->on_ready,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1));
}
}
GRPC_ERROR_UNREF(error);
}
static bool pick_subchannel_locked(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_metadata_batch *initial_metadata, uint32_t initial_metadata_flags,
grpc_connected_subchannel **connected_subchannel, grpc_closure *on_ready,
grpc_error *error) {
grpc_connected_subchannel **connected_subchannel,
grpc_call_context_element *subchannel_call_context,
grpc_closure *on_ready) {
GPR_TIMER_BEGIN("pick_subchannel", 0);
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
continue_picking_args *cpa;
grpc_closure *closure;
GPR_ASSERT(connected_subchannel);
if (initial_metadata == NULL) {
if (chand->lb_policy != NULL) {
grpc_lb_policy_cancel_pick_locked(exec_ctx, chand->lb_policy,
connected_subchannel,
GRPC_ERROR_REF(error));
}
for (closure = chand->waiting_for_config_closures.head; closure != NULL;
closure = closure->next_data.next) {
cpa = closure->cb_arg;
if (cpa->connected_subchannel == connected_subchannel) {
cpa->connected_subchannel = NULL;
grpc_closure_sched(exec_ctx, cpa->on_ready,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1));
}
}
GPR_TIMER_END("pick_subchannel", 0);
GRPC_ERROR_UNREF(error);
return true;
}
GPR_ASSERT(error == GRPC_ERROR_NONE);
if (chand->lb_policy != NULL) {
apply_final_configuration_locked(exec_ctx, elem);
grpc_lb_policy *lb_policy = chand->lb_policy;
@ -1062,8 +1058,7 @@ static bool pick_subchannel_locked(
}
}
const grpc_lb_policy_pick_args inputs = {
initial_metadata, initial_metadata_flags, &calld->lb_token_mdelem,
gpr_inf_future(GPR_CLOCK_MONOTONIC)};
initial_metadata, initial_metadata_flags, &calld->lb_token_mdelem};
// Wrap the user-provided callback in order to hold a strong reference to
// the LB policy for the duration of the pick.
@ -1076,8 +1071,8 @@ static bool pick_subchannel_locked(
GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel_wrapping");
w_on_pick_arg->lb_policy = lb_policy;
const bool pick_done = grpc_lb_policy_pick_locked(
exec_ctx, lb_policy, &inputs, connected_subchannel, NULL,
&w_on_pick_arg->wrapper_closure);
exec_ctx, lb_policy, &inputs, connected_subchannel,
subchannel_call_context, NULL, &w_on_pick_arg->wrapper_closure);
if (pick_done) {
/* synchronous grpc_lb_policy_pick call. Unref the LB policy. */
GRPC_LB_POLICY_UNREF(exec_ctx, w_on_pick_arg->lb_policy,
@ -1096,10 +1091,11 @@ static bool pick_subchannel_locked(
&chand->on_resolver_result_changed);
}
if (chand->resolver != NULL) {
cpa = gpr_malloc(sizeof(*cpa));
continue_picking_args *cpa = gpr_malloc(sizeof(*cpa));
cpa->initial_metadata = initial_metadata;
cpa->initial_metadata_flags = initial_metadata_flags;
cpa->connected_subchannel = connected_subchannel;
cpa->subchannel_call_context = subchannel_call_context;
cpa->on_ready = on_ready;
cpa->elem = elem;
grpc_closure_init(&cpa->closure, continue_picking_locked, cpa,
@ -1151,16 +1147,13 @@ static void start_transport_stream_op_batch_locked_inner(
error to the caller when the first op does get passed down. */
calld->cancel_error =
GRPC_ERROR_REF(op->payload->cancel_stream.cancel_error);
switch (calld->creation_phase) {
case GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING:
fail_locked(exec_ctx, calld,
GRPC_ERROR_REF(op->payload->cancel_stream.cancel_error));
break;
case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL:
pick_subchannel_locked(
exec_ctx, elem, NULL, 0, &calld->connected_subchannel, NULL,
GRPC_ERROR_REF(op->payload->cancel_stream.cancel_error));
break;
if (calld->pick_pending) {
cancel_pick_locked(
exec_ctx, elem,
GRPC_ERROR_REF(op->payload->cancel_stream.cancel_error));
} else {
fail_locked(exec_ctx, calld,
GRPC_ERROR_REF(op->payload->cancel_stream.cancel_error));
}
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, op,
@ -1170,9 +1163,9 @@ static void start_transport_stream_op_batch_locked_inner(
}
}
/* if we don't have a subchannel, try to get one */
if (calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
calld->connected_subchannel == NULL && op->send_initial_metadata) {
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
if (!calld->pick_pending && calld->connected_subchannel == NULL &&
op->send_initial_metadata) {
calld->pick_pending = true;
grpc_closure_init(&calld->next_step, subchannel_ready_locked, elem,
grpc_combiner_scheduler(chand->combiner, true));
GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
@ -1183,8 +1176,9 @@ static void start_transport_stream_op_batch_locked_inner(
exec_ctx, elem,
op->payload->send_initial_metadata.send_initial_metadata,
op->payload->send_initial_metadata.send_initial_metadata_flags,
&calld->connected_subchannel, &calld->next_step, GRPC_ERROR_NONE)) {
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
&calld->connected_subchannel, calld->subchannel_call_context,
&calld->next_step)) {
calld->pick_pending = false;
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
} else {
grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
@ -1192,15 +1186,15 @@ static void start_transport_stream_op_batch_locked_inner(
}
}
/* if we've got a subchannel, then let's ask it to create a call */
if (calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
calld->connected_subchannel != NULL) {
if (!calld->pick_pending && calld->connected_subchannel != NULL) {
grpc_subchannel_call *subchannel_call = NULL;
const grpc_connected_subchannel_call_args call_args = {
.pollent = calld->pollent,
.path = calld->path,
.start_time = calld->call_start_time,
.deadline = calld->deadline,
.arena = calld->arena};
.arena = calld->arena,
.context = calld->subchannel_call_context};
grpc_error *error = grpc_connected_subchannel_create_call(
exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
gpr_atm_rel_store(&calld->subchannel_call,
@ -1349,12 +1343,18 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
then_schedule_closure = NULL;
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, call, "client_channel_destroy_call");
}
GPR_ASSERT(calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING);
GPR_ASSERT(!calld->pick_pending);
GPR_ASSERT(calld->waiting_ops_count == 0);
if (calld->connected_subchannel != NULL) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, calld->connected_subchannel,
"picked");
}
for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
if (calld->subchannel_call_context[i].value != NULL) {
calld->subchannel_call_context[i].destroy(
calld->subchannel_call_context[i].value);
}
}
gpr_free(calld->waiting_ops);
grpc_closure_sched(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
}
@ -1450,12 +1450,12 @@ static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg,
void grpc_client_channel_watch_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_pollset *pollset,
grpc_connectivity_state *state, grpc_closure *on_complete) {
grpc_connectivity_state *state, grpc_closure *closure) {
channel_data *chand = elem->channel_data;
external_connectivity_watcher *w = gpr_malloc(sizeof(*w));
w->chand = chand;
w->pollset = pollset;
w->on_complete = on_complete;
w->on_complete = closure;
w->state = state;
grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties, pollset);
GRPC_CHANNEL_STACK_REF(w->chand->owning_stack,

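The subchannel_call_context array threaded through the pick path above follows grpc's per-call context pattern: the pick populates a slot's value and destroy callback, and cc_destroy_call_elem releases every populated slot exactly once. A self-contained sketch of that ownership handoff, with simplified stand-ins for grpc_call_context_element and GRPC_CONTEXT_COUNT:

#include <stdlib.h>

/* Simplified stand-in for grpc_call_context_element: an opaque value plus the
 * destroy callback the owner of the array runs at call teardown. */
typedef struct {
  void *value;
  void (*destroy)(void *value);
} call_context_element;

enum { CONTEXT_CLIENT_STATS = 0, CONTEXT_COUNT };

static void destroy_stats(void *stats) { free(stats); }

int main(void) {
  call_context_element context[CONTEXT_COUNT] = {{NULL, NULL}};
  /* producer side (cf. the LB pick): hand ownership of a stats object to the call */
  context[CONTEXT_CLIENT_STATS].value = malloc(64);
  context[CONTEXT_CLIENT_STATS].destroy = destroy_stats;
  /* consumer side (cf. cc_destroy_call_elem): release every populated slot once */
  for (int i = 0; i < CONTEXT_COUNT; ++i) {
    if (context[i].value != NULL) context[i].destroy(context[i].value);
  }
  return 0;
}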
@ -119,9 +119,10 @@ void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx,
int grpc_lb_policy_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target,
grpc_call_context_element *context,
void **user_data, grpc_closure *on_complete) {
return policy->vtable->pick_locked(exec_ctx, policy, pick_args, target,
user_data, on_complete);
context, user_data, on_complete);
}
void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx *exec_ctx,

@ -43,9 +43,6 @@
typedef struct grpc_lb_policy grpc_lb_policy;
typedef struct grpc_lb_policy_vtable grpc_lb_policy_vtable;
typedef void (*grpc_lb_completion)(void *cb_arg, grpc_subchannel *subchannel,
grpc_status_code status, const char *errmsg);
struct grpc_lb_policy {
const grpc_lb_policy_vtable *vtable;
gpr_atm ref_pair;
@ -65,8 +62,6 @@ typedef struct grpc_lb_policy_pick_args {
uint32_t initial_metadata_flags;
/** Storage for LB token in \a initial_metadata, or NULL if not used */
grpc_linked_mdelem *lb_token_mdelem_storage;
/** Deadline for the call to the LB server */
gpr_timespec deadline;
} grpc_lb_policy_pick_args;
struct grpc_lb_policy_vtable {
@ -76,7 +71,8 @@ struct grpc_lb_policy_vtable {
/** \see grpc_lb_policy_pick */
int (*pick_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target, void **user_data,
grpc_connected_subchannel **target,
grpc_call_context_element *context, void **user_data,
grpc_closure *on_complete);
/** \see grpc_lb_policy_cancel_pick */
@ -156,6 +152,8 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
\a target will be set to the selected subchannel, or NULL on failure.
Upon success, \a user_data will be set to whatever opaque information
may need to be propagated from the LB policy, or NULL if not needed.
\a context will be populated with context to pass to the subchannel
call, if needed.
If the pick succeeds and a result is known immediately, a non-zero
value will be returned. Otherwise, \a on_complete will be invoked
@ -167,6 +165,7 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
int grpc_lb_policy_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target,
grpc_call_context_element *context,
void **user_data, grpc_closure *on_complete);
/** Perform a connected subchannel ping (see \a grpc_connected_subchannel_ping)

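The doc comment above gives grpc_lb_policy_pick_locked a dual completion contract: a non-zero return means target, user_data, and now context were filled in synchronously, while a zero return means on_complete runs later with the same outputs. A simplified, self-contained sketch of how a caller handles both paths (every type and name below is a stand-in, not the grpc API):

#include <stdbool.h>
#include <stdio.h>

typedef void (*closure_fn)(void *arg);

/* Stand-in for the pick contract: returns nonzero when the pick completes
 * synchronously; returns zero and promises to run on_complete otherwise. */
static int policy_pick(int *target, closure_fn on_complete, void *arg) {
  bool have_subchannel = true; /* pretend a connected subchannel is ready */
  if (have_subchannel) {
    *target = 42;
    return 1; /* synchronous completion: on_complete is never scheduled */
  }
  /* pending: on_complete(arg) would be scheduled once a pick resolves */
  (void)on_complete;
  (void)arg;
  return 0;
}

static void pick_done_cb(void *arg) {
  printf("async pick done: %d\n", *(int *)arg);
}

int main(void) {
  int target = 0;
  if (policy_pick(&target, pick_done_cb, &target)) {
    /* synchronous path, cf. the immediate unref when pick_done is true */
    printf("sync pick done: %d\n", target);
  } /* else: wait for pick_done_cb to fire */
  return 0;
}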
@ -0,0 +1,153 @@
/*
*
* Copyright 2017, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h"
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/profiling/timers.h"
static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
return GRPC_ERROR_NONE;
}
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {}
typedef struct {
// Stats object to update.
grpc_grpclb_client_stats *client_stats;
// State for intercepting send_initial_metadata.
grpc_closure on_complete_for_send;
grpc_closure *original_on_complete_for_send;
bool send_initial_metadata_succeeded;
// State for intercepting recv_initial_metadata.
grpc_closure recv_initial_metadata_ready;
grpc_closure *original_recv_initial_metadata_ready;
bool recv_initial_metadata_succeeded;
} call_data;
static void on_complete_for_send(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
call_data *calld = arg;
if (error == GRPC_ERROR_NONE) {
calld->send_initial_metadata_succeeded = true;
}
grpc_closure_run(exec_ctx, calld->original_on_complete_for_send,
GRPC_ERROR_REF(error));
}
static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
call_data *calld = arg;
if (error == GRPC_ERROR_NONE) {
calld->recv_initial_metadata_succeeded = true;
}
grpc_closure_run(exec_ctx, calld->original_recv_initial_metadata_ready,
GRPC_ERROR_REF(error));
}
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
// Get stats object from context and take a ref.
GPR_ASSERT(args->context != NULL);
GPR_ASSERT(args->context[GRPC_GRPCLB_CLIENT_STATS].value != NULL);
calld->client_stats = grpc_grpclb_client_stats_ref(
args->context[GRPC_GRPCLB_CLIENT_STATS].value);
// Record call started.
grpc_grpclb_client_stats_add_call_started(calld->client_stats);
return GRPC_ERROR_NONE;
}
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
call_data *calld = elem->call_data;
// Record call finished, optionally setting client_failed_to_send and
// received.
grpc_grpclb_client_stats_add_call_finished(
false /* drop_for_rate_limiting */, false /* drop_for_load_balancing */,
!calld->send_initial_metadata_succeeded /* client_failed_to_send */,
calld->recv_initial_metadata_succeeded /* known_received */,
calld->client_stats);
// All done, so unref the stats object.
grpc_grpclb_client_stats_unref(calld->client_stats);
}
static void start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
call_data *calld = elem->call_data;
GPR_TIMER_BEGIN("clr_start_transport_stream_op_batch", 0);
// Intercept send_initial_metadata.
if (batch->send_initial_metadata) {
calld->original_on_complete_for_send = batch->on_complete;
grpc_closure_init(&calld->on_complete_for_send, on_complete_for_send, calld,
grpc_schedule_on_exec_ctx);
batch->on_complete = &calld->on_complete_for_send;
}
// Intercept recv_initial_metadata.
if (batch->recv_initial_metadata) {
calld->original_recv_initial_metadata_ready =
batch->payload->recv_initial_metadata.recv_initial_metadata_ready;
grpc_closure_init(&calld->recv_initial_metadata_ready,
recv_initial_metadata_ready, calld,
grpc_schedule_on_exec_ctx);
batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
&calld->recv_initial_metadata_ready;
}
// Chain to next filter.
grpc_call_next_op(exec_ctx, elem, batch);
GPR_TIMER_END("clr_start_transport_stream_op_batch", 0);
}
const grpc_channel_filter grpc_client_load_reporting_filter = {
start_transport_stream_op_batch,
grpc_channel_next_op,
sizeof(call_data),
init_call_elem,
grpc_call_stack_ignore_set_pollset_or_pollset_set,
destroy_call_elem,
0, // sizeof(channel_data)
init_channel_elem,
destroy_channel_elem,
grpc_call_next_get_peer,
grpc_channel_next_get_info,
"client_load_reporting"};

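The new filter above uses a common grpc interception pattern: save the batch's original completion closure, point the batch at the filter's own closure, record the outcome when it runs, then chain to the original. A stripped-down, self-contained sketch of that pattern, with simplified stand-ins for grpc_closure and the op batch:

#include <stdbool.h>

/* Minimal stand-in for grpc_closure: a callback plus its argument. */
typedef struct {
  void (*cb)(void *arg, bool ok);
  void *arg;
} closure;

static void closure_run(closure *c, bool ok) { c->cb(c->arg, ok); }

typedef struct {
  closure on_complete_for_send;  /* the filter's interception hook */
  closure *original_on_complete; /* what the batch originally pointed at */
  bool send_initial_metadata_succeeded;
} call_data;

static void on_complete_for_send(void *arg, bool ok) {
  call_data *calld = arg;
  if (ok) calld->send_initial_metadata_succeeded = true; /* record the outcome */
  closure_run(calld->original_on_complete, ok); /* then chain to the original */
}

typedef struct {
  closure *on_complete; /* completion notification that filters may swap out */
} batch;

static void intercept_send(call_data *calld, batch *b) {
  calld->original_on_complete = b->on_complete;
  calld->on_complete_for_send.cb = on_complete_for_send;
  calld->on_complete_for_send.arg = calld;
  b->on_complete = &calld->on_complete_for_send;
}

static void original_cb(void *arg, bool ok) { (void)arg; (void)ok; }

int main(void) {
  closure original = {original_cb, NULL};
  batch b = {&original};
  call_data calld = {{NULL, NULL}, NULL, false};
  intercept_send(&calld, &b);
  closure_run(b.on_complete, true); /* transport reports the send succeeded */
  return calld.send_initial_metadata_succeeded ? 0 : 1;
}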
@ -0,0 +1,42 @@
/*
*
* Copyright 2017, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_CLIENT_LOAD_REPORTING_FILTER_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_CLIENT_LOAD_REPORTING_FILTER_H
#include "src/core/lib/channel/channel_stack.h"
extern const grpc_channel_filter grpc_client_load_reporting_filter;
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_CLIENT_LOAD_REPORTING_FILTER_H \
*/

@ -95,8 +95,7 @@
headers. Therefore, sockaddr.h must always be included first */
#include "src/core/lib/iomgr/sockaddr.h"
#include <errno.h>
#include <limits.h>
#include <string.h>
#include <grpc/byte_buffer_reader.h>
@ -108,13 +107,16 @@
#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/ext/filters/client_channel/client_channel_factory.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h"
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/parse_address.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
@ -126,6 +128,7 @@
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/channel_init.h"
#include "src/core/lib/transport/static_metadata.h"
#define GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS 20
@ -134,7 +137,7 @@
#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
#define GRPC_GRPCLB_RECONNECT_JITTER 0.2
int grpc_lb_glb_trace = 0;
grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false);
/* add lb_token of selected subchannel (address) to the call's initial
* metadata */
@ -147,6 +150,10 @@ static grpc_error *initial_metadata_add_lb_token(
lb_token_mdelem_storage, lb_token);
}
static void destroy_client_stats(void *arg) {
grpc_grpclb_client_stats_unref(arg);
}
typedef struct wrapped_rr_closure_arg {
/* the closure instance using this struct as argument */
grpc_closure wrapper_closure;
@ -163,6 +170,13 @@ typedef struct wrapped_rr_closure_arg {
* initial metadata */
grpc_connected_subchannel **target;
/* the context to be populated for the subchannel call */
grpc_call_context_element *context;
/* Stats for client-side load reporting. Note that this holds a
* reference, which must be either passed on via context or unreffed. */
grpc_grpclb_client_stats *client_stats;
/* the LB token associated with the pick */
grpc_mdelem lb_token;
@ -202,8 +216,14 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
(void *)*wc_arg->target, (void *)wc_arg->rr_policy);
abort();
}
// Pass on client stats via context. Passes ownership of the reference.
GPR_ASSERT(wc_arg->client_stats != NULL);
wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
} else {
grpc_grpclb_client_stats_unref(wc_arg->client_stats);
}
if (grpc_lb_glb_trace) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Unreffing RR %p", (void *)wc_arg->rr_policy);
}
GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
@ -237,6 +257,7 @@ typedef struct pending_pick {
static void add_pending_pick(pending_pick **root,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target,
grpc_call_context_element *context,
grpc_closure *on_complete) {
pending_pick *pp = gpr_zalloc(sizeof(*pp));
pp->next = *root;
@ -244,6 +265,7 @@ static void add_pending_pick(pending_pick **root,
pp->target = target;
pp->wrapped_on_complete_arg.wrapped_closure = on_complete;
pp->wrapped_on_complete_arg.target = target;
pp->wrapped_on_complete_arg.context = context;
pp->wrapped_on_complete_arg.initial_metadata = pick_args->initial_metadata;
pp->wrapped_on_complete_arg.lb_token_mdelem_storage =
pick_args->lb_token_mdelem_storage;
@ -287,8 +309,8 @@ typedef struct glb_lb_policy {
grpc_client_channel_factory *cc_factory;
grpc_channel_args *args;
/** deadline for the LB's call */
gpr_timespec deadline;
/** timeout in milliseconds for the LB call. 0 means no deadline. */
int lb_call_timeout_ms;
/** for communicating with the LB server */
grpc_channel *lb_channel;
@ -316,6 +338,10 @@ typedef struct glb_lb_policy {
/************************************************************/
/* client data associated with the LB server communication */
/************************************************************/
/* Finished sending initial request. */
grpc_closure lb_on_sent_initial_request;
/* Status from the LB server has been received. This signals the end of the LB
* call. */
grpc_closure lb_on_server_status_received;
@ -348,6 +374,23 @@ typedef struct glb_lb_policy {
/** LB call retry timer */
grpc_timer lb_call_retry_timer;
bool initial_request_sent;
bool seen_initial_response;
/* Stats for client-side load reporting. Should be unreffed and
* recreated whenever lb_call is replaced. */
grpc_grpclb_client_stats *client_stats;
/* Interval and timer for next client load report. */
gpr_timespec client_stats_report_interval;
grpc_timer client_load_report_timer;
bool client_load_report_timer_pending;
bool last_client_load_report_counters_were_zero;
/* Closure used for either the load report timer or the callback for
* completion of sending the load report. */
grpc_closure client_load_report_closure;
/* Client load report message payload. */
grpc_byte_buffer *client_load_report_payload;
} glb_lb_policy;
/* Keeps track and reacts to changes in connectivity of the RR instance */
@ -531,7 +574,7 @@ static bool update_lb_connectivity_status_locked(
GPR_ASSERT(new_rr_state_error == GRPC_ERROR_NONE);
}
if (grpc_lb_glb_trace) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Setting grpclb's state to %s from new RR policy %p state.",
grpc_connectivity_state_name(new_rr_state),
@ -552,11 +595,11 @@ static bool pick_from_internal_rr_locked(
grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
GPR_ASSERT(rr_policy != NULL);
const bool pick_done = grpc_lb_policy_pick_locked(
exec_ctx, rr_policy, pick_args, target, (void **)&wc_arg->lb_token,
&wc_arg->wrapper_closure);
exec_ctx, rr_policy, pick_args, target, wc_arg->context,
(void **)&wc_arg->lb_token, &wc_arg->wrapper_closure);
if (pick_done) {
/* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
if (grpc_lb_glb_trace) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
(intptr_t)wc_arg->rr_policy);
}
@ -567,7 +610,12 @@ static bool pick_from_internal_rr_locked(
pick_args->lb_token_mdelem_storage,
GRPC_MDELEM_REF(wc_arg->lb_token));
gpr_free(wc_arg);
// Pass on client stats via context. Passes ownership of the reference.
GPR_ASSERT(wc_arg->client_stats != NULL);
wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
gpr_free(wc_arg->free_when_done);
}
/* else, the pending pick will be registered and taken care of by the
* pending pick list inside the RR policy (glb_policy->rr_policy).
@ -637,7 +685,7 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
if (!replace_old_rr) {
/* dispose of the new RR policy that won't be used after all */
GRPC_LB_POLICY_UNREF(exec_ctx, new_rr_policy, "rr_handover_no_replace");
if (grpc_lb_glb_trace) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Keeping old RR policy (%p) despite new serverlist: new RR "
"policy was in %s connectivity state.",
@ -647,7 +695,7 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
return;
}
if (grpc_lb_glb_trace) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Created RR policy (%p) to replace old RR (%p)",
(void *)new_rr_policy, (void *)glb_policy->rr_policy);
}
@ -690,7 +738,9 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
glb_policy->pending_picks = pp->next;
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_pick");
pp->wrapped_on_complete_arg.rr_policy = glb_policy->rr_policy;
if (grpc_lb_glb_trace) {
pp->wrapped_on_complete_arg.client_stats =
grpc_grpclb_client_stats_ref(glb_policy->client_stats);
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Pending pick about to PICK from 0x%" PRIxPTR "",
(intptr_t)glb_policy->rr_policy);
}
@ -704,7 +754,7 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
glb_policy->pending_pings = pping->next;
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
pping->wrapped_notify_arg.rr_policy = glb_policy->rr_policy;
if (grpc_lb_glb_trace) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Pending ping about to PING from 0x%" PRIxPTR "",
(intptr_t)glb_policy->rr_policy);
}
@ -857,16 +907,29 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(uri->path[0] != '\0');
glb_policy->server_name =
gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
if (grpc_lb_glb_trace) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.",
glb_policy->server_name);
}
grpc_uri_destroy(uri);
glb_policy->cc_factory = args->client_channel_factory;
glb_policy->args = grpc_channel_args_copy(args->args);
GPR_ASSERT(glb_policy->cc_factory != NULL);
arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
glb_policy->lb_call_timeout_ms =
grpc_channel_arg_get_integer(arg, (grpc_integer_options){0, 0, INT_MAX});
// Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
// since we use this to trigger the client_load_reporting filter.
grpc_arg new_arg;
new_arg.key = GRPC_ARG_LB_POLICY_NAME;
new_arg.type = GRPC_ARG_STRING;
new_arg.value.string = "grpclb";
static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
grpc_slice_hash_table *targets_info = NULL;
/* Create a client channel over them to communicate with a LB service */
char *lb_service_target_addresses =
@ -880,6 +943,8 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
grpc_channel_args_destroy(exec_ctx, lb_channel_args);
gpr_free(lb_service_target_addresses);
if (glb_policy->lb_channel == NULL) {
gpr_free((void *)glb_policy->server_name);
grpc_channel_args_destroy(exec_ctx, glb_policy->args);
gpr_free(glb_policy);
return NULL;
}
@ -895,6 +960,9 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
GPR_ASSERT(glb_policy->pending_pings == NULL);
gpr_free((void *)glb_policy->server_name);
grpc_channel_args_destroy(exec_ctx, glb_policy->args);
if (glb_policy->client_stats != NULL) {
grpc_grpclb_client_stats_unref(glb_policy->client_stats);
}
grpc_channel_destroy(glb_policy->lb_channel);
glb_policy->lb_channel = NULL;
grpc_connectivity_state_destroy(exec_ctx, &glb_policy->state_tracker);
@ -1011,7 +1079,8 @@ static void glb_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target, void **user_data,
grpc_connected_subchannel **target,
grpc_call_context_element *context, void **user_data,
grpc_closure *on_complete) {
if (pick_args->lb_token_mdelem_storage == NULL) {
*target = NULL;
@ -1023,11 +1092,10 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
glb_policy->deadline = pick_args->deadline;
bool pick_done;
if (glb_policy->rr_policy != NULL) {
if (grpc_lb_glb_trace) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p",
(void *)glb_policy, (void *)glb_policy->rr_policy);
}
@ -1039,6 +1107,10 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_schedule_on_exec_ctx);
wc_arg->rr_policy = glb_policy->rr_policy;
wc_arg->target = target;
wc_arg->context = context;
GPR_ASSERT(glb_policy->client_stats != NULL);
wc_arg->client_stats =
grpc_grpclb_client_stats_ref(glb_policy->client_stats);
wc_arg->wrapped_closure = on_complete;
wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
wc_arg->initial_metadata = pick_args->initial_metadata;
@ -1046,13 +1118,13 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pick_done = pick_from_internal_rr_locked(exec_ctx, glb_policy->rr_policy,
pick_args, target, wc_arg);
} else {
if (grpc_lb_glb_trace) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_DEBUG,
"No RR policy in grpclb instance %p. Adding to grpclb's pending "
"picks",
(void *)(glb_policy));
}
add_pending_pick(&glb_policy->pending_picks, pick_args, target,
add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
on_complete);
if (!glb_policy->started_picking) {
@ -1093,6 +1165,104 @@ static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
exec_ctx, &glb_policy->state_tracker, current, notify);
}
static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
const gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
const gpr_timespec next_client_load_report_time =
gpr_time_add(now, glb_policy->client_stats_report_interval);
grpc_closure_init(&glb_policy->client_load_report_closure,
send_client_load_report_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner, false));
grpc_timer_init(exec_ctx, &glb_policy->client_load_report_timer,
next_client_load_report_time,
&glb_policy->client_load_report_closure, now);
}
static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
glb_lb_policy *glb_policy = arg;
grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
glb_policy->client_load_report_payload = NULL;
if (error != GRPC_ERROR_NONE || glb_policy->lb_call == NULL) {
glb_policy->client_load_report_timer_pending = false;
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"client_load_report");
return;
}
schedule_next_client_load_report(exec_ctx, glb_policy);
}
static void do_send_client_load_report_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_SEND_MESSAGE;
op.data.send_message.send_message = glb_policy->client_load_report_payload;
grpc_closure_init(&glb_policy->client_load_report_closure,
client_load_report_done_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner, false));
grpc_call_error call_error = grpc_call_start_batch_and_execute(
exec_ctx, glb_policy->lb_call, &op, 1,
&glb_policy->client_load_report_closure);
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
return request->client_stats.num_calls_started == 0 &&
request->client_stats.num_calls_finished == 0 &&
request->client_stats.num_calls_finished_with_drop_for_rate_limiting ==
0 &&
request->client_stats
.num_calls_finished_with_drop_for_load_balancing == 0 &&
request->client_stats.num_calls_finished_with_client_failed_to_send ==
0 &&
request->client_stats.num_calls_finished_known_received == 0;
}
static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
glb_lb_policy *glb_policy = arg;
if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == NULL) {
glb_policy->client_load_report_timer_pending = false;
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"client_load_report");
return;
}
// Construct message payload.
GPR_ASSERT(glb_policy->client_load_report_payload == NULL);
grpc_grpclb_request *request =
grpc_grpclb_load_report_request_create(glb_policy->client_stats);
// Skip client load report if the counters were all zero in the last
// report and they are still zero in this one.
if (load_report_counters_are_zero(request)) {
if (glb_policy->last_client_load_report_counters_were_zero) {
grpc_grpclb_request_destroy(request);
schedule_next_client_load_report(exec_ctx, glb_policy);
return;
}
glb_policy->last_client_load_report_counters_were_zero = true;
} else {
glb_policy->last_client_load_report_counters_were_zero = false;
}
grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
glb_policy->client_load_report_payload =
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
grpc_slice_unref_internal(exec_ctx, request_payload_slice);
grpc_grpclb_request_destroy(request);
// If we've already sent the initial request, then we can go ahead and
// send the load report. Otherwise, we need to wait until the initial
// request has been sent before sending this one
// (see lb_on_sent_initial_request_locked() below).
if (glb_policy->initial_request_sent) {
do_send_client_load_report_locked(exec_ctx, glb_policy);
}
}
static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error);
static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error);
static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
@ -1107,13 +1277,24 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
* glb_policy->base.interested_parties, which is comprised of the polling
* entities from \a client_channel. */
grpc_slice host = grpc_slice_from_copied_string(glb_policy->server_name);
gpr_timespec deadline =
glb_policy->lb_call_timeout_ms == 0
? gpr_inf_future(GPR_CLOCK_MONOTONIC)
: gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
gpr_time_from_millis(glb_policy->lb_call_timeout_ms,
GPR_TIMESPAN));
glb_policy->lb_call = grpc_channel_create_pollset_set_call(
exec_ctx, glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
glb_policy->base.interested_parties,
GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
&host, glb_policy->deadline, NULL);
&host, deadline, NULL);
grpc_slice_unref_internal(exec_ctx, host);
if (glb_policy->client_stats != NULL) {
grpc_grpclb_client_stats_unref(glb_policy->client_stats);
}
glb_policy->client_stats = grpc_grpclb_client_stats_create();
grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv);
grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv);
@ -1125,6 +1306,9 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
grpc_slice_unref_internal(exec_ctx, request_payload_slice);
grpc_grpclb_request_destroy(request);
grpc_closure_init(&glb_policy->lb_on_sent_initial_request,
lb_on_sent_initial_request_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner, false));
grpc_closure_init(&glb_policy->lb_on_server_status_received,
lb_on_server_status_received_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner, false));
@ -1138,6 +1322,10 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
GRPC_GRPCLB_RECONNECT_JITTER,
GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
glb_policy->initial_request_sent = false;
glb_policy->seen_initial_response = false;
glb_policy->last_client_load_report_counters_were_zero = false;
}
static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
@ -1151,6 +1339,10 @@ static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
grpc_slice_unref_internal(exec_ctx, glb_policy->lb_call_status_details);
if (!glb_policy->client_load_report_timer_pending) {
grpc_timer_cancel(exec_ctx, &glb_policy->client_load_report_timer);
}
}
/*
@ -1163,7 +1355,7 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
lb_call_init_locked(exec_ctx, glb_policy);
if (grpc_lb_glb_trace) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Query for backends (grpclb: %p, lb_call: %p)",
(void *)glb_policy, (void *)glb_policy->lb_call);
}
@ -1179,21 +1371,27 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata.recv_initial_metadata =
&glb_policy->lb_initial_metadata_recv;
op->flags = 0;
op->reserved = NULL;
op++;
GPR_ASSERT(glb_policy->lb_request_payload != NULL);
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message.send_message = glb_policy->lb_request_payload;
op->flags = 0;
op->reserved = NULL;
op++;
/* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
* count goes to zero) to be unref'd in lb_on_sent_initial_request_locked() */
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_server_status_received");
call_error = grpc_call_start_batch_and_execute(
exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
&glb_policy->lb_on_sent_initial_request);
GPR_ASSERT(GRPC_CALL_OK == call_error);
op = ops;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata =
&glb_policy->lb_trailing_metadata_recv;
@ -1225,6 +1423,19 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
glb_lb_policy *glb_policy = arg;
glb_policy->initial_request_sent = true;
// If we attempted to send a client load report before the initial
// request was sent, send the load report now.
if (glb_policy->client_load_report_payload != NULL) {
do_send_client_load_report_locked(exec_ctx, glb_policy);
}
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"lb_on_response_received_locked");
}
static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
glb_lb_policy *glb_policy = arg;
@ -1240,58 +1451,91 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_byte_buffer_reader_init(&bbr, glb_policy->lb_response_payload);
grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
grpc_byte_buffer_destroy(glb_policy->lb_response_payload);
grpc_grpclb_serverlist *serverlist =
grpc_grpclb_response_parse_serverlist(response_slice);
if (serverlist != NULL) {
GPR_ASSERT(glb_policy->lb_call != NULL);
grpc_slice_unref_internal(exec_ctx, response_slice);
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO, "Serverlist with %lu servers received",
(unsigned long)serverlist->num_servers);
for (size_t i = 0; i < serverlist->num_servers; ++i) {
grpc_resolved_address addr;
parse_server(serverlist->servers[i], &addr);
char *ipport;
grpc_sockaddr_to_string(&ipport, &addr, false);
gpr_log(GPR_INFO, "Serverlist[%lu]: %s", (unsigned long)i, ipport);
gpr_free(ipport);
grpc_grpclb_initial_response *response = NULL;
if (!glb_policy->seen_initial_response &&
(response = grpc_grpclb_initial_response_parse(response_slice)) !=
NULL) {
if (response->has_client_stats_report_interval) {
glb_policy->client_stats_report_interval =
gpr_time_max(gpr_time_from_seconds(1, GPR_TIMESPAN),
grpc_grpclb_duration_to_timespec(
&response->client_stats_report_interval));
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"received initial LB response message; "
"client load reporting interval = %" PRId64 ".%09d sec",
glb_policy->client_stats_report_interval.tv_sec,
glb_policy->client_stats_report_interval.tv_nsec);
}
/* take a weak ref (won't prevent calling of \a glb_shutdown() if the
* strong ref count goes to zero) to be unref'd in
* send_client_load_report_locked() */
glb_policy->client_load_report_timer_pending = true;
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "client_load_report");
schedule_next_client_load_report(exec_ctx, glb_policy);
} else if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"received initial LB response message; "
"client load reporting NOT enabled");
}
grpc_grpclb_initial_response_destroy(response);
glb_policy->seen_initial_response = true;
} else {
grpc_grpclb_serverlist *serverlist =
grpc_grpclb_response_parse_serverlist(response_slice);
if (serverlist != NULL) {
GPR_ASSERT(glb_policy->lb_call != NULL);
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Serverlist with %lu servers received",
(unsigned long)serverlist->num_servers);
for (size_t i = 0; i < serverlist->num_servers; ++i) {
grpc_resolved_address addr;
parse_server(serverlist->servers[i], &addr);
char *ipport;
grpc_sockaddr_to_string(&ipport, &addr, false);
gpr_log(GPR_INFO, "Serverlist[%lu]: %s", (unsigned long)i, ipport);
gpr_free(ipport);
}
}
/* update serverlist */
if (serverlist->num_servers > 0) {
if (grpc_grpclb_serverlist_equals(glb_policy->serverlist, serverlist)) {
if (grpc_lb_glb_trace) {
/* update serverlist */
if (serverlist->num_servers > 0) {
if (grpc_grpclb_serverlist_equals(glb_policy->serverlist,
serverlist)) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Incoming server list identical to current, ignoring.");
}
grpc_grpclb_destroy_serverlist(serverlist);
} else { /* new serverlist */
if (glb_policy->serverlist != NULL) {
/* dispose of the old serverlist */
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
/* and update the copy in the glb_lb_policy instance. This
* serverlist instance will be destroyed either upon the next
* update or in glb_destroy() */
glb_policy->serverlist = serverlist;
rr_handover_locked(exec_ctx, glb_policy);
}
} else {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Incoming server list identical to current, ignoring.");
"Received empty server list. Picks will stay pending until "
"a response with > 0 servers is received");
}
grpc_grpclb_destroy_serverlist(serverlist);
} else { /* new serverlist */
if (glb_policy->serverlist != NULL) {
/* dispose of the old serverlist */
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
/* and update the copy in the glb_lb_policy instance. This serverlist
* instance will be destroyed either upon the next update or in
* glb_destroy() */
glb_policy->serverlist = serverlist;
rr_handover_locked(exec_ctx, glb_policy);
}
} else {
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO,
"Received empty server list. Picks will stay pending until a "
"response with > 0 servers is received");
}
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
} else { /* serverlist == NULL */
gpr_log(GPR_ERROR, "Invalid LB response received: '%s'. Ignoring.",
grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
}
} else { /* serverlist == NULL */
gpr_log(GPR_ERROR, "Invalid LB response received: '%s'. Ignoring.",
grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
grpc_slice_unref_internal(exec_ctx, response_slice);
}
grpc_slice_unref_internal(exec_ctx, response_slice);
if (!glb_policy->shutting_down) {
/* keep listening for serverlist updates */
op->op = GRPC_OP_RECV_MESSAGE;
@ -1319,7 +1563,7 @@ static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
glb_lb_policy *glb_policy = arg;
if (!glb_policy->shutting_down) {
if (grpc_lb_glb_trace) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Restarting call to LB server (grpclb %p)",
(void *)glb_policy);
}
@ -1336,7 +1580,7 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(glb_policy->lb_call != NULL);
if (grpc_lb_glb_trace) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
char *status_details =
grpc_slice_to_c_string(glb_policy->lb_call_status_details);
gpr_log(GPR_DEBUG,
@ -1355,7 +1599,7 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec next_try =
gpr_backoff_step(&glb_policy->lb_call_backoff_state, now);
if (grpc_lb_glb_trace) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
(void *)glb_policy);
gpr_timespec timeout = gpr_time_sub(next_try, now);
@ -1403,9 +1647,29 @@ grpc_lb_policy_factory *grpc_glb_lb_factory_create() {
}
/* Plugin registration */
// Only add client_load_reporting filter if the grpclb LB policy is used.
static bool maybe_add_client_load_reporting_filter(
grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) {
const grpc_channel_args *args =
grpc_channel_stack_builder_get_channel_arguments(builder);
const grpc_arg *channel_arg =
grpc_channel_args_find(args, GRPC_ARG_LB_POLICY_NAME);
if (channel_arg != NULL && channel_arg->type == GRPC_ARG_STRING &&
strcmp(channel_arg->value.string, "grpclb") == 0) {
return grpc_channel_stack_builder_append_filter(
builder, (const grpc_channel_filter *)arg, NULL, NULL);
}
return true;
}
void grpc_lb_policy_grpclb_init() {
grpc_register_lb_policy(grpc_glb_lb_factory_create());
grpc_register_tracer("glb", &grpc_lb_glb_trace);
grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
maybe_add_client_load_reporting_filter,
(void *)&grpc_client_load_reporting_filter);
}
void grpc_lb_policy_grpclb_shutdown() {}

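One behavior worth noting in send_client_load_report_locked above: a report is skipped only when both the current counters and the previous report were all zero, so one final zero report is still sent after activity stops. A minimal sketch of that rule in isolation, where counters_are_zero stands in for load_report_counters_are_zero(request):

#include <stdbool.h>

/* Returns true if this report interval should be skipped entirely. */
static bool should_skip_report(bool counters_are_zero,
                               bool *last_report_was_zero) {
  if (counters_are_zero && *last_report_was_zero) {
    return true; /* nothing new since the last (already zero) report */
  }
  *last_report_was_zero = counters_are_zero; /* remember for the next interval */
  return false;
}

int main(void) {
  bool last = false;
  /* first zero report is still sent; the second zero report in a row is skipped */
  return should_skip_report(true, &last) || !should_skip_report(true, &last);
}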
@ -0,0 +1,133 @@
/*
*
* Copyright 2017, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
#include "src/core/lib/channel/channel_args.h"
#define GRPC_ARG_GRPCLB_CLIENT_STATS "grpc.grpclb_client_stats"
struct grpc_grpclb_client_stats {
gpr_refcount refs;
gpr_atm num_calls_started;
gpr_atm num_calls_finished;
gpr_atm num_calls_finished_with_drop_for_rate_limiting;
gpr_atm num_calls_finished_with_drop_for_load_balancing;
gpr_atm num_calls_finished_with_client_failed_to_send;
gpr_atm num_calls_finished_known_received;
};
grpc_grpclb_client_stats* grpc_grpclb_client_stats_create() {
grpc_grpclb_client_stats* client_stats = gpr_zalloc(sizeof(*client_stats));
gpr_ref_init(&client_stats->refs, 1);
return client_stats;
}
grpc_grpclb_client_stats* grpc_grpclb_client_stats_ref(
grpc_grpclb_client_stats* client_stats) {
gpr_ref(&client_stats->refs);
return client_stats;
}
void grpc_grpclb_client_stats_unref(grpc_grpclb_client_stats* client_stats) {
if (gpr_unref(&client_stats->refs)) {
gpr_free(client_stats);
}
}
void grpc_grpclb_client_stats_add_call_started(
grpc_grpclb_client_stats* client_stats) {
gpr_atm_full_fetch_add(&client_stats->num_calls_started, (gpr_atm)1);
}
void grpc_grpclb_client_stats_add_call_finished(
bool finished_with_drop_for_rate_limiting,
bool finished_with_drop_for_load_balancing,
bool finished_with_client_failed_to_send, bool finished_known_received,
grpc_grpclb_client_stats* client_stats) {
gpr_atm_full_fetch_add(&client_stats->num_calls_finished, (gpr_atm)1);
if (finished_with_drop_for_rate_limiting) {
gpr_atm_full_fetch_add(
&client_stats->num_calls_finished_with_drop_for_rate_limiting,
(gpr_atm)1);
}
if (finished_with_drop_for_load_balancing) {
gpr_atm_full_fetch_add(
&client_stats->num_calls_finished_with_drop_for_load_balancing,
(gpr_atm)1);
}
if (finished_with_client_failed_to_send) {
gpr_atm_full_fetch_add(
&client_stats->num_calls_finished_with_client_failed_to_send,
(gpr_atm)1);
}
if (finished_known_received) {
gpr_atm_full_fetch_add(&client_stats->num_calls_finished_known_received,
(gpr_atm)1);
}
}
static void atomic_get_and_reset_counter(int64_t* value, gpr_atm* counter) {
*value = (int64_t)gpr_atm_acq_load(counter);
gpr_atm_full_fetch_add(counter, (gpr_atm)(-*value));
}
void grpc_grpclb_client_stats_get(
grpc_grpclb_client_stats* client_stats, int64_t* num_calls_started,
int64_t* num_calls_finished,
int64_t* num_calls_finished_with_drop_for_rate_limiting,
int64_t* num_calls_finished_with_drop_for_load_balancing,
int64_t* num_calls_finished_with_client_failed_to_send,
int64_t* num_calls_finished_known_received) {
atomic_get_and_reset_counter(num_calls_started,
&client_stats->num_calls_started);
atomic_get_and_reset_counter(num_calls_finished,
&client_stats->num_calls_finished);
atomic_get_and_reset_counter(
num_calls_finished_with_drop_for_rate_limiting,
&client_stats->num_calls_finished_with_drop_for_rate_limiting);
atomic_get_and_reset_counter(
num_calls_finished_with_drop_for_load_balancing,
&client_stats->num_calls_finished_with_drop_for_load_balancing);
atomic_get_and_reset_counter(
num_calls_finished_with_client_failed_to_send,
&client_stats->num_calls_finished_with_client_failed_to_send);
atomic_get_and_reset_counter(
num_calls_finished_known_received,
&client_stats->num_calls_finished_known_received);
}

@ -0,0 +1,65 @@
/*
*
* Copyright 2017, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CLIENT_STATS_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CLIENT_STATS_H
#include <stdbool.h>
#include <grpc/impl/codegen/grpc_types.h>
typedef struct grpc_grpclb_client_stats grpc_grpclb_client_stats;
grpc_grpclb_client_stats* grpc_grpclb_client_stats_create();
grpc_grpclb_client_stats* grpc_grpclb_client_stats_ref(
grpc_grpclb_client_stats* client_stats);
void grpc_grpclb_client_stats_unref(grpc_grpclb_client_stats* client_stats);
void grpc_grpclb_client_stats_add_call_started(
grpc_grpclb_client_stats* client_stats);
void grpc_grpclb_client_stats_add_call_finished(
bool finished_with_drop_for_rate_limiting,
bool finished_with_drop_for_load_balancing,
bool finished_with_client_failed_to_send, bool finished_known_received,
grpc_grpclb_client_stats* client_stats);
void grpc_grpclb_client_stats_get(
grpc_grpclb_client_stats* client_stats, int64_t* num_calls_started,
int64_t* num_calls_finished,
int64_t* num_calls_finished_with_drop_for_rate_limiting,
int64_t* num_calls_finished_with_drop_for_load_balancing,
int64_t* num_calls_finished_with_client_failed_to_send,
int64_t* num_calls_finished_known_received);
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CLIENT_STATS_H \
*/
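A minimal usage sketch of the counter API declared above, assuming an illustrative call site (the example function names are not part of this change):
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
static void example_track_one_call(grpc_grpclb_client_stats *stats) {
  /* Bump the started counter when a pick hands out a subchannel. */
  grpc_grpclb_client_stats_add_call_started(stats);
  /* ... call proceeds ... */
  /* On completion, record the call's disposition. */
  grpc_grpclb_client_stats_add_call_finished(
      false /* drop_for_rate_limiting */, false /* drop_for_load_balancing */,
      false /* client_failed_to_send */, true /* known_received */, stats);
}
static void example_snapshot_for_report(grpc_grpclb_client_stats *stats) {
  int64_t started, finished, drop_rate, drop_lb, failed_send, known_rcvd;
  /* Reads and atomically resets each counter, so consecutive reports carry
     deltas rather than running totals. */
  grpc_grpclb_client_stats_get(stats, &started, &finished, &drop_rate,
                               &drop_lb, &failed_send, &known_rcvd);
}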

@ -80,15 +80,45 @@ static bool decode_serverlist(pb_istream_t *stream, const pb_field_t *field,
grpc_grpclb_request *grpc_grpclb_request_create(const char *lb_service_name) {
grpc_grpclb_request *req = gpr_malloc(sizeof(grpc_grpclb_request));
req->has_client_stats = 0; /* TODO(dgq): add support for stats once defined */
req->has_initial_request = 1;
req->initial_request.has_name = 1;
req->has_client_stats = false;
req->has_initial_request = true;
req->initial_request.has_name = true;
strncpy(req->initial_request.name, lb_service_name,
GRPC_GRPCLB_SERVICE_NAME_MAX_LENGTH);
return req;
}
static void populate_timestamp(gpr_timespec timestamp,
struct _grpc_lb_v1_Timestamp *timestamp_pb) {
timestamp_pb->has_seconds = true;
timestamp_pb->seconds = timestamp.tv_sec;
timestamp_pb->has_nanos = true;
timestamp_pb->nanos = timestamp.tv_nsec;
}
grpc_grpclb_request *grpc_grpclb_load_report_request_create(
grpc_grpclb_client_stats *client_stats) {
grpc_grpclb_request *req = gpr_zalloc(sizeof(grpc_grpclb_request));
req->has_client_stats = true;
req->client_stats.has_timestamp = true;
populate_timestamp(gpr_now(GPR_CLOCK_REALTIME), &req->client_stats.timestamp);
req->client_stats.has_num_calls_started = true;
req->client_stats.has_num_calls_finished = true;
req->client_stats.has_num_calls_finished_with_drop_for_rate_limiting = true;
req->client_stats.has_num_calls_finished_with_drop_for_load_balancing = true;
req->client_stats.has_num_calls_finished_with_client_failed_to_send = true;
req->client_stats.has_num_calls_finished_known_received = true;
grpc_grpclb_client_stats_get(
client_stats, &req->client_stats.num_calls_started,
&req->client_stats.num_calls_finished,
&req->client_stats.num_calls_finished_with_drop_for_rate_limiting,
&req->client_stats.num_calls_finished_with_drop_for_load_balancing,
&req->client_stats.num_calls_finished_with_client_failed_to_send,
&req->client_stats.num_calls_finished_known_received);
return req;
}
grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request) {
size_t encoded_length;
pb_ostream_t sizestream;
@ -122,6 +152,9 @@ grpc_grpclb_initial_response *grpc_grpclb_initial_response_parse(
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(&stream));
return NULL;
}
if (!res.has_initial_response) return NULL;
grpc_grpclb_initial_response *initial_res =
gpr_malloc(sizeof(grpc_grpclb_initial_response));
memcpy(initial_res, &res.initial_response,
@ -243,6 +276,15 @@ int grpc_grpclb_duration_compare(const grpc_grpclb_duration *lhs,
return 0;
}
gpr_timespec grpc_grpclb_duration_to_timespec(
grpc_grpclb_duration *duration_pb) {
gpr_timespec duration;
duration.tv_sec = duration_pb->has_seconds ? duration_pb->seconds : 0;
duration.tv_nsec = duration_pb->has_nanos ? duration_pb->nanos : 0;
duration.clock_type = GPR_TIMESPAN;
return duration;
}
void grpc_grpclb_initial_response_destroy(
grpc_grpclb_initial_response *response) {
gpr_free(response);

@ -36,6 +36,7 @@
#include <grpc/slice_buffer.h>
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h"
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
@ -58,6 +59,8 @@ typedef struct grpc_grpclb_serverlist {
/** Create a request for a gRPC LB service under \a lb_service_name */
grpc_grpclb_request *grpc_grpclb_request_create(const char *lb_service_name);
grpc_grpclb_request *grpc_grpclb_load_report_request_create(
grpc_grpclb_client_stats *client_stats);
/** Protocol Buffers v3-encode \a request */
grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request);
@ -93,6 +96,9 @@ void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist);
int grpc_grpclb_duration_compare(const grpc_grpclb_duration *lhs,
const grpc_grpclb_duration *rhs);
gpr_timespec grpc_grpclb_duration_to_timespec(
grpc_grpclb_duration *duration_pb);
/** Destroy \a initial_response */
void grpc_grpclb_initial_response_destroy(
grpc_grpclb_initial_response *response);
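A minimal sketch of how the new load-report builder above would typically be driven, assuming the encode/destroy helpers declared in this same header; the function name and the send step are illustrative:
static void example_build_load_report(grpc_grpclb_client_stats *client_stats) {
  /* Build a LoadBalanceRequest carrying only client_stats; the counters in
     client_stats are reset as a side effect of being read. */
  grpc_grpclb_request *request =
      grpc_grpclb_load_report_request_create(client_stats);
  /* Serialize for a send_message op on the balancer call. */
  grpc_slice payload = grpc_grpclb_request_encode(request);
  /* ... hand the payload slice to the LB call's send_message op ... */
  grpc_slice_unref(payload);
  grpc_grpclb_request_destroy(request);
}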

@ -189,7 +189,8 @@ static void pf_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target, void **user_data,
grpc_connected_subchannel **target,
grpc_call_context_element *context, void **user_data,
grpc_closure *on_complete) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;

@ -74,7 +74,7 @@
typedef struct round_robin_lb_policy round_robin_lb_policy;
int grpc_lb_round_robin_trace = 0;
grpc_tracer_flag grpc_lb_round_robin_trace = GRPC_TRACER_INITIALIZER(false);
/** List of entities waiting for a pick.
*
@ -198,7 +198,7 @@ static void advance_last_picked_locked(round_robin_lb_policy *p) {
GPR_ASSERT(p->ready_list_last_pick == &p->ready_list);
}
if (grpc_lb_round_robin_trace) {
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG,
"[READYLIST, RR: %p] ADVANCED LAST PICK. NOW AT NODE %p (SC %p, "
"CSC %p)",
@ -228,7 +228,7 @@ static ready_list *add_connected_sc_locked(round_robin_lb_policy *p,
p->ready_list.prev->next = new_elem;
p->ready_list.prev = new_elem;
}
if (grpc_lb_round_robin_trace) {
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "[READYLIST] ADDING NODE %p (Conn. SC %p)",
(void *)new_elem, (void *)sd->subchannel);
}
@ -256,7 +256,7 @@ static void remove_disconnected_sc_locked(round_robin_lb_policy *p,
node->next->prev = node->prev;
}
if (grpc_lb_round_robin_trace) {
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "[READYLIST] REMOVED NODE %p (SC %p)", (void *)node,
(void *)node->subchannel);
}
@ -276,7 +276,7 @@ static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
ready_list *elem;
if (grpc_lb_round_robin_trace) {
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "Destroying Round Robin policy at %p", (void *)pol);
}
@ -312,7 +312,7 @@ static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pending_pick *pp;
size_t i;
if (grpc_lb_round_robin_trace) {
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "Shutting down Round Robin policy at %p", (void *)pol);
}
@ -414,13 +414,14 @@ static void rr_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target, void **user_data,
grpc_connected_subchannel **target,
grpc_call_context_element *context, void **user_data,
grpc_closure *on_complete) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
ready_list *selected;
if (grpc_lb_round_robin_trace) {
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_INFO, "Round Robin %p trying to pick", (void *)pol);
}
@ -433,7 +434,7 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (user_data != NULL) {
*user_data = selected->user_data;
}
if (grpc_lb_round_robin_trace) {
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG,
"[RR PICK] TARGET <-- CONNECTED SUBCHANNEL %p (NODE %p)",
(void *)*target, (void *)selected);
@ -565,7 +566,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (pp->user_data != NULL) {
*pp->user_data = selected->user_data;
}
if (grpc_lb_round_robin_trace) {
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG,
"[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
(void *)selected->subchannel, (void *)selected);
@ -723,7 +724,7 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
sc_args.args = new_args;
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
exec_ctx, args->client_channel_factory, &sc_args);
if (grpc_lb_round_robin_trace) {
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
char *address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
gpr_log(GPR_DEBUG, "Created subchannel %p for address uri %s",
@ -767,7 +768,7 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
"round_robin");
if (grpc_lb_round_robin_trace) {
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "Created RR policy at %p with %lu subchannels",
(void *)p, (unsigned long)p->num_subchannels);
}

@ -781,7 +781,7 @@ grpc_error *grpc_connected_subchannel_create_call(
(*call)->connection = GRPC_CONNECTED_SUBCHANNEL_REF(con, "subchannel_call");
const grpc_call_element_args call_args = {.call_stack = callstk,
.server_transport_data = NULL,
.context = NULL,
.context = args->context,
.path = args->path,
.start_time = args->start_time,
.deadline = args->deadline,

@ -119,6 +119,7 @@ typedef struct {
gpr_timespec start_time;
gpr_timespec deadline;
gpr_arena *arena;
grpc_call_context_element *context;
} grpc_connected_subchannel_call_args;
grpc_error *grpc_connected_subchannel_create_call(

@ -37,6 +37,7 @@
#include "src/core/ext/filters/http/message_compress/message_compress_filter.h"
#include "src/core/ext/filters/http/server/http_server_filter.h"
#include "src/core/lib/channel/channel_stack_builder.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel_init.h"
#include "src/core/lib/transport/transport_impl.h"

@ -47,6 +47,7 @@
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/transport/static_metadata.h"
#define INITIAL_METADATA_UNSEEN 0
@ -197,7 +198,7 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
did_compress = grpc_msg_compress(exec_ctx, calld->compression_algorithm,
&calld->slices, &tmp);
if (did_compress) {
if (grpc_compression_trace) {
if (GRPC_TRACER_ON(grpc_compression_trace)) {
char *algo_name;
const size_t before_size = calld->slices.length;
const size_t after_size = tmp.length;
@ -211,7 +212,7 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer_swap(&calld->slices, &tmp);
calld->send_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
} else {
if (grpc_compression_trace) {
if (GRPC_TRACER_ON(grpc_compression_trace)) {
char *algo_name;
GPR_ASSERT(grpc_compression_algorithm_name(calld->compression_algorithm,
&algo_name));

@ -38,8 +38,6 @@
#include "src/core/lib/channel/channel_stack.h"
extern int grpc_compression_trace;
/** Compression filter for outgoing data.
*
* See <grpc/compression.h> for the available compression settings.

@ -46,8 +46,6 @@
#define EXPECTED_CONTENT_TYPE "application/grpc"
#define EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1
extern int grpc_http_trace;
typedef struct call_data {
grpc_linked_mdelem status;
grpc_linked_mdelem content_type;

@ -101,7 +101,7 @@ grpc_channel *grpc_insecure_channel_create(const char *target,
void *reserved) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_API_TRACE(
"grpc_insecure_channel_create(target=%p, args=%p, reserved=%p)", 3,
"grpc_insecure_channel_create(target=%s, args=%p, reserved=%p)", 3,
(target, args, reserved));
GPR_ASSERT(reserved == NULL);
// Add channel arg containing the client channel factory.

@ -89,8 +89,8 @@ static bool g_default_keepalive_permit_without_calls =
DEFAULT_KEEPALIVE_PERMIT_WITHOUT_CALLS;
#define MAX_CLIENT_STREAM_ID 0x7fffffffu
int grpc_http_trace = 0;
int grpc_flowctl_trace = 0;
grpc_tracer_flag grpc_http_trace = GRPC_TRACER_INITIALIZER(false);
grpc_tracer_flag grpc_flowctl_trace = GRPC_TRACER_INITIALIZER(false);
static const grpc_transport_vtable vtable;
@ -884,14 +884,23 @@ static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt,
GPR_TIMER_BEGIN("write_action_begin_locked", 0);
grpc_chttp2_transport *t = gt;
GPR_ASSERT(t->write_state != GRPC_CHTTP2_WRITE_STATE_IDLE);
if (!t->closed && grpc_chttp2_begin_write(exec_ctx, t)) {
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
"begin writing");
grpc_closure_sched(exec_ctx, &t->write_action, GRPC_ERROR_NONE);
} else {
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
"begin writing nothing");
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing");
switch (t->closed ? GRPC_CHTTP2_NOTHING_TO_WRITE
: grpc_chttp2_begin_write(exec_ctx, t)) {
case GRPC_CHTTP2_NOTHING_TO_WRITE:
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
"begin writing nothing");
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing");
break;
case GRPC_CHTTP2_PARTIAL_WRITE:
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
"begin writing partial");
grpc_closure_sched(exec_ctx, &t->write_action, GRPC_ERROR_NONE);
break;
case GRPC_CHTTP2_FULL_WRITE:
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
"begin writing");
grpc_closure_sched(exec_ctx, &t->write_action, GRPC_ERROR_NONE);
break;
}
GPR_TIMER_END("write_action_begin_locked", 0);
}
@ -988,7 +997,7 @@ void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx *exec_ctx,
t->seen_goaway = 1;
/* When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug
* data equal to too_many_pings, it should log the occurrence at a log level
* data equal to "too_many_pings", it should log the occurrence at a log level
* that is enabled by default and double the configured KEEPALIVE_TIME used
* for new connections on that channel. */
if (t->is_client && goaway_error == GRPC_HTTP2_ENHANCE_YOUR_CALM &&
@ -1095,7 +1104,7 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
return;
}
closure->next_data.scratch -= CLOSURE_BARRIER_FIRST_REF_BIT;
if (grpc_http_trace) {
if (GRPC_TRACER_ON(grpc_http_trace)) {
const char *errstr = grpc_error_string(error);
gpr_log(GPR_DEBUG,
"complete_closure_step: %p refs=%d flags=0x%04x desc=%s err=%s",
@ -1240,7 +1249,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
grpc_transport_stream_op_batch_payload *op_payload = op->payload;
grpc_chttp2_transport *t = s->t;
if (grpc_http_trace) {
if (GRPC_TRACER_ON(grpc_http_trace)) {
char *str = grpc_transport_stream_op_batch_string(op);
gpr_log(GPR_DEBUG, "perform_stream_op_locked: %s; on_complete = %p", str,
op->on_complete);
@ -1483,9 +1492,9 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
if (grpc_http_trace) {
if (GRPC_TRACER_ON(grpc_http_trace)) {
char *str = grpc_transport_stream_op_batch_string(op);
gpr_log(GPR_DEBUG, "perform_stream_op[s=%p/%d]: %s", s, s->id, str);
gpr_log(GPR_DEBUG, "perform_stream_op[s=%p]: %s", s, str);
gpr_free(str);
}
@ -2130,27 +2139,29 @@ static void end_all_the_calls(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
static void update_bdp(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
double bdp_dbl) {
uint32_t bdp;
if (bdp_dbl <= 0) {
bdp = 0;
} else if (bdp_dbl > UINT32_MAX) {
bdp = UINT32_MAX;
int32_t bdp;
const int32_t kMinBDP = 128;
if (bdp_dbl <= kMinBDP) {
bdp = kMinBDP;
} else if (bdp_dbl > INT32_MAX) {
bdp = INT32_MAX;
} else {
bdp = (uint32_t)(bdp_dbl);
bdp = (int32_t)(bdp_dbl);
}
int64_t delta =
(int64_t)bdp -
(int64_t)t->settings[GRPC_LOCAL_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
if (delta == 0 || (bdp != 0 && delta > -1024 && delta < 1024)) {
if (delta == 0 || (delta > -bdp / 10 && delta < bdp / 10)) {
return;
}
if (grpc_bdp_estimator_trace) {
if (GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
gpr_log(GPR_DEBUG, "%s: update initial window size to %d", t->peer_string,
(int)bdp);
}
push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, bdp);
push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE, bdp);
push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
(uint32_t)bdp);
push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE, (uint32_t)bdp);
}
static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx,
@ -2305,7 +2316,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
static void start_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
grpc_chttp2_transport *t = tp;
if (grpc_http_trace) {
if (GRPC_TRACER_ON(grpc_http_trace)) {
gpr_log(GPR_DEBUG, "%s: Start BDP ping", t->peer_string);
}
/* Reset the keepalive ping timer */
@ -2318,7 +2329,7 @@ static void start_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
static void finish_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
grpc_chttp2_transport *t = tp;
if (grpc_http_trace) {
if (GRPC_TRACER_ON(grpc_http_trace)) {
gpr_log(GPR_DEBUG, "%s: Complete BDP ping", t->peer_string);
}
grpc_bdp_estimator_complete_ping(&t->bdp_estimator);
@ -2779,7 +2790,7 @@ static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
/* Channel with no active streams: send a goaway to try and make it
* disconnect cleanly */
if (grpc_resource_quota_trace) {
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
gpr_log(GPR_DEBUG, "HTTP2: %s - send goaway to free memory",
t->peer_string);
}
@ -2787,7 +2798,8 @@ static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Buffers full"),
GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_ENHANCE_YOUR_CALM));
} else if (error == GRPC_ERROR_NONE && grpc_resource_quota_trace) {
} else if (error == GRPC_ERROR_NONE &&
GRPC_TRACER_ON(grpc_resource_quota_trace)) {
gpr_log(GPR_DEBUG,
"HTTP2: %s - skip benign reclamation, there are still %" PRIdPTR
" streams",
@ -2808,7 +2820,7 @@ static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
t->destructive_reclaimer_registered = false;
if (error == GRPC_ERROR_NONE && n > 0) {
grpc_chttp2_stream *s = grpc_chttp2_stream_map_rand(&t->stream_map);
if (grpc_resource_quota_trace) {
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
gpr_log(GPR_DEBUG, "HTTP2: %s - abandon stream id %d", t->peer_string,
s->id);
}

@ -34,11 +34,12 @@
#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_CHTTP2_TRANSPORT_H
#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_CHTTP2_TRANSPORT_H
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/transport/transport.h"
extern int grpc_http_trace;
extern int grpc_flowctl_trace;
extern grpc_tracer_flag grpc_http_trace;
extern grpc_tracer_flag grpc_flowctl_trace;
grpc_transport *grpc_create_chttp2_transport(
grpc_exec_ctx *exec_ctx, const grpc_channel_args *channel_args,

@ -218,18 +218,18 @@ grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p,
parser->incoming_settings[id] != parser->value) {
t->initial_window_update +=
(int64_t)parser->value - parser->incoming_settings[id];
if (grpc_http_trace) {
if (GRPC_TRACER_ON(grpc_http_trace)) {
gpr_log(GPR_DEBUG, "adding %d for initial_window change",
(int)t->initial_window_update);
}
}
parser->incoming_settings[id] = parser->value;
if (grpc_http_trace) {
if (GRPC_TRACER_ON(grpc_http_trace)) {
gpr_log(GPR_DEBUG, "CHTTP2:%s:%s: got setting %s = %d",
t->is_client ? "CLI" : "SVR", t->peer_string, sp->name,
parser->value);
}
} else if (grpc_http_trace) {
} else if (GRPC_TRACER_ON(grpc_http_trace)) {
gpr_log(GPR_ERROR, "CHTTP2: Ignoring unknown setting %d (value %d)",
parser->id, parser->value);
}

@ -69,7 +69,7 @@ static grpc_slice_refcount terminal_slice_refcount = {NULL, NULL};
static const grpc_slice terminal_slice = {&terminal_slice_refcount,
.data.refcounted = {0, 0}};
extern int grpc_http_trace;
extern grpc_tracer_flag grpc_http_trace;
typedef struct {
int is_first_frame;
@ -425,7 +425,7 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
"Reserved header (colon-prefixed) happening after regular ones.");
}
if (grpc_http_trace && !GRPC_MDELEM_IS_INTERNED(elem)) {
if (GRPC_TRACER_ON(grpc_http_trace) && !GRPC_MDELEM_IS_INTERNED(elem)) {
char *k = grpc_slice_to_c_string(GRPC_MDKEY(elem));
char *v = grpc_slice_to_c_string(GRPC_MDVALUE(elem));
gpr_log(
@ -616,7 +616,7 @@ void grpc_chttp2_hpack_compressor_set_max_table_size(
}
}
c->advertise_table_size_change = 1;
if (grpc_http_trace) {
if (GRPC_TRACER_ON(grpc_http_trace)) {
gpr_log(GPR_DEBUG, "set max table size from encoder to %d", max_table_size);
}
}

@ -50,8 +50,6 @@
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/http2_errors.h"
extern int grpc_http_trace;
typedef enum {
NOT_BINARY,
BINARY_BEGIN,
@ -666,7 +664,7 @@ static const uint8_t inverse_base64[256] = {
/* emission helpers */
static grpc_error *on_hdr(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p,
grpc_mdelem md, int add_to_table) {
if (grpc_http_trace && !GRPC_MDELEM_IS_INTERNED(md)) {
if (GRPC_TRACER_ON(grpc_http_trace) && !GRPC_MDELEM_IS_INTERNED(md)) {
char *k = grpc_slice_to_c_string(GRPC_MDKEY(md));
char *v = grpc_slice_to_c_string(GRPC_MDVALUE(md));
gpr_log(
@ -1052,7 +1050,7 @@ static grpc_error *parse_lithdr_nvridx_v(grpc_exec_ctx *exec_ctx,
static grpc_error *finish_max_tbl_size(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
if (grpc_http_trace) {
if (GRPC_TRACER_ON(grpc_http_trace)) {
gpr_log(GPR_INFO, "MAX TABLE SIZE: %d", p->index);
}
grpc_error *err =

@ -40,9 +40,10 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/support/murmur_hash.h"
extern int grpc_http_trace;
extern grpc_tracer_flag grpc_http_trace;
static struct {
const char *key;
@ -260,7 +261,7 @@ void grpc_chttp2_hptbl_set_max_bytes(grpc_exec_ctx *exec_ctx,
if (tbl->max_bytes == max_bytes) {
return;
}
if (grpc_http_trace) {
if (GRPC_TRACER_ON(grpc_http_trace)) {
gpr_log(GPR_DEBUG, "Update hpack parser max size to %d", max_bytes);
}
while (tbl->mem_used > max_bytes) {
@ -284,7 +285,7 @@ grpc_error *grpc_chttp2_hptbl_set_current_table_size(grpc_exec_ctx *exec_ctx,
gpr_free(msg);
return err;
}
if (grpc_http_trace) {
if (GRPC_TRACER_ON(grpc_http_trace)) {
gpr_log(GPR_DEBUG, "Update hpack parser table size to %d", bytes);
}
while (tbl->mem_used > bytes) {

@ -552,9 +552,14 @@ void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
bool covered_by_poller, const char *reason);
/** Someone is unlocking the transport mutex: check to see if writes
are required, and frame them if so */
bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t);
typedef enum {
GRPC_CHTTP2_NOTHING_TO_WRITE,
GRPC_CHTTP2_PARTIAL_WRITE,
GRPC_CHTTP2_FULL_WRITE,
} grpc_chttp2_begin_write_result;
grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t);
void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_error *error);
@ -629,13 +634,13 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
#define GRPC_CHTTP2_CLIENT_CONNECT_STRLEN \
(sizeof(GRPC_CHTTP2_CLIENT_CONNECT_STRING) - 1)
extern int grpc_http_trace;
extern int grpc_flowctl_trace;
extern grpc_tracer_flag grpc_http_trace;
extern grpc_tracer_flag grpc_flowctl_trace;
#define GRPC_CHTTP2_IF_TRACING(stmt) \
if (!(grpc_http_trace)) \
; \
else \
#define GRPC_CHTTP2_IF_TRACING(stmt) \
if (!(GRPC_TRACER_ON(grpc_http_trace))) \
; \
else \
stmt
typedef enum {
@ -648,7 +653,7 @@ typedef enum {
dst_var, src_context, src_var) \
do { \
assert(id1 == id2); \
if (grpc_flowctl_trace) { \
if (GRPC_TRACER_ON(grpc_flowctl_trace)) { \
grpc_chttp2_flowctl_trace( \
__FILE__, __LINE__, phase, GRPC_CHTTP2_FLOWCTL_MOVE, #dst_context, \
#dst_var, #src_context, #src_var, transport->is_client, id1, \
@ -671,7 +676,7 @@ typedef enum {
#define GRPC_CHTTP2_FLOW_CREDIT_COMMON(phase, transport, id, dst_context, \
dst_var, amount) \
do { \
if (grpc_flowctl_trace) { \
if (GRPC_TRACER_ON(grpc_flowctl_trace)) { \
grpc_chttp2_flowctl_trace(__FILE__, __LINE__, phase, \
GRPC_CHTTP2_FLOWCTL_CREDIT, #dst_context, \
#dst_var, NULL, #amount, transport->is_client, \
@ -729,7 +734,7 @@ typedef enum {
#define GRPC_CHTTP2_FLOW_DEBIT_COMMON(phase, transport, id, dst_context, \
dst_var, amount) \
do { \
if (grpc_flowctl_trace) { \
if (GRPC_TRACER_ON(grpc_flowctl_trace)) { \
grpc_chttp2_flowctl_trace(__FILE__, __LINE__, phase, \
GRPC_CHTTP2_FLOWCTL_DEBIT, #dst_context, \
#dst_var, NULL, #amount, transport->is_client, \
@ -815,7 +820,7 @@ void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
/** Add a new ping strike to ping_recv_state.ping_strikes. If
ping_recv_state.ping_strikes > ping_policy.max_ping_strikes, it sends GOAWAY
with error code ENHANCE_YOUR_CALM and additional debug data resembling
too_many_pings followed by immediately closing the connection. */
"too_many_pings" followed by immediately closing the connection. */
void grpc_chttp2_add_ping_strike(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t);

@ -324,7 +324,7 @@ static grpc_error *init_frame_parser(grpc_exec_ctx *exec_ctx,
case GRPC_CHTTP2_FRAME_GOAWAY:
return init_goaway_parser(exec_ctx, t);
default:
if (grpc_http_trace) {
if (GRPC_TRACER_ON(grpc_http_trace)) {
gpr_log(GPR_ERROR, "Unknown frame type %02x", t->incoming_frame_type);
}
return init_skip_frame_parser(exec_ctx, t, 0);
@ -418,11 +418,9 @@ static grpc_error *update_incoming_window(grpc_exec_ctx *exec_ctx,
GRPC_CHTTP2_FLOW_DEBIT_STREAM_INCOMING_WINDOW_DELTA("parse", t, s,
incoming_frame_size);
if ((int64_t)t->settings[GRPC_SENT_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE] +
(int64_t)s->incoming_window_delta - (int64_t)s->announce_window <=
(int64_t)t->settings[GRPC_SENT_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE] /
if ((int64_t)s->incoming_window_delta - (int64_t)s->announce_window <=
-(int64_t)t->settings[GRPC_SENT_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE] /
2) {
grpc_chttp2_become_writable(exec_ctx, t, s,
GRPC_CHTTP2_STREAM_WRITE_INITIATE_UNCOVERED,
@ -494,7 +492,7 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
GPR_ASSERT(s != NULL);
if (grpc_http_trace) {
if (GRPC_TRACER_ON(grpc_http_trace)) {
char *key = grpc_slice_to_c_string(GRPC_MDKEY(md));
char *value =
grpc_dump_slice(GRPC_MDVALUE(md), GPR_DUMP_HEX | GPR_DUMP_ASCII);
@ -574,7 +572,7 @@ static void on_trailing_header(grpc_exec_ctx *exec_ctx, void *tp,
GPR_ASSERT(s != NULL);
if (grpc_http_trace) {
if (GRPC_TRACER_ON(grpc_http_trace)) {
char *key = grpc_slice_to_c_string(GRPC_MDKEY(md));
char *value =
grpc_dump_slice(GRPC_MDVALUE(md), GPR_DUMP_HEX | GPR_DUMP_ASCII);
@ -807,7 +805,7 @@ static grpc_error *parse_frame_slice(grpc_exec_ctx *exec_ctx,
if (err == GRPC_ERROR_NONE) {
return err;
} else if (grpc_error_get_int(err, GRPC_ERROR_INT_STREAM_ID, NULL)) {
if (grpc_http_trace) {
if (GRPC_TRACER_ON(grpc_http_trace)) {
const char *msg = grpc_error_string(err);
gpr_log(GPR_ERROR, "%s", msg);
}

@ -74,7 +74,8 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
}
if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_INFLIGHT])) {
/* ping already in-flight: wait */
if (grpc_http_trace || grpc_bdp_estimator_trace) {
if (GRPC_TRACER_ON(grpc_http_trace) ||
GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
gpr_log(GPR_DEBUG, "Ping delayed [%p]: already pinging", t->peer_string);
}
return;
@ -82,7 +83,8 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
if (t->ping_state.pings_before_data_required == 0 &&
t->ping_policy.max_pings_without_data != 0) {
/* need to send something of substance before sending a ping again */
if (grpc_http_trace || grpc_bdp_estimator_trace) {
if (GRPC_TRACER_ON(grpc_http_trace) ||
GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
gpr_log(GPR_DEBUG, "Ping delayed [%p]: too many recent pings: %d/%d",
t->peer_string, t->ping_state.pings_before_data_required,
t->ping_policy.max_pings_without_data);
@ -96,7 +98,8 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
(int)t->ping_policy.min_time_between_pings.tv_nsec);*/
if (gpr_time_cmp(elapsed, t->ping_policy.min_time_between_pings) < 0) {
/* not enough elapsed time between successive pings */
if (grpc_http_trace || grpc_bdp_estimator_trace) {
if (GRPC_TRACER_ON(grpc_http_trace) ||
GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
gpr_log(GPR_DEBUG,
"Ping delayed [%p]: not enough time elapsed since last ping",
t->peer_string);
@ -160,19 +163,22 @@ static bool stream_ref_if_not_destroyed(gpr_refcount *r) {
return true;
}
/* How many bytes of incoming flow control would we like to advertise */
uint32_t grpc_chttp2_target_incoming_window(grpc_chttp2_transport *t) {
return (uint32_t)GPR_MAX(
return (uint32_t)GPR_MIN(
(int64_t)((1u << 31) - 1),
t->stream_total_over_incoming_window +
(int64_t)GPR_MAX(
t->settings[GRPC_SENT_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE] -
t->stream_total_under_incoming_window,
0));
t->settings[GRPC_SENT_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]);
}
bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t) {
/* How many bytes would we like to put on the wire during a single syscall */
static uint32_t target_write_size(grpc_chttp2_transport *t) {
return 1024 * 1024;
}
grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
grpc_chttp2_stream *s;
GPR_TIMER_BEGIN("grpc_chttp2_begin_write", 0);
@ -206,9 +212,20 @@ bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
}
}
bool partial_write = false;
/* for each grpc_chttp2_stream that's become writable, frame its data
(according to available window sizes) and add to the output buffer */
while (grpc_chttp2_list_pop_writable_stream(t, &s)) {
while (true) {
if (t->outbuf.length > target_write_size(t)) {
partial_write = true;
break;
}
if (!grpc_chttp2_list_pop_writable_stream(t, &s)) {
break;
}
bool sent_initial_metadata = s->sent_initial_metadata;
bool now_writing = false;
@ -395,7 +412,9 @@ bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
GPR_TIMER_END("grpc_chttp2_begin_write", 0);
return t->outbuf.count > 0;
return t->outbuf.count > 0 ? (partial_write ? GRPC_CHTTP2_PARTIAL_WRITE
: GRPC_CHTTP2_FULL_WRITE)
: GRPC_CHTTP2_NOTHING_TO_WRITE;
}
void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,

@ -886,6 +886,10 @@ static bool op_can_be_run(grpc_transport_stream_op_batch *curr_op,
!stream_state->state_op_done[OP_RECV_MESSAGE]) {
CRONET_LOG(GPR_DEBUG, "Because");
result = false;
} else if (curr_op->cancel_stream &&
!stream_state->state_callback_received[OP_CANCELED]) {
CRONET_LOG(GPR_DEBUG, "Because");
result = false;
} else if (curr_op->recv_trailing_metadata) {
/* We aren't done with trailing metadata yet */
if (!stream_state->state_op_done[OP_RECV_TRAILING_METADATA]) {

@ -38,7 +38,7 @@
#include <stdlib.h>
#include <string.h>
int grpc_trace_channel = 0;
grpc_tracer_flag grpc_trace_channel = GRPC_TRACER_INITIALIZER(false);
/* Memory layouts.

@ -307,10 +307,10 @@ void grpc_call_element_signal_error(grpc_exec_ctx *exec_ctx,
grpc_call_element *cur_elem,
grpc_error *error);
extern int grpc_trace_channel;
extern grpc_tracer_flag grpc_trace_channel;
#define GRPC_CALL_LOG_OP(sev, elem, op) \
if (grpc_trace_channel) grpc_call_log_op(sev, elem, op)
if (GRPC_TRACER_ON(grpc_trace_channel)) grpc_call_log_op(sev, elem, op)
#ifdef __cplusplus
}

@ -38,7 +38,8 @@
#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
int grpc_trace_channel_stack_builder = 0;
grpc_tracer_flag grpc_trace_channel_stack_builder =
GRPC_TRACER_INITIALIZER(false);
typedef struct filter_node {
struct filter_node *next;

@ -165,7 +165,7 @@ grpc_error *grpc_channel_stack_builder_finish(
void grpc_channel_stack_builder_destroy(grpc_exec_ctx *exec_ctx,
grpc_channel_stack_builder *builder);
extern int grpc_trace_channel_stack_builder;
extern grpc_tracer_flag grpc_trace_channel_stack_builder;
#ifdef __cplusplus
}

@ -50,6 +50,9 @@ typedef enum {
/// Reserved for traffic_class_context.
GRPC_CONTEXT_TRAFFIC,
/// Value is a \a grpc_grpclb_client_stats.
GRPC_GRPCLB_CLIENT_STATS,
GRPC_CONTEXT_COUNT
} grpc_context_index;

@ -35,24 +35,31 @@
#include <string.h>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/lib/support/env.h"
int grpc_tracer_set_enabled(const char *name, int enabled);
typedef struct tracer {
const char *name;
int *flag;
grpc_tracer_flag *flag;
struct tracer *next;
} tracer;
static tracer *tracers;
void grpc_register_tracer(const char *name, int *flag) {
#ifdef GRPC_THREADSAFE_TRACER
#define TRACER_SET(flag, on) gpr_atm_no_barrier_store(&(flag).value, (on))
#else
#define TRACER_SET(flag, on) (flag).value = (on)
#endif
void grpc_register_tracer(const char *name, grpc_tracer_flag *flag) {
tracer *t = gpr_malloc(sizeof(*t));
t->name = name;
t->flag = flag;
t->next = tracers;
*flag = 0;
TRACER_SET(*flag, false);
tracers = t;
}
@ -121,13 +128,13 @@ int grpc_tracer_set_enabled(const char *name, int enabled) {
tracer *t;
if (0 == strcmp(name, "all")) {
for (t = tracers; t; t = t->next) {
*t->flag = enabled;
TRACER_SET(*t->flag, enabled);
}
} else {
int found = 0;
for (t = tracers; t; t = t->next) {
if (0 == strcmp(name, t->name)) {
*t->flag = enabled;
TRACER_SET(*t->flag, enabled);
found = 1;
}
}

@ -34,9 +34,35 @@
#ifndef GRPC_CORE_LIB_DEBUG_TRACE_H
#define GRPC_CORE_LIB_DEBUG_TRACE_H
#include <grpc/support/atm.h>
#include <grpc/support/port_platform.h>
#include <stdbool.h>
void grpc_register_tracer(const char *name, int *flag);
#if defined(__has_feature)
#if __has_feature(thread_sanitizer)
#define GRPC_THREADSAFE_TRACER
#endif
#endif
typedef struct {
#ifdef GRPC_THREADSAFE_TRACER
gpr_atm value;
#else
bool value;
#endif
} grpc_tracer_flag;
#ifdef GRPC_THREADSAFE_TRACER
#define GRPC_TRACER_ON(flag) (gpr_atm_no_barrier_load(&(flag).value) != 0)
#define GRPC_TRACER_INITIALIZER(on) \
{ (gpr_atm)(on) }
#else
#define GRPC_TRACER_ON(flag) ((flag).value)
#define GRPC_TRACER_INITIALIZER(on) \
{ (on) }
#endif
void grpc_register_tracer(const char *name, grpc_tracer_flag *flag);
void grpc_tracer_init(const char *env_var_name);
void grpc_tracer_shutdown(void);
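A minimal sketch of the tracer-flag pattern introduced above, using a hypothetical "my_component" tracer; the flags converted in this change (grpc_http_trace, grpc_combiner_trace, ...) follow the same shape:
#include <grpc/support/log.h>
#include "src/core/lib/debug/trace.h"
/* Static initialization; grpc_register_tracer() resets the value to false
   and wires the flag up to grpc_tracer_set_enabled(). */
grpc_tracer_flag grpc_my_component_trace = GRPC_TRACER_INITIALIZER(false);
void grpc_my_component_init(void) {
  grpc_register_tracer("my_component", &grpc_my_component_trace);
}
static void do_traced_work(void) {
  /* GRPC_TRACER_ON expands to a plain read, or to an atomic load when built
     with thread sanitizer (GRPC_THREADSAFE_TRACER). */
  if (GRPC_TRACER_ON(grpc_my_component_trace)) {
    gpr_log(GPR_DEBUG, "my_component: doing work");
  }
}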

@ -105,7 +105,7 @@ static void finish(grpc_exec_ctx *exec_ctx, internal_request *req,
grpc_error *error) {
grpc_polling_entity_del_from_pollset_set(exec_ctx, req->pollent,
req->context->pollset_set);
grpc_closure_sched(exec_ctx, req->on_done, error);
grpc_closure_sched(exec_ctx, req->on_done, GRPC_ERROR_REF(error));
grpc_http_parser_destroy(&req->parser);
if (req->addresses != NULL) {
grpc_resolved_addresses_destroy(req->addresses);

@ -40,7 +40,7 @@
#include <grpc/support/log.h>
#include <grpc/support/useful.h>
int grpc_http1_trace = 0;
grpc_tracer_flag grpc_http1_trace = GRPC_TRACER_INITIALIZER(false);
static char *buf2str(void *buffer, size_t length) {
char *out = gpr_malloc(length + 1);
@ -308,7 +308,7 @@ static grpc_error *addbyte(grpc_http_parser *parser, uint8_t byte,
case GRPC_HTTP_FIRST_LINE:
case GRPC_HTTP_HEADERS:
if (parser->cur_line_length >= GRPC_HTTP_PARSER_MAX_HEADER_LENGTH) {
if (grpc_http1_trace)
if (GRPC_TRACER_ON(grpc_http1_trace))
gpr_log(GPR_ERROR, "HTTP header max line length (%d) exceeded",
GRPC_HTTP_PARSER_MAX_HEADER_LENGTH);
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(

@ -36,6 +36,7 @@
#include <grpc/slice.h>
#include <grpc/support/port_platform.h>
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/error.h"
/* Maximum length of a header string of the form 'Key: Value\r\n' */
@ -121,6 +122,6 @@ grpc_error *grpc_http_parser_eof(grpc_http_parser *parser);
void grpc_http_request_destroy(grpc_http_request *request);
void grpc_http_response_destroy(grpc_http_response *response);
extern int grpc_http1_trace;
extern grpc_tracer_flag grpc_http1_trace;
#endif /* GRPC_CORE_LIB_HTTP_PARSER_H */

@ -42,13 +42,13 @@
#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
int grpc_combiner_trace = 0;
grpc_tracer_flag grpc_combiner_trace = GRPC_TRACER_INITIALIZER(false);
#define GRPC_COMBINER_TRACE(fn) \
do { \
if (grpc_combiner_trace) { \
fn; \
} \
#define GRPC_COMBINER_TRACE(fn) \
do { \
if (GRPC_TRACER_ON(grpc_combiner_trace)) { \
fn; \
} \
} while (0)
#define STATE_UNORPHANED 1

@ -37,6 +37,7 @@
#include <stddef.h>
#include <grpc/support/atm.h>
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/support/mpscq.h"
@ -78,6 +79,6 @@ grpc_closure_scheduler *grpc_combiner_finally_scheduler(grpc_combiner *lock,
bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx);
extern int grpc_combiner_trace;
extern grpc_tracer_flag grpc_combiner_trace;
#endif /* GRPC_CORE_LIB_IOMGR_COMBINER_H */

@ -0,0 +1,984 @@
/*
*
* Copyright 2017, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/lib/iomgr/port.h"
/* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL
#include "src/core/lib/iomgr/ev_epoll1_linux.h"
#include <assert.h>
#include <errno.h>
#include <poll.h>
#include <pthread.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <unistd.h>
#include <grpc/support/alloc.h>
#include <grpc/support/cpu.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"
static grpc_wakeup_fd global_wakeup_fd;
static int g_epfd;
/*******************************************************************************
* Fd Declarations
*/
struct grpc_fd {
int fd;
gpr_atm read_closure;
gpr_atm write_closure;
struct grpc_fd *freelist_next;
/* The pollset that last noticed that the fd is readable. The actual type
* stored in this is (grpc_pollset *) */
gpr_atm read_notifier_pollset;
grpc_iomgr_object iomgr_object;
};
static void fd_global_init(void);
static void fd_global_shutdown(void);
/*******************************************************************************
* Pollset Declarations
*/
typedef enum { UNKICKED, KICKED, DESIGNATED_POLLER } kick_state;
struct grpc_pollset_worker {
kick_state kick_state;
bool initialized_cv;
grpc_pollset_worker *next;
grpc_pollset_worker *prev;
gpr_cv cv;
grpc_closure_list schedule_on_end_work;
};
#define MAX_NEIGHBOURHOODS 1024
typedef struct pollset_neighbourhood {
gpr_mu mu;
grpc_pollset *active_root;
char pad[GPR_CACHELINE_SIZE];
} pollset_neighbourhood;
struct grpc_pollset {
gpr_mu mu;
pollset_neighbourhood *neighbourhood;
bool reassigning_neighbourhood;
grpc_pollset_worker *root_worker;
bool kicked_without_poller;
bool seen_inactive;
bool shutting_down; /* Is the pollset shutting down? */
bool finish_shutdown_called; /* Has 'finish_shutdown_locked()' been called? */
grpc_closure *shutdown_closure; /* Called after shutdown is complete */
int begin_refs;
grpc_pollset *next;
grpc_pollset *prev;
};
/*******************************************************************************
* Pollset-set Declarations
*/
struct grpc_pollset_set {};
/*******************************************************************************
* Common helpers
*/
static bool append_error(grpc_error **composite, grpc_error *error,
const char *desc) {
if (error == GRPC_ERROR_NONE) return true;
if (*composite == GRPC_ERROR_NONE) {
*composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
}
*composite = grpc_error_add_child(*composite, error);
return false;
}
/*******************************************************************************
* Fd Definitions
*/
/* We need to keep a freelist not because of any concerns of malloc performance
* but instead so that implementations with multiple threads in (for example)
* epoll_wait deal with the race between pollset removal and incoming poll
* notifications.
*
* The problem is that the poller ultimately holds a reference to this
* object, so it is very difficult to know when it is safe to free it, at least
* without some expensive synchronization.
*
* If we keep the object freelisted, in the worst case losing this race just
* becomes a spurious read notification on a reused fd.
*/
/* The alarm system needs to be able to wake up 'some poller' sometimes
* (specifically when a new alarm needs to be triggered earlier than the next
* alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
* case occurs. */
static grpc_fd *fd_freelist = NULL;
static gpr_mu fd_freelist_mu;
static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
static void fd_global_shutdown(void) {
gpr_mu_lock(&fd_freelist_mu);
gpr_mu_unlock(&fd_freelist_mu);
while (fd_freelist != NULL) {
grpc_fd *fd = fd_freelist;
fd_freelist = fd_freelist->freelist_next;
gpr_free(fd);
}
gpr_mu_destroy(&fd_freelist_mu);
}
static grpc_fd *fd_create(int fd, const char *name) {
grpc_fd *new_fd = NULL;
gpr_mu_lock(&fd_freelist_mu);
if (fd_freelist != NULL) {
new_fd = fd_freelist;
fd_freelist = fd_freelist->freelist_next;
}
gpr_mu_unlock(&fd_freelist_mu);
if (new_fd == NULL) {
new_fd = gpr_malloc(sizeof(grpc_fd));
}
new_fd->fd = fd;
grpc_lfev_init(&new_fd->read_closure);
grpc_lfev_init(&new_fd->write_closure);
gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);
new_fd->freelist_next = NULL;
char *fd_name;
gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
#ifdef GRPC_FD_REF_COUNT_DEBUG
gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name);
#endif
gpr_free(fd_name);
struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET),
.data.ptr = new_fd};
if (epoll_ctl(g_epfd, EPOLL_CTL_ADD, fd, &ev) != 0) {
gpr_log(GPR_ERROR, "epoll_ctl failed: %s", strerror(errno));
}
return new_fd;
}
static int fd_wrapped_fd(grpc_fd *fd) { return fd->fd; }
/* Might be called multiple times */
static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
GRPC_ERROR_REF(why))) {
shutdown(fd->fd, SHUT_RDWR);
grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
}
GRPC_ERROR_UNREF(why);
}
static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *on_done, int *release_fd,
const char *reason) {
grpc_error *error = GRPC_ERROR_NONE;
if (!grpc_lfev_is_shutdown(&fd->read_closure)) {
fd_shutdown(exec_ctx, fd, GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason));
}
/* If release_fd is not NULL, we should be relinquishing control of the file
descriptor fd->fd (but we still own the grpc_fd structure). */
if (release_fd != NULL) {
*release_fd = fd->fd;
} else {
close(fd->fd);
}
grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_REF(error));
grpc_iomgr_unregister_object(&fd->iomgr_object);
grpc_lfev_destroy(&fd->read_closure);
grpc_lfev_destroy(&fd->write_closure);
gpr_mu_lock(&fd_freelist_mu);
fd->freelist_next = fd_freelist;
fd_freelist = fd;
gpr_mu_unlock(&fd_freelist_mu);
}
static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
grpc_fd *fd) {
gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
return (grpc_pollset *)notifier;
}
static bool fd_is_shutdown(grpc_fd *fd) {
return grpc_lfev_is_shutdown(&fd->read_closure);
}
static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure);
}
static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
}
static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
return (grpc_workqueue *)0xb0b51ed;
}
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_pollset *notifier) {
grpc_lfev_set_ready(exec_ctx, &fd->read_closure);
/* Note, it is possible that fd_become_readable might be called twice with
different 'notifier's when an fd becomes readable and it is in two epoll
sets (This can happen briefly during polling island merges). In such cases
it does not really matter which notifier is set as the read_notifier_pollset
(They would both point to the same polling island anyway) */
/* Use release store to match with acquire load in fd_get_read_notifier */
gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}
static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
grpc_lfev_set_ready(exec_ctx, &fd->write_closure);
}
/*******************************************************************************
* Pollset Definitions
*/
GPR_TLS_DECL(g_current_thread_pollset);
GPR_TLS_DECL(g_current_thread_worker);
static gpr_atm g_active_poller;
static pollset_neighbourhood *g_neighbourhoods;
static size_t g_num_neighbourhoods;
static gpr_mu g_wq_mu;
static grpc_closure_list g_wq_items;
/* Return true if first in list */
static bool worker_insert(grpc_pollset *pollset, grpc_pollset_worker *worker) {
if (pollset->root_worker == NULL) {
pollset->root_worker = worker;
worker->next = worker->prev = worker;
return true;
} else {
worker->next = pollset->root_worker;
worker->prev = worker->next->prev;
worker->next->prev = worker;
worker->prev->next = worker;
return false;
}
}
/* Return true if last in list */
typedef enum { EMPTIED, NEW_ROOT, REMOVED } worker_remove_result;
static worker_remove_result worker_remove(grpc_pollset *pollset,
grpc_pollset_worker *worker) {
if (worker == pollset->root_worker) {
if (worker == worker->next) {
pollset->root_worker = NULL;
return EMPTIED;
} else {
pollset->root_worker = worker->next;
worker->prev->next = worker->next;
worker->next->prev = worker->prev;
return NEW_ROOT;
}
} else {
worker->prev->next = worker->next;
worker->next->prev = worker->prev;
return REMOVED;
}
}
static size_t choose_neighbourhood(void) {
return (size_t)gpr_cpu_current_cpu() % g_num_neighbourhoods;
}
static grpc_error *pollset_global_init(void) {
gpr_tls_init(&g_current_thread_pollset);
gpr_tls_init(&g_current_thread_worker);
gpr_atm_no_barrier_store(&g_active_poller, 0);
global_wakeup_fd.read_fd = -1;
grpc_error *err = grpc_wakeup_fd_init(&global_wakeup_fd);
gpr_mu_init(&g_wq_mu);
g_wq_items = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
if (err != GRPC_ERROR_NONE) return err;
struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLET),
.data.ptr = &global_wakeup_fd};
if (epoll_ctl(g_epfd, EPOLL_CTL_ADD, global_wakeup_fd.read_fd, &ev) != 0) {
return GRPC_OS_ERROR(errno, "epoll_ctl");
}
g_num_neighbourhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBOURHOODS);
g_neighbourhoods =
gpr_zalloc(sizeof(*g_neighbourhoods) * g_num_neighbourhoods);
for (size_t i = 0; i < g_num_neighbourhoods; i++) {
gpr_mu_init(&g_neighbourhoods[i].mu);
}
return GRPC_ERROR_NONE;
}
static void pollset_global_shutdown(void) {
gpr_tls_destroy(&g_current_thread_pollset);
gpr_tls_destroy(&g_current_thread_worker);
gpr_mu_destroy(&g_wq_mu);
if (global_wakeup_fd.read_fd != -1) grpc_wakeup_fd_destroy(&global_wakeup_fd);
for (size_t i = 0; i < g_num_neighbourhoods; i++) {
gpr_mu_destroy(&g_neighbourhoods[i].mu);
}
gpr_free(g_neighbourhoods);
}
static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
gpr_mu_init(&pollset->mu);
*mu = &pollset->mu;
pollset->neighbourhood = &g_neighbourhoods[choose_neighbourhood()];
pollset->seen_inactive = true;
}
static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
gpr_mu_lock(&pollset->mu);
if (!pollset->seen_inactive) {
pollset_neighbourhood *neighbourhood = pollset->neighbourhood;
gpr_mu_unlock(&pollset->mu);
retry_lock_neighbourhood:
gpr_mu_lock(&neighbourhood->mu);
gpr_mu_lock(&pollset->mu);
if (!pollset->seen_inactive) {
if (pollset->neighbourhood != neighbourhood) {
gpr_mu_unlock(&neighbourhood->mu);
neighbourhood = pollset->neighbourhood;
gpr_mu_unlock(&pollset->mu);
goto retry_lock_neighbourhood;
}
pollset->prev->next = pollset->next;
pollset->next->prev = pollset->prev;
if (pollset == pollset->neighbourhood->active_root) {
pollset->neighbourhood->active_root =
pollset->next == pollset ? NULL : pollset->next;
}
}
gpr_mu_unlock(&pollset->neighbourhood->mu);
}
gpr_mu_unlock(&pollset->mu);
gpr_mu_destroy(&pollset->mu);
}
static grpc_error *pollset_kick_all(grpc_pollset *pollset) {
grpc_error *error = GRPC_ERROR_NONE;
if (pollset->root_worker != NULL) {
grpc_pollset_worker *worker = pollset->root_worker;
do {
if (worker->initialized_cv) {
worker->kick_state = KICKED;
gpr_cv_signal(&worker->cv);
} else {
worker->kick_state = KICKED;
append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
"pollset_shutdown");
}
worker = worker->next;
} while (worker != pollset->root_worker);
}
return error;
}
static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
grpc_pollset *pollset) {
if (pollset->shutdown_closure != NULL && pollset->root_worker == NULL &&
pollset->begin_refs == 0) {
grpc_closure_sched(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
pollset->shutdown_closure = NULL;
}
}
static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure) {
GPR_ASSERT(pollset->shutdown_closure == NULL);
pollset->shutdown_closure = closure;
GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
pollset_maybe_finish_shutdown(exec_ctx, pollset);
}
#define MAX_EPOLL_EVENTS 100
static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
gpr_timespec now) {
gpr_timespec timeout;
if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
return -1;
}
if (gpr_time_cmp(deadline, now) <= 0) {
return 0;
}
static const gpr_timespec round_up = {
.clock_type = GPR_TIMESPAN, .tv_sec = 0, .tv_nsec = GPR_NS_PER_MS - 1};
timeout = gpr_time_sub(deadline, now);
int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up));
return millis >= 1 ? millis : 1;
}
static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
gpr_timespec now, gpr_timespec deadline) {
struct epoll_event events[MAX_EPOLL_EVENTS];
static const char *err_desc = "pollset_poll";
int timeout = poll_deadline_to_millis_timeout(deadline, now);
if (timeout != 0) {
GRPC_SCHEDULING_START_BLOCKING_REGION;
}
int r;
do {
r = epoll_wait(g_epfd, events, MAX_EPOLL_EVENTS, timeout);
} while (r < 0 && errno == EINTR);
if (timeout != 0) {
GRPC_SCHEDULING_END_BLOCKING_REGION;
}
if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
grpc_error *error = GRPC_ERROR_NONE;
for (int i = 0; i < r; i++) {
void *data_ptr = events[i].data.ptr;
if (data_ptr == &global_wakeup_fd) {
gpr_mu_lock(&g_wq_mu);
grpc_closure_list_move(&g_wq_items, &exec_ctx->closure_list);
gpr_mu_unlock(&g_wq_mu);
append_error(&error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
err_desc);
} else {
grpc_fd *fd = (grpc_fd *)(data_ptr);
bool cancel = (events[i].events & (EPOLLERR | EPOLLHUP)) != 0;
bool read_ev = (events[i].events & (EPOLLIN | EPOLLPRI)) != 0;
bool write_ev = (events[i].events & EPOLLOUT) != 0;
if (read_ev || cancel) {
fd_become_readable(exec_ctx, fd, pollset);
}
if (write_ev || cancel) {
fd_become_writable(exec_ctx, fd);
}
}
}
return error;
}
static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
grpc_pollset_worker **worker_hdl, gpr_timespec *now,
gpr_timespec deadline) {
if (worker_hdl != NULL) *worker_hdl = worker;
worker->initialized_cv = false;
worker->kick_state = UNKICKED;
worker->schedule_on_end_work = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
pollset->begin_refs++;
if (pollset->seen_inactive) {
// pollset has been observed to be inactive; we need to move it back to
// the active list
bool is_reassigning = false;
if (!pollset->reassigning_neighbourhood) {
is_reassigning = true;
pollset->reassigning_neighbourhood = true;
pollset->neighbourhood = &g_neighbourhoods[choose_neighbourhood()];
}
pollset_neighbourhood *neighbourhood = pollset->neighbourhood;
gpr_mu_unlock(&pollset->mu);
// pollset unlocked: state may change (even worker->kick_state)
retry_lock_neighbourhood:
gpr_mu_lock(&neighbourhood->mu);
gpr_mu_lock(&pollset->mu);
if (pollset->seen_inactive) {
if (neighbourhood != pollset->neighbourhood) {
gpr_mu_unlock(&neighbourhood->mu);
neighbourhood = pollset->neighbourhood;
gpr_mu_unlock(&pollset->mu);
goto retry_lock_neighbourhood;
}
pollset->seen_inactive = false;
if (neighbourhood->active_root == NULL) {
neighbourhood->active_root = pollset->next = pollset->prev = pollset;
if (gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
worker->kick_state = DESIGNATED_POLLER;
}
} else {
pollset->next = neighbourhood->active_root;
pollset->prev = pollset->next->prev;
pollset->next->prev = pollset->prev->next = pollset;
}
}
if (is_reassigning) {
GPR_ASSERT(pollset->reassigning_neighbourhood);
pollset->reassigning_neighbourhood = false;
}
gpr_mu_unlock(&neighbourhood->mu);
}
worker_insert(pollset, worker);
pollset->begin_refs--;
if (worker->kick_state == UNKICKED) {
GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
worker->initialized_cv = true;
gpr_cv_init(&worker->cv);
while (worker->kick_state == UNKICKED &&
pollset->shutdown_closure == NULL) {
if (gpr_cv_wait(&worker->cv, &pollset->mu, deadline) &&
worker->kick_state == UNKICKED) {
worker->kick_state = KICKED;
}
}
*now = gpr_now(now->clock_type);
}
return worker->kick_state == DESIGNATED_POLLER &&
pollset->shutdown_closure == NULL;
}
static bool check_neighbourhood_for_available_poller(
pollset_neighbourhood *neighbourhood) {
bool found_worker = false;
do {
grpc_pollset *inspect = neighbourhood->active_root;
if (inspect == NULL) {
break;
}
gpr_mu_lock(&inspect->mu);
GPR_ASSERT(!inspect->seen_inactive);
grpc_pollset_worker *inspect_worker = inspect->root_worker;
if (inspect_worker != NULL) {
do {
switch (inspect_worker->kick_state) {
case UNKICKED:
if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
(gpr_atm)inspect_worker)) {
inspect_worker->kick_state = DESIGNATED_POLLER;
if (inspect_worker->initialized_cv) {
gpr_cv_signal(&inspect_worker->cv);
}
}
// even if we didn't win the cas, there's a worker, we can stop
found_worker = true;
break;
case KICKED:
break;
case DESIGNATED_POLLER:
found_worker = true; // ok, so someone else found the worker, but
// we'll accept that
break;
}
inspect_worker = inspect_worker->next;
} while (inspect_worker != inspect->root_worker);
}
if (!found_worker) {
inspect->seen_inactive = true;
if (inspect == neighbourhood->active_root) {
neighbourhood->active_root =
inspect->next == inspect ? NULL : inspect->next;
}
inspect->next->prev = inspect->prev;
inspect->prev->next = inspect->next;
inspect->next = inspect->prev = NULL;
}
gpr_mu_unlock(&inspect->mu);
} while (!found_worker);
return found_worker;
}
static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *worker,
grpc_pollset_worker **worker_hdl) {
if (worker_hdl != NULL) *worker_hdl = NULL;
worker->kick_state = KICKED;
grpc_closure_list_move(&worker->schedule_on_end_work,
&exec_ctx->closure_list);
if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
if (worker->next != worker && worker->next->kick_state == UNKICKED) {
GPR_ASSERT(worker->next->initialized_cv);
gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
worker->next->kick_state = DESIGNATED_POLLER;
gpr_cv_signal(&worker->next->cv);
if (grpc_exec_ctx_has_work(exec_ctx)) {
gpr_mu_unlock(&pollset->mu);
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->mu);
}
} else {
gpr_atm_no_barrier_store(&g_active_poller, 0);
gpr_mu_unlock(&pollset->mu);
size_t poller_neighbourhood_idx =
(size_t)(pollset->neighbourhood - g_neighbourhoods);
bool found_worker = false;
bool scan_state[MAX_NEIGHBOURHOODS];
for (size_t i = 0; !found_worker && i < g_num_neighbourhoods; i++) {
pollset_neighbourhood *neighbourhood =
&g_neighbourhoods[(poller_neighbourhood_idx + i) %
g_num_neighbourhoods];
if (gpr_mu_trylock(&neighbourhood->mu)) {
found_worker =
check_neighbourhood_for_available_poller(neighbourhood);
gpr_mu_unlock(&neighbourhood->mu);
scan_state[i] = true;
} else {
scan_state[i] = false;
}
}
for (size_t i = 0; !found_worker && i < g_num_neighbourhoods; i++) {
if (scan_state[i]) continue;
pollset_neighbourhood *neighbourhood =
&g_neighbourhoods[(poller_neighbourhood_idx + i) %
g_num_neighbourhoods];
gpr_mu_lock(&neighbourhood->mu);
found_worker = check_neighbourhood_for_available_poller(neighbourhood);
gpr_mu_unlock(&neighbourhood->mu);
}
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->mu);
}
} else if (grpc_exec_ctx_has_work(exec_ctx)) {
gpr_mu_unlock(&pollset->mu);
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->mu);
}
if (worker->initialized_cv) {
gpr_cv_destroy(&worker->cv);
}
if (EMPTIED == worker_remove(pollset, worker)) {
pollset_maybe_finish_shutdown(exec_ctx, pollset);
}
GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
}
/* pollset->po.mu lock must be held by the caller before calling this.
The function pollset_work() may temporarily release the lock (pollset->po.mu)
during the course of its execution but it will always re-acquire the lock and
ensure that it is held by the time the function returns */
static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker_hdl,
gpr_timespec now, gpr_timespec deadline) {
grpc_pollset_worker worker;
grpc_error *error = GRPC_ERROR_NONE;
static const char *err_desc = "pollset_work";
if (pollset->kicked_without_poller) {
pollset->kicked_without_poller = false;
return GRPC_ERROR_NONE;
}
gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
if (begin_worker(pollset, &worker, worker_hdl, &now, deadline)) {
gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
GPR_ASSERT(!pollset->shutdown_closure);
GPR_ASSERT(!pollset->seen_inactive);
gpr_mu_unlock(&pollset->mu);
append_error(&error, pollset_epoll(exec_ctx, pollset, now, deadline),
err_desc);
gpr_mu_lock(&pollset->mu);
gpr_tls_set(&g_current_thread_worker, 0);
}
end_worker(exec_ctx, pollset, &worker, worker_hdl);
gpr_tls_set(&g_current_thread_pollset, 0);
return error;
}
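/* Editor's note (hypothetical caller, not part of this change): the locking
   contract documented above, expressed through the public grpc_pollset_*
   wrappers from pollset.h. GRPC_EXEC_CTX_INIT, grpc_exec_ctx_finish,
   gpr_zalloc and grpc_pollset_size are assumed from elsewhere in the tree. */
static void drive_pollset_sketch(bool *done) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  gpr_mu *mu;
  grpc_pollset *ps = gpr_zalloc(grpc_pollset_size());
  grpc_pollset_init(ps, &mu);
  gpr_mu_lock(mu);
  while (!*done) {
    grpc_pollset_worker *worker = NULL;
    /* may drop and re-acquire *mu internally; it is held again on return */
    GRPC_LOG_IF_ERROR("pollset_work",
                      grpc_pollset_work(&exec_ctx, ps, &worker,
                                        gpr_now(GPR_CLOCK_MONOTONIC),
                                        gpr_inf_future(GPR_CLOCK_MONOTONIC)));
  }
  gpr_mu_unlock(mu);
  grpc_exec_ctx_finish(&exec_ctx);
  /* pollset shutdown/destroy omitted for brevity */
}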
static grpc_error *pollset_kick(grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) {
if (specific_worker == NULL) {
if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
grpc_pollset_worker *root_worker = pollset->root_worker;
if (root_worker == NULL) {
pollset->kicked_without_poller = true;
return GRPC_ERROR_NONE;
}
grpc_pollset_worker *next_worker = root_worker->next;
if (root_worker == next_worker &&
root_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load(
&g_active_poller)) {
root_worker->kick_state = KICKED;
return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
} else if (next_worker->kick_state == UNKICKED) {
GPR_ASSERT(next_worker->initialized_cv);
next_worker->kick_state = KICKED;
gpr_cv_signal(&next_worker->cv);
return GRPC_ERROR_NONE;
} else {
return GRPC_ERROR_NONE;
}
} else {
return GRPC_ERROR_NONE;
}
} else if (specific_worker->kick_state == KICKED) {
return GRPC_ERROR_NONE;
} else if (gpr_tls_get(&g_current_thread_worker) ==
(intptr_t)specific_worker) {
specific_worker->kick_state = KICKED;
return GRPC_ERROR_NONE;
} else if (specific_worker ==
(grpc_pollset_worker *)gpr_atm_no_barrier_load(&g_active_poller)) {
specific_worker->kick_state = KICKED;
return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
} else if (specific_worker->initialized_cv) {
specific_worker->kick_state = KICKED;
gpr_cv_signal(&specific_worker->cv);
return GRPC_ERROR_NONE;
} else {
specific_worker->kick_state = KICKED;
return GRPC_ERROR_NONE;
}
}
static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_fd *fd) {}
/*******************************************************************************
* Workqueue Definitions
*/
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
const char *file, int line,
const char *reason) {
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {}
#else
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue) {}
#endif
static void wq_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error) {
// find a neighbourhood to wakeup
bool scheduled = false;
size_t initial_neighbourhood = choose_neighbourhood();
for (size_t i = 0; !scheduled && i < g_num_neighbourhoods; i++) {
pollset_neighbourhood *neighbourhood =
&g_neighbourhoods[(initial_neighbourhood + i) % g_num_neighbourhoods];
if (gpr_mu_trylock(&neighbourhood->mu)) {
if (neighbourhood->active_root != NULL) {
grpc_pollset *inspect = neighbourhood->active_root;
do {
if (gpr_mu_trylock(&inspect->mu)) {
if (inspect->root_worker != NULL) {
grpc_pollset_worker *inspect_worker = inspect->root_worker;
do {
if (inspect_worker->kick_state == UNKICKED) {
inspect_worker->kick_state = KICKED;
grpc_closure_list_append(
&inspect_worker->schedule_on_end_work, closure, error);
if (inspect_worker->initialized_cv) {
gpr_cv_signal(&inspect_worker->cv);
}
scheduled = true;
}
inspect_worker = inspect_worker->next;
} while (!scheduled && inspect_worker != inspect->root_worker);
}
gpr_mu_unlock(&inspect->mu);
}
inspect = inspect->next;
} while (!scheduled && inspect != neighbourhood->active_root);
}
gpr_mu_unlock(&neighbourhood->mu);
}
}
if (!scheduled) {
gpr_mu_lock(&g_wq_mu);
grpc_closure_list_append(&g_wq_items, closure, error);
gpr_mu_unlock(&g_wq_mu);
GRPC_LOG_IF_ERROR("workqueue_scheduler",
grpc_wakeup_fd_wakeup(&global_wakeup_fd));
}
}
static const grpc_closure_scheduler_vtable
singleton_workqueue_scheduler_vtable = {wq_sched, wq_sched,
"epoll1_workqueue"};
static grpc_closure_scheduler singleton_workqueue_scheduler = {
&singleton_workqueue_scheduler_vtable};
static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
return &singleton_workqueue_scheduler;
}
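/* Editor's note (sketch, not part of this change): how a closure could be
   bound to the singleton scheduler above. grpc_closure_create taking a
   grpc_closure_scheduler* is an assumption about closure.h in this tree. */
static void on_wq_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  (void)exec_ctx;
  (void)arg;
  (void)error;
}
static void schedule_on_workqueue_sketch(grpc_exec_ctx *exec_ctx) {
  grpc_closure *c =
      grpc_closure_create(on_wq_done, NULL, workqueue_scheduler(NULL));
  /* wq_sched() hands it to an UNKICKED poller, else queues it on g_wq_items */
  grpc_closure_sched(exec_ctx, c, GRPC_ERROR_NONE);
}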
/*******************************************************************************
* Pollset-set Definitions
*/
static grpc_pollset_set *pollset_set_create(void) {
return (grpc_pollset_set *)((intptr_t)0xdeafbeef);
}
static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *pss) {}
static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
grpc_fd *fd) {}
static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
grpc_fd *fd) {}
static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *pss, grpc_pollset *ps) {}
static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *pss, grpc_pollset *ps) {}
static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *bag,
grpc_pollset_set *item) {}
static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *bag,
grpc_pollset_set *item) {}
/*******************************************************************************
* Event engine binding
*/
static void shutdown_engine(void) {
fd_global_shutdown();
pollset_global_shutdown();
}
static const grpc_event_engine_vtable vtable = {
.pollset_size = sizeof(grpc_pollset),
.fd_create = fd_create,
.fd_wrapped_fd = fd_wrapped_fd,
.fd_orphan = fd_orphan,
.fd_shutdown = fd_shutdown,
.fd_is_shutdown = fd_is_shutdown,
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
.fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
.fd_get_workqueue = fd_get_workqueue,
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
.pollset_destroy = pollset_destroy,
.pollset_work = pollset_work,
.pollset_kick = pollset_kick,
.pollset_add_fd = pollset_add_fd,
.pollset_set_create = pollset_set_create,
.pollset_set_destroy = pollset_set_destroy,
.pollset_set_add_pollset = pollset_set_add_pollset,
.pollset_set_del_pollset = pollset_set_del_pollset,
.pollset_set_add_pollset_set = pollset_set_add_pollset_set,
.pollset_set_del_pollset_set = pollset_set_del_pollset_set,
.pollset_set_add_fd = pollset_set_add_fd,
.pollset_set_del_fd = pollset_set_del_fd,
.workqueue_ref = workqueue_ref,
.workqueue_unref = workqueue_unref,
.workqueue_scheduler = workqueue_scheduler,
.shutdown_engine = shutdown_engine,
};
/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
* Create a dummy epoll_fd to make sure epoll support is available */
const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) {
/* TODO(ctiller): temporary, until this stabilizes */
if (!explicit_request) return NULL;
if (!grpc_has_wakeup_fd()) {
return NULL;
}
g_epfd = epoll_create1(EPOLL_CLOEXEC);
if (g_epfd < 0) {
gpr_log(GPR_ERROR, "epoll unavailable");
return NULL;
}
fd_global_init();
if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
close(g_epfd);
fd_global_shutdown();
return NULL;
}
return &vtable;
}
#else /* defined(GRPC_LINUX_EPOLL) */
#if defined(GRPC_POSIX_SOCKET)
#include "src/core/lib/iomgr/ev_posix.h"
/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
* NULL */
const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) {
return NULL;
}
#endif /* defined(GRPC_POSIX_SOCKET) */
#endif /* !defined(GRPC_LINUX_EPOLL) */

@ -0,0 +1,44 @@
/*
*
* Copyright 2017, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_CORE_LIB_IOMGR_EV_EPOLL1_LINUX_H
#define GRPC_CORE_LIB_IOMGR_EV_EPOLL1_LINUX_H
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/port.h"
// a polling engine that utilizes a singleton epoll set and turnstile polling
const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request);
#endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLL1_LINUX_H */

File diff suppressed because it is too large

@ -0,0 +1,43 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_CORE_LIB_IOMGR_EV_EPOLL_LIMITED_POLLERS_LINUX_H
#define GRPC_CORE_LIB_IOMGR_EV_EPOLL_LIMITED_POLLERS_LINUX_H
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/port.h"
const grpc_event_engine_vtable *grpc_init_epoll_limited_pollers_linux(
bool explicitly_requested);
#endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLL_LIMITED_POLLERS_LINUX_H */

File diff suppressed because it is too large

@ -0,0 +1,43 @@
/*
*
* Copyright 2017, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_CORE_LIB_IOMGR_EV_EPOLL_THREAD_POOL_LINUX_H
#define GRPC_CORE_LIB_IOMGR_EV_EPOLL_THREAD_POOL_LINUX_H
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/port.h"
const grpc_event_engine_vtable *grpc_init_epoll_thread_pool_linux(
bool requested_explicitly);
#endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLL_THREAD_POOL_LINUX_H */

File diff suppressed because it is too large

@ -0,0 +1,43 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_CORE_LIB_IOMGR_EV_EPOLLEX_LINUX_H
#define GRPC_CORE_LIB_IOMGR_EV_EPOLLEX_LINUX_H
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/port.h"
const grpc_event_engine_vtable *grpc_init_epollex_linux(
bool explicitly_requested);
#endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLLEX_LINUX_H */

@ -36,7 +36,7 @@
/* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL
#include "src/core/lib/iomgr/ev_epoll_linux.h"
#include "src/core/lib/iomgr/ev_epollsig_linux.h"
#include <assert.h>
#include <errno.h>
@ -63,11 +63,11 @@
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"
/* TODO: sreek - Move this to init.c and initialize this like other tracers. */
static int grpc_polling_trace = 0; /* Disabled by default */
#define GRPC_POLLING_TRACE(fmt, ...) \
if (grpc_polling_trace) { \
gpr_log(GPR_INFO, (fmt), __VA_ARGS__); \
#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
#define GRPC_POLLING_TRACE(fmt, ...) \
if (GRPC_TRACER_ON(grpc_polling_trace)) { \
gpr_log(GPR_INFO, (fmt), __VA_ARGS__); \
}
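/* Editor's note (illustration, not part of this change): the hunk above is one
   instance of the int-flag -> grpc_tracer_flag migration made throughout this
   diff. A minimal sketch of the pattern with a made-up flag name; enabling it
   at runtime via the GRPC_TRACE environment variable is an assumption. */
static grpc_tracer_flag my_trace = GRPC_TRACER_INITIALIZER(false);
static void my_trace_init_sketch(void) {
  grpc_register_tracer("my_trace", &my_trace); /* typically once at startup */
}
static void my_op_sketch(void) {
  if (GRPC_TRACER_ON(my_trace)) {
    gpr_log(GPR_DEBUG, "my_op invoked");
  }
}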
/* Uncomment the following to enable extra checks on poll_object operations */
@ -76,11 +76,6 @@ static int grpc_polling_trace = 0; /* Disabled by default */
static int grpc_wakeup_signal = -1;
static bool is_grpc_wakeup_signal_initialized = false;
/* TODO: sreek: Right now, this wakes up all pollers. In future we should make
* sure to wake up one polling thread (which can wake up other threads if
* needed) */
static grpc_wakeup_fd global_wakeup_fd;
/* Implements the function defined in grpc_posix.h. This function might be
 * called even before grpc_init(), either to set a different signal to use
 * or, if signum == -1, to disable the use of signals entirely */
@ -454,8 +449,8 @@ static void polling_island_add_wakeup_fd_locked(polling_island *pi,
gpr_asprintf(&err_msg,
"epoll_ctl (epoll_fd: %d) add wakeup fd: %d failed with "
"error: %d (%s)",
pi->epoll_fd, GRPC_WAKEUP_FD_GET_READ_FD(&global_wakeup_fd),
errno, strerror(errno));
pi->epoll_fd, GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), errno,
strerror(errno));
append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
gpr_free(err_msg);
}
@ -558,7 +553,6 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
goto done;
}
polling_island_add_wakeup_fd_locked(pi, &global_wakeup_fd, error);
polling_island_add_wakeup_fd_locked(pi, &pi->workqueue_wakeup_fd, error);
if (initial_fd != NULL) {
@ -1116,11 +1110,10 @@ static grpc_error *pollset_global_init(void) {
gpr_tls_init(&g_current_thread_pollset);
gpr_tls_init(&g_current_thread_worker);
poller_kick_init();
return grpc_wakeup_fd_init(&global_wakeup_fd);
return GRPC_ERROR_NONE;
}
static void pollset_global_shutdown(void) {
grpc_wakeup_fd_destroy(&global_wakeup_fd);
gpr_tls_destroy(&g_current_thread_pollset);
gpr_tls_destroy(&g_current_thread_worker);
}
@ -1226,10 +1219,6 @@ static grpc_error *pollset_kick(grpc_pollset *p,
return error;
}
static grpc_error *kick_poller(void) {
return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
}
static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
gpr_mu_init(&pollset->po.mu);
*mu = &pollset->po.mu;
@ -1332,7 +1321,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
/* pollset_shutdown is guaranteed to be called before pollset_destroy. So other
* than destroying the mutexes, there is nothing special that needs to be done
* here */
static void pollset_destroy(grpc_pollset *pollset) {
static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
GPR_ASSERT(!pollset_has_workers(pollset));
gpr_mu_destroy(&pollset->po.mu);
}
@ -1453,11 +1442,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
for (int i = 0; i < ep_rv; ++i) {
void *data_ptr = ep_ev[i].data.ptr;
if (data_ptr == &global_wakeup_fd) {
grpc_timer_consume_kick();
append_error(error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
err_desc);
} else if (data_ptr == &pi->workqueue_wakeup_fd) {
if (data_ptr == &pi->workqueue_wakeup_fd) {
append_error(error,
grpc_wakeup_fd_consume_wakeup(&pi->workqueue_wakeup_fd),
err_desc);
@ -1897,8 +1882,6 @@ static const grpc_event_engine_vtable vtable = {
.pollset_set_add_fd = pollset_set_add_fd,
.pollset_set_del_fd = pollset_set_del_fd,
.kick_poller = kick_poller,
.workqueue_ref = workqueue_ref,
.workqueue_unref = workqueue_unref,
.workqueue_scheduler = workqueue_scheduler,
@ -1921,7 +1904,8 @@ static bool is_epoll_available() {
return true;
}
const grpc_event_engine_vtable *grpc_init_epoll_linux(void) {
const grpc_event_engine_vtable *grpc_init_epollsig_linux(
bool explicit_request) {
/* If use of signals is disabled, we cannot use epoll engine*/
if (is_grpc_wakeup_signal_initialized && grpc_wakeup_signal < 0) {
return NULL;
@ -1936,7 +1920,13 @@ const grpc_event_engine_vtable *grpc_init_epoll_linux(void) {
}
if (!is_grpc_wakeup_signal_initialized) {
grpc_use_signal(SIGRTMIN + 6);
/* TODO(ctiller): when other epoll engines are ready, remove the true || to
* force this to be explicitly chosen if needed */
if (true || explicit_request) {
grpc_use_signal(SIGRTMIN + 6);
} else {
return NULL;
}
}
fd_global_init();
@ -1958,7 +1948,10 @@ const grpc_event_engine_vtable *grpc_init_epoll_linux(void) {
#include "src/core/lib/iomgr/ev_posix.h"
/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
* NULL */
const grpc_event_engine_vtable *grpc_init_epoll_linux(void) { return NULL; }
const grpc_event_engine_vtable *grpc_init_epollsig_linux(
bool explicit_request) {
return NULL;
}
#endif /* defined(GRPC_POSIX_SOCKET) */
void grpc_use_signal(int signum) {}

@ -31,13 +31,13 @@
*
*/
#ifndef GRPC_CORE_LIB_IOMGR_EV_EPOLL_LINUX_H
#define GRPC_CORE_LIB_IOMGR_EV_EPOLL_LINUX_H
#ifndef GRPC_CORE_LIB_IOMGR_EV_EPOLLSIG_LINUX_H
#define GRPC_CORE_LIB_IOMGR_EV_EPOLLSIG_LINUX_H
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/port.h"
const grpc_event_engine_vtable *grpc_init_epoll_linux(void);
const grpc_event_engine_vtable *grpc_init_epollsig_linux(bool explicit_request);
#ifdef GRPC_LINUX_EPOLL
void *grpc_fd_get_polling_island(grpc_fd *fd);
@ -45,4 +45,4 @@ void *grpc_pollset_get_polling_island(grpc_pollset *ps);
bool grpc_are_polling_islands_equal(void *p, void *q);
#endif /* defined(GRPC_LINUX_EPOLL) */
#endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLL_LINUX_H */
#endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLLSIG_LINUX_H */

@ -58,6 +58,8 @@
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"
#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
/*******************************************************************************
* FD declarations
*/
@ -122,8 +124,6 @@ struct grpc_fd {
grpc_pollset *read_notifier_pollset;
};
static grpc_wakeup_fd global_wakeup_fd;
/* Begin polling on an fd.
Registers that the given pollset is interested in this fd - so that if read
or writability interest changes, the pollset can be kicked to pick up that
@ -784,19 +784,14 @@ static grpc_error *pollset_kick(grpc_pollset *p,
static grpc_error *pollset_global_init(void) {
gpr_tls_init(&g_current_thread_poller);
gpr_tls_init(&g_current_thread_worker);
return grpc_wakeup_fd_init(&global_wakeup_fd);
return GRPC_ERROR_NONE;
}
static void pollset_global_shutdown(void) {
grpc_wakeup_fd_destroy(&global_wakeup_fd);
gpr_tls_destroy(&g_current_thread_poller);
gpr_tls_destroy(&g_current_thread_worker);
}
static grpc_error *kick_poller(void) {
return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
}
/* main interface */
static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
@ -815,7 +810,7 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
pollset->pollset_set_count = 0;
}
static void pollset_destroy(grpc_pollset *pollset) {
static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
GPR_ASSERT(!pollset_has_workers(pollset));
GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail);
while (pollset->local_wakeup_cache) {
@ -952,13 +947,10 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
fd_count = 0;
pfd_count = 2;
pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&global_wakeup_fd);
pfd_count = 1;
pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker.wakeup_fd->fd);
pfds[0].events = POLLIN;
pfds[0].revents = 0;
pfds[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker.wakeup_fd->fd);
pfds[1].events = POLLIN;
pfds[1].revents = 0;
for (i = 0; i < pollset->fd_count; i++) {
if (fd_is_orphaned(pollset->fds[i])) {
GRPC_FD_UNREF(pollset->fds[i], "multipoller");
@ -974,7 +966,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollset->fd_count = fd_count;
gpr_mu_unlock(&pollset->mu);
for (i = 2; i < pfd_count; i++) {
for (i = 1; i < pfd_count; i++) {
grpc_fd *fd = watchers[i].fd;
pfds[i].events = (short)fd_begin_poll(fd, pollset, &worker, POLLIN,
POLLOUT, &watchers[i]);
@ -992,7 +984,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
work_combine_error(&error, GRPC_OS_ERROR(errno, "poll"));
}
for (i = 2; i < pfd_count; i++) {
for (i = 1; i < pfd_count; i++) {
if (watchers[i].fd == NULL) {
fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
} else {
@ -1002,20 +994,15 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
}
} else if (r == 0) {
for (i = 2; i < pfd_count; i++) {
for (i = 1; i < pfd_count; i++) {
fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
}
} else {
if (pfds[0].revents & POLLIN_CHECK) {
grpc_timer_consume_kick();
work_combine_error(&error,
grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd));
}
if (pfds[1].revents & POLLIN_CHECK) {
work_combine_error(
&error, grpc_wakeup_fd_consume_wakeup(&worker.wakeup_fd->fd));
}
for (i = 2; i < pfd_count; i++) {
for (i = 1; i < pfd_count; i++) {
if (watchers[i].fd == NULL) {
fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
} else {
@ -1560,8 +1547,6 @@ static const grpc_event_engine_vtable vtable = {
.pollset_set_add_fd = pollset_set_add_fd,
.pollset_set_del_fd = pollset_set_del_fd,
.kick_poller = kick_poller,
.workqueue_ref = workqueue_ref,
.workqueue_unref = workqueue_unref,
.workqueue_scheduler = workqueue_scheduler,
@ -1569,7 +1554,7 @@ static const grpc_event_engine_vtable vtable = {
.shutdown_engine = shutdown_engine,
};
const grpc_event_engine_vtable *grpc_init_poll_posix(void) {
const grpc_event_engine_vtable *grpc_init_poll_posix(bool explicit_request) {
if (!grpc_has_wakeup_fd()) {
return NULL;
}
@ -1579,7 +1564,7 @@ const grpc_event_engine_vtable *grpc_init_poll_posix(void) {
return &vtable;
}
const grpc_event_engine_vtable *grpc_init_poll_cv_posix(void) {
const grpc_event_engine_vtable *grpc_init_poll_cv_posix(bool explicit_request) {
global_cv_fd_table_init();
grpc_enable_cv_wakeup_fds(1);
if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {

@ -36,7 +36,7 @@
#include "src/core/lib/iomgr/ev_posix.h"
const grpc_event_engine_vtable *grpc_init_poll_posix(void);
const grpc_event_engine_vtable *grpc_init_poll_cv_posix(void);
const grpc_event_engine_vtable *grpc_init_poll_posix(bool explicit_request);
const grpc_event_engine_vtable *grpc_init_poll_cv_posix(bool explicit_request);
#endif /* GRPC_CORE_LIB_IOMGR_EV_POLL_POSIX_H */

@ -44,10 +44,18 @@
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/ev_epoll_linux.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/ev_epoll1_linux.h"
#include "src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h"
#include "src/core/lib/iomgr/ev_epoll_thread_pool_linux.h"
#include "src/core/lib/iomgr/ev_epollex_linux.h"
#include "src/core/lib/iomgr/ev_epollsig_linux.h"
#include "src/core/lib/iomgr/ev_poll_posix.h"
#include "src/core/lib/support/env.h"
grpc_tracer_flag grpc_polling_trace =
GRPC_TRACER_INITIALIZER(false); /* Disabled by default */
/** Default poll() function - a pointer so that it can be overridden by some
* tests */
grpc_poll_function_type grpc_poll_function = poll;
@ -57,7 +65,8 @@ grpc_wakeup_fd grpc_global_wakeup_fd;
static const grpc_event_engine_vtable *g_event_engine;
static const char *g_poll_strategy_name = NULL;
typedef const grpc_event_engine_vtable *(*event_engine_factory_fn)(void);
typedef const grpc_event_engine_vtable *(*event_engine_factory_fn)(
bool explicit_request);
typedef struct {
const char *name;
@ -65,7 +74,11 @@ typedef struct {
} event_engine_factory;
static const event_engine_factory g_factories[] = {
{"epoll", grpc_init_epoll_linux},
{"epollex", grpc_init_epollex_linux},
{"epollsig", grpc_init_epollsig_linux},
{"epoll1", grpc_init_epoll1_linux},
{"epoll-threadpool", grpc_init_epoll_thread_pool_linux},
{"epoll-limited", grpc_init_epoll_limited_pollers_linux},
{"poll", grpc_init_poll_posix},
{"poll-cv", grpc_init_poll_cv_posix},
};
@ -102,7 +115,8 @@ static bool is(const char *want, const char *have) {
static void try_engine(const char *engine) {
for (size_t i = 0; i < GPR_ARRAY_SIZE(g_factories); i++) {
if (is(engine, g_factories[i].name)) {
if ((g_event_engine = g_factories[i].factory())) {
if ((g_event_engine = g_factories[i].factory(
0 == strcmp(engine, g_factories[i].name)))) {
g_poll_strategy_name = g_factories[i].name;
gpr_log(GPR_DEBUG, "Using polling engine: %s", g_factories[i].name);
return;
@ -121,6 +135,8 @@ void grpc_set_event_engine_test_only(
const char *grpc_get_poll_strategy_name() { return g_poll_strategy_name; }
void grpc_event_engine_init(void) {
grpc_register_tracer("polling", &grpc_polling_trace);
char *s = gpr_getenv("GRPC_POLL_STRATEGY");
if (s == NULL) {
s = gpr_strdup("all");
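/* Editor's note (sketch, not part of this change): forcing one of the new
   engines from application code. setenv/grpc_init usage here is an
   assumption; note that epoll1 above returns NULL unless explicitly requested
   by name, so the default "all" strategy will not select it. */
#include <stdlib.h>
#include <grpc/grpc.h>
int poll_strategy_sketch(void) {
  setenv("GRPC_POLL_STRATEGY", "epoll1", 1); /* must happen before grpc_init() */
  grpc_init();
  /* ... "epoll1" is used only if its probe (epoll_create1) succeeds ... */
  grpc_shutdown();
  return 0;
}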
@ -197,8 +213,8 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
g_event_engine->pollset_shutdown(exec_ctx, pollset, closure);
}
void grpc_pollset_destroy(grpc_pollset *pollset) {
g_event_engine->pollset_destroy(pollset);
void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
g_event_engine->pollset_destroy(exec_ctx, pollset);
}
grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
@ -260,8 +276,6 @@ void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
g_event_engine->pollset_set_del_fd(exec_ctx, pollset_set, fd);
}
grpc_error *grpc_kick_poller(void) { return g_event_engine->kick_poller(); }
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
int line, const char *reason) {

@ -36,12 +36,15 @@
#include <poll.h>
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/iomgr/workqueue.h"
extern grpc_tracer_flag grpc_polling_trace; /* Disabled by default */
typedef struct grpc_fd grpc_fd;
typedef struct grpc_event_engine_vtable {
@ -64,7 +67,7 @@ typedef struct grpc_event_engine_vtable {
void (*pollset_init)(grpc_pollset *pollset, gpr_mu **mu);
void (*pollset_shutdown)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure);
void (*pollset_destroy)(grpc_pollset *pollset);
void (*pollset_destroy)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset);
grpc_error *(*pollset_work)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker, gpr_timespec now,
gpr_timespec deadline);
@ -93,8 +96,6 @@ typedef struct grpc_event_engine_vtable {
void (*pollset_set_del_fd)(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *pollset_set, grpc_fd *fd);
grpc_error *(*kick_poller)(void);
void (*shutdown_engine)(void);
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG

@ -62,6 +62,11 @@ bool grpc_always_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored) {
return true;
}
bool grpc_exec_ctx_has_work(grpc_exec_ctx *exec_ctx) {
return exec_ctx->active_combiner != NULL ||
!grpc_closure_list_empty(exec_ctx->closure_list);
}
bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
bool did_something = 0;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush", 0);

@ -93,6 +93,8 @@ struct grpc_exec_ctx {
extern grpc_closure_scheduler *grpc_schedule_on_exec_ctx;
bool grpc_exec_ctx_has_work(grpc_exec_ctx *exec_ctx);
/** Flush any work that has been enqueued onto this grpc_exec_ctx.
* Caller must guarantee that no interfering locks are held.
* Returns true if work was performed, false otherwise. */

@ -47,6 +47,7 @@
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/network_status_tracker.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/timer_manager.h"
#include "src/core/lib/support/env.h"
#include "src/core/lib/support/string.h"
@ -67,6 +68,8 @@ void grpc_iomgr_init(void) {
grpc_iomgr_platform_init();
}
void grpc_iomgr_start(void) { grpc_timer_manager_init(); }
static size_t count_objects(void) {
grpc_iomgr_object *obj;
size_t n = 0;
@ -88,6 +91,7 @@ void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx) {
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(10, GPR_TIMESPAN));
gpr_timespec last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
grpc_timer_manager_shutdown();
grpc_iomgr_platform_flush();
gpr_mu_lock(&g_mu);

@ -40,6 +40,9 @@
/** Initializes the iomgr. */
void grpc_iomgr_init(void);
/** Starts any background threads for iomgr. */
void grpc_iomgr_start(void);
/** Signals the intention to shutdown the iomgr. Expects to be able to flush
* exec_ctx. */
void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx);
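/* Editor's note (sketch, not part of this change): the lifecycle implied by
   the declarations above; in practice grpc_init()/grpc_shutdown() drive this
   internally. GRPC_EXEC_CTX_INIT and grpc_exec_ctx_finish are assumed. */
static void iomgr_lifecycle_sketch(void) {
  grpc_iomgr_init();  /* platform iomgr state */
  grpc_iomgr_start(); /* new in this diff: starts the timer manager */
  /* ... application runs ... */
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_iomgr_shutdown(&exec_ctx); /* flushes exec_ctx; warns after ~10s */
  grpc_exec_ctx_finish(&exec_ctx);
}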

@ -0,0 +1,116 @@
/*
*
* Copyright 2017, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/lib/iomgr/port.h"
#include "src/core/lib/iomgr/is_epollexclusive_available.h"
#ifdef GRPC_LINUX_EPOLL
#include <grpc/support/log.h>
#include <errno.h>
#include <sys/eventfd.h>
#include <unistd.h>
#include "src/core/lib/iomgr/sys_epoll_wrapper.h"
/* This polling engine is only relevant on linux kernels supporting epoll() */
bool grpc_is_epollexclusive_available(void) {
static bool logged_why_not = false;
int fd = epoll_create1(EPOLL_CLOEXEC);
if (fd < 0) {
if (!logged_why_not) {
gpr_log(GPR_ERROR,
"epoll_create1 failed with error: %d. Not using epollex polling "
"engine.",
fd);
logged_why_not = true;
}
return false;
}
int evfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
if (evfd < 0) {
if (!logged_why_not) {
gpr_log(GPR_ERROR,
"eventfd failed with error: %d. Not using epollex polling "
"engine.",
fd);
logged_why_not = true;
}
close(fd);
return false;
}
struct epoll_event ev = {
/* choose events that should cause an error on
EPOLLEXCLUSIVE enabled kernels - specifically the combination of
EPOLLONESHOT and EPOLLEXCLUSIVE */
.events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLEXCLUSIVE | EPOLLONESHOT),
.data.ptr = NULL};
if (epoll_ctl(fd, EPOLL_CTL_ADD, evfd, &ev) != 0) {
if (errno != EINVAL) {
if (!logged_why_not) {
gpr_log(
GPR_ERROR,
"epoll_ctl with EPOLLEXCLUSIVE | EPOLLONESHOT failed with error: "
"%d. Not using epollex polling engine.",
errno);
logged_why_not = true;
}
close(fd);
close(evfd);
return false;
}
} else {
if (!logged_why_not) {
gpr_log(GPR_ERROR,
"epoll_ctl with EPOLLEXCLUSIVE | EPOLLONESHOT succeeded. This is "
"evidence of no EPOLLEXCLUSIVE support. Not using "
"epollex polling engine.");
logged_why_not = true;
}
close(fd);
close(evfd);
return false;
}
close(evfd);
close(fd);
return true;
}
#else
bool grpc_is_epollexclusive_available(void) { return false; }
#endif
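/* Editor's note (sketch, not part of this change): one way a caller could
   gate engine selection on the probe above; only the two declarations used
   here appear elsewhere in this diff, the surrounding wiring is hypothetical. */
#include "src/core/lib/iomgr/ev_epollex_linux.h"
#include "src/core/lib/iomgr/is_epollexclusive_available.h"
static const grpc_event_engine_vtable *maybe_epollex_sketch(
    bool explicit_request) {
  if (!grpc_is_epollexclusive_available()) return NULL;
  return grpc_init_epollex_linux(explicit_request);
}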

@ -31,22 +31,11 @@
*
*/
/* This is just a compilation test, to see if we have C++11. */
#ifndef GRPC_CORE_LIB_IOMGR_IS_EPOLLEXCLUSIVE_AVAILABLE_H
#define GRPC_CORE_LIB_IOMGR_IS_EPOLLEXCLUSIVE_AVAILABLE_H
#include <stdlib.h>
#include <zlib.h>
#include <stdbool.h>
class Base {
public:
virtual void foo() = 0;
};
bool grpc_is_epollexclusive_available(void);
class Foo final : public Base {
public:
void foo() override {}
};
int main() {
Foo().foo();
return 0;
}
#endif /* GRPC_CORE_LIB_IOMGR_IS_EPOLLEXCLUSIVE_AVAILABLE_H */

@ -35,6 +35,10 @@
#include <grpc/support/log.h>
#include "src/core/lib/debug/trace.h"
extern grpc_tracer_flag grpc_polling_trace;
/* 'state' holds the closure to call when the fd is readable or writable respectively.
It can contain one of the following values:
CLOSURE_READY : The fd has an I/O event of interest but there is no
@ -93,6 +97,10 @@ void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state,
grpc_closure *closure) {
while (true) {
gpr_atm curr = gpr_atm_no_barrier_load(state);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "lfev_notify_on: %p curr=%p closure=%p", state,
(void *)curr, closure);
}
switch (curr) {
case CLOSURE_NOT_READY: {
/* CLOSURE_NOT_READY -> <closure>.
@ -155,6 +163,10 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state,
while (true) {
gpr_atm curr = gpr_atm_no_barrier_load(state);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "lfev_set_shutdown: %p curr=%p err=%s", state,
(void *)curr, grpc_error_string(shutdown_err));
}
switch (curr) {
case CLOSURE_READY:
case CLOSURE_NOT_READY:
@ -200,6 +212,10 @@ void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state) {
while (true) {
gpr_atm curr = gpr_atm_no_barrier_load(state);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "lfev_set_ready: %p curr=%p", state, (void *)curr);
}
switch (curr) {
case CLOSURE_READY: {
/* Already ready. We are done here */
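/* Editor's note (simplified illustration, not the actual grpc_lfev code and
   not part of this change): the CLOSURE_NOT_READY / CLOSURE_READY handoff
   described above, ignoring the shutdown states the real code handles. */
#define SKETCH_NOT_READY ((gpr_atm)0)
#define SKETCH_READY ((gpr_atm)2) /* distinct from any aligned closure ptr */
static void lfev_notify_on_sketch(grpc_exec_ctx *exec_ctx, gpr_atm *state,
                                  grpc_closure *closure) {
  for (;;) {
    gpr_atm curr = gpr_atm_no_barrier_load(state);
    if (curr == SKETCH_NOT_READY) {
      /* park the closure until the event fires */
      if (gpr_atm_rel_cas(state, SKETCH_NOT_READY, (gpr_atm)closure)) return;
    } else if (curr == SKETCH_READY) {
      /* event already happened: consume it and run the closure now */
      if (gpr_atm_no_barrier_cas(state, SKETCH_READY, SKETCH_NOT_READY)) {
        grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
        return;
      }
    } else {
      /* a closure is already parked: misuse in this simplified model */
      GPR_ASSERT(false);
    }
  }
}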

@ -40,8 +40,6 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
/* A grpc_pollset is a set of file descriptors that a higher level item is
interested in. For example:
- a server will typically keep a pollset containing all connected channels,
@ -59,7 +57,7 @@ void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu);
* pollset's mutex must be held */
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure);
void grpc_pollset_destroy(grpc_pollset *pollset);
void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset);
/* Do some work on a pollset.
May involve invoking asynchronous callbacks, or actually polling file
@ -88,8 +86,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
gpr_timespec deadline) GRPC_MUST_USE_RESULT;
/* Break one polling thread out of polling work for this pollset.
If specific_worker is GRPC_POLLSET_KICK_BROADCAST, kick ALL the workers.
Otherwise, if specific_worker is non-NULL, then kick that worker. */
If specific_worker is non-NULL, then kick that worker. */
grpc_error *grpc_pollset_kick(grpc_pollset *pollset,
grpc_pollset_worker *specific_worker)
GRPC_MUST_USE_RESULT;
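/* Editor's note (hypothetical, not part of this change): the kick side of the
   contract above - another thread breaking a poller out of grpc_pollset_work.
   Holding the pollset mutex here is this sketch's assumption, matching how
   the implementations above read worker state without taking a lock. */
static void wake_pollset_sketch(grpc_pollset *ps, gpr_mu *mu) {
  gpr_mu_lock(mu);
  /* NULL means "kick any worker on this pollset" */
  GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(ps, NULL));
  gpr_mu_unlock(mu);
}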

@ -106,7 +106,7 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
}
void grpc_pollset_destroy(grpc_pollset *pollset) {
void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
uv_close((uv_handle_t *)&pollset->timer, timer_close_cb);
// timer.data is a boolean indicating that the timer has finished closing
pollset->timer.data = (void *)0;

@ -43,6 +43,8 @@
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_windows.h"
#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
gpr_mu grpc_polling_mu;
static grpc_pollset_worker *g_active_poller;
static grpc_pollset_worker g_global_root_worker;
@ -114,7 +116,7 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
}
void grpc_pollset_destroy(grpc_pollset *pollset) {}
void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {}
grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker_hdl,
@ -227,6 +229,4 @@ grpc_error *grpc_pollset_kick(grpc_pollset *p,
return GRPC_ERROR_NONE;
}
void grpc_kick_poller(void) { grpc_iocp_kick(); }
#endif /* GRPC_WINSOCK_SOCKET */

@ -88,6 +88,7 @@
#ifndef __GLIBC__
#define GRPC_LINUX_EPOLL 1
#define GRPC_LINUX_EVENTFD 1
#define GRPC_MSG_IOVLEN_TYPE int
#endif
#ifndef GRPC_LINUX_EVENTFD
#define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1

@ -44,7 +44,7 @@
#include "src/core/lib/iomgr/combiner.h"
int grpc_resource_quota_trace = 0;
grpc_tracer_flag grpc_resource_quota_trace = GRPC_TRACER_INITIALIZER(false);
#define MEMORY_USAGE_ESTIMATION_MAX 65536
@ -307,13 +307,14 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx,
resource_user->free_pool = 0;
resource_quota->free_pool -= amt;
rq_update_estimate(resource_quota);
if (grpc_resource_quota_trace) {
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
gpr_log(GPR_DEBUG, "RQ %s %s: grant alloc %" PRId64
" bytes; rq_free_pool -> %" PRId64,
resource_quota->name, resource_user->name, amt,
resource_quota->free_pool);
}
} else if (grpc_resource_quota_trace && resource_user->free_pool >= 0) {
} else if (GRPC_TRACER_ON(grpc_resource_quota_trace) &&
resource_user->free_pool >= 0) {
gpr_log(GPR_DEBUG, "RQ %s %s: discard already satisfied alloc request",
resource_quota->name, resource_user->name);
}
@ -342,7 +343,7 @@ static bool rq_reclaim_from_per_user_free_pool(
resource_user->free_pool = 0;
resource_quota->free_pool += amt;
rq_update_estimate(resource_quota);
if (grpc_resource_quota_trace) {
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
gpr_log(GPR_DEBUG, "RQ %s %s: reclaim_from_per_user_free_pool %" PRId64
" bytes; rq_free_pool -> %" PRId64,
resource_quota->name, resource_user->name, amt,
@ -365,7 +366,7 @@ static bool rq_reclaim(grpc_exec_ctx *exec_ctx,
: GRPC_RULIST_RECLAIMER_BENIGN;
grpc_resource_user *resource_user = rulist_pop_head(resource_quota, list);
if (resource_user == NULL) return false;
if (grpc_resource_quota_trace) {
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
gpr_log(GPR_DEBUG, "RQ %s %s: initiate %s reclamation",
resource_quota->name, resource_user->name,
destructive ? "destructive" : "benign");
@ -786,7 +787,7 @@ void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&resource_user->mu);
ru_ref_by(resource_user, (gpr_atm)size);
resource_user->free_pool -= (int64_t)size;
if (grpc_resource_quota_trace) {
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
gpr_log(GPR_DEBUG, "RQ %s %s: alloc %" PRIdPTR "; free_pool -> %" PRId64,
resource_user->resource_quota->name, resource_user->name, size,
resource_user->free_pool);
@ -810,7 +811,7 @@ void grpc_resource_user_free(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&resource_user->mu);
bool was_zero_or_negative = resource_user->free_pool <= 0;
resource_user->free_pool += (int64_t)size;
if (grpc_resource_quota_trace) {
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
gpr_log(GPR_DEBUG, "RQ %s %s: free %" PRIdPTR "; free_pool -> %" PRId64,
resource_user->resource_quota->name, resource_user->name, size,
resource_user->free_pool);
@ -839,7 +840,7 @@ void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx,
void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user) {
if (grpc_resource_quota_trace) {
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
gpr_log(GPR_DEBUG, "RQ %s %s: reclamation complete",
resource_user->resource_quota->name, resource_user->name);
}

@ -36,6 +36,7 @@
#include <grpc/grpc.h>
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/exec_ctx.h"
/** \file Tracks resource usage against a pool.
@ -75,7 +76,7 @@
maintain lists of users (which users arrange to leave before they are
destroyed) */
extern int grpc_resource_quota_trace;
extern grpc_tracer_flag grpc_resource_quota_trace;
grpc_resource_quota *grpc_resource_quota_ref_internal(
grpc_resource_quota *resource_quota);
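/* Editor's note (sketch, not part of this change): basic use of the pool
   described above via the public API; grpc_resource_quota_create/resize/unref
   are assumed from <grpc/grpc.h> and are not shown in this diff. */
static void resource_quota_sketch(void) {
  grpc_resource_quota *rq = grpc_resource_quota_create("example_quota");
  grpc_resource_quota_resize(rq, 64 * 1024 * 1024); /* 64 MiB pool */
  /* ... attach to a channel or server via channel args in real code ... */
  grpc_resource_quota_unref(rq);
}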

@ -0,0 +1,43 @@
/*
*
* Copyright 2017, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_CORE_LIB_IOMGR_SYS_EPOLL_WRAPPER_H
#define GRPC_CORE_LIB_IOMGR_SYS_EPOLL_WRAPPER_H
#include <sys/epoll.h>
#ifndef EPOLLEXCLUSIVE
#define EPOLLEXCLUSIVE (1 << 28)
#endif
#endif /* GRPC_CORE_LIB_IOMGR_SYS_EPOLL_WRAPPER_H */

@ -58,7 +58,7 @@
#include "src/core/lib/iomgr/unix_sockets_posix.h"
#include "src/core/lib/support/string.h"
extern int grpc_tcp_trace;
extern grpc_tracer_flag grpc_tcp_trace;
typedef struct {
gpr_mu mu;
@ -114,7 +114,7 @@ done:
static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
int done;
async_connect *ac = acp;
if (grpc_tcp_trace) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s", ac->addr_str,
str);
@ -152,7 +152,7 @@ static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
GRPC_ERROR_REF(error);
if (grpc_tcp_trace) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_writable: error=%s",
ac->addr_str, str);
@ -330,9 +330,9 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
grpc_schedule_on_exec_ctx);
ac->channel_args = grpc_channel_args_copy(channel_args);
if (grpc_tcp_trace) {
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting",
ac->addr_str);
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting fd %p",
ac->addr_str, fdobj);
}
gpr_mu_lock(&ac->mu);

@ -46,7 +46,7 @@
#include "src/core/lib/iomgr/tcp_uv.h"
#include "src/core/lib/iomgr/timer.h"
extern int grpc_tcp_trace;
extern grpc_tracer_flag grpc_tcp_trace;
typedef struct grpc_uv_tcp_connect {
uv_connect_t connect_req;
@ -72,7 +72,7 @@ static void uv_tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp,
grpc_error *error) {
int done;
grpc_uv_tcp_connect *connect = acp;
if (grpc_tcp_trace) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s",
connect->addr_name, str);
@ -156,7 +156,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
uv_tcp_init(uv_default_loop(), connect->tcp_handle);
connect->connect_req.data = connect;
if (grpc_tcp_trace) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting",
connect->addr_name);
}

@ -74,7 +74,7 @@ typedef GRPC_MSG_IOVLEN_TYPE msg_iovlen_type;
typedef size_t msg_iovlen_type;
#endif
int grpc_tcp_trace = 0;
grpc_tracer_flag grpc_tcp_trace = GRPC_TRACER_INITIALIZER(false);
typedef struct {
grpc_endpoint base;
@ -221,7 +221,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
grpc_error *error) {
grpc_closure *cb = tcp->read_cb;
if (grpc_tcp_trace) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
size_t i;
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "read: error=%s", str);
@ -468,14 +468,14 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
}
if (!tcp_flush(tcp, &error)) {
if (grpc_tcp_trace) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "write: delayed");
}
grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
} else {
cb = tcp->write_cb;
tcp->write_cb = NULL;
if (grpc_tcp_trace) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "write: %s", str);
}
@ -490,7 +490,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_tcp *tcp = (grpc_tcp *)ep;
grpc_error *error = GRPC_ERROR_NONE;
if (grpc_tcp_trace) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
size_t i;
for (i = 0; i < buf->count; i++) {
@ -521,12 +521,12 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
if (!tcp_flush(tcp, &error)) {
TCP_REF(tcp, "write");
tcp->write_cb = cb;
if (grpc_tcp_trace) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "write: delayed");
}
grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
} else {
if (grpc_tcp_trace) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "write: %s", str);
}

@ -44,10 +44,11 @@
otherwise specified.
*/
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/ev_posix.h"
extern int grpc_tcp_trace;
extern grpc_tracer_flag grpc_tcp_trace;
/* Create a tcp endpoint given a file descriptor and a read slice size.
Takes ownership of fd. */

@ -257,7 +257,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
addr_str = grpc_sockaddr_to_uri(&addr);
gpr_asprintf(&name, "tcp-server-connection:%s", addr_str);
if (grpc_tcp_trace) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "SERVER_CONNECT: incoming connection: %s", addr_str);
}

Some files were not shown because too many files have changed in this diff
