Merge branch 'master' into fix-time

Branch: pull/14894/head
Author: Sree Kuchibhotla
Commit: 6d5c2c250d
Changed files (additions + deletions per file):

  1. .github/ISSUE_TEMPLATE.md (14)
  2. .pylintrc-tests (108)
  3. BUILD (24)
  4. CMakeLists.txt (175)
  5. Makefile (278)
  6. README.md (1)
  7. bazel/grpc_deps.bzl (4)
  8. build.yaml (57)
  9. config.m4 (2)
  10. config.w32 (1)
  11. doc/environment_variables.md (1)
  12. doc/g_stands_for.md (31)
  13. etc/roots.pem (33)
  14. examples/csharp/helloworld-from-cli/global.json (5)
  15. examples/node/dynamic_codegen/route_guide/route_guide_server.js (2)
  16. gRPC-C++.podspec (5)
  17. gRPC-Core.podspec (6)
  18. gRPC-ProtoRPC.podspec (2)
  19. gRPC-RxLibrary.podspec (2)
  20. gRPC.podspec (2)
  21. grpc.gemspec (2)
  22. grpc.gyp (18)
  23. include/grpc/support/log.h (6)
  24. package.xml (6)
  25. src/compiler/node_generator.cc (19)
  26. src/compiler/node_generator.h (9)
  27. src/compiler/node_plugin.cc (22)
  28. src/compiler/objective_c_generator.cc (3)
  29. src/core/ext/filters/client_channel/client_channel.cc (454)
  30. src/core/ext/filters/client_channel/http_connect_handshaker.cc (2)
  31. src/core/ext/filters/client_channel/lb_policy.cc (4)
  32. src/core/ext/filters/client_channel/lb_policy.h (4)
  33. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (37)
  34. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h (36)
  35. src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc (349)
  36. src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc (709)
  37. src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc (253)
  38. src/core/ext/filters/client_channel/lb_policy/subchannel_list.h (596)
  39. src/core/ext/filters/client_channel/method_params.h (4)
  40. src/core/ext/filters/client_channel/resolver.h (4)
  41. src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc (12)
  42. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc (5)
  43. src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc (12)
  44. src/core/ext/filters/client_channel/retry_throttle.h (4)
  45. src/core/ext/filters/http/message_compress/message_compress_filter.cc (4)
  46. src/core/ext/transport/chttp2/transport/chttp2_transport.cc (25)
  47. src/core/ext/transport/chttp2/transport/frame_settings.cc (6)
  48. src/core/ext/transport/chttp2/transport/hpack_encoder.cc (4)
  49. src/core/ext/transport/chttp2/transport/hpack_parser.cc (2)
  50. src/core/ext/transport/chttp2/transport/hpack_table.cc (4)
  51. src/core/ext/transport/chttp2/transport/stream_lists.cc (6)
  52. src/core/ext/transport/chttp2/transport/writing.cc (10)
  53. src/core/ext/transport/inproc/inproc_transport.cc (84)
  54. src/core/lib/channel/channel_args.cc (28)
  55. src/core/lib/channel/channel_args.h (4)
  56. src/core/lib/channel/handshaker.cc (47)
  57. src/core/lib/channel/handshaker.h (4)
  58. src/core/lib/debug/trace.h (11)
  59. src/core/lib/gprpp/orphanable.h (8)
  60. src/core/lib/gprpp/ref_counted.h (8)
  61. src/core/lib/iomgr/call_combiner.cc (26)
  62. src/core/lib/iomgr/closure.h (6)
  63. src/core/lib/iomgr/combiner.cc (22)
  64. src/core/lib/iomgr/ev_epoll1_linux.cc (48)
  65. src/core/lib/iomgr/ev_epollex_linux.cc (54)
  66. src/core/lib/iomgr/ev_epollsig_linux.cc (4)
  67. src/core/lib/iomgr/ev_poll_posix.cc (6)
  68. src/core/lib/iomgr/ev_posix.cc (2)
  69. src/core/lib/iomgr/executor.cc (12)
  70. src/core/lib/iomgr/fork_posix.cc (1)
  71. src/core/lib/iomgr/resource_quota.cc (21)
  72. src/core/lib/iomgr/tcp_client_custom.cc (4)
  73. src/core/lib/iomgr/tcp_client_posix.cc (8)
  74. src/core/lib/iomgr/tcp_custom.cc (20)
  75. src/core/lib/iomgr/tcp_posix.cc (50)
  76. src/core/lib/iomgr/tcp_server_custom.cc (10)
  77. src/core/lib/iomgr/tcp_server_posix.cc (2)
  78. src/core/lib/iomgr/tcp_uv.cc (3)
  79. src/core/lib/iomgr/tcp_windows.cc (16)
  80. src/core/lib/iomgr/timer_generic.cc (35)
  81. src/core/lib/iomgr/timer_manager.cc (22)
  82. src/core/lib/iomgr/timer_uv.cc (3)
  83. src/core/lib/profiling/basic_timers.cc (1)
  84. src/core/lib/security/credentials/credentials.h (1)
  85. src/core/lib/security/credentials/google_default/google_default_credentials.cc (98)
  86. src/core/lib/security/credentials/google_default/google_default_credentials.h (6)
  87. src/core/lib/security/security_connector/security_connector.cc (25)
  88. src/core/lib/security/security_connector/security_connector.h (7)
  89. src/core/lib/security/transport/secure_endpoint.cc (4)
  90. src/core/lib/security/transport/security_handshaker.cc (8)
  91. src/core/lib/slice/slice_hash_table.h (4)
  92. src/core/lib/slice/slice_weak_hash_table.h (4)
  93. src/core/lib/surface/call.cc (8)
  94. src/core/lib/surface/server.cc (16)
  95. src/core/lib/surface/version.cc (2)
  96. src/core/lib/transport/bdp_estimator.cc (6)
  97. src/core/lib/transport/bdp_estimator.h (4)
  98. src/core/lib/transport/connectivity_state.cc (13)
  99. src/core/tsi/ssl/session_cache/ssl_session_cache.h (4)
  100. src/core/tsi/ssl_transport_security.cc (19)

Some files were not shown because too many files have changed in this diff.

.github/ISSUE_TEMPLATE.md (14)

@@ -1,12 +1,12 @@
Please answer these questions before submitting your issue.
<!--
### Should this be an issue in the gRPC issue tracker?
This form is for bug reports and feature requests ONLY!
For general questions and troubleshooting, please ask/look for answers here:
- grpc.io mailing list: https://groups.google.com/forum/#!forum/grpc-io
- StackOverflow, with "grpc" tag: http://stackoverflow.com/questions/tagged/grpc
Create new issues for bugs and feature requests. An issue needs to be actionable. General gRPC discussions and usage questions belong to:
- [grpc.io mailing list](https://groups.google.com/forum/#!forum/grpc-io)
- [StackOverflow, with `grpc` tag](http://stackoverflow.com/questions/tagged/grpc)
*Please don't double post your questions in more locations; we are monitoring both channels, and the time spent de-duplicating questions is better spent answering more user questions.*
Issues specific to *grpc-java*, *grpc-go*, *grpc-node*, *grpc-dart*, *grpc-web* should be created in the repository they belong to (e.g. https://github.com/grpc/grpc-LANGUAGE/issues/new)
-->
### What version of gRPC and what language are you using?

.pylintrc-tests (108)

@@ -0,0 +1,108 @@
[VARIABLES]
# TODO(https://github.com/PyCQA/pylint/issues/1345): How does the inspection
# not include "unused_" and "ignored_" by default?
dummy-variables-rgx=^ignored_|^unused_
[DESIGN]
# NOTE(nathaniel): Not particularly attached to this value; it just seems to
# be what works for us at the moment (excepting the dead-code-walking Beta
# API).
max-args=6
[MISCELLANEOUS]
# NOTE(nathaniel): We are big fans of "TODO(<issue link>): " and
# "NOTE(<username or issue link>): ". We do not allow "TODO:",
# "TODO(<username>):", "FIXME:", or anything else.
notes=FIXME,XXX
[MESSAGES CONTROL]
disable=
# These suppressions are specific to tests:
#
# TODO(https://github.com/grpc/grpc/issues/261): investigate
# each of the following one by one and consider eliminating
# the suppression category.
# Eventually, the hope is to eliminate the .pylintrc-tests
# altogether and rely on .pylintrc for everything.
pointless-statement,
no-member,
no-self-use,
attribute-defined-outside-init,
unused-argument,
unused-variable,
unused-import,
redefined-builtin,
too-many-public-methods,
too-many-locals,
redefined-variable-type,
redefined-outer-name,
ungrouped-imports,
too-many-branches,
too-many-arguments,
too-many-format-args,
too-many-return-statements,
too-many-statements,
line-too-long,
wrong-import-position,
wrong-import-order,
# -- END OF TEST-SPECIFIC SUPPRESSIONS --
# TODO(https://github.com/PyCQA/pylint/issues/59#issuecomment-283774279):
# Enable cyclic-import after a 1.7-or-later pylint release that
# recognizes our disable=cyclic-import suppressions.
cyclic-import,
# TODO(https://github.com/grpc/grpc/issues/8622): Enable this after the
# Beta API is removed.
duplicate-code,
# TODO(https://github.com/grpc/grpc/issues/261): Doesn't seem to
# understand enum and concurrent.futures; look into this later with the
# latest pylint version.
import-error,
# TODO(https://github.com/grpc/grpc/issues/261): Enable this one.
# Should take a little configuration but not much.
invalid-name,
# TODO(https://github.com/grpc/grpc/issues/261): This doesn't seem to
# work for now? Try with a later pylint?
locally-disabled,
# NOTE(nathaniel): What even is this? *Enabling* an inspection results
# in a warning? How does that encourage more analysis and coverage?
locally-enabled,
# NOTE(nathaniel): We don't write doc strings for most private code
# elements.
missing-docstring,
# NOTE(nathaniel): In numeric comparisons it is better to have the
# lesser (or lesser-or-equal-to) quantity on the left when the
# expression is true than it is to worry about which is an identifier
# and which a literal value.
misplaced-comparison-constant,
# NOTE(nathaniel): Our completely abstract interface classes don't have
# constructors.
no-init,
# TODO(https://github.com/grpc/grpc/issues/261): Doesn't yet play
# nicely with some of our code being implemented in Cython. Maybe in a
# later version?
no-name-in-module,
# TODO(https://github.com/grpc/grpc/issues/261): Suppress these where
# the odd shape of the authentication portion of the API forces them on
# us and enable everywhere else.
protected-access,
# NOTE(nathaniel): Pylint and I will probably never agree on this.
too-few-public-methods,
# NOTE(nathaniel): Pylint and I will probably never agree on this for
# private classes. For public classes maybe?
too-many-instance-attributes,
# NOTE(nathaniel): Some of our modules have a lot of lines... of
# specification and documentation. Maybe if this were
# lines-of-code-based we would use it.
too-many-lines,
# TODO(https://github.com/grpc/grpc/issues/261): Maybe we could have
# this one if we extracted just a few more helper functions...
too-many-nested-blocks,
# NOTE(nathaniel): I have disputed the premise of this inspection from
# the beginning and will continue to do so until it goes away for good.
useless-else-on-loop,

BUILD (24)

@@ -64,11 +64,11 @@ config_setting(
)
# This should be updated along with build.yaml
g_stands_for = "glorious"
g_stands_for = "gloriosa"
core_version = "6.0.0-dev"
version = "1.12.0-dev"
version = "1.13.0-dev"
GPR_PUBLIC_HDRS = [
"include/grpc/support/alloc.h",
@@ -1183,6 +1183,7 @@ grpc_cc_library(
],
hdrs = [
"src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h",
@@ -1211,6 +1212,7 @@ grpc_cc_library(
],
hdrs = [
"src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h",
@@ -1230,9 +1232,6 @@ grpc_cc_library(
grpc_cc_library(
name = "grpc_lb_subchannel_list",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc",
],
hdrs = [
"src/core/ext/filters/client_channel/lb_policy/subchannel_list.h",
],
@@ -1285,6 +1284,20 @@ grpc_cc_library(
],
)
grpc_cc_library(
name = "lb_load_data_store",
srcs = [
"src/cpp/server/load_reporter/load_data_store.cc",
],
hdrs = [
"src/cpp/server/load_reporter/load_data_store.h",
],
language = "c++",
deps = [
"grpc++",
],
)
grpc_cc_library(
name = "grpc_resolver_dns_native",
srcs = [
@@ -1375,6 +1388,7 @@ grpc_cc_library(
"src/core/lib/surface/init_secure.cc",
],
hdrs = [
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h",
"src/core/lib/security/context/security_context.h",
"src/core/lib/security/credentials/alts/alts_credentials.h",
"src/core/lib/security/credentials/composite/composite_credentials.h",

CMakeLists.txt (175)

@@ -24,7 +24,7 @@
cmake_minimum_required(VERSION 2.8)
set(PACKAGE_NAME "grpc")
set(PACKAGE_VERSION "1.12.0-dev")
set(PACKAGE_VERSION "1.13.0-dev")
set(PACKAGE_STRING "${PACKAGE_NAME} ${PACKAGE_VERSION}")
set(PACKAGE_TARNAME "${PACKAGE_NAME}-${PACKAGE_VERSION}")
set(PACKAGE_BUGREPORT "https://github.com/grpc/grpc/issues/")
@@ -584,6 +584,7 @@ endif()
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx json_run_localhost)
endif()
add_dependencies(buildtests_cxx lb_load_data_store_test)
add_dependencies(buildtests_cxx memory_test)
add_dependencies(buildtests_cxx metrics_client)
add_dependencies(buildtests_cxx mock_test)
@@ -1193,7 +1194,6 @@ add_library(grpc
src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c
src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
@@ -2499,7 +2499,6 @@ add_library(grpc_unsecure
third_party/nanopb/pb_decode.c
third_party/nanopb/pb_encode.c
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
src/core/ext/census/grpc_context.cc
src/core/ext/filters/max_age/max_age_filter.cc
@@ -4972,6 +4971,49 @@ target_link_libraries(interop_server_main
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_library(lb_load_data_store
src/cpp/server/load_reporter/load_data_store.cc
)
if(WIN32 AND MSVC)
set_target_properties(lb_load_data_store PROPERTIES COMPILE_PDB_NAME "lb_load_data_store"
COMPILE_PDB_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}"
)
if (gRPC_INSTALL)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/lb_load_data_store.pdb
DESTINATION ${gRPC_INSTALL_LIBDIR} OPTIONAL
)
endif()
endif()
target_include_directories(lb_load_data_store
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
PRIVATE ${_gRPC_PROTOBUF_INCLUDE_DIR}
PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
PRIVATE ${_gRPC_BENCHMARK_INCLUDE_DIR}
PRIVATE ${_gRPC_CARES_INCLUDE_DIR}
PRIVATE ${_gRPC_GFLAGS_INCLUDE_DIR}
PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
PRIVATE third_party/googletest/googletest/include
PRIVATE third_party/googletest/googletest
PRIVATE third_party/googletest/googlemock/include
PRIVATE third_party/googletest/googlemock
PRIVATE ${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(lb_load_data_store
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
grpc++
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
@@ -4993,10 +5035,18 @@ add_library(qps
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/control.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/control.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/control.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/services.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/services.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/services.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/services.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/benchmark_service.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/benchmark_service.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/benchmark_service.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/benchmark_service.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/report_qps_scenario_service.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/report_qps_scenario_service.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/worker_service.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/worker_service.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/worker_service.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/worker_service.grpc.pb.h
test/cpp/qps/benchmark_config.cc
test/cpp/qps/client_async.cc
test/cpp/qps/client_sync.cc
@@ -5033,7 +5083,13 @@ protobuf_generate_grpc_cpp(
src/proto/grpc/testing/control.proto
)
protobuf_generate_grpc_cpp(
src/proto/grpc/testing/services.proto
src/proto/grpc/testing/benchmark_service.proto
)
protobuf_generate_grpc_cpp(
src/proto/grpc/testing/report_qps_scenario_service.proto
)
protobuf_generate_grpc_cpp(
src/proto/grpc/testing/worker_service.proto
)
target_include_directories(qps
@@ -5248,6 +5304,7 @@ add_library(end2end_tests
test/core/end2end/tests/retry_exceeds_buffer_size_in_initial_batch.cc
test/core/end2end/tests/retry_exceeds_buffer_size_in_subsequent_batch.cc
test/core/end2end/tests/retry_non_retriable_status.cc
test/core/end2end/tests/retry_non_retriable_status_before_recv_trailing_metadata_started.cc
test/core/end2end/tests/retry_recv_initial_metadata.cc
test/core/end2end/tests/retry_recv_message.cc
test/core/end2end/tests/retry_server_pushback_delay.cc
@@ -5365,6 +5422,7 @@ add_library(end2end_nosec_tests
test/core/end2end/tests/retry_exceeds_buffer_size_in_initial_batch.cc
test/core/end2end/tests/retry_exceeds_buffer_size_in_subsequent_batch.cc
test/core/end2end/tests/retry_non_retriable_status.cc
test/core/end2end/tests/retry_non_retriable_status_before_recv_trailing_metadata_started.cc
test/core/end2end/tests/retry_recv_initial_metadata.cc
test/core/end2end/tests/retry_recv_message.cc
test/core/end2end/tests/retry_server_pushback_delay.cc
@@ -9686,6 +9744,7 @@ target_link_libraries(bm_arena
grpc_unsecure
gpr_test_util
gpr
grpc++_test_config
${_gRPC_GFLAGS_LIBRARIES}
)
@@ -9729,6 +9788,7 @@ target_link_libraries(bm_call_create
grpc_unsecure
gpr_test_util
gpr
grpc++_test_config
${_gRPC_GFLAGS_LIBRARIES}
)
@@ -9772,6 +9832,7 @@ target_link_libraries(bm_chttp2_hpack
grpc_unsecure
gpr_test_util
gpr
grpc++_test_config
${_gRPC_GFLAGS_LIBRARIES}
)
@@ -9815,6 +9876,7 @@ target_link_libraries(bm_chttp2_transport
grpc_unsecure
gpr_test_util
gpr
grpc++_test_config
${_gRPC_GFLAGS_LIBRARIES}
)
@@ -9858,6 +9920,7 @@ target_link_libraries(bm_closure
grpc_unsecure
gpr_test_util
gpr
grpc++_test_config
${_gRPC_GFLAGS_LIBRARIES}
)
@@ -9901,6 +9964,7 @@ target_link_libraries(bm_cq
grpc_unsecure
gpr_test_util
gpr
grpc++_test_config
${_gRPC_GFLAGS_LIBRARIES}
)
@@ -9944,6 +10008,7 @@ target_link_libraries(bm_cq_multiple_threads
grpc_unsecure
gpr_test_util
gpr
grpc++_test_config
${_gRPC_GFLAGS_LIBRARIES}
)
@@ -9987,6 +10052,7 @@ target_link_libraries(bm_error
grpc_unsecure
gpr_test_util
gpr
grpc++_test_config
${_gRPC_GFLAGS_LIBRARIES}
)
@@ -10030,6 +10096,7 @@ target_link_libraries(bm_fullstack_streaming_ping_pong
grpc_unsecure
gpr_test_util
gpr
grpc++_test_config
${_gRPC_GFLAGS_LIBRARIES}
)
@@ -10073,6 +10140,7 @@ target_link_libraries(bm_fullstack_streaming_pump
grpc_unsecure
gpr_test_util
gpr
grpc++_test_config
${_gRPC_GFLAGS_LIBRARIES}
)
@@ -10160,6 +10228,7 @@ target_link_libraries(bm_fullstack_unary_ping_pong
grpc_unsecure
gpr_test_util
gpr
grpc++_test_config
${_gRPC_GFLAGS_LIBRARIES}
)
@@ -10203,6 +10272,7 @@ target_link_libraries(bm_metadata
grpc_unsecure
gpr_test_util
gpr
grpc++_test_config
${_gRPC_GFLAGS_LIBRARIES}
)
@@ -10246,6 +10316,7 @@ target_link_libraries(bm_pollset
grpc_unsecure
gpr_test_util
gpr
grpc++_test_config
${_gRPC_GFLAGS_LIBRARIES}
)
@@ -10733,10 +10804,18 @@ add_executable(codegen_test_full
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/payloads.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/payloads.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/payloads.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/services.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/services.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/services.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/services.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/benchmark_service.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/benchmark_service.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/benchmark_service.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/benchmark_service.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/report_qps_scenario_service.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/report_qps_scenario_service.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/worker_service.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/worker_service.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/worker_service.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/worker_service.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/stats.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/stats.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/stats.pb.h
@@ -10756,7 +10835,13 @@ protobuf_generate_grpc_cpp(
src/proto/grpc/testing/payloads.proto
)
protobuf_generate_grpc_cpp(
src/proto/grpc/testing/services.proto
src/proto/grpc/testing/benchmark_service.proto
)
protobuf_generate_grpc_cpp(
src/proto/grpc/testing/report_qps_scenario_service.proto
)
protobuf_generate_grpc_cpp(
src/proto/grpc/testing/worker_service.proto
)
protobuf_generate_grpc_cpp(
src/proto/grpc/testing/stats.proto
@@ -10805,10 +10890,18 @@ add_executable(codegen_test_minimal
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/payloads.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/payloads.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/payloads.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/services.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/services.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/services.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/services.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/benchmark_service.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/benchmark_service.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/benchmark_service.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/benchmark_service.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/report_qps_scenario_service.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/report_qps_scenario_service.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/worker_service.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/worker_service.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/worker_service.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/worker_service.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/stats.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/stats.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/stats.pb.h
@@ -10829,7 +10922,13 @@ protobuf_generate_grpc_cpp(
src/proto/grpc/testing/payloads.proto
)
protobuf_generate_grpc_cpp(
src/proto/grpc/testing/services.proto
src/proto/grpc/testing/benchmark_service.proto
)
protobuf_generate_grpc_cpp(
src/proto/grpc/testing/report_qps_scenario_service.proto
)
protobuf_generate_grpc_cpp(
src/proto/grpc/testing/worker_service.proto
)
protobuf_generate_grpc_cpp(
src/proto/grpc/testing/stats.proto
@@ -12214,6 +12313,46 @@ endif()
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(lb_load_data_store_test
test/cpp/server/load_reporter/load_data_store_test.cc
third_party/googletest/googletest/src/gtest-all.cc
third_party/googletest/googlemock/src/gmock-all.cc
)
target_include_directories(lb_load_data_store_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
PRIVATE ${_gRPC_PROTOBUF_INCLUDE_DIR}
PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
PRIVATE ${_gRPC_BENCHMARK_INCLUDE_DIR}
PRIVATE ${_gRPC_CARES_INCLUDE_DIR}
PRIVATE ${_gRPC_GFLAGS_INCLUDE_DIR}
PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
PRIVATE third_party/googletest/googletest/include
PRIVATE third_party/googletest/googletest
PRIVATE third_party/googletest/googlemock/include
PRIVATE third_party/googletest/googlemock
PRIVATE ${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(lb_load_data_store_test
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
lb_load_data_store
grpc++_test_util
grpc_test_util
grpc++
grpc
gpr_test_util
gpr
${_gRPC_GFLAGS_LIBRARIES}
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(memory_test
test/core/gprpp/memory_test.cc
third_party/googletest/googletest/src/gtest-all.cc

Makefile (278)

@@ -421,8 +421,8 @@ Q = @
endif
CORE_VERSION = 6.0.0-dev
CPP_VERSION = 1.12.0-dev
CSHARP_VERSION = 1.12.0-dev
CPP_VERSION = 1.13.0-dev
CSHARP_VERSION = 1.13.0-dev
CPPFLAGS_NO_ARCH += $(addprefix -I, $(INCLUDES)) $(addprefix -D, $(DEFINES))
CPPFLAGS += $(CPPFLAGS_NO_ARCH) $(ARCH_FLAGS)
@@ -1178,6 +1178,7 @@ interop_client: $(BINDIR)/$(CONFIG)/interop_client
interop_server: $(BINDIR)/$(CONFIG)/interop_server
interop_test: $(BINDIR)/$(CONFIG)/interop_test
json_run_localhost: $(BINDIR)/$(CONFIG)/json_run_localhost
lb_load_data_store_test: $(BINDIR)/$(CONFIG)/lb_load_data_store_test
memory_test: $(BINDIR)/$(CONFIG)/memory_test
metrics_client: $(BINDIR)/$(CONFIG)/metrics_client
mock_test: $(BINDIR)/$(CONFIG)/mock_test
@@ -1390,9 +1391,9 @@ pc_cxx: $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++.pc
pc_cxx_unsecure: $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++_unsecure.pc
ifeq ($(EMBED_OPENSSL),true)
privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++_proto_reflection_desc_db.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libhttp2_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_lib.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libboringssl_test_util.a $(LIBDIR)/$(CONFIG)/libboringssl_crypto_test_data_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_asn1_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_base64_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bio_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_buf_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bytestring_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_chacha_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_aead_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_cipher_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_cmac_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_compiler_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_constant_time_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ed25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_spake25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_x25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_dh_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_digest_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_dsa_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ecdh_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_err_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_evp_extra_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_evp_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pbkdf_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_scrypt_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_aes_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bn_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ec_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_p256-x86_64_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ecdsa_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_gcm_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ctrdrbg_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_hkdf_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_hmac_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_lhash_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_obj_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs7_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs12_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs8_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_poly1305_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pool_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_refcount_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_rsa_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_file_test_gtest_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_gtest_main_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_thread_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_x509_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_tab_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_v3name_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_span_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ssl_test_lib.a $(LIBDIR)/$(CONFIG)/libbenchmark.a
privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++_proto_reflection_desc_db.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libhttp2_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_lib.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/liblb_load_data_store.a $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libboringssl_test_util.a $(LIBDIR)/$(CONFIG)/libboringssl_crypto_test_data_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_asn1_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_base64_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bio_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_buf_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bytestring_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_chacha_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_aead_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_cipher_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_cmac_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_compiler_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_constant_time_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ed25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_spake25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_x25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_dh_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_digest_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_dsa_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ecdh_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_err_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_evp_extra_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_evp_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pbkdf_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_scrypt_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_aes_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bn_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ec_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_p256-x86_64_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ecdsa_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_gcm_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ctrdrbg_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_hkdf_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_hmac_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_lhash_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_obj_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs7_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs12_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs8_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_poly1305_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pool_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_refcount_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_rsa_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_file_test_gtest_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_gtest_main_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_thread_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_x509_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_tab_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_v3name_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_span_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ssl_test_lib.a $(LIBDIR)/$(CONFIG)/libbenchmark.a
else
privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++_proto_reflection_desc_db.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libhttp2_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_lib.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libbenchmark.a
privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++_proto_reflection_desc_db.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libhttp2_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_lib.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/liblb_load_data_store.a $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libbenchmark.a
endif
@@ -1660,6 +1661,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/interop_server \
$(BINDIR)/$(CONFIG)/interop_test \
$(BINDIR)/$(CONFIG)/json_run_localhost \
$(BINDIR)/$(CONFIG)/lb_load_data_store_test \
$(BINDIR)/$(CONFIG)/memory_test \
$(BINDIR)/$(CONFIG)/metrics_client \
$(BINDIR)/$(CONFIG)/mock_test \
@@ -1831,6 +1833,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/interop_server \
$(BINDIR)/$(CONFIG)/interop_test \
$(BINDIR)/$(CONFIG)/json_run_localhost \
$(BINDIR)/$(CONFIG)/lb_load_data_store_test \
$(BINDIR)/$(CONFIG)/memory_test \
$(BINDIR)/$(CONFIG)/metrics_client \
$(BINDIR)/$(CONFIG)/mock_test \
@@ -2283,6 +2286,8 @@ test_cxx: buildtests_cxx
$(Q) $(BINDIR)/$(CONFIG)/inproc_sync_unary_ping_pong_test || ( echo test inproc_sync_unary_ping_pong_test failed ; exit 1 )
$(E) "[RUN] Testing interop_test"
$(Q) $(BINDIR)/$(CONFIG)/interop_test || ( echo test interop_test failed ; exit 1 )
$(E) "[RUN] Testing lb_load_data_store_test"
$(Q) $(BINDIR)/$(CONFIG)/lb_load_data_store_test || ( echo test lb_load_data_store_test failed ; exit 1 )
$(E) "[RUN] Testing memory_test"
$(Q) $(BINDIR)/$(CONFIG)/memory_test || ( echo test memory_test failed ; exit 1 )
$(E) "[RUN] Testing mock_test"
@@ -2569,6 +2574,22 @@ $(GENDIR)/src/proto/grpc/status/status.grpc.pb.cc: src/proto/grpc/status/status.
$(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin$(EXECUTABLE_SUFFIX) $<
endif
ifeq ($(NO_PROTOC),true)
$(GENDIR)/src/proto/grpc/testing/benchmark_service.pb.cc: protoc_dep_error
$(GENDIR)/src/proto/grpc/testing/benchmark_service.grpc.pb.cc: protoc_dep_error
else
$(GENDIR)/src/proto/grpc/testing/benchmark_service.pb.cc: src/proto/grpc/testing/benchmark_service.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(GENDIR)/src/proto/grpc/testing/messages.pb.cc
$(E) "[PROTOC] Generating protobuf CC file from $<"
$(Q) mkdir -p `dirname $@`
$(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --cpp_out=$(GENDIR) $<
$(GENDIR)/src/proto/grpc/testing/benchmark_service.grpc.pb.cc: src/proto/grpc/testing/benchmark_service.proto $(GENDIR)/src/proto/grpc/testing/benchmark_service.pb.cc $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc
$(E) "[GRPC] Generating gRPC's protobuf service CC file from $<"
$(Q) mkdir -p `dirname $@`
$(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin$(EXECUTABLE_SUFFIX) $<
endif
ifeq ($(NO_PROTOC),true)
$(GENDIR)/src/proto/grpc/testing/compiler_test.pb.cc: protoc_dep_error
$(GENDIR)/src/proto/grpc/testing/compiler_test.grpc.pb.cc: protoc_dep_error
@@ -2716,16 +2737,16 @@ $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc: src/proto/grpc/testing/pay
endif
ifeq ($(NO_PROTOC),true)
$(GENDIR)/src/proto/grpc/testing/services.pb.cc: protoc_dep_error
$(GENDIR)/src/proto/grpc/testing/services.grpc.pb.cc: protoc_dep_error
$(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.pb.cc: protoc_dep_error
$(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.cc: protoc_dep_error
else
$(GENDIR)/src/proto/grpc/testing/services.pb.cc: src/proto/grpc/testing/services.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/control.pb.cc
$(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.pb.cc: src/proto/grpc/testing/report_qps_scenario_service.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(GENDIR)/src/proto/grpc/testing/control.pb.cc
$(E) "[PROTOC] Generating protobuf CC file from $<"
$(Q) mkdir -p `dirname $@`
$(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --cpp_out=$(GENDIR) $<
$(GENDIR)/src/proto/grpc/testing/services.grpc.pb.cc: src/proto/grpc/testing/services.proto $(GENDIR)/src/proto/grpc/testing/services.pb.cc $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc
$(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.cc: src/proto/grpc/testing/report_qps_scenario_service.proto $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.pb.cc $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc
$(E) "[GRPC] Generating gRPC's protobuf service CC file from $<"
$(Q) mkdir -p `dirname $@`
$(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin$(EXECUTABLE_SUFFIX) $<
@@ -2763,6 +2784,22 @@ $(GENDIR)/src/proto/grpc/testing/test.grpc.pb.cc: src/proto/grpc/testing/test.pr
$(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin$(EXECUTABLE_SUFFIX) $<
endif
ifeq ($(NO_PROTOC),true)
$(GENDIR)/src/proto/grpc/testing/worker_service.pb.cc: protoc_dep_error
$(GENDIR)/src/proto/grpc/testing/worker_service.grpc.pb.cc: protoc_dep_error
else
$(GENDIR)/src/proto/grpc/testing/worker_service.pb.cc: src/proto/grpc/testing/worker_service.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(GENDIR)/src/proto/grpc/testing/control.pb.cc
$(E) "[PROTOC] Generating protobuf CC file from $<"
$(Q) mkdir -p `dirname $@`
$(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --cpp_out=$(GENDIR) $<
$(GENDIR)/src/proto/grpc/testing/worker_service.grpc.pb.cc: src/proto/grpc/testing/worker_service.proto $(GENDIR)/src/proto/grpc/testing/worker_service.pb.cc $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc
$(E) "[GRPC] Generating gRPC's protobuf service CC file from $<"
$(Q) mkdir -p `dirname $@`
$(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin$(EXECUTABLE_SUFFIX) $<
endif
ifeq ($(CONFIG),stapprof)
src/core/profiling/stap_timers.c: $(GENDIR)/src/core/profiling/stap_probes.h
@@ -3543,7 +3580,6 @@ LIBGRPC_SRC = \
src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c \
src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc \
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \
src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc \
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \
src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc \
src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc \
@@ -4819,7 +4855,6 @@ LIBGRPC_UNSECURE_SRC = \
third_party/nanopb/pb_decode.c \
third_party/nanopb/pb_encode.c \
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \
src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc \
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \
src/core/ext/census/grpc_context.cc \
src/core/ext/filters/max_age/max_age_filter.cc \
@@ -7175,12 +7210,63 @@ endif
endif
LIBLB_LOAD_DATA_STORE_SRC = \
src/cpp/server/load_reporter/load_data_store.cc \
PUBLIC_HEADERS_CXX += \
LIBLB_LOAD_DATA_STORE_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBLB_LOAD_DATA_STORE_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure libraries if you don't have OpenSSL.
$(LIBDIR)/$(CONFIG)/liblb_load_data_store.a: openssl_dep_error
else
ifeq ($(NO_PROTOBUF),true)
# You can't build a C++ library if you don't have protobuf - a bit overreached, but still okay.
$(LIBDIR)/$(CONFIG)/liblb_load_data_store.a: protobuf_dep_error
else
$(LIBDIR)/$(CONFIG)/liblb_load_data_store.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(CARES_DEP) $(ADDRESS_SORTING_DEP) $(PROTOBUF_DEP) $(LIBLB_LOAD_DATA_STORE_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
$(Q) rm -f $(LIBDIR)/$(CONFIG)/liblb_load_data_store.a
$(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/liblb_load_data_store.a $(LIBLB_LOAD_DATA_STORE_OBJS)
ifeq ($(SYSTEM),Darwin)
$(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/liblb_load_data_store.a
endif
endif
endif
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(LIBLB_LOAD_DATA_STORE_OBJS:.o=.dep)
endif
endif
LIBQPS_SRC = \
$(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc \
$(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc \
$(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc \
$(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc \
$(GENDIR)/src/proto/grpc/testing/services.pb.cc $(GENDIR)/src/proto/grpc/testing/services.grpc.pb.cc \
$(GENDIR)/src/proto/grpc/testing/benchmark_service.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.grpc.pb.cc \
$(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.cc \
$(GENDIR)/src/proto/grpc/testing/worker_service.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.grpc.pb.cc \
test/cpp/qps/benchmark_config.cc \
test/cpp/qps/client_async.cc \
test/cpp/qps/client_sync.cc \
@@ -7236,16 +7322,16 @@ ifneq ($(NO_DEPS),true)
-include $(LIBQPS_OBJS:.o=.dep)
endif
endif
$(OBJDIR)/$(CONFIG)/test/cpp/qps/benchmark_config.o: $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/services.pb.cc $(GENDIR)/src/proto/grpc/testing/services.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/qps/client_async.o: $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/services.pb.cc $(GENDIR)/src/proto/grpc/testing/services.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/qps/client_sync.o: $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/services.pb.cc $(GENDIR)/src/proto/grpc/testing/services.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/qps/driver.o: $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/services.pb.cc $(GENDIR)/src/proto/grpc/testing/services.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/qps/parse_json.o: $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/services.pb.cc $(GENDIR)/src/proto/grpc/testing/services.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/qps/qps_worker.o: $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/services.pb.cc $(GENDIR)/src/proto/grpc/testing/services.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/qps/report.o: $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/services.pb.cc $(GENDIR)/src/proto/grpc/testing/services.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/qps/server_async.o: $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/services.pb.cc $(GENDIR)/src/proto/grpc/testing/services.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/qps/server_sync.o: $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/services.pb.cc $(GENDIR)/src/proto/grpc/testing/services.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/qps/usage_timer.o: $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/services.pb.cc $(GENDIR)/src/proto/grpc/testing/services.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/qps/benchmark_config.o: $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/qps/client_async.o: $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/qps/client_sync.o: $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/qps/driver.o: $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/qps/parse_json.o: $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/qps/qps_worker.o: $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/qps/report.o: $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/qps/server_async.o: $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/qps/server_sync.o: $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/qps/usage_timer.o: $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.grpc.pb.cc
LIBGRPC_CSHARP_EXT_SRC = \
@ -9876,6 +9962,7 @@ LIBEND2END_TESTS_SRC = \
test/core/end2end/tests/retry_exceeds_buffer_size_in_initial_batch.cc \
test/core/end2end/tests/retry_exceeds_buffer_size_in_subsequent_batch.cc \
test/core/end2end/tests/retry_non_retriable_status.cc \
test/core/end2end/tests/retry_non_retriable_status_before_recv_trailing_metadata_started.cc \
test/core/end2end/tests/retry_recv_initial_metadata.cc \
test/core/end2end/tests/retry_recv_message.cc \
test/core/end2end/tests/retry_server_pushback_delay.cc \
@ -9991,6 +10078,7 @@ LIBEND2END_NOSEC_TESTS_SRC = \
test/core/end2end/tests/retry_exceeds_buffer_size_in_initial_batch.cc \
test/core/end2end/tests/retry_exceeds_buffer_size_in_subsequent_batch.cc \
test/core/end2end/tests/retry_non_retriable_status.cc \
test/core/end2end/tests/retry_non_retriable_status_before_recv_trailing_metadata_started.cc \
test/core/end2end/tests/retry_recv_initial_metadata.cc \
test/core/end2end/tests/retry_recv_message.cc \
test/core/end2end/tests/retry_server_pushback_delay.cc \
@ -15375,17 +15463,17 @@ $(BINDIR)/$(CONFIG)/bm_arena: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/bm_arena: $(PROTOBUF_DEP) $(BM_ARENA_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(BINDIR)/$(CONFIG)/bm_arena: $(PROTOBUF_DEP) $(BM_ARENA_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(BM_ARENA_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_arena
$(Q) $(LDXX) $(LDFLAGS) $(BM_ARENA_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_arena
endif
endif
$(BM_ARENA_OBJS): CPPFLAGS += -Ithird_party/benchmark/include -DHAVE_POSIX_REGEX
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_arena.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_arena.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
deps_bm_arena: $(BM_ARENA_OBJS:.o=.dep)
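Each of the bm_* targets in this stretch of the Makefile follows the same pattern: link the benchmark objects against the unsecure gRPC test libraries, now with libgrpc++_test_config.a appended to the link line. As a rough sketch (not part of the diff) of how one of these binaries is built and run from the repository root, assuming the default bins/ output directory and an opt configuration:

    # Build a single microbenchmark target with the GNU Makefile.
    make bm_arena CONFIG=opt -j8

    # Run it; --benchmark_filter is a standard Google Benchmark flag,
    # and the filter expression here is only illustrative.
    bins/opt/bm_arena --benchmark_filter=Arena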
@ -15419,17 +15507,17 @@ $(BINDIR)/$(CONFIG)/bm_call_create: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/bm_call_create: $(PROTOBUF_DEP) $(BM_CALL_CREATE_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(BINDIR)/$(CONFIG)/bm_call_create: $(PROTOBUF_DEP) $(BM_CALL_CREATE_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(BM_CALL_CREATE_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_call_create
$(Q) $(LDXX) $(LDFLAGS) $(BM_CALL_CREATE_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_call_create
endif
endif
$(BM_CALL_CREATE_OBJS): CPPFLAGS += -Ithird_party/benchmark/include -DHAVE_POSIX_REGEX
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_call_create.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_call_create.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
deps_bm_call_create: $(BM_CALL_CREATE_OBJS:.o=.dep)
@ -15463,17 +15551,17 @@ $(BINDIR)/$(CONFIG)/bm_chttp2_hpack: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/bm_chttp2_hpack: $(PROTOBUF_DEP) $(BM_CHTTP2_HPACK_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(BINDIR)/$(CONFIG)/bm_chttp2_hpack: $(PROTOBUF_DEP) $(BM_CHTTP2_HPACK_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(BM_CHTTP2_HPACK_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_chttp2_hpack
$(Q) $(LDXX) $(LDFLAGS) $(BM_CHTTP2_HPACK_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_chttp2_hpack
endif
endif
$(BM_CHTTP2_HPACK_OBJS): CPPFLAGS += -Ithird_party/benchmark/include -DHAVE_POSIX_REGEX
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_chttp2_hpack.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_chttp2_hpack.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
deps_bm_chttp2_hpack: $(BM_CHTTP2_HPACK_OBJS:.o=.dep)
@ -15507,17 +15595,17 @@ $(BINDIR)/$(CONFIG)/bm_chttp2_transport: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/bm_chttp2_transport: $(PROTOBUF_DEP) $(BM_CHTTP2_TRANSPORT_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(BINDIR)/$(CONFIG)/bm_chttp2_transport: $(PROTOBUF_DEP) $(BM_CHTTP2_TRANSPORT_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(BM_CHTTP2_TRANSPORT_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_chttp2_transport
$(Q) $(LDXX) $(LDFLAGS) $(BM_CHTTP2_TRANSPORT_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_chttp2_transport
endif
endif
$(BM_CHTTP2_TRANSPORT_OBJS): CPPFLAGS += -Ithird_party/benchmark/include -DHAVE_POSIX_REGEX
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_chttp2_transport.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_chttp2_transport.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
deps_bm_chttp2_transport: $(BM_CHTTP2_TRANSPORT_OBJS:.o=.dep)
@ -15551,17 +15639,17 @@ $(BINDIR)/$(CONFIG)/bm_closure: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/bm_closure: $(PROTOBUF_DEP) $(BM_CLOSURE_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(BINDIR)/$(CONFIG)/bm_closure: $(PROTOBUF_DEP) $(BM_CLOSURE_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(BM_CLOSURE_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_closure
$(Q) $(LDXX) $(LDFLAGS) $(BM_CLOSURE_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_closure
endif
endif
$(BM_CLOSURE_OBJS): CPPFLAGS += -Ithird_party/benchmark/include -DHAVE_POSIX_REGEX
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_closure.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_closure.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
deps_bm_closure: $(BM_CLOSURE_OBJS:.o=.dep)
@ -15595,17 +15683,17 @@ $(BINDIR)/$(CONFIG)/bm_cq: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/bm_cq: $(PROTOBUF_DEP) $(BM_CQ_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(BINDIR)/$(CONFIG)/bm_cq: $(PROTOBUF_DEP) $(BM_CQ_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(BM_CQ_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_cq
$(Q) $(LDXX) $(LDFLAGS) $(BM_CQ_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_cq
endif
endif
$(BM_CQ_OBJS): CPPFLAGS += -Ithird_party/benchmark/include -DHAVE_POSIX_REGEX
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_cq.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_cq.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
deps_bm_cq: $(BM_CQ_OBJS:.o=.dep)
@ -15639,17 +15727,17 @@ $(BINDIR)/$(CONFIG)/bm_cq_multiple_threads: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/bm_cq_multiple_threads: $(PROTOBUF_DEP) $(BM_CQ_MULTIPLE_THREADS_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(BINDIR)/$(CONFIG)/bm_cq_multiple_threads: $(PROTOBUF_DEP) $(BM_CQ_MULTIPLE_THREADS_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(BM_CQ_MULTIPLE_THREADS_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_cq_multiple_threads
$(Q) $(LDXX) $(LDFLAGS) $(BM_CQ_MULTIPLE_THREADS_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_cq_multiple_threads
endif
endif
$(BM_CQ_MULTIPLE_THREADS_OBJS): CPPFLAGS += -Ithird_party/benchmark/include -DHAVE_POSIX_REGEX
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_cq_multiple_threads.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_cq_multiple_threads.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
deps_bm_cq_multiple_threads: $(BM_CQ_MULTIPLE_THREADS_OBJS:.o=.dep)
@ -15683,17 +15771,17 @@ $(BINDIR)/$(CONFIG)/bm_error: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/bm_error: $(PROTOBUF_DEP) $(BM_ERROR_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(BINDIR)/$(CONFIG)/bm_error: $(PROTOBUF_DEP) $(BM_ERROR_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(BM_ERROR_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_error
$(Q) $(LDXX) $(LDFLAGS) $(BM_ERROR_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_error
endif
endif
$(BM_ERROR_OBJS): CPPFLAGS += -Ithird_party/benchmark/include -DHAVE_POSIX_REGEX
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_error.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_error.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
deps_bm_error: $(BM_ERROR_OBJS:.o=.dep)
@ -15727,17 +15815,17 @@ $(BINDIR)/$(CONFIG)/bm_fullstack_streaming_ping_pong: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/bm_fullstack_streaming_ping_pong: $(PROTOBUF_DEP) $(BM_FULLSTACK_STREAMING_PING_PONG_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(BINDIR)/$(CONFIG)/bm_fullstack_streaming_ping_pong: $(PROTOBUF_DEP) $(BM_FULLSTACK_STREAMING_PING_PONG_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(BM_FULLSTACK_STREAMING_PING_PONG_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_fullstack_streaming_ping_pong
$(Q) $(LDXX) $(LDFLAGS) $(BM_FULLSTACK_STREAMING_PING_PONG_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_fullstack_streaming_ping_pong
endif
endif
$(BM_FULLSTACK_STREAMING_PING_PONG_OBJS): CPPFLAGS += -Ithird_party/benchmark/include -DHAVE_POSIX_REGEX
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_fullstack_streaming_ping_pong.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_fullstack_streaming_ping_pong.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
deps_bm_fullstack_streaming_ping_pong: $(BM_FULLSTACK_STREAMING_PING_PONG_OBJS:.o=.dep)
@ -15771,17 +15859,17 @@ $(BINDIR)/$(CONFIG)/bm_fullstack_streaming_pump: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/bm_fullstack_streaming_pump: $(PROTOBUF_DEP) $(BM_FULLSTACK_STREAMING_PUMP_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(BINDIR)/$(CONFIG)/bm_fullstack_streaming_pump: $(PROTOBUF_DEP) $(BM_FULLSTACK_STREAMING_PUMP_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(BM_FULLSTACK_STREAMING_PUMP_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_fullstack_streaming_pump
$(Q) $(LDXX) $(LDFLAGS) $(BM_FULLSTACK_STREAMING_PUMP_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_fullstack_streaming_pump
endif
endif
$(BM_FULLSTACK_STREAMING_PUMP_OBJS): CPPFLAGS += -Ithird_party/benchmark/include -DHAVE_POSIX_REGEX
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_fullstack_streaming_pump.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_fullstack_streaming_pump.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
deps_bm_fullstack_streaming_pump: $(BM_FULLSTACK_STREAMING_PUMP_OBJS:.o=.dep)
@ -15859,17 +15947,17 @@ $(BINDIR)/$(CONFIG)/bm_fullstack_unary_ping_pong: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/bm_fullstack_unary_ping_pong: $(PROTOBUF_DEP) $(BM_FULLSTACK_UNARY_PING_PONG_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(BINDIR)/$(CONFIG)/bm_fullstack_unary_ping_pong: $(PROTOBUF_DEP) $(BM_FULLSTACK_UNARY_PING_PONG_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(BM_FULLSTACK_UNARY_PING_PONG_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_fullstack_unary_ping_pong
$(Q) $(LDXX) $(LDFLAGS) $(BM_FULLSTACK_UNARY_PING_PONG_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_fullstack_unary_ping_pong
endif
endif
$(BM_FULLSTACK_UNARY_PING_PONG_OBJS): CPPFLAGS += -Ithird_party/benchmark/include -DHAVE_POSIX_REGEX
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_fullstack_unary_ping_pong.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_fullstack_unary_ping_pong.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
deps_bm_fullstack_unary_ping_pong: $(BM_FULLSTACK_UNARY_PING_PONG_OBJS:.o=.dep)
@ -15903,17 +15991,17 @@ $(BINDIR)/$(CONFIG)/bm_metadata: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/bm_metadata: $(PROTOBUF_DEP) $(BM_METADATA_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(BINDIR)/$(CONFIG)/bm_metadata: $(PROTOBUF_DEP) $(BM_METADATA_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(BM_METADATA_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_metadata
$(Q) $(LDXX) $(LDFLAGS) $(BM_METADATA_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_metadata
endif
endif
$(BM_METADATA_OBJS): CPPFLAGS += -Ithird_party/benchmark/include -DHAVE_POSIX_REGEX
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_metadata.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_metadata.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
deps_bm_metadata: $(BM_METADATA_OBJS:.o=.dep)
@ -15947,17 +16035,17 @@ $(BINDIR)/$(CONFIG)/bm_pollset: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/bm_pollset: $(PROTOBUF_DEP) $(BM_POLLSET_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(BINDIR)/$(CONFIG)/bm_pollset: $(PROTOBUF_DEP) $(BM_POLLSET_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(BM_POLLSET_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_pollset
$(Q) $(LDXX) $(LDFLAGS) $(BM_POLLSET_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_pollset
endif
endif
$(BM_POLLSET_OBJS): CPPFLAGS += -Ithird_party/benchmark/include -DHAVE_POSIX_REGEX
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_pollset.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_pollset.o: $(LIBDIR)/$(CONFIG)/libgrpc_benchmark.a $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
deps_bm_pollset: $(BM_POLLSET_OBJS:.o=.dep)
@ -16496,7 +16584,9 @@ CODEGEN_TEST_FULL_SRC = \
$(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc \
$(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc \
$(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc \
$(GENDIR)/src/proto/grpc/testing/services.pb.cc $(GENDIR)/src/proto/grpc/testing/services.grpc.pb.cc \
$(GENDIR)/src/proto/grpc/testing/benchmark_service.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.grpc.pb.cc \
$(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.cc \
$(GENDIR)/src/proto/grpc/testing/worker_service.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.grpc.pb.cc \
$(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc \
test/cpp/codegen/codegen_test_full.cc \
@ -16535,7 +16625,11 @@ $(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/messages.o: $(LIBDIR)/$(CONFIG)/libg
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/payloads.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/services.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/benchmark_service.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/report_qps_scenario_service.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/worker_service.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/stats.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
@ -16548,14 +16642,16 @@ ifneq ($(NO_DEPS),true)
-include $(CODEGEN_TEST_FULL_OBJS:.o=.dep)
endif
endif
$(OBJDIR)/$(CONFIG)/test/cpp/codegen/codegen_test_full.o: $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/services.pb.cc $(GENDIR)/src/proto/grpc/testing/services.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/codegen/codegen_test_full.o: $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc
CODEGEN_TEST_MINIMAL_SRC = \
$(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc \
$(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc \
$(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc \
$(GENDIR)/src/proto/grpc/testing/services.pb.cc $(GENDIR)/src/proto/grpc/testing/services.grpc.pb.cc \
$(GENDIR)/src/proto/grpc/testing/benchmark_service.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.grpc.pb.cc \
$(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.cc \
$(GENDIR)/src/proto/grpc/testing/worker_service.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.grpc.pb.cc \
$(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc \
test/cpp/codegen/codegen_test_minimal.cc \
src/cpp/codegen/codegen_init.cc \
@ -16595,7 +16691,11 @@ $(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/messages.o: $(LIBDIR)/$(CONFIG)/libg
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/payloads.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/services.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/benchmark_service.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/report_qps_scenario_service.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/worker_service.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/stats.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
@ -16610,8 +16710,8 @@ ifneq ($(NO_DEPS),true)
-include $(CODEGEN_TEST_MINIMAL_OBJS:.o=.dep)
endif
endif
$(OBJDIR)/$(CONFIG)/test/cpp/codegen/codegen_test_minimal.o: $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/services.pb.cc $(GENDIR)/src/proto/grpc/testing/services.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/src/cpp/codegen/codegen_init.o: $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/services.pb.cc $(GENDIR)/src/proto/grpc/testing/services.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/codegen/codegen_test_minimal.o: $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/src/cpp/codegen/codegen_init.o: $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.pb.cc $(GENDIR)/src/proto/grpc/testing/benchmark_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.pb.cc $(GENDIR)/src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.pb.cc $(GENDIR)/src/proto/grpc/testing/worker_service.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc
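The $(GENDIR)/... .pb.cc and .grpc.pb.cc files referenced throughout these dependency lists are emitted by protoc together with the gRPC C++ plugin. A rough sketch of the kind of invocation involved, for one of the newly split protos; the paths and flags shown here are illustrative, not the Makefile's exact rule:

    # Generate message code and gRPC service stubs for one .proto file.
    protoc -I . \
      --cpp_out=gens \
      --grpc_out=gens \
      --plugin=protoc-gen-grpc=bins/opt/grpc_cpp_plugin \
      src/proto/grpc/testing/benchmark_service.proto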
CREDENTIALS_TEST_SRC = \
@ -17979,6 +18079,49 @@ endif
endif
LB_LOAD_DATA_STORE_TEST_SRC = \
test/cpp/server/load_reporter/load_data_store_test.cc \
LB_LOAD_DATA_STORE_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LB_LOAD_DATA_STORE_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/lb_load_data_store_test: openssl_dep_error
else
ifeq ($(NO_PROTOBUF),true)
# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.5.0+.
$(BINDIR)/$(CONFIG)/lb_load_data_store_test: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/lb_load_data_store_test: $(PROTOBUF_DEP) $(LB_LOAD_DATA_STORE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/liblb_load_data_store.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(LB_LOAD_DATA_STORE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/liblb_load_data_store.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/lb_load_data_store_test
endif
endif
$(OBJDIR)/$(CONFIG)/test/cpp/server/load_reporter/load_data_store_test.o: $(LIBDIR)/$(CONFIG)/liblb_load_data_store.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_lb_load_data_store_test: $(LB_LOAD_DATA_STORE_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(LB_LOAD_DATA_STORE_TEST_OBJS:.o=.dep)
endif
endif
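The new lb_load_data_store_test target wires the load reporter's data-store library into the standard test link line. As a worked example of the object-list computation above, $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename ...))) turns test/cpp/server/load_reporter/load_data_store_test.cc into $(OBJDIR)/$(CONFIG)/test/cpp/server/load_reporter/load_data_store_test.o. A minimal sketch of building and running the test, assuming the default output directories:

    # Build just this test binary (requires OpenSSL and protobuf >= 3.5.0,
    # per the guards above).
    make lb_load_data_store_test CONFIG=opt

    # Run the resulting gtest binary.
    bins/opt/lb_load_data_store_test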
MEMORY_TEST_SRC = \
test/core/gprpp/memory_test.cc \
@ -23847,6 +23990,7 @@ src/cpp/common/secure_channel_arguments.cc: $(OPENSSL_DEP)
src/cpp/common/secure_create_auth_context.cc: $(OPENSSL_DEP)
src/cpp/ext/proto_server_reflection.cc: $(OPENSSL_DEP)
src/cpp/ext/proto_server_reflection_plugin.cc: $(OPENSSL_DEP)
src/cpp/server/load_reporter/load_data_store.cc: $(OPENSSL_DEP)
src/cpp/server/secure_server_credentials.cc: $(OPENSSL_DEP)
src/cpp/util/core_stats.cc: $(OPENSSL_DEP)
src/cpp/util/error_details.cc: $(OPENSSL_DEP)

@ -39,6 +39,7 @@ Libraries in different languages may be in different states of development. We a
| Java | [grpc-java](http://github.com/grpc/grpc-java) |
| Go | [grpc-go](http://github.com/grpc/grpc-go) |
| NodeJS | [grpc-node](https://github.com/grpc/grpc-node) |
| WebJS | [grpc-web](https://github.com/grpc/grpc-web) |
| Dart | [grpc-dart](https://github.com/grpc/grpc-dart) |
See [MANIFEST.md](MANIFEST.md) for a listing of top-level items in the

@ -85,8 +85,8 @@ def grpc_deps():
if "com_google_protobuf" not in native.existing_rules():
native.http_archive(
name = "com_google_protobuf",
strip_prefix = "protobuf-2761122b810fe8861004ae785cc3ab39f384d342",
url = "https://github.com/google/protobuf/archive/2761122b810fe8861004ae785cc3ab39f384d342.tar.gz",
strip_prefix = "protobuf-b5fbb742af122b565925987e65c08957739976a7",
url = "https://github.com/google/protobuf/archive/b5fbb742af122b565925987e65c08957739976a7.tar.gz",
)
if "com_github_google_googletest" not in native.existing_rules():

@ -13,8 +13,8 @@ settings:
'#09': Per-language overrides are possible with (eg) ruby_version tag here
'#10': See the expand_version.py for all the quirks here
core_version: 6.0.0-dev
g_stands_for: glorious
version: 1.12.0-dev
g_stands_for: gloriosa
version: 1.13.0-dev
filegroups:
- name: alts_proto
headers:
@ -628,6 +628,7 @@ filegroups:
- name: grpc_lb_policy_grpclb
headers:
- src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
@ -648,6 +649,7 @@ filegroups:
- name: grpc_lb_policy_grpclb_secure
headers:
- src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
@ -685,8 +687,6 @@ filegroups:
- name: grpc_lb_subchannel_list
headers:
- src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
src:
- src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
uses:
- grpc_base
- grpc_client_channel
@ -746,6 +746,7 @@ filegroups:
public_headers:
- include/grpc/grpc_security.h
headers:
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
- src/core/lib/security/context/security_context.h
- src/core/lib/security/credentials/alts/alts_credentials.h
- src/core/lib/security/credentials/composite/composite_credentials.h
@ -1890,6 +1891,15 @@ libs:
- test/cpp/interop/interop_server_bootstrap.cc
deps:
- interop_server_lib
- name: lb_load_data_store
build: private
language: c++
headers:
- src/cpp/server/load_reporter/load_data_store.h
src:
- src/cpp/server/load_reporter/load_data_store.cc
deps:
- grpc++
- name: qps
build: private
language: c++
@ -1910,7 +1920,9 @@ libs:
- src/proto/grpc/testing/payloads.proto
- src/proto/grpc/testing/stats.proto
- src/proto/grpc/testing/control.proto
- src/proto/grpc/testing/services.proto
- src/proto/grpc/testing/benchmark_service.proto
- src/proto/grpc/testing/report_qps_scenario_service.proto
- src/proto/grpc/testing/worker_service.proto
- test/cpp/qps/benchmark_config.cc
- test/cpp/qps/client_async.cc
- test/cpp/qps/client_sync.cc
@ -3810,6 +3822,7 @@ targets:
- grpc_unsecure
- gpr_test_util
- gpr
- grpc++_test_config
benchmark: true
defaults: benchmark
platforms:
@ -3831,6 +3844,7 @@ targets:
- grpc_unsecure
- gpr_test_util
- gpr
- grpc++_test_config
benchmark: true
defaults: benchmark
platforms:
@ -3852,6 +3866,7 @@ targets:
- grpc_unsecure
- gpr_test_util
- gpr
- grpc++_test_config
benchmark: true
defaults: benchmark
platforms:
@ -3873,6 +3888,7 @@ targets:
- grpc_unsecure
- gpr_test_util
- gpr
- grpc++_test_config
benchmark: true
defaults: benchmark
platforms:
@ -3893,6 +3909,7 @@ targets:
- grpc_unsecure
- gpr_test_util
- gpr
- grpc++_test_config
benchmark: true
defaults: benchmark
platforms:
@ -3913,6 +3930,7 @@ targets:
- grpc_unsecure
- gpr_test_util
- gpr
- grpc++_test_config
benchmark: true
defaults: benchmark
platforms:
@ -3933,6 +3951,7 @@ targets:
- grpc_unsecure
- gpr_test_util
- gpr
- grpc++_test_config
benchmark: true
defaults: benchmark
platforms:
@ -3953,6 +3972,7 @@ targets:
- grpc_unsecure
- gpr_test_util
- gpr
- grpc++_test_config
benchmark: true
defaults: benchmark
platforms:
@ -3976,6 +3996,7 @@ targets:
- grpc_unsecure
- gpr_test_util
- gpr
- grpc++_test_config
benchmark: true
defaults: benchmark
excluded_poll_engines:
@ -4002,6 +4023,7 @@ targets:
- grpc_unsecure
- gpr_test_util
- gpr
- grpc++_test_config
benchmark: true
defaults: benchmark
excluded_poll_engines:
@ -4055,6 +4077,7 @@ targets:
- grpc_unsecure
- gpr_test_util
- gpr
- grpc++_test_config
benchmark: true
defaults: benchmark
excluded_poll_engines:
@ -4079,6 +4102,7 @@ targets:
- grpc_unsecure
- gpr_test_util
- gpr
- grpc++_test_config
benchmark: true
defaults: benchmark
platforms:
@ -4100,6 +4124,7 @@ targets:
- grpc_unsecure
- gpr_test_util
- gpr
- grpc++_test_config
benchmark: true
defaults: benchmark
platforms:
@ -4266,7 +4291,9 @@ targets:
- src/proto/grpc/testing/control.proto
- src/proto/grpc/testing/messages.proto
- src/proto/grpc/testing/payloads.proto
- src/proto/grpc/testing/services.proto
- src/proto/grpc/testing/benchmark_service.proto
- src/proto/grpc/testing/report_qps_scenario_service.proto
- src/proto/grpc/testing/worker_service.proto
- src/proto/grpc/testing/stats.proto
- test/cpp/codegen/codegen_test_full.cc
deps:
@ -4285,7 +4312,9 @@ targets:
- src/proto/grpc/testing/control.proto
- src/proto/grpc/testing/messages.proto
- src/proto/grpc/testing/payloads.proto
- src/proto/grpc/testing/services.proto
- src/proto/grpc/testing/benchmark_service.proto
- src/proto/grpc/testing/report_qps_scenario_service.proto
- src/proto/grpc/testing/worker_service.proto
- src/proto/grpc/testing/stats.proto
- test/cpp/codegen/codegen_test_minimal.cc
deps:
@ -4747,6 +4776,20 @@ targets:
- mac
- linux
- posix
- name: lb_load_data_store_test
gtest: true
build: test
language: c++
src:
- test/cpp/server/load_reporter/load_data_store_test.cc
deps:
- lb_load_data_store
- grpc++_test_util
- grpc_test_util
- grpc++
- grpc
- gpr_test_util
- gpr
- name: memory_test
gtest: true
build: test

@ -369,7 +369,6 @@ if test "$PHP_GRPC" != "no"; then
src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c \
src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc \
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \
src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc \
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \
src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc \
src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc \
@ -650,7 +649,6 @@ if test "$PHP_GRPC" != "no"; then
PHP_ADD_BUILD_DIR($ext_builddir/src/boringssl)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/census)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/grpclb)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/pick_first)

@ -345,7 +345,6 @@ if (PHP_GRPC != "no") {
"src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb\\proto\\grpc\\lb\\v1\\load_balancer.pb.c " +
"src\\core\\ext\\filters\\client_channel\\resolver\\fake\\fake_resolver.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\pick_first\\pick_first.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\subchannel_list.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\round_robin\\round_robin.cc " +
"src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\dns_resolver_ares.cc " +
"src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_ev_driver_posix.cc " +

@ -50,6 +50,7 @@ some configuration as environment variables that can be set.
- channel_stack_builder - traces information about channel stacks being built
- executor - traces grpc's internal thread pool ('the executor')
- glb - traces the grpclb load balancer
- handshaker - traces handshaking state
- http - traces state in the http2 transport engine
- http2_stream_state - traces all http2 stream state mutations.
- http1 - traces HTTP/1.x operations performed by gRPC
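For reference, the tracers in this list, including the newly added handshaker tracer, are switched on through the GRPC_TRACE environment variable (a comma-separated list of tracer names), with GRPC_VERBOSITY controlling which severities are printed. A minimal C++ sketch, assuming the variables are set before grpc_init() so the library picks them up at startup; exporting them in the shell before launching the process works just as well:

    #include <cstdlib>     // setenv (POSIX)
    #include <grpc/grpc.h>

    int main() {
      // Enable the handshaker and http tracers; gRPC reads GRPC_TRACE at init.
      setenv("GRPC_TRACE", "handshaker,http", 1 /* overwrite */);
      // Raise verbosity so informational trace messages reach stderr.
      setenv("GRPC_VERBOSITY", "DEBUG", 1);
      grpc_init();
      // ... create channels or servers as usual; trace output goes to stderr ...
      grpc_shutdown();
      return 0;
    }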

@ -1,18 +1,15 @@
Each version of gRPC gets a new description of what the 'g' stands for, since
we've never really been able to figure it out.
'g' stands for something different every gRPC release:
Below is a list of already-used definitions (that should not be repeated in the
future), and the corresponding version numbers that used them:
- 1.0 'g' stands for 'gRPC'
- 1.1 'g' stands for 'good'
- 1.2 'g' stands for 'green'
- 1.3 'g' stands for 'gentle'
- 1.4 'g' stands for 'gregarious'
- 1.6 'g' stands for 'garcia'
- 1.7 'g' stands for 'gambit'
- 1.8 'g' stands for 'generous'
- 1.9 'g' stands for 'glossy'
- 1.10 'g' stands for 'glamorous'
- 1.11 'g' stands for 'gorgeous'
- 1.12 'g' stands for 'glorious'
- 1.0 'g' stands for ['gRPC'](https://github.com/grpc/grpc/tree/v1.0.x)
- 1.1 'g' stands for ['good'](https://github.com/grpc/grpc/tree/v1.1.x)
- 1.2 'g' stands for ['green'](https://github.com/grpc/grpc/tree/v1.2.x)
- 1.3 'g' stands for ['gentle'](https://github.com/grpc/grpc/tree/v1.3.x)
- 1.4 'g' stands for ['gregarious'](https://github.com/grpc/grpc/tree/v1.4.x)
- 1.6 'g' stands for ['garcia'](https://github.com/grpc/grpc/tree/v1.6.x)
- 1.7 'g' stands for ['gambit'](https://github.com/grpc/grpc/tree/v1.7.x)
- 1.8 'g' stands for ['generous'](https://github.com/grpc/grpc/tree/v1.8.x)
- 1.9 'g' stands for ['glossy'](https://github.com/grpc/grpc/tree/v1.9.x)
- 1.10 'g' stands for ['glamorous'](https://github.com/grpc/grpc/tree/v1.10.x)
- 1.11 'g' stands for ['gorgeous'](https://github.com/grpc/grpc/tree/v1.11.x)
- 1.12 'g' stands for ['glorious'](https://github.com/grpc/grpc/tree/v1.12.x)
- 1.13 'g' stands for ['gloriosa'](https://github.com/grpc/grpc/tree/master)

@ -3525,39 +3525,6 @@ AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ
5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su
-----END CERTIFICATE-----
# Issuer: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı H5 O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş.
# Subject: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı H5 O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş.
# Label: "TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı H5"
# Serial: 156233699172481
# MD5 Fingerprint: da:70:8e:f0:22:df:93:26:f6:5f:9f:d3:15:06:52:4e
# SHA1 Fingerprint: c4:18:f6:4d:46:d1:df:00:3d:27:30:13:72:43:a9:12:11:c6:75:fb
# SHA256 Fingerprint: 49:35:1b:90:34:44:c1:85:cc:dc:5c:69:3d:24:d8:55:5c:b2:08:d6:a8:14:13:07:69:9f:4a:f0:63:19:9d:78
-----BEGIN CERTIFICATE-----
MIIEJzCCAw+gAwIBAgIHAI4X/iQggTANBgkqhkiG9w0BAQsFADCBsTELMAkGA1UE
BhMCVFIxDzANBgNVBAcMBkFua2FyYTFNMEsGA1UECgxEVMOcUktUUlVTVCBCaWxn
aSDEsGxldGnFn2ltIHZlIEJpbGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkg
QS7Fni4xQjBABgNVBAMMOVTDnFJLVFJVU1QgRWxla3Ryb25payBTZXJ0aWZpa2Eg
SGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSBINTAeFw0xMzA0MzAwODA3MDFaFw0yMzA0
MjgwODA3MDFaMIGxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMU0wSwYD
VQQKDERUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8
dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLjFCMEAGA1UEAww5VMOcUktUUlVTVCBF
bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIEg1MIIB
IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApCUZ4WWe60ghUEoI5RHwWrom
/4NZzkQqL/7hzmAD/I0Dpe3/a6i6zDQGn1k19uwsu537jVJp45wnEFPzpALFp/kR
Gml1bsMdi9GYjZOHp3GXDSHHmflS0yxjXVW86B8BSLlg/kJK9siArs1mep5Fimh3
4khon6La8eHBEJ/rPCmBp+EyCNSgBbGM+42WAA4+Jd9ThiI7/PS98wl+d+yG6w8z
5UNP9FR1bSmZLmZaQ9/LXMrI5Tjxfjs1nQ/0xVqhzPMggCTTV+wVunUlm+hkS7M0
hO8EuPbJbKoCPrZV4jI3X/xml1/N1p7HIL9Nxqw/dV8c7TKcfGkAaZHjIxhT6QID
AQABo0IwQDAdBgNVHQ4EFgQUVpkHHtOsDGlktAxQR95DLL4gwPswDgYDVR0PAQH/
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAJ5FdnsX
SDLyOIspve6WSk6BGLFRRyDN0GSxDsnZAdkJzsiZ3GglE9Rc8qPoBP5yCccLqh0l
VX6Wmle3usURehnmp349hQ71+S4pL+f5bFgWV1Al9j4uPqrtd3GqqpmWRgqujuwq
URawXs3qZwQcWDD1YIq9pr1N5Za0/EKJAWv2cMhQOQwt1WbZyNKzMrcbGW3LM/nf
peYVhDfwwvJllpKQd/Ct9JDpEXjXk4nAPQu6KfTomZ1yju2dL+6SfaHx/126M2CF
Yv4HAqGEVka+lgqaE9chTLd8B59OTj+RdPsnnRHM3eaxynFNExc5JsUpISuTKWqW
+qtB4Uu2NQvAmxU=
-----END CERTIFICATE-----
# Issuer: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903
# Subject: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903
# Label: "Certinomis - Root CA"

@ -1,5 +0,0 @@
{
"sdk": {
"version": "1.0.0"
}
}

@ -122,7 +122,7 @@ function getDistance(start, end) {
var deltalon = lon2-lon1;
var a = Math.sin(deltalat/2) * Math.sin(deltalat/2) +
Math.cos(lat1) * Math.cos(lat2) *
Math.sin(dlon/2) * Math.sin(dlon/2);
Math.sin(deltalon/2) * Math.sin(deltalon/2);
var c = 2 * Math.atan2(Math.sqrt(a), Math.sqrt(1-a));
return R * c;
}
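The change above fixes a reference to an undefined dlon variable inside the example server's getDistance() haversine computation; deltalon is the variable actually computed a few lines earlier. For reference, the same math as a self-contained C++ sketch; the function name and the Earth-radius constant are illustrative assumptions, and inputs are taken to be in radians:

    #include <cmath>

    // Great-circle distance via the haversine formula (same math as getDistance()).
    double HaversineDistanceMeters(double lat1, double lon1,
                                   double lat2, double lon2) {
      const double kEarthRadiusMeters = 6371000.0;  // assumed mean Earth radius
      const double deltalat = lat2 - lat1;
      const double deltalon = lon2 - lon1;
      const double a = std::sin(deltalat / 2) * std::sin(deltalat / 2) +
                       std::cos(lat1) * std::cos(lat2) *
                           std::sin(deltalon / 2) * std::sin(deltalon / 2);
      const double c = 2 * std::atan2(std::sqrt(a), std::sqrt(1 - a));
      return kEarthRadiusMeters * c;
    }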

@ -23,7 +23,7 @@
Pod::Spec.new do |s|
s.name = 'gRPC-C++'
# TODO (mxyan): use version that matches gRPC version when pod is stabilized
# version = '1.12.0-dev'
# version = '1.13.0-dev'
version = '0.0.2'
s.version = version
s.summary = 'gRPC C++ library'
@ -31,7 +31,7 @@ Pod::Spec.new do |s|
s.license = 'Apache License, Version 2.0'
s.authors = { 'The gRPC contributors' => 'grpc-packages@google.com' }
grpc_version = '1.12.0-dev'
grpc_version = '1.13.0-dev'
s.source = {
:git => 'https://github.com/grpc/grpc.git',
@ -260,6 +260,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/http/client/http_client_filter.h',
'src/core/ext/filters/http/message_compress/message_compress_filter.h',
'src/core/ext/filters/http/server/http_server_filter.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h',
'src/core/lib/security/context/security_context.h',
'src/core/lib/security/credentials/alts/alts_credentials.h',
'src/core/lib/security/credentials/composite/composite_credentials.h',

@ -22,7 +22,7 @@
Pod::Spec.new do |s|
s.name = 'gRPC-Core'
version = '1.12.0-dev'
version = '1.13.0-dev'
s.version = version
s.summary = 'Core cross-platform gRPC library, written in C'
s.homepage = 'https://grpc.io'
@ -270,6 +270,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/http/client/http_client_filter.h',
'src/core/ext/filters/http/message_compress/message_compress_filter.h',
'src/core/ext/filters/http/server/http_server_filter.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h',
'src/core/lib/security/context/security_context.h',
'src/core/lib/security/credentials/alts/alts_credentials.h',
'src/core/lib/security/credentials/composite/composite_credentials.h',
@ -784,7 +785,6 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
@ -850,6 +850,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/http/client/http_client_filter.h',
'src/core/ext/filters/http/message_compress/message_compress_filter.h',
'src/core/ext/filters/http/server/http_server_filter.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h',
'src/core/lib/security/context/security_context.h',
'src/core/lib/security/credentials/alts/alts_credentials.h',
'src/core/lib/security/credentials/composite/composite_credentials.h',
@ -1214,6 +1215,7 @@ Pod::Spec.new do |s|
'test/core/end2end/tests/retry_exceeds_buffer_size_in_initial_batch.cc',
'test/core/end2end/tests/retry_exceeds_buffer_size_in_subsequent_batch.cc',
'test/core/end2end/tests/retry_non_retriable_status.cc',
'test/core/end2end/tests/retry_non_retriable_status_before_recv_trailing_metadata_started.cc',
'test/core/end2end/tests/retry_recv_initial_metadata.cc',
'test/core/end2end/tests/retry_recv_message.cc',
'test/core/end2end/tests/retry_server_pushback_delay.cc',

@ -21,7 +21,7 @@
Pod::Spec.new do |s|
s.name = 'gRPC-ProtoRPC'
version = '1.12.0-dev'
version = '1.13.0-dev'
s.version = version
s.summary = 'RPC library for Protocol Buffers, based on gRPC'
s.homepage = 'https://grpc.io'

@ -21,7 +21,7 @@
Pod::Spec.new do |s|
s.name = 'gRPC-RxLibrary'
version = '1.12.0-dev'
version = '1.13.0-dev'
s.version = version
s.summary = 'Reactive Extensions library for iOS/OSX.'
s.homepage = 'https://grpc.io'

@ -20,7 +20,7 @@
Pod::Spec.new do |s|
s.name = 'gRPC'
version = '1.12.0-dev'
version = '1.13.0-dev'
s.version = version
s.summary = 'gRPC client library for iOS/OSX'
s.homepage = 'https://grpc.io'

@ -201,6 +201,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/filters/http/client/http_client_filter.h )
s.files += %w( src/core/ext/filters/http/message_compress/message_compress_filter.h )
s.files += %w( src/core/ext/filters/http/server/http_server_filter.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h )
s.files += %w( src/core/lib/security/context/security_context.h )
s.files += %w( src/core/lib/security/credentials/alts/alts_credentials.h )
s.files += %w( src/core/lib/security/credentials/composite/composite_credentials.h )
@ -722,7 +723,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c )
s.files += %w( src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc )
s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc )
s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc )

@ -529,7 +529,6 @@
'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
@ -1258,7 +1257,6 @@
'third_party/nanopb/pb_decode.c',
'third_party/nanopb/pb_encode.c',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/census/grpc_context.cc',
'src/core/ext/filters/max_age/max_age_filter.cc',
@ -1637,6 +1635,16 @@
'test/cpp/interop/interop_server_bootstrap.cc',
],
},
{
'target_name': 'lb_load_data_store',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/cpp/server/load_reporter/load_data_store.cc',
],
},
{
'target_name': 'qps',
'type': 'static_library',
@ -1652,7 +1660,9 @@
'src/proto/grpc/testing/payloads.proto',
'src/proto/grpc/testing/stats.proto',
'src/proto/grpc/testing/control.proto',
'src/proto/grpc/testing/services.proto',
'src/proto/grpc/testing/benchmark_service.proto',
'src/proto/grpc/testing/report_qps_scenario_service.proto',
'src/proto/grpc/testing/worker_service.proto',
'test/cpp/qps/benchmark_config.cc',
'test/cpp/qps/client_async.cc',
'test/cpp/qps/client_sync.cc',
@ -2622,6 +2632,7 @@
'test/core/end2end/tests/retry_exceeds_buffer_size_in_initial_batch.cc',
'test/core/end2end/tests/retry_exceeds_buffer_size_in_subsequent_batch.cc',
'test/core/end2end/tests/retry_non_retriable_status.cc',
'test/core/end2end/tests/retry_non_retriable_status_before_recv_trailing_metadata_started.cc',
'test/core/end2end/tests/retry_recv_initial_metadata.cc',
'test/core/end2end/tests/retry_recv_message.cc',
'test/core/end2end/tests/retry_server_pushback_delay.cc',
@ -2711,6 +2722,7 @@
'test/core/end2end/tests/retry_exceeds_buffer_size_in_initial_batch.cc',
'test/core/end2end/tests/retry_exceeds_buffer_size_in_subsequent_batch.cc',
'test/core/end2end/tests/retry_non_retriable_status.cc',
'test/core/end2end/tests/retry_non_retriable_status_before_recv_trailing_metadata_started.cc',
'test/core/end2end/tests/retry_recv_initial_metadata.cc',
'test/core/end2end/tests/retry_recv_message.cc',
'test/core/end2end/tests/retry_server_pushback_delay.cc',

@ -99,6 +99,12 @@ GPRAPI void gpr_set_log_function(gpr_log_func func);
} \
} while (0)
#ifndef NDEBUG
#define GPR_DEBUG_ASSERT(x) GPR_ASSERT(x)
#else
#define GPR_DEBUG_ASSERT(x)
#endif
#ifdef __cplusplus
}
#endif
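The GPR_DEBUG_ASSERT macro added to the logging header above behaves like GPR_ASSERT when NDEBUG is not defined and expands to nothing in optimized builds, so it suits checks that are too hot to keep in release binaries. A small usage sketch; the helper function is hypothetical:

    #include <grpc/support/log.h>

    // Hypothetical helper: the null check aborts loudly in debug builds but
    // compiles away entirely when NDEBUG is defined.
    static void log_name(const char* name) {
      GPR_DEBUG_ASSERT(name != nullptr);
      gpr_log(GPR_INFO, "name: %s", name);
    }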

@ -13,8 +13,8 @@
<date>2018-01-19</date>
<time>16:06:07</time>
<version>
<release>1.12.0dev</release>
<api>1.12.0dev</api>
<release>1.13.0dev</release>
<api>1.13.0dev</api>
</version>
<stability>
<release>beta</release>
@ -208,6 +208,7 @@
<file baseinstalldir="/" name="src/core/ext/filters/http/client/http_client_filter.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/http/message_compress/message_compress_filter.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/http/server/http_server_filter.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/context/security_context.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/credentials/alts/alts_credentials.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/credentials/composite/composite_credentials.h" role="src" />
@ -729,7 +730,6 @@
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc" role="src" />

@ -20,6 +20,7 @@
#include "src/compiler/config.h"
#include "src/compiler/generator_helpers.h"
#include "src/compiler/node_generator.h"
#include "src/compiler/node_generator_helpers.h"
using grpc::protobuf::Descriptor;
@ -119,7 +120,8 @@ grpc::string NodeObjectPath(const Descriptor* descriptor) {
}
// Prints out the message serializer and deserializer functions
void PrintMessageTransformer(const Descriptor* descriptor, Printer* out) {
void PrintMessageTransformer(const Descriptor* descriptor, Printer* out,
const Parameters& params) {
map<grpc::string, grpc::string> template_vars;
grpc::string full_name = descriptor->full_name();
template_vars["identifier_name"] = MessageIdentifierName(full_name);
@ -134,7 +136,12 @@ void PrintMessageTransformer(const Descriptor* descriptor, Printer* out) {
"throw new Error('Expected argument of type $name$');\n");
out->Outdent();
out->Print("}\n");
if (params.minimum_node_version > 5) {
// Node version is > 5, so we should use Buffer.from
out->Print("return Buffer.from(arg.serializeBinary());\n");
} else {
out->Print("return new Buffer(arg.serializeBinary());\n");
}
out->Outdent();
out->Print("}\n\n");
@ -219,12 +226,13 @@ void PrintImports(const FileDescriptor* file, Printer* out) {
out->Print("\n");
}
void PrintTransformers(const FileDescriptor* file, Printer* out) {
void PrintTransformers(const FileDescriptor* file, Printer* out,
const Parameters& params) {
map<grpc::string, const Descriptor*> messages = GetAllMessages(file);
for (std::map<grpc::string, const Descriptor*>::iterator it =
messages.begin();
it != messages.end(); it++) {
PrintMessageTransformer(it->second, out);
PrintMessageTransformer(it->second, out, params);
}
out->Print("\n");
}
@ -236,7 +244,8 @@ void PrintServices(const FileDescriptor* file, Printer* out) {
}
} // namespace
grpc::string GenerateFile(const FileDescriptor* file) {
grpc::string GenerateFile(const FileDescriptor* file,
const Parameters& params) {
grpc::string output;
{
StringOutputStream output_stream(&output);
@ -257,7 +266,7 @@ grpc::string GenerateFile(const FileDescriptor* file) {
PrintImports(file, &out);
PrintTransformers(file, &out);
PrintTransformers(file, &out, params);
PrintServices(file, &out);

@ -23,7 +23,14 @@
namespace grpc_node_generator {
grpc::string GenerateFile(const grpc::protobuf::FileDescriptor* file);
// Contains all the parameters that are parsed from the command line.
struct Parameters {
// Sets the earliest version of nodejs that needs to be supported.
int minimum_node_version;
};
grpc::string GenerateFile(const grpc::protobuf::FileDescriptor* file,
const Parameters& params);
} // namespace grpc_node_generator

@ -36,7 +36,27 @@ class NodeGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
const grpc::string& parameter,
grpc::protobuf::compiler::GeneratorContext* context,
grpc::string* error) const {
grpc::string code = GenerateFile(file);
grpc_node_generator::Parameters generator_parameters;
generator_parameters.minimum_node_version = 4;
if (!parameter.empty()) {
std::vector<grpc::string> parameters_list =
grpc_generator::tokenize(parameter, ",");
for (auto parameter_string = parameters_list.begin();
parameter_string != parameters_list.end(); parameter_string++) {
std::vector<grpc::string> param =
grpc_generator::tokenize(*parameter_string, "=");
if (param[0] == "minimum_node_version") {
sscanf(param[1].c_str(), "%d",
&generator_parameters.minimum_node_version);
} else {
*error = grpc::string("Unknown parameter: ") + *parameter_string;
return false;
}
}
}
grpc::string code = GenerateFile(file, generator_parameters);
if (code.size() == 0) {
return true;
}
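The plugin change above introduces a generator option, minimum_node_version (default 4), parsed from protoc's comma-separated parameter string; node_generator.cc then emits Buffer.from(...) instead of the deprecated new Buffer(...) in the generated serializers whenever the value is greater than 5. A hedged C++ sketch of driving the generator directly with the new Parameters struct, mirroring what NodeGrpcGenerator::Generate() does after parsing minimum_node_version=6 (the wrapper function is hypothetical):

    #include "src/compiler/node_generator.h"

    // Hypothetical wrapper: generate Node service code targeting Node >= 6,
    // so message serializers use Buffer.from(arg.serializeBinary()).
    grpc::string GenerateForModernNode(const grpc::protobuf::FileDescriptor* file) {
      grpc_node_generator::Parameters params;
      params.minimum_node_version = 6;
      return grpc_node_generator::GenerateFile(file, params);
    }

With the usual protoc plugin option syntax, the same effect would typically be achieved by passing the option before the output directory, e.g. minimum_node_version=6:<out_dir>, though the exact invocation depends on how the plugin is wired up.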

@ -222,7 +222,8 @@ void PrintMethodImplementations(Printer* printer,
map< ::grpc::string, ::grpc::string> vars = {
{"service_class", ServiceClassName(service)}};
printer.Print(vars, "@protocol $service_class$ <NSObject>\n\n");
printer.Print(
vars, "@protocol $service_class$ <NSObject, GRPCProtoServiceInit>\n\n");
for (int i = 0; i < service->method_count(); i++) {
PrintMethodDeclarations(&printer, service->method(i));
}

@ -174,7 +174,7 @@ static void set_channel_connectivity_state_locked(channel_data* chand,
}
}
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: setting connectivity state to %s", chand,
gpr_log(GPR_INFO, "chand=%p: setting connectivity state to %s", chand,
grpc_connectivity_state_name(state));
}
grpc_connectivity_state_set(&chand->state_tracker, state, error, reason);
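Most of the client channel hunks that follow reclassify trace-guarded log statements from GPR_DEBUG to GPR_INFO; reading between the lines, the effect is that the messages surface once the tracer is enabled at info verbosity, without requiring debug verbosity (that interpretation is an inference from the diff, not stated in it). The guarded-logging idiom itself, as a standalone C++ sketch in which the trace flag name and helper are hypothetical while grpc_core::TraceFlag is the type these files actually use:

    #include <grpc/support/log.h>
    #include "src/core/lib/debug/trace.h"

    // Hypothetical trace flag; real ones (e.g. grpc_client_channel_trace) are
    // defined alongside the code they instrument.
    static grpc_core::TraceFlag example_trace(false, "example");

    static void note_connectivity_change(void* chand, const char* state_name) {
      if (example_trace.enabled()) {
        gpr_log(GPR_INFO, "chand=%p: setting connectivity state to %s", chand,
                state_name);
      }
    }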
@ -186,7 +186,7 @@ static void on_lb_policy_state_changed_locked(void* arg, grpc_error* error) {
/* check if the notification is for the latest policy */
if (w->lb_policy == w->chand->lb_policy.get()) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: lb_policy=%p state changed to %s", w->chand,
gpr_log(GPR_INFO, "chand=%p: lb_policy=%p state changed to %s", w->chand,
w->lb_policy, grpc_connectivity_state_name(w->state));
}
set_channel_connectivity_state_locked(w->chand, w->state,
@ -215,7 +215,7 @@ static void watch_lb_policy_locked(channel_data* chand,
static void start_resolving_locked(channel_data* chand) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: starting name resolution", chand);
gpr_log(GPR_INFO, "chand=%p: starting name resolution", chand);
}
GPR_ASSERT(!chand->started_resolving);
chand->started_resolving = true;
@ -297,7 +297,7 @@ static void request_reresolution_locked(void* arg, grpc_error* error) {
return;
}
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: started name re-resolving", chand);
gpr_log(GPR_INFO, "chand=%p: started name re-resolving", chand);
}
chand->resolver->RequestReresolutionLocked();
// Give back the closure to the LB policy.
@ -311,7 +311,7 @@ static void request_reresolution_locked(void* arg, grpc_error* error) {
static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
channel_data* chand = static_cast<channel_data*>(arg);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p: got resolver result: resolver_result=%p error=%s", chand,
chand->resolver_result, grpc_error_string(error));
}
@ -431,7 +431,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
}
}
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p: resolver result: lb_policy_name=\"%s\"%s, "
"service_config=\"%s\"",
chand, lb_policy_name_dup,
@ -466,7 +466,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
chand->resolver == nullptr) {
if (chand->lb_policy != nullptr) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: unreffing lb_policy=%p", chand,
gpr_log(GPR_INFO, "chand=%p: unreffing lb_policy=%p", chand,
chand->lb_policy.get());
}
grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
@ -480,11 +480,11 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
// error or shutdown.
if (error != GRPC_ERROR_NONE || chand->resolver == nullptr) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: shutting down", chand);
gpr_log(GPR_INFO, "chand=%p: shutting down", chand);
}
if (chand->resolver != nullptr) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: shutting down resolver", chand);
gpr_log(GPR_INFO, "chand=%p: shutting down resolver", chand);
}
chand->resolver.reset();
}
@ -506,7 +506,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
if (lb_policy_created) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: initializing new LB policy", chand);
gpr_log(GPR_INFO, "chand=%p: initializing new LB policy", chand);
}
GRPC_ERROR_UNREF(state_error);
state = chand->lb_policy->CheckConnectivityLocked(&state_error);
@ -842,10 +842,11 @@ typedef struct {
bool completed_recv_trailing_metadata : 1;
// State for callback processing.
bool retry_dispatched : 1;
bool recv_initial_metadata_ready_deferred : 1;
bool recv_message_ready_deferred : 1;
subchannel_batch_data* recv_initial_metadata_ready_deferred_batch;
grpc_error* recv_initial_metadata_error;
subchannel_batch_data* recv_message_ready_deferred_batch;
grpc_error* recv_message_error;
subchannel_batch_data* recv_trailing_metadata_internal_batch;
} subchannel_call_retry_state;
// Pending batches stored in call data.
@ -994,6 +995,39 @@ static void maybe_cache_send_ops_for_batch(call_data* calld,
}
}
// Frees cached send_initial_metadata.
static void free_cached_send_initial_metadata(channel_data* chand,
call_data* calld) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: destroying calld->send_initial_metadata", chand,
calld);
}
grpc_metadata_batch_destroy(&calld->send_initial_metadata);
}
// Frees cached send_message at index idx.
static void free_cached_send_message(channel_data* chand, call_data* calld,
size_t idx) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR "]",
chand, calld, idx);
}
(*calld->send_messages)[idx]->Destroy();
}
// Frees cached send_trailing_metadata.
static void free_cached_send_trailing_metadata(channel_data* chand,
call_data* calld) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: destroying calld->send_trailing_metadata",
chand, calld);
}
grpc_metadata_batch_destroy(&calld->send_trailing_metadata);
}
// Frees cached send ops that have already been completed after
// committing the call.
static void free_cached_send_op_data_after_commit(
@ -1001,19 +1035,13 @@ static void free_cached_send_op_data_after_commit(
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (retry_state->completed_send_initial_metadata) {
grpc_metadata_batch_destroy(&calld->send_initial_metadata);
free_cached_send_initial_metadata(chand, calld);
}
for (size_t i = 0; i < retry_state->completed_send_message_count; ++i) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR
"]",
chand, calld, i);
}
(*calld->send_messages)[i]->Destroy();
free_cached_send_message(chand, calld, i);
}
if (retry_state->completed_send_trailing_metadata) {
grpc_metadata_batch_destroy(&calld->send_trailing_metadata);
free_cached_send_trailing_metadata(chand, calld);
}
}
@ -1025,20 +1053,14 @@ static void free_cached_send_op_data_for_completed_batch(
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (batch_data->batch.send_initial_metadata) {
grpc_metadata_batch_destroy(&calld->send_initial_metadata);
free_cached_send_initial_metadata(chand, calld);
}
if (batch_data->batch.send_message) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR
"]",
chand, calld, retry_state->completed_send_message_count - 1);
}
(*calld->send_messages)[retry_state->completed_send_message_count - 1]
->Destroy();
free_cached_send_message(chand, calld,
retry_state->completed_send_message_count - 1);
}
if (batch_data->batch.send_trailing_metadata) {
grpc_metadata_batch_destroy(&calld->send_trailing_metadata);
free_cached_send_trailing_metadata(chand, calld);
}
}
@ -1066,7 +1088,7 @@ static void pending_batches_add(grpc_call_element* elem,
call_data* calld = static_cast<call_data*>(elem->call_data);
const size_t idx = get_batch_index(batch);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: adding pending batch at index %" PRIuPTR, chand,
calld, idx);
}
@ -1094,7 +1116,7 @@ static void pending_batches_add(grpc_call_element* elem,
}
if (calld->bytes_buffered_for_retry > chand->per_rpc_retry_buffer_size) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: exceeded retry buffer size, committing",
chand, calld);
}
@ -1109,7 +1131,7 @@ static void pending_batches_add(grpc_call_element* elem,
// retries are disabled so that we don't bother with retry overhead.
if (calld->num_attempts_completed == 0) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: disabling retries before first attempt",
chand, calld);
}
@ -1156,7 +1178,7 @@ static void pending_batches_fail(grpc_call_element* elem, grpc_error* error,
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
if (calld->pending_batches[i].batch != nullptr) ++num_batches;
}
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
elem->channel_data, calld, num_batches, grpc_error_string(error));
}
@ -1218,7 +1240,7 @@ static void pending_batches_resume(grpc_call_element* elem) {
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
if (calld->pending_batches[i].batch != nullptr) ++num_batches;
}
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: starting %" PRIuPTR
" pending batches on subchannel_call=%p",
chand, calld, num_batches, calld->subchannel_call);
@ -1263,7 +1285,7 @@ static void maybe_clear_pending_batch(grpc_call_element* elem,
(!batch->recv_message ||
batch->payload->recv_message.recv_message_ready == nullptr)) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: clearing pending batch", chand,
gpr_log(GPR_INFO, "chand=%p calld=%p: clearing pending batch", chand,
calld);
}
pending_batch_clear(calld, pending);
@ -1353,7 +1375,7 @@ static void retry_commit(grpc_call_element* elem,
if (calld->retry_committed) return;
calld->retry_committed = true;
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: committing retries", chand, calld);
gpr_log(GPR_INFO, "chand=%p calld=%p: committing retries", chand, calld);
}
if (retry_state != nullptr) {
free_cached_send_op_data_after_commit(elem, retry_state);
@ -1398,7 +1420,7 @@ static void do_retry(grpc_call_element* elem,
next_attempt_time = calld->retry_backoff->NextAttemptTime();
}
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: retrying failed call in %" PRId64 " ms", chand,
calld, next_attempt_time - grpc_core::ExecCtx::Get()->Now());
}
@ -1432,7 +1454,7 @@ static bool maybe_retry(grpc_call_element* elem,
batch_data->subchannel_call));
if (retry_state->retry_dispatched) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: retry already dispatched", chand,
gpr_log(GPR_INFO, "chand=%p calld=%p: retry already dispatched", chand,
calld);
}
return true;
@ -1444,14 +1466,14 @@ static bool maybe_retry(grpc_call_element* elem,
calld->retry_throttle_data->RecordSuccess();
}
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: call succeeded", chand, calld);
gpr_log(GPR_INFO, "chand=%p calld=%p: call succeeded", chand, calld);
}
return false;
}
// Status is not OK. Check whether the status is retryable.
if (!retry_policy->retryable_status_codes.Contains(status)) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: status %s not configured as retryable", chand,
calld, grpc_status_code_to_string(status));
}
@ -1467,14 +1489,14 @@ static bool maybe_retry(grpc_call_element* elem,
if (calld->retry_throttle_data != nullptr &&
!calld->retry_throttle_data->RecordFailure()) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: retries throttled", chand, calld);
gpr_log(GPR_INFO, "chand=%p calld=%p: retries throttled", chand, calld);
}
return false;
}
// Check whether the call is committed.
if (calld->retry_committed) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: retries already committed", chand,
gpr_log(GPR_INFO, "chand=%p calld=%p: retries already committed", chand,
calld);
}
return false;
@ -1483,7 +1505,7 @@ static bool maybe_retry(grpc_call_element* elem,
++calld->num_attempts_completed;
if (calld->num_attempts_completed >= retry_policy->max_attempts) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: exceeded %d retry attempts", chand,
gpr_log(GPR_INFO, "chand=%p calld=%p: exceeded %d retry attempts", chand,
calld, retry_policy->max_attempts);
}
return false;
@ -1491,7 +1513,7 @@ static bool maybe_retry(grpc_call_element* elem,
// If the call was cancelled from the surface, don't retry.
if (calld->cancel_error != GRPC_ERROR_NONE) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: call cancelled from surface, not retrying",
chand, calld);
}
@ -1504,16 +1526,15 @@ static bool maybe_retry(grpc_call_element* elem,
uint32_t ms;
if (!grpc_parse_slice_to_uint32(GRPC_MDVALUE(*server_pushback_md), &ms)) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: not retrying due to server push-back",
chand, calld);
}
return false;
} else {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: server push-back: retry in %u ms", chand,
calld, ms);
gpr_log(GPR_INFO, "chand=%p calld=%p: server push-back: retry in %u ms",
chand, calld, ms);
}
server_pushback_ms = (grpc_millis)ms;
}
@ -1586,7 +1607,7 @@ static void invoke_recv_initial_metadata_callback(void* arg,
batch->payload->recv_initial_metadata.recv_initial_metadata_ready !=
nullptr) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: invoking recv_initial_metadata_ready for "
"pending batch at index %" PRIuPTR,
chand, calld, i);
@ -1622,7 +1643,7 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: got recv_initial_metadata_ready, error=%s",
chand, calld, grpc_error_string(error));
}
@ -1637,12 +1658,12 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
if ((batch_data->trailing_metadata_available || error != GRPC_ERROR_NONE) &&
!retry_state->completed_recv_trailing_metadata) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: deferring recv_initial_metadata_ready "
"(Trailers-Only)",
chand, calld);
}
retry_state->recv_initial_metadata_ready_deferred = true;
retry_state->recv_initial_metadata_ready_deferred_batch = batch_data;
retry_state->recv_initial_metadata_error = GRPC_ERROR_REF(error);
if (!retry_state->started_recv_trailing_metadata) {
// recv_trailing_metadata not yet started by application; start it
@ -1679,7 +1700,7 @@ static void invoke_recv_message_callback(void* arg, grpc_error* error) {
if (batch != nullptr && batch->recv_message &&
batch->payload->recv_message.recv_message_ready != nullptr) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: invoking recv_message_ready for "
"pending batch at index %" PRIuPTR,
chand, calld, i);
@ -1712,7 +1733,7 @@ static void recv_message_ready(void* arg, grpc_error* error) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: got recv_message_ready, error=%s",
gpr_log(GPR_INFO, "chand=%p calld=%p: got recv_message_ready, error=%s",
chand, calld, grpc_error_string(error));
}
subchannel_call_retry_state* retry_state =
@ -1726,12 +1747,12 @@ static void recv_message_ready(void* arg, grpc_error* error) {
if ((batch_data->recv_message == nullptr || error != GRPC_ERROR_NONE) &&
!retry_state->completed_recv_trailing_metadata) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: deferring recv_message_ready (nullptr "
"message and recv_trailing_metadata pending)",
chand, calld);
}
retry_state->recv_message_ready_deferred = true;
retry_state->recv_message_ready_deferred_batch = batch_data;
retry_state->recv_message_error = GRPC_ERROR_REF(error);
if (!retry_state->started_recv_trailing_metadata) {
// recv_trailing_metadata not yet started by application; start it
@ -1749,6 +1770,59 @@ static void recv_message_ready(void* arg, grpc_error* error) {
GRPC_ERROR_UNREF(error);
}
//
// list of closures to execute in call combiner
//
// Represents a closure that needs to run in the call combiner as part of
// starting or completing a batch.
typedef struct {
grpc_closure* closure;
grpc_error* error;
const char* reason;
bool free_reason = false;
} closure_to_execute;
static void execute_closures_in_call_combiner(grpc_call_element* elem,
const char* caller,
closure_to_execute* closures,
size_t num_closures) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
// Note that the call combiner will be yielded for each closure that
// we schedule. We're already running in the call combiner, so one of
// the closures can be scheduled directly, but the others will
// have to re-enter the call combiner.
if (num_closures > 0) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: %s starting closure: %s", chand,
calld, caller, closures[0].reason);
}
GRPC_CLOSURE_SCHED(closures[0].closure, closures[0].error);
if (closures[0].free_reason) {
gpr_free(const_cast<char*>(closures[0].reason));
}
for (size_t i = 1; i < num_closures; ++i) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: %s starting closure in call combiner: %s",
chand, calld, caller, closures[i].reason);
}
GRPC_CALL_COMBINER_START(calld->call_combiner, closures[i].closure,
closures[i].error, closures[i].reason);
if (closures[i].free_reason) {
gpr_free(const_cast<char*>(closures[i].reason));
}
}
} else {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: no closures to run for %s", chand,
calld, caller);
}
GRPC_CALL_COMBINER_STOP(calld->call_combiner, "no closures to run");
}
}
//
// on_complete callback handling
//
@ -1777,36 +1851,35 @@ static void update_retry_state_for_completed_batch(
}
}
// Represents a closure that needs to run as a result of a completed batch.
typedef struct {
grpc_closure* closure;
grpc_error* error;
const char* reason;
} closure_to_execute;
// Adds any necessary closures for deferred recv_initial_metadata and
// recv_message callbacks to closures, updating *num_closures as needed.
static void add_closures_for_deferred_recv_callbacks(
subchannel_batch_data* batch_data, subchannel_call_retry_state* retry_state,
closure_to_execute* closures, size_t* num_closures) {
if (batch_data->batch.recv_trailing_metadata &&
retry_state->recv_initial_metadata_ready_deferred) {
if (batch_data->batch.recv_trailing_metadata) {
// Add closure for deferred recv_initial_metadata_ready.
if (retry_state->recv_initial_metadata_ready_deferred_batch != nullptr) {
closure_to_execute* closure = &closures[(*num_closures)++];
closure->closure =
GRPC_CLOSURE_INIT(&batch_data->recv_initial_metadata_ready,
invoke_recv_initial_metadata_callback, batch_data,
closure->closure = GRPC_CLOSURE_INIT(
&batch_data->recv_initial_metadata_ready,
invoke_recv_initial_metadata_callback,
retry_state->recv_initial_metadata_ready_deferred_batch,
grpc_schedule_on_exec_ctx);
closure->error = retry_state->recv_initial_metadata_error;
closure->reason = "resuming recv_initial_metadata_ready";
retry_state->recv_initial_metadata_ready_deferred_batch = nullptr;
}
if (batch_data->batch.recv_trailing_metadata &&
retry_state->recv_message_ready_deferred) {
// Add closure for deferred recv_message_ready.
if (retry_state->recv_message_ready_deferred_batch != nullptr) {
closure_to_execute* closure = &closures[(*num_closures)++];
closure->closure = GRPC_CLOSURE_INIT(&batch_data->recv_message_ready,
invoke_recv_message_callback,
batch_data, grpc_schedule_on_exec_ctx);
closure->closure = GRPC_CLOSURE_INIT(
&batch_data->recv_message_ready, invoke_recv_message_callback,
retry_state->recv_message_ready_deferred_batch,
grpc_schedule_on_exec_ctx);
closure->error = retry_state->recv_message_error;
closure->reason = "resuming recv_message_ready";
retry_state->recv_message_ready_deferred_batch = nullptr;
}
}
}
@ -1838,7 +1911,7 @@ static void add_closures_for_replay_or_pending_send_ops(
}
if (have_pending_send_message_ops || have_pending_send_trailing_metadata_op) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: starting next batch for pending send op(s)",
chand, calld);
}
@ -1863,7 +1936,7 @@ static void add_closures_for_completed_pending_batches(
pending_batch* pending = &calld->pending_batches[i];
if (pending_batch_is_completed(pending, calld, retry_state)) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: pending batch completed at index %" PRIuPTR,
chand, calld, i);
}
@ -1896,7 +1969,7 @@ static void add_closures_to_fail_unstarted_pending_batches(
pending_batch* pending = &calld->pending_batches[i];
if (pending_batch_is_unstarted(pending, calld, retry_state)) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: failing unstarted pending batch at index "
"%" PRIuPTR,
chand, calld, i);
@ -1940,7 +2013,7 @@ static void on_complete(void* arg, grpc_error* error) {
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
char* batch_str = grpc_transport_stream_op_batch_string(&batch_data->batch);
gpr_log(GPR_DEBUG, "chand=%p calld=%p: got on_complete, error=%s, batch=%s",
gpr_log(GPR_INFO, "chand=%p calld=%p: got on_complete, error=%s, batch=%s",
chand, calld, grpc_error_string(error), batch_str);
gpr_free(batch_str);
}
@ -1951,11 +2024,13 @@ static void on_complete(void* arg, grpc_error* error) {
// If we have previously completed recv_trailing_metadata, then the
// call is finished.
bool call_finished = retry_state->completed_recv_trailing_metadata;
// Record whether we were already committed before receiving this callback.
const bool previously_committed = calld->retry_committed;
// Update bookkeeping in retry_state.
update_retry_state_for_completed_batch(batch_data, retry_state);
if (call_finished) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: call already finished", chand,
gpr_log(GPR_INFO, "chand=%p calld=%p: call already finished", chand,
calld);
}
} else {
@ -1979,35 +2054,39 @@ static void on_complete(void* arg, grpc_error* error) {
if (md_batch->idx.named.grpc_retry_pushback_ms != nullptr) {
server_pushback_md = &md_batch->idx.named.grpc_retry_pushback_ms->md;
}
} else if (retry_state->completed_recv_trailing_metadata) {
call_finished = true;
}
if (call_finished && grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: call finished, status=%s", chand,
// If the call just finished, check if we should retry.
if (call_finished) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: call finished, status=%s", chand,
calld, grpc_status_code_to_string(status));
}
// If the call is finished, check if we should retry.
if (call_finished &&
maybe_retry(elem, batch_data, status, server_pushback_md)) {
if (maybe_retry(elem, batch_data, status, server_pushback_md)) {
// Unref batch_data for deferred recv_initial_metadata_ready or
// recv_message_ready callbacks, if any.
if (batch_data->batch.recv_trailing_metadata &&
retry_state->recv_initial_metadata_ready_deferred) {
retry_state->recv_initial_metadata_ready_deferred_batch !=
nullptr) {
batch_data_unref(batch_data);
GRPC_ERROR_UNREF(retry_state->recv_initial_metadata_error);
}
if (batch_data->batch.recv_trailing_metadata &&
retry_state->recv_message_ready_deferred) {
retry_state->recv_message_ready_deferred_batch != nullptr) {
batch_data_unref(batch_data);
GRPC_ERROR_UNREF(retry_state->recv_message_error);
}
batch_data_unref(batch_data);
return;
}
// Not retrying, so commit the call.
retry_commit(elem, retry_state);
}
}
// If the call is finished or retries are committed, free cached data for
// send ops that we've just completed.
if (call_finished || calld->retry_committed) {
// If we were already committed before receiving this callback, free
// cached data for send ops that we've just completed. (If the call has
// just now finished, the call to retry_commit() above will have freed all
// cached send ops, so we don't need to do it here.)
if (previously_committed) {
free_cached_send_op_data_for_completed_batch(elem, batch_data, retry_state);
}
// Call not being retried.
@ -2042,20 +2121,8 @@ static void on_complete(void* arg, grpc_error* error) {
// Don't need batch_data anymore.
batch_data_unref(batch_data);
// Schedule all of the closures identified above.
// Note that the call combiner will be yielded for each closure that
// we schedule. We're already running in the call combiner, so one of
// the closures can be scheduled directly, but the others will
// have to re-enter the call combiner.
if (num_closures > 0) {
GRPC_CLOSURE_SCHED(closures[0].closure, closures[0].error);
for (size_t i = 1; i < num_closures; ++i) {
GRPC_CALL_COMBINER_START(calld->call_combiner, closures[i].closure,
closures[i].error, closures[i].reason);
}
} else {
GRPC_CALL_COMBINER_STOP(calld->call_combiner,
"no closures to run for on_complete");
}
execute_closures_in_call_combiner(elem, "on_complete", closures,
num_closures);
}
//
@ -2072,6 +2139,31 @@ static void start_batch_in_call_combiner(void* arg, grpc_error* ignored) {
grpc_subchannel_call_process_op(subchannel_call, batch);
}
// Adds a closure to closures that will execute batch in the call combiner.
static void add_closure_for_subchannel_batch(
call_data* calld, grpc_transport_stream_op_batch* batch,
closure_to_execute* closures, size_t* num_closures) {
batch->handler_private.extra_arg = calld->subchannel_call;
GRPC_CLOSURE_INIT(&batch->handler_private.closure,
start_batch_in_call_combiner, batch,
grpc_schedule_on_exec_ctx);
closure_to_execute* closure = &closures[(*num_closures)++];
closure->closure = &batch->handler_private.closure;
closure->error = GRPC_ERROR_NONE;
// If the tracer is enabled, we log a more detailed message, which
// requires dynamic allocation. This will be freed in
// start_retriable_subchannel_batches().
if (grpc_client_channel_trace.enabled()) {
char* batch_str = grpc_transport_stream_op_batch_string(batch);
gpr_asprintf(const_cast<char**>(&closure->reason),
"starting batch in call combiner: %s", batch_str);
gpr_free(batch_str);
closure->free_reason = true;
} else {
closure->reason = "start_subchannel_batch";
}
}
// Adds retriable send_initial_metadata op to batch_data.
static void add_retriable_send_initial_metadata_op(
call_data* calld, subchannel_call_retry_state* retry_state,
@ -2131,7 +2223,7 @@ static void add_retriable_send_message_op(
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: starting calld->send_messages[%" PRIuPTR "]",
chand, calld, retry_state->started_send_message_count);
}
@ -2218,7 +2310,7 @@ static void start_internal_recv_trailing_metadata(grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: call failed but recv_trailing_metadata not "
"started; starting it internally",
chand, calld);
@ -2227,8 +2319,12 @@ static void start_internal_recv_trailing_metadata(grpc_call_element* elem) {
static_cast<subchannel_call_retry_state*>(
grpc_connected_subchannel_call_get_parent_data(
calld->subchannel_call));
subchannel_batch_data* batch_data = batch_data_create(elem, 1);
// Create batch_data with 2 refs, since this batch will be unreffed twice:
// once when the subchannel batch returns, and again when we actually get
// a recv_trailing_metadata op from the surface.
subchannel_batch_data* batch_data = batch_data_create(elem, 2);
add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data);
retry_state->recv_trailing_metadata_internal_batch = batch_data;
// Note: This will release the call combiner.
grpc_subchannel_call_process_op(calld->subchannel_call, &batch_data->batch);
}
@ -2246,7 +2342,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
!retry_state->started_send_initial_metadata &&
!calld->pending_send_initial_metadata) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: replaying previously completed "
"send_initial_metadata op",
chand, calld);
@ -2262,7 +2358,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
retry_state->completed_send_message_count &&
!calld->pending_send_message) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: replaying previously completed "
"send_message op",
chand, calld);
@ -2281,7 +2377,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
!retry_state->started_send_trailing_metadata &&
!calld->pending_send_trailing_metadata) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: replaying previously completed "
"send_trailing_metadata op",
chand, calld);
@ -2299,7 +2395,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
// *num_batches as needed.
static void add_subchannel_batches_for_pending_batches(
grpc_call_element* elem, subchannel_call_retry_state* retry_state,
grpc_transport_stream_op_batch** batches, size_t* num_batches) {
closure_to_execute* closures, size_t* num_closures) {
call_data* calld = static_cast<call_data*>(elem->call_data);
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
pending_batch* pending = &calld->pending_batches[i];
@ -2342,13 +2438,37 @@ static void add_subchannel_batches_for_pending_batches(
}
if (batch->recv_trailing_metadata &&
retry_state->started_recv_trailing_metadata) {
// If we previously completed a recv_trailing_metadata op
// initiated by start_internal_recv_trailing_metadata(), use the
// result of that instead of trying to re-start this op.
if (retry_state->recv_trailing_metadata_internal_batch != nullptr) {
// If the batch completed, then trigger the completion callback
// directly, so that we return the previously returned results to
// the application. Otherwise, just unref the internally
// started subchannel batch, since we'll propagate the
// completion when it completes.
if (retry_state->completed_recv_trailing_metadata) {
subchannel_batch_data* batch_data =
retry_state->recv_trailing_metadata_internal_batch;
closure_to_execute* closure = &closures[(*num_closures)++];
closure->closure = &batch_data->on_complete;
// Batches containing recv_trailing_metadata always succeed.
closure->error = GRPC_ERROR_NONE;
closure->reason =
"re-executing on_complete for recv_trailing_metadata "
"to propagate internally triggered result";
} else {
batch_data_unref(retry_state->recv_trailing_metadata_internal_batch);
}
retry_state->recv_trailing_metadata_internal_batch = nullptr;
}
continue;
}
// If we're not retrying, just send the batch as-is.
if (calld->method_params == nullptr ||
calld->method_params->retry_policy() == nullptr ||
calld->retry_committed) {
batches[(*num_batches)++] = batch;
add_closure_for_subchannel_batch(calld, batch, closures, num_closures);
pending_batch_clear(calld, pending);
continue;
}
@ -2385,7 +2505,8 @@ static void add_subchannel_batches_for_pending_batches(
GPR_ASSERT(batch->collect_stats);
add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data);
}
batches[(*num_batches)++] = &batch_data->batch;
add_closure_for_subchannel_batch(calld, &batch_data->batch, closures,
num_closures);
}
}
@ -2396,69 +2517,36 @@ static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: constructing retriable batches",
gpr_log(GPR_INFO, "chand=%p calld=%p: constructing retriable batches",
chand, calld);
}
subchannel_call_retry_state* retry_state =
static_cast<subchannel_call_retry_state*>(
grpc_connected_subchannel_call_get_parent_data(
calld->subchannel_call));
// Construct list of closures to execute, one for each pending batch.
// We can start up to 6 batches.
grpc_transport_stream_op_batch*
batches[GPR_ARRAY_SIZE(calld->pending_batches)];
size_t num_batches = 0;
closure_to_execute closures[GPR_ARRAY_SIZE(calld->pending_batches)];
size_t num_closures = 0;
// Replay previously-returned send_* ops if needed.
subchannel_batch_data* replay_batch_data =
maybe_create_subchannel_batch_for_replay(elem, retry_state);
if (replay_batch_data != nullptr) {
batches[num_batches++] = &replay_batch_data->batch;
add_closure_for_subchannel_batch(calld, &replay_batch_data->batch, closures,
&num_closures);
}
// Now add pending batches.
add_subchannel_batches_for_pending_batches(elem, retry_state, batches,
&num_batches);
add_subchannel_batches_for_pending_batches(elem, retry_state, closures,
&num_closures);
// Start batches on subchannel call.
// Note that the call combiner will be yielded for each batch that we
// send down. We're already running in the call combiner, so one of
// the batches can be started directly, but the others will have to
// re-enter the call combiner.
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: starting %" PRIuPTR
" retriable batches on subchannel_call=%p",
chand, calld, num_batches, calld->subchannel_call);
}
if (num_batches == 0) {
// This should be fairly rare, but it can happen when (e.g.) an
// attempt completes before it has finished replaying all
// previously sent messages.
GRPC_CALL_COMBINER_STOP(calld->call_combiner,
"no retriable subchannel batches to start");
} else {
for (size_t i = 1; i < num_batches; ++i) {
if (grpc_client_channel_trace.enabled()) {
char* batch_str = grpc_transport_stream_op_batch_string(batches[i]);
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: starting batch in call combiner: %s", chand,
calld, batch_str);
gpr_free(batch_str);
}
batches[i]->handler_private.extra_arg = calld->subchannel_call;
GRPC_CLOSURE_INIT(&batches[i]->handler_private.closure,
start_batch_in_call_combiner, batches[i],
grpc_schedule_on_exec_ctx);
GRPC_CALL_COMBINER_START(calld->call_combiner,
&batches[i]->handler_private.closure,
GRPC_ERROR_NONE, "start_subchannel_batch");
}
if (grpc_client_channel_trace.enabled()) {
char* batch_str = grpc_transport_stream_op_batch_string(batches[0]);
gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting batch: %s", chand, calld,
batch_str);
gpr_free(batch_str);
}
// Note: This will release the call combiner.
grpc_subchannel_call_process_op(calld->subchannel_call, batches[0]);
chand, calld, num_closures, calld->subchannel_call);
}
execute_closures_in_call_combiner(elem, "start_retriable_subchannel_batches",
closures, num_closures);
}
//
@ -2483,7 +2571,7 @@ static void create_subchannel_call(grpc_call_element* elem, grpc_error* error) {
grpc_error* new_error = calld->pick.connected_subchannel->CreateCall(
call_args, &calld->subchannel_call);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
gpr_log(GPR_INFO, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
chand, calld, calld->subchannel_call, grpc_error_string(new_error));
}
if (new_error != GRPC_ERROR_NONE) {
@ -2524,7 +2612,7 @@ static void pick_done(void* arg, grpc_error* error) {
: GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Failed to create subchannel", &error, 1);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: failed to create subchannel: error=%s",
chand, calld, grpc_error_string(new_error));
}
@ -2568,7 +2656,7 @@ static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
// the one we started it on. However, this will just be a no-op.
if (error != GRPC_ERROR_NONE && chand->lb_policy != nullptr) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
gpr_log(GPR_INFO, "chand=%p calld=%p: cancelling pick from LB policy %p",
chand, calld, chand->lb_policy.get());
}
chand->lb_policy->CancelPickLocked(&calld->pick, GRPC_ERROR_REF(error));
@ -2583,8 +2671,8 @@ static void pick_callback_done_locked(void* arg, grpc_error* error) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
chand, calld);
gpr_log(GPR_INFO, "chand=%p calld=%p: pick completed asynchronously", chand,
calld);
}
async_pick_done_locked(elem, GRPC_ERROR_REF(error));
GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
@ -2596,7 +2684,7 @@ static void apply_service_config_to_call_locked(grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
gpr_log(GPR_INFO, "chand=%p calld=%p: applying service config to call",
chand, calld);
}
if (chand->retry_throttle_data != nullptr) {
@ -2634,8 +2722,8 @@ static bool pick_callback_start_locked(grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
chand, calld, chand->lb_policy.get());
gpr_log(GPR_INFO, "chand=%p calld=%p: starting pick on lb_policy=%p", chand,
calld, chand->lb_policy.get());
}
// Only get service config data on the first attempt.
if (calld->num_attempts_completed == 0) {
@ -2682,7 +2770,7 @@ static bool pick_callback_start_locked(grpc_call_element* elem) {
if (pick_done) {
// Pick completed synchronously.
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed synchronously",
gpr_log(GPR_INFO, "chand=%p calld=%p: pick completed synchronously",
chand, calld);
}
GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
@ -2726,7 +2814,7 @@ static void pick_after_resolver_result_cancel_locked(void* arg,
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: cancelling pick waiting for resolver result",
chand, calld);
}
@ -2746,7 +2834,7 @@ static void pick_after_resolver_result_done_locked(void* arg,
if (args->finished) {
/* cancelled, do nothing */
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "call cancelled before resolver result");
gpr_log(GPR_INFO, "call cancelled before resolver result");
}
gpr_free(args);
return;
@ -2757,14 +2845,14 @@ static void pick_after_resolver_result_done_locked(void* arg,
call_data* calld = static_cast<call_data*>(elem->call_data);
if (error != GRPC_ERROR_NONE) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
gpr_log(GPR_INFO, "chand=%p calld=%p: resolver failed to return data",
chand, calld);
}
async_pick_done_locked(elem, GRPC_ERROR_REF(error));
} else if (chand->resolver == nullptr) {
// Shutting down.
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver disconnected", chand,
gpr_log(GPR_INFO, "chand=%p calld=%p: resolver disconnected", chand,
calld);
}
async_pick_done_locked(
@ -2780,7 +2868,7 @@ static void pick_after_resolver_result_done_locked(void* arg,
.send_initial_metadata_flags;
if (send_initial_metadata_flags & GRPC_INITIAL_METADATA_WAIT_FOR_READY) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: resolver returned but no LB policy; "
"wait_for_ready=true; trying again",
chand, calld);
@ -2788,7 +2876,7 @@ static void pick_after_resolver_result_done_locked(void* arg,
pick_after_resolver_result_start_locked(elem);
} else {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: resolver returned but no LB policy; "
"wait_for_ready=false; failing",
chand, calld);
@ -2801,7 +2889,7 @@ static void pick_after_resolver_result_done_locked(void* arg,
}
} else {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
gpr_log(GPR_INFO, "chand=%p calld=%p: resolver returned, doing pick",
chand, calld);
}
if (pick_callback_start_locked(elem)) {
@ -2819,7 +2907,7 @@ static void pick_after_resolver_result_start_locked(grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: deferring pick pending resolver result", chand,
calld);
}
@ -2886,7 +2974,7 @@ static void cc_start_transport_stream_op_batch(
// If we've previously been cancelled, immediately fail any new batches.
if (calld->cancel_error != GRPC_ERROR_NONE) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
gpr_log(GPR_INFO, "chand=%p calld=%p: failing batch with error: %s",
chand, calld, grpc_error_string(calld->cancel_error));
}
// Note: This will release the call combiner.
@ -2905,7 +2993,7 @@ static void cc_start_transport_stream_op_batch(
calld->cancel_error =
GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
gpr_log(GPR_INFO, "chand=%p calld=%p: recording cancel_error=%s", chand,
calld, grpc_error_string(calld->cancel_error));
}
// If we do not have a subchannel call (i.e., a pick has not yet
@ -2931,7 +3019,7 @@ static void cc_start_transport_stream_op_batch(
// streaming calls).
if (calld->subchannel_call != nullptr) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: starting batch on subchannel_call=%p", chand,
calld, calld->subchannel_call);
}
@ -2943,7 +3031,7 @@ static void cc_start_transport_stream_op_batch(
// combiner to start a pick.
if (batch->send_initial_metadata) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering client_channel combiner",
gpr_log(GPR_INFO, "chand=%p calld=%p: entering client_channel combiner",
chand, calld);
}
GRPC_CLOSURE_SCHED(
@ -2953,7 +3041,7 @@ static void cc_start_transport_stream_op_batch(
} else {
// For all other batches, release the call combiner.
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"chand=%p calld=%p: saved batch, yeilding call combiner", chand,
calld);
}
@ -3164,6 +3252,8 @@ static void watch_connectivity_state_locked(void* arg,
external_connectivity_watcher* found = nullptr;
if (w->state != nullptr) {
external_connectivity_watcher_list_append(w->chand, w);
// An assumption is being made that the closure is scheduled on the exec ctx
// scheduler and that GRPC_CLOSURE_RUN would run the closure immediately.
GRPC_CLOSURE_RUN(w->watcher_timer_init, GRPC_ERROR_NONE);
GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete_locked, w,
grpc_combiner_scheduler(w->chand->combiner));
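Editor's note: the assumption in the comment above holds only when the watcher_timer_init closure was created with the exec_ctx scheduler, since GRPC_CLOSURE_RUN() executes such a closure inline rather than deferring it. A minimal sketch of the caller side, with hypothetical names (watcher_timer_init_cb and the closure field are illustrative, not taken from this change):
  // Illustrative only: creating the closure on the exec_ctx scheduler so that
  // GRPC_CLOSURE_RUN() above runs it immediately on the current thread.
  GRPC_CLOSURE_INIT(&w->watcher_timer_init_closure, watcher_timer_init_cb, w,
                    grpc_schedule_on_exec_ctx);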

@ -326,7 +326,7 @@ static void http_connect_handshaker_do_handshake(
static const grpc_handshaker_vtable http_connect_handshaker_vtable = {
http_connect_handshaker_destroy, http_connect_handshaker_shutdown,
http_connect_handshaker_do_handshake};
http_connect_handshaker_do_handshake, "http_connect"};
static grpc_handshaker* grpc_http_connect_handshaker_create() {
http_connect_handshaker* handshaker =
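The vtable initializer above gains a trailing name string ("http_connect"), which suggests grpc_handshaker_vtable now carries a human-readable name as its last member. A rough sketch of the shape this implies; the exact member signatures and ordering are assumptions, not taken from this diff:
typedef struct {
  void (*destroy)(grpc_handshaker* handshaker);
  void (*shutdown)(grpc_handshaker* handshaker, grpc_error* why);
  void (*do_handshake)(grpc_handshaker* handshaker,
                       grpc_tcp_server_acceptor* acceptor,
                       grpc_closure* on_handshake_done,
                       grpc_handshaker_args* args);
  const char* name;  // Assumed: identifies the handshaker in trace output.
} grpc_handshaker_vtable;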

@ -44,13 +44,13 @@ void LoadBalancingPolicy::TryReresolutionLocked(
GRPC_CLOSURE_SCHED(request_reresolution_, error);
request_reresolution_ = nullptr;
if (grpc_lb_trace->enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"%s %p: scheduling re-resolution closure with error=%s.",
grpc_lb_trace->name(), this, grpc_error_string(error));
}
} else {
if (grpc_lb_trace->enabled()) {
gpr_log(GPR_DEBUG, "%s %p: no available re-resolution closure.",
gpr_log(GPR_INFO, "%s %p: no available re-resolution closure.",
grpc_lb_trace->name(), this);
}
}

@ -162,6 +162,10 @@ class LoadBalancingPolicy
GRPC_ABSTRACT_BASE_CLASS
protected:
// So Delete() can access our protected dtor.
template <typename T>
friend void Delete(T*);
explicit LoadBalancingPolicy(const Args& args);
virtual ~LoadBalancingPolicy();
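A note on the friend declaration added above: grpc_core::Delete<T>() destroys the object directly, so any class that hides its destructor must befriend it. A minimal sketch of the pattern, assuming the New()/Delete() helpers from src/core/lib/gprpp/memory.h (the Foo class is purely illustrative):
#include "src/core/lib/gprpp/memory.h"
class Foo {
 public:
  static Foo* Create() { return grpc_core::New<Foo>(); }
 private:
  // So Delete() can access our private dtor; all other code must go through
  // the class's own shutdown/unref path instead of deleting it directly.
  template <typename T>
  friend void grpc_core::Delete(T*);
  ~Foo() {}
};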

@ -76,6 +76,7 @@
#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/ext/filters/client_channel/client_channel_factory.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h"
@ -189,6 +190,10 @@ class GrpcLb : public LoadBalancingPolicy {
bool seen_initial_response() const { return seen_initial_response_; }
private:
// So Delete() can access our private dtor.
template <typename T>
friend void grpc_core::Delete(T*);
~BalancerCallState();
GrpcLb* grpclb_policy() const {
@ -999,6 +1004,9 @@ grpc_channel_args* BuildBalancerChannelArgs(
// address updates into the LB channel.
grpc_core::FakeResolverResponseGenerator::MakeChannelArg(
response_generator),
// A channel arg indicating the target is a grpclb load balancer.
grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_ADDRESS_IS_GRPCLB_LOAD_BALANCER), 1),
};
// Construct channel args.
grpc_channel_args* new_args = grpc_channel_args_copy_and_add_and_remove(
@ -1243,7 +1251,7 @@ bool GrpcLb::PickLocked(PickState* pick) {
}
} else { // rr_policy_ == NULL
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"[grpclb %p] No RR policy. Adding to grpclb's pending picks",
this);
}
@ -1409,13 +1417,13 @@ void GrpcLb::OnFallbackTimerLocked(void* arg, grpc_error* error) {
void GrpcLb::StartBalancerCallRetryTimerLocked() {
grpc_millis next_try = lb_call_backoff_.NextAttemptTime();
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_DEBUG, "[grpclb %p] Connection to LB server lost...", this);
gpr_log(GPR_INFO, "[grpclb %p] Connection to LB server lost...", this);
grpc_millis timeout = next_try - ExecCtx::Get()->Now();
if (timeout > 0) {
gpr_log(GPR_DEBUG, "[grpclb %p] ... retry_timer_active in %" PRId64 "ms.",
gpr_log(GPR_INFO, "[grpclb %p] ... retry_timer_active in %" PRId64 "ms.",
this, timeout);
} else {
gpr_log(GPR_DEBUG, "[grpclb %p] ... retry_timer_active immediately.",
gpr_log(GPR_INFO, "[grpclb %p] ... retry_timer_active immediately.",
this);
}
}
@ -1694,9 +1702,11 @@ void GrpcLb::CreateRoundRobinPolicyLocked(const Args& args) {
grpc_channel_args* GrpcLb::CreateRoundRobinPolicyArgsLocked() {
grpc_lb_addresses* addresses;
bool is_backend_from_grpclb_load_balancer = false;
if (serverlist_ != nullptr) {
GPR_ASSERT(serverlist_->num_servers > 0);
addresses = ProcessServerlist(serverlist_);
is_backend_from_grpclb_load_balancer = true;
} else {
// If CreateOrUpdateRoundRobinPolicyLocked() is invoked when we haven't
// received any serverlist from the balancer, we use the fallback backends
@ -1710,9 +1720,18 @@ grpc_channel_args* GrpcLb::CreateRoundRobinPolicyArgsLocked() {
// Replace the LB addresses in the channel args that we pass down to
// the subchannel.
static const char* keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
const grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
const grpc_arg args_to_add[] = {
grpc_lb_addresses_create_channel_arg(addresses),
// A channel arg indicating if the target is a backend inferred from a
// grpclb load balancer.
grpc_channel_arg_integer_create(
const_cast<char*>(
GRPC_ARG_ADDRESS_IS_BACKEND_FROM_GRPCLB_LOAD_BALANCER),
is_backend_from_grpclb_load_balancer),
};
grpc_channel_args* args = grpc_channel_args_copy_and_add_and_remove(
args_, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg, 1);
args_, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), args_to_add,
GPR_ARRAY_SIZE(args_to_add));
grpc_lb_addresses_destroy(addresses);
return args;
}
@ -1723,7 +1742,7 @@ void GrpcLb::CreateOrUpdateRoundRobinPolicyLocked() {
GPR_ASSERT(args != nullptr);
if (rr_policy_ != nullptr) {
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_DEBUG, "[grpclb %p] Updating RR policy %p", this,
gpr_log(GPR_INFO, "[grpclb %p] Updating RR policy %p", this,
rr_policy_.get());
}
rr_policy_->UpdateLocked(*args);
@ -1734,7 +1753,7 @@ void GrpcLb::CreateOrUpdateRoundRobinPolicyLocked() {
lb_policy_args.args = args;
CreateRoundRobinPolicyLocked(lb_policy_args);
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_DEBUG, "[grpclb %p] Created new RR policy %p", this,
gpr_log(GPR_INFO, "[grpclb %p] Created new RR policy %p", this,
rr_policy_.get());
}
}
@ -1750,7 +1769,7 @@ void GrpcLb::OnRoundRobinRequestReresolutionLocked(void* arg,
}
if (grpc_lb_glb_trace.enabled()) {
gpr_log(
GPR_DEBUG,
GPR_INFO,
"[grpclb %p] Re-resolution requested from the internal RR policy (%p).",
grpclb_policy, grpclb_policy->rr_policy_.get());
}

@ -0,0 +1,36 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_H
#include <grpc/support/port_platform.h>
/** Channel arg indicating if a target corresponding to the address is a grpclb
 * load balancer. The type of this arg is an integer and the value is treated
 * as a bool. */
#define GRPC_ARG_ADDRESS_IS_GRPCLB_LOAD_BALANCER \
"grpc.address_is_grpclb_load_balancer"
/** Channel arg indicating if a target corresponding to the address is a backend
* received from a balancer. The type of this arg is an integer and the value is
* treated as a bool. */
#define GRPC_ARG_ADDRESS_IS_BACKEND_FROM_GRPCLB_LOAD_BALANCER \
"grpc.address_is_backend_from_grpclb_load_balancer"
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_H \
*/
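For context, a minimal sketch of how a consumer might test these boolean-valued args, using the existing grpc_channel_args_find() helper; the function name address_is_grpclb_balancer is illustrative and not part of this change:
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h"
#include "src/core/lib/channel/channel_args.h"
// Returns true iff the channel args mark the address as a grpclb balancer.
static bool address_is_grpclb_balancer(const grpc_channel_args* args) {
  const grpc_arg* arg =
      grpc_channel_args_find(args, GRPC_ARG_ADDRESS_IS_GRPCLB_LOAD_BALANCER);
  if (arg == nullptr || arg->type != GRPC_ARG_INTEGER) return false;
  return arg->value.integer != 0;
}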

@ -62,31 +62,65 @@ class PickFirst : public LoadBalancingPolicy {
private:
~PickFirst();
class PickFirstSubchannelList;
class PickFirstSubchannelData
: public SubchannelData<PickFirstSubchannelList,
PickFirstSubchannelData> {
public:
PickFirstSubchannelData(PickFirstSubchannelList* subchannel_list,
const grpc_lb_user_data_vtable* user_data_vtable,
const grpc_lb_address& address,
grpc_subchannel* subchannel,
grpc_combiner* combiner)
: SubchannelData(subchannel_list, user_data_vtable, address, subchannel,
combiner) {}
void ProcessConnectivityChangeLocked(
grpc_connectivity_state connectivity_state, grpc_error* error) override;
};
class PickFirstSubchannelList
: public SubchannelList<PickFirstSubchannelList,
PickFirstSubchannelData> {
public:
PickFirstSubchannelList(PickFirst* policy, TraceFlag* tracer,
const grpc_lb_addresses* addresses,
grpc_combiner* combiner,
grpc_client_channel_factory* client_channel_factory,
const grpc_channel_args& args)
: SubchannelList(policy, tracer, addresses, combiner,
client_channel_factory, args) {
// Need to maintain a ref to the LB policy as long as we maintain
// any references to subchannels, since the subchannels'
// pollset_sets will include the LB policy's pollset_set.
policy->Ref(DEBUG_LOCATION, "subchannel_list").release();
}
~PickFirstSubchannelList() {
PickFirst* p = static_cast<PickFirst*>(policy());
p->Unref(DEBUG_LOCATION, "subchannel_list");
}
};
void ShutdownLocked() override;
void StartPickingLocked();
void DestroyUnselectedSubchannelsLocked();
static void OnConnectivityChangedLocked(void* arg, grpc_error* error);
void SubchannelListRefForConnectivityWatch(
grpc_lb_subchannel_list* subchannel_list, const char* reason);
void SubchannelListUnrefForConnectivityWatch(
grpc_lb_subchannel_list* subchannel_list, const char* reason);
/** all our subchannels */
grpc_lb_subchannel_list* subchannel_list_ = nullptr;
/** latest pending subchannel list */
grpc_lb_subchannel_list* latest_pending_subchannel_list_ = nullptr;
/** selected subchannel in \a subchannel_list */
grpc_lb_subchannel_data* selected_ = nullptr;
/** have we started picking? */
// All our subchannels.
OrphanablePtr<PickFirstSubchannelList> subchannel_list_;
// Latest pending subchannel list.
OrphanablePtr<PickFirstSubchannelList> latest_pending_subchannel_list_;
// Selected subchannel in \a subchannel_list_.
PickFirstSubchannelData* selected_ = nullptr;
// Have we started picking?
bool started_picking_ = false;
/** are we shut down? */
// Are we shut down?
bool shutdown_ = false;
/** list of picks that are waiting on connectivity */
// List of picks that are waiting on connectivity.
PickState* pending_picks_ = nullptr;
/** our connectivity state tracker */
// Our connectivity state tracker.
grpc_connectivity_state_tracker state_tracker_;
};
@ -95,7 +129,7 @@ PickFirst::PickFirst(const Args& args) : LoadBalancingPolicy(args) {
grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE,
"pick_first");
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG, "Pick First %p created.", this);
gpr_log(GPR_INFO, "Pick First %p created.", this);
}
UpdateLocked(*args.args);
grpc_subchannel_index_ref();
@ -103,7 +137,7 @@ PickFirst::PickFirst(const Args& args) : LoadBalancingPolicy(args) {
PickFirst::~PickFirst() {
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG, "Destroying Pick First %p", this);
gpr_log(GPR_INFO, "Destroying Pick First %p", this);
}
GPR_ASSERT(subchannel_list_ == nullptr);
GPR_ASSERT(latest_pending_subchannel_list_ == nullptr);
@ -126,7 +160,7 @@ void PickFirst::HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) {
void PickFirst::ShutdownLocked() {
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG, "Pick First %p Shutting down", this);
gpr_log(GPR_INFO, "Pick First %p Shutting down", this);
}
shutdown_ = true;
PickState* pick;
@ -137,15 +171,8 @@ void PickFirst::ShutdownLocked() {
}
grpc_connectivity_state_set(&state_tracker_, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "shutdown");
if (subchannel_list_ != nullptr) {
grpc_lb_subchannel_list_shutdown_and_unref(subchannel_list_, "pf_shutdown");
subchannel_list_ = nullptr;
}
if (latest_pending_subchannel_list_ != nullptr) {
grpc_lb_subchannel_list_shutdown_and_unref(latest_pending_subchannel_list_,
"pf_shutdown");
latest_pending_subchannel_list_ = nullptr;
}
subchannel_list_.reset();
latest_pending_subchannel_list_.reset();
TryReresolutionLocked(&grpc_lb_pick_first_trace, GRPC_ERROR_CANCELLED);
GRPC_ERROR_UNREF(error);
}
@ -192,14 +219,10 @@ void PickFirst::CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
void PickFirst::StartPickingLocked() {
started_picking_ = true;
if (subchannel_list_ != nullptr && subchannel_list_->num_subchannels > 0) {
subchannel_list_->checking_subchannel = 0;
for (size_t i = 0; i < subchannel_list_->num_subchannels; ++i) {
if (subchannel_list_->subchannels[i].subchannel != nullptr) {
SubchannelListRefForConnectivityWatch(
subchannel_list_, "connectivity_watch+start_picking");
grpc_lb_subchannel_data_start_connectivity_watch(
&subchannel_list_->subchannels[i]);
if (subchannel_list_ != nullptr) {
for (size_t i = 0; i < subchannel_list_->num_subchannels(); ++i) {
if (subchannel_list_->subchannel(i)->subchannel() != nullptr) {
subchannel_list_->subchannel(i)->StartConnectivityWatchLocked();
break;
}
}
@ -215,7 +238,7 @@ void PickFirst::ExitIdleLocked() {
bool PickFirst::PickLocked(PickState* pick) {
// If we have a selected subchannel already, return synchronously.
if (selected_ != nullptr) {
pick->connected_subchannel = selected_->connected_subchannel;
pick->connected_subchannel = selected_->connected_subchannel()->Ref();
return true;
}
// No subchannel selected yet, so handle asynchronously.
@ -228,11 +251,10 @@ bool PickFirst::PickLocked(PickState* pick) {
}
void PickFirst::DestroyUnselectedSubchannelsLocked() {
for (size_t i = 0; i < subchannel_list_->num_subchannels; ++i) {
grpc_lb_subchannel_data* sd = &subchannel_list_->subchannels[i];
for (size_t i = 0; i < subchannel_list_->num_subchannels(); ++i) {
PickFirstSubchannelData* sd = subchannel_list_->subchannel(i);
if (selected_ != sd) {
grpc_lb_subchannel_data_unref_subchannel(sd,
"selected_different_subchannel");
sd->UnrefSubchannelLocked("selected_different_subchannel");
}
}
}
@ -249,7 +271,7 @@ void PickFirst::NotifyOnStateChangeLocked(grpc_connectivity_state* current,
void PickFirst::PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) {
if (selected_ != nullptr) {
selected_->connected_subchannel->Ping(on_initiate, on_ack);
selected_->connected_subchannel()->Ping(on_initiate, on_ack);
} else {
GRPC_CLOSURE_SCHED(on_initiate,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected"));
@ -258,24 +280,6 @@ void PickFirst::PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) {
}
}
void PickFirst::SubchannelListRefForConnectivityWatch(
grpc_lb_subchannel_list* subchannel_list, const char* reason) {
// TODO(roth): We currently track this ref manually. Once the new
// ClosureRef API is ready and the subchannel_list code has been
// converted to a C++ API, find a way to hold the RefCountedPtr<>
// somewhere (maybe in the subchannel_data object) instead of doing
// this manually.
auto self = Ref(DEBUG_LOCATION, reason);
self.release();
grpc_lb_subchannel_list_ref(subchannel_list, reason);
}
void PickFirst::SubchannelListUnrefForConnectivityWatch(
grpc_lb_subchannel_list* subchannel_list, const char* reason) {
Unref(DEBUG_LOCATION, reason);
grpc_lb_subchannel_list_unref(subchannel_list, reason);
}
void PickFirst::UpdateLocked(const grpc_channel_args& args) {
const grpc_arg* arg = grpc_channel_args_find(&args, GRPC_ARG_LB_ADDRESSES);
if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
@ -295,76 +299,68 @@ void PickFirst::UpdateLocked(const grpc_channel_args& args) {
return;
}
const grpc_lb_addresses* addresses =
(const grpc_lb_addresses*)arg->value.pointer.p;
static_cast<const grpc_lb_addresses*>(arg->value.pointer.p);
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO,
"Pick First %p received update with %" PRIuPTR " addresses", this,
addresses->num_addresses);
}
grpc_lb_subchannel_list* subchannel_list = grpc_lb_subchannel_list_create(
auto subchannel_list = MakeOrphanable<PickFirstSubchannelList>(
this, &grpc_lb_pick_first_trace, addresses, combiner(),
client_channel_factory(), args, &PickFirst::OnConnectivityChangedLocked);
if (subchannel_list->num_subchannels == 0) {
client_channel_factory(), args);
if (subchannel_list->num_subchannels() == 0) {
// Empty update or no valid subchannels. Unsubscribe from all current
// subchannels and put the channel in TRANSIENT_FAILURE.
grpc_connectivity_state_set(
&state_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"),
"pf_update_empty");
if (subchannel_list_ != nullptr) {
grpc_lb_subchannel_list_shutdown_and_unref(subchannel_list_,
"sl_shutdown_empty_update");
}
subchannel_list_ = subchannel_list; // Empty list.
subchannel_list_ = std::move(subchannel_list); // Empty list.
selected_ = nullptr;
return;
}
if (selected_ == nullptr) {
// We don't yet have a selected subchannel, so replace the current
// subchannel list immediately.
if (subchannel_list_ != nullptr) {
grpc_lb_subchannel_list_shutdown_and_unref(subchannel_list_,
"pf_update_before_selected");
subchannel_list_ = std::move(subchannel_list);
// If we've started picking, start trying to connect to the first
// subchannel in the new list.
if (started_picking_) {
subchannel_list_->subchannel(0)->StartConnectivityWatchLocked();
}
subchannel_list_ = subchannel_list;
} else {
// We do have a selected subchannel.
// Check if it's present in the new list. If so, we're done.
for (size_t i = 0; i < subchannel_list->num_subchannels; ++i) {
grpc_lb_subchannel_data* sd = &subchannel_list->subchannels[i];
if (sd->subchannel == selected_->subchannel) {
for (size_t i = 0; i < subchannel_list->num_subchannels(); ++i) {
PickFirstSubchannelData* sd = subchannel_list->subchannel(i);
if (sd->subchannel() == selected_->subchannel()) {
// The currently selected subchannel is in the update: we are done.
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO,
"Pick First %p found already selected subchannel %p "
"at update index %" PRIuPTR " of %" PRIuPTR "; update done",
this, selected_->subchannel, i,
subchannel_list->num_subchannels);
}
if (selected_->connected_subchannel != nullptr) {
sd->connected_subchannel = selected_->connected_subchannel;
}
this, selected_->subchannel(), i,
subchannel_list->num_subchannels());
}
// Make sure it's in state READY. It might not be if we grabbed
// the combiner while a connectivity state notification
// informing us otherwise is pending.
// Note that CheckConnectivityStateLocked() also takes a ref to
// the connected subchannel.
grpc_error* error = GRPC_ERROR_NONE;
if (sd->CheckConnectivityStateLocked(&error) == GRPC_CHANNEL_READY) {
selected_ = sd;
if (subchannel_list_ != nullptr) {
grpc_lb_subchannel_list_shutdown_and_unref(
subchannel_list_, "pf_update_includes_selected");
}
subchannel_list_ = subchannel_list;
subchannel_list_ = std::move(subchannel_list);
DestroyUnselectedSubchannelsLocked();
SubchannelListRefForConnectivityWatch(
subchannel_list, "connectivity_watch+replace_selected");
grpc_lb_subchannel_data_start_connectivity_watch(sd);
sd->StartConnectivityWatchLocked();
// If there was a previously pending update (which may or may
// not have contained the currently selected subchannel), drop
// it, so that it doesn't override what we've done here.
if (latest_pending_subchannel_list_ != nullptr) {
grpc_lb_subchannel_list_shutdown_and_unref(
latest_pending_subchannel_list_,
"pf_update_includes_selected+outdated");
latest_pending_subchannel_list_ = nullptr;
}
latest_pending_subchannel_list_.reset();
return;
}
GRPC_ERROR_UNREF(error);
}
}
// Not keeping the previous selected subchannel, so set the latest
// pending subchannel list to the new subchannel list. We will wait
@ -372,88 +368,66 @@ void PickFirst::UpdateLocked(const grpc_channel_args& args) {
// subchannel list.
if (latest_pending_subchannel_list_ != nullptr) {
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"Pick First %p Shutting down latest pending subchannel list "
"%p, about to be replaced by newer latest %p",
this, latest_pending_subchannel_list_, subchannel_list);
}
grpc_lb_subchannel_list_shutdown_and_unref(
latest_pending_subchannel_list_, "sl_outdated_dont_smash");
this, latest_pending_subchannel_list_.get(),
subchannel_list.get());
}
latest_pending_subchannel_list_ = subchannel_list;
}
latest_pending_subchannel_list_ = std::move(subchannel_list);
// If we've started picking, start trying to connect to the first
// subchannel in the new list.
if (started_picking_) {
SubchannelListRefForConnectivityWatch(subchannel_list,
"connectivity_watch+update");
grpc_lb_subchannel_data_start_connectivity_watch(
&subchannel_list->subchannels[0]);
latest_pending_subchannel_list_->subchannel(0)
->StartConnectivityWatchLocked();
}
}
}
void PickFirst::OnConnectivityChangedLocked(void* arg, grpc_error* error) {
grpc_lb_subchannel_data* sd = static_cast<grpc_lb_subchannel_data*>(arg);
PickFirst* p = static_cast<PickFirst*>(sd->subchannel_list->policy);
void PickFirst::PickFirstSubchannelData::ProcessConnectivityChangeLocked(
grpc_connectivity_state connectivity_state, grpc_error* error) {
PickFirst* p = static_cast<PickFirst*>(subchannel_list()->policy());
// The notification must be for a subchannel in either the current or
// latest pending subchannel lists.
GPR_ASSERT(subchannel_list() == p->subchannel_list_.get() ||
subchannel_list() == p->latest_pending_subchannel_list_.get());
// Handle updates for the currently selected subchannel.
if (p->selected_ == this) {
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG,
"Pick First %p connectivity changed for subchannel %p (%" PRIuPTR
" of %" PRIuPTR
"), subchannel_list %p: state=%s p->shutdown_=%d "
"sd->subchannel_list->shutting_down=%d error=%s",
p, sd->subchannel, sd->subchannel_list->checking_subchannel,
sd->subchannel_list->num_subchannels, sd->subchannel_list,
grpc_connectivity_state_name(sd->pending_connectivity_state_unsafe),
p->shutdown_, sd->subchannel_list->shutting_down,
grpc_error_string(error));
}
// If the policy is shutting down, unref and return.
if (p->shutdown_) {
grpc_lb_subchannel_data_stop_connectivity_watch(sd);
grpc_lb_subchannel_data_unref_subchannel(sd, "pf_shutdown");
p->SubchannelListUnrefForConnectivityWatch(sd->subchannel_list,
"pf_shutdown");
return;
}
// If the subchannel list is shutting down, stop watching.
if (sd->subchannel_list->shutting_down || error == GRPC_ERROR_CANCELLED) {
grpc_lb_subchannel_data_stop_connectivity_watch(sd);
grpc_lb_subchannel_data_unref_subchannel(sd, "pf_sl_shutdown");
p->SubchannelListUnrefForConnectivityWatch(sd->subchannel_list,
"pf_sl_shutdown");
return;
gpr_log(GPR_INFO,
"Pick First %p connectivity changed for selected subchannel", p);
}
// If we're still here, the notification must be for a subchannel in
// either the current or latest pending subchannel lists.
GPR_ASSERT(sd->subchannel_list == p->subchannel_list_ ||
sd->subchannel_list == p->latest_pending_subchannel_list_);
// Update state.
sd->curr_connectivity_state = sd->pending_connectivity_state_unsafe;
// Handle updates for the currently selected subchannel.
if (p->selected_ == sd) {
// If the new state is anything other than READY and there is a
// pending update, switch to the pending update.
if (sd->curr_connectivity_state != GRPC_CHANNEL_READY &&
if (connectivity_state != GRPC_CHANNEL_READY &&
p->latest_pending_subchannel_list_ != nullptr) {
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO,
"Pick First %p promoting pending subchannel list %p to "
"replace %p",
p, p->latest_pending_subchannel_list_.get(),
p->subchannel_list_.get());
}
p->selected_ = nullptr;
grpc_lb_subchannel_data_stop_connectivity_watch(sd);
p->SubchannelListUnrefForConnectivityWatch(
sd->subchannel_list, "selected_not_ready+switch_to_update");
grpc_lb_subchannel_list_shutdown_and_unref(
p->subchannel_list_, "selected_not_ready+switch_to_update");
p->subchannel_list_ = p->latest_pending_subchannel_list_;
p->latest_pending_subchannel_list_ = nullptr;
StopConnectivityWatchLocked();
p->subchannel_list_ = std::move(p->latest_pending_subchannel_list_);
grpc_connectivity_state_set(
&p->state_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "selected_not_ready+switch_to_update");
error != GRPC_ERROR_NONE
? GRPC_ERROR_REF(error)
: GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"selected subchannel not ready; switching to pending "
"update"),
"selected_not_ready+switch_to_update");
} else {
// TODO(juanlishen): we re-resolve when the selected subchannel goes to
// TRANSIENT_FAILURE because we used to shut down in this case before
// re-resolution is introduced. But we need to investigate whether we
// really want to take any action instead of waiting for the selected
// subchannel reconnecting.
GPR_ASSERT(sd->curr_connectivity_state != GRPC_CHANNEL_SHUTDOWN);
if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
GPR_ASSERT(connectivity_state != GRPC_CHANNEL_SHUTDOWN);
if (connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
// If the selected channel goes bad, request a re-resolution.
grpc_connectivity_state_set(&p->state_tracker_, GRPC_CHANNEL_IDLE,
GRPC_ERROR_NONE,
@ -462,19 +436,16 @@ void PickFirst::OnConnectivityChangedLocked(void* arg, grpc_error* error) {
p->TryReresolutionLocked(&grpc_lb_pick_first_trace, GRPC_ERROR_NONE);
// In transient failure. Rely on re-resolution to recover.
p->selected_ = nullptr;
grpc_lb_subchannel_data_stop_connectivity_watch(sd);
p->SubchannelListUnrefForConnectivityWatch(sd->subchannel_list,
"pf_selected_shutdown");
grpc_lb_subchannel_data_unref_subchannel(
sd, "pf_selected_shutdown"); // Unrefs connected subchannel
UnrefSubchannelLocked("pf_selected_shutdown");
StopConnectivityWatchLocked();
} else {
grpc_connectivity_state_set(&p->state_tracker_,
sd->curr_connectivity_state,
grpc_connectivity_state_set(&p->state_tracker_, connectivity_state,
GRPC_ERROR_REF(error), "selected_changed");
// Renew notification.
grpc_lb_subchannel_data_start_connectivity_watch(sd);
RenewConnectivityWatchLocked();
}
}
GRPC_ERROR_UNREF(error);
return;
}
// If we get here, there are two possible cases:
@ -486,26 +457,27 @@ void PickFirst::OnConnectivityChangedLocked(void* arg, grpc_error* error) {
// for a subchannel in p->latest_pending_subchannel_list_. The
// goal here is to find a subchannel from the update that we can
// select in place of the current one.
switch (sd->curr_connectivity_state) {
switch (connectivity_state) {
case GRPC_CHANNEL_READY: {
// Case 2. Promote p->latest_pending_subchannel_list_ to
// p->subchannel_list_.
sd->connected_subchannel =
grpc_subchannel_get_connected_subchannel(sd->subchannel);
if (sd->subchannel_list == p->latest_pending_subchannel_list_) {
GPR_ASSERT(p->subchannel_list_ != nullptr);
grpc_lb_subchannel_list_shutdown_and_unref(p->subchannel_list_,
"finish_update");
p->subchannel_list_ = p->latest_pending_subchannel_list_;
p->latest_pending_subchannel_list_ = nullptr;
if (subchannel_list() == p->latest_pending_subchannel_list_.get()) {
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO,
"Pick First %p promoting pending subchannel list %p to "
"replace %p",
p, p->latest_pending_subchannel_list_.get(),
p->subchannel_list_.get());
}
p->subchannel_list_ = std::move(p->latest_pending_subchannel_list_);
}
// Cases 1 and 2.
grpc_connectivity_state_set(&p->state_tracker_, GRPC_CHANNEL_READY,
GRPC_ERROR_NONE, "connecting_ready");
p->selected_ = sd;
p->selected_ = this;
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", p,
sd->subchannel);
subchannel());
}
// Drop all other subchannels, since we are now connected.
p->DestroyUnselectedSubchannelsLocked();
@ -513,7 +485,8 @@ void PickFirst::OnConnectivityChangedLocked(void* arg, grpc_error* error) {
PickState* pick;
while ((pick = p->pending_picks_)) {
p->pending_picks_ = pick->next;
pick->connected_subchannel = p->selected_->connected_subchannel;
pick->connected_subchannel =
p->selected_->connected_subchannel()->Ref();
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO,
"Servicing pending pick with selected subchannel %p",
@ -522,45 +495,43 @@ void PickFirst::OnConnectivityChangedLocked(void* arg, grpc_error* error) {
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
}
// Renew notification.
grpc_lb_subchannel_data_start_connectivity_watch(sd);
RenewConnectivityWatchLocked();
break;
}
case GRPC_CHANNEL_TRANSIENT_FAILURE: {
grpc_lb_subchannel_data_stop_connectivity_watch(sd);
StopConnectivityWatchLocked();
PickFirstSubchannelData* sd = this;
do {
sd->subchannel_list->checking_subchannel =
(sd->subchannel_list->checking_subchannel + 1) %
sd->subchannel_list->num_subchannels;
sd = &sd->subchannel_list
->subchannels[sd->subchannel_list->checking_subchannel];
} while (sd->subchannel == nullptr);
size_t next_index =
(sd->Index() + 1) % subchannel_list()->num_subchannels();
sd = subchannel_list()->subchannel(next_index);
} while (sd->subchannel() == nullptr);
// Case 1: Only set state to TRANSIENT_FAILURE if we've tried
// all subchannels.
if (sd->subchannel_list->checking_subchannel == 0 &&
sd->subchannel_list == p->subchannel_list_) {
if (sd->Index() == 0 && subchannel_list() == p->subchannel_list_.get()) {
grpc_connectivity_state_set(
&p->state_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "connecting_transient_failure");
}
// Reuses the connectivity refs from the previous watch.
grpc_lb_subchannel_data_start_connectivity_watch(sd);
sd->StartConnectivityWatchLocked();
break;
}
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_IDLE: {
// Only update connectivity state in case 1.
if (sd->subchannel_list == p->subchannel_list_) {
if (subchannel_list() == p->subchannel_list_.get()) {
grpc_connectivity_state_set(&p->state_tracker_, GRPC_CHANNEL_CONNECTING,
GRPC_ERROR_REF(error),
"connecting_changed");
}
// Renew notification.
grpc_lb_subchannel_data_start_connectivity_watch(sd);
RenewConnectivityWatchLocked();
break;
}
case GRPC_CHANNEL_SHUTDOWN:
GPR_UNREACHABLE_CODE(break);
}
GRPC_ERROR_UNREF(error);
}
//

@ -73,23 +73,127 @@ class RoundRobin : public LoadBalancingPolicy {
private:
~RoundRobin();
void ShutdownLocked() override;
// Forward declaration.
class RoundRobinSubchannelList;
// Data for a particular subchannel in a subchannel list.
// This subclass adds the following functionality:
// - Tracks user_data associated with each address, which will be
// returned along with picks that select the subchannel.
// - Tracks the previous connectivity state of the subchannel, so that
// we know how many subchannels are in each state.
class RoundRobinSubchannelData
: public SubchannelData<RoundRobinSubchannelList,
RoundRobinSubchannelData> {
public:
RoundRobinSubchannelData(RoundRobinSubchannelList* subchannel_list,
const grpc_lb_user_data_vtable* user_data_vtable,
const grpc_lb_address& address,
grpc_subchannel* subchannel,
grpc_combiner* combiner)
: SubchannelData(subchannel_list, user_data_vtable, address, subchannel,
combiner),
user_data_vtable_(user_data_vtable),
user_data_(user_data_vtable_ != nullptr
? user_data_vtable_->copy(address.user_data)
: nullptr) {}
void UnrefSubchannelLocked(const char* reason) override {
SubchannelData::UnrefSubchannelLocked(reason);
if (user_data_ != nullptr) {
GPR_ASSERT(user_data_vtable_ != nullptr);
user_data_vtable_->destroy(user_data_);
user_data_ = nullptr;
}
}
void* user_data() const { return user_data_; }
grpc_connectivity_state connectivity_state() const {
return last_connectivity_state_;
}
void UpdateConnectivityStateLocked(
grpc_connectivity_state connectivity_state, grpc_error* error);
private:
void ProcessConnectivityChangeLocked(
grpc_connectivity_state connectivity_state, grpc_error* error) override;
const grpc_lb_user_data_vtable* user_data_vtable_;
void* user_data_ = nullptr;
grpc_connectivity_state last_connectivity_state_ = GRPC_CHANNEL_IDLE;
};
// A list of subchannels.
class RoundRobinSubchannelList
: public SubchannelList<RoundRobinSubchannelList,
RoundRobinSubchannelData> {
public:
RoundRobinSubchannelList(
RoundRobin* policy, TraceFlag* tracer,
const grpc_lb_addresses* addresses, grpc_combiner* combiner,
grpc_client_channel_factory* client_channel_factory,
const grpc_channel_args& args)
: SubchannelList(policy, tracer, addresses, combiner,
client_channel_factory, args) {
// Need to maintain a ref to the LB policy as long as we maintain
// any references to subchannels, since the subchannels'
// pollset_sets will include the LB policy's pollset_set.
policy->Ref(DEBUG_LOCATION, "subchannel_list").release();
}
~RoundRobinSubchannelList() {
GRPC_ERROR_UNREF(last_transient_failure_error_);
RoundRobin* p = static_cast<RoundRobin*>(policy());
p->Unref(DEBUG_LOCATION, "subchannel_list");
}
// Starts watching the subchannels in this list.
void StartWatchingLocked();
// Updates the counters of subchannels in each state when a
// subchannel transitions from old_state to new_state.
// transient_failure_error is the error that is reported when
// new_state is TRANSIENT_FAILURE.
void UpdateStateCountersLocked(grpc_connectivity_state old_state,
grpc_connectivity_state new_state,
grpc_error* transient_failure_error);
// If this subchannel list is the RR policy's current subchannel
// list, updates the RR policy's connectivity state based on the
// subchannel list's state counters.
void MaybeUpdateRoundRobinConnectivityStateLocked();
// Updates the RR policy's overall state based on the counters of
// subchannels in each state.
void UpdateRoundRobinStateFromSubchannelStateCountsLocked();
void StartPickingLocked();
size_t GetNextReadySubchannelIndexLocked();
void UpdateLastReadySubchannelIndexLocked(size_t last_ready_index);
void UpdateConnectivityStatusLocked(grpc_lb_subchannel_data* sd,
grpc_error* error);
static void OnConnectivityChangedLocked(void* arg, grpc_error* error);
private:
size_t num_ready_ = 0;
size_t num_connecting_ = 0;
size_t num_transient_failure_ = 0;
grpc_error* last_transient_failure_error_ = GRPC_ERROR_NONE;
size_t last_ready_index_ = -1; // Index into list of last pick.
};
void ShutdownLocked() override;
void SubchannelListRefForConnectivityWatch(
grpc_lb_subchannel_list* subchannel_list, const char* reason);
void SubchannelListUnrefForConnectivityWatch(
grpc_lb_subchannel_list* subchannel_list, const char* reason);
void StartPickingLocked();
bool DoPickLocked(PickState* pick);
void DrainPendingPicksLocked();
/** list of subchannels */
grpc_lb_subchannel_list* subchannel_list_ = nullptr;
OrphanablePtr<RoundRobinSubchannelList> subchannel_list_;
/** Latest version of the subchannel list.
* Subchannel connectivity callbacks will only promote updated subchannel
* lists if they equal \a latest_pending_subchannel_list. In other words,
* racing callbacks that reference outdated subchannel lists won't perform any
* update. */
OrphanablePtr<RoundRobinSubchannelList> latest_pending_subchannel_list_;
/** have we started picking? */
bool started_picking_ = false;
/** are we shutting down? */
@ -98,14 +202,6 @@ class RoundRobin : public LoadBalancingPolicy {
PickState* pending_picks_ = nullptr;
/** our connectivity state tracker */
grpc_connectivity_state_tracker state_tracker_;
/** Index into subchannels for last pick. */
size_t last_ready_subchannel_index_ = 0;
/** Latest version of the subchannel list.
* Subchannel connectivity callbacks will only promote updated subchannel
* lists if they equal \a latest_pending_subchannel_list. In other words,
* racing callbacks that reference outdated subchannel lists won't perform any
* update. */
grpc_lb_subchannel_list* latest_pending_subchannel_list_ = nullptr;
};
RoundRobin::RoundRobin(const Args& args) : LoadBalancingPolicy(args) {
@ -114,15 +210,15 @@ RoundRobin::RoundRobin(const Args& args) : LoadBalancingPolicy(args) {
"round_robin");
UpdateLocked(*args.args);
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] Created with %" PRIuPTR " subchannels", this,
subchannel_list_->num_subchannels);
gpr_log(GPR_INFO, "[RR %p] Created with %" PRIuPTR " subchannels", this,
subchannel_list_->num_subchannels());
}
grpc_subchannel_index_ref();
}
RoundRobin::~RoundRobin() {
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] Destroying Round Robin policy", this);
gpr_log(GPR_INFO, "[RR %p] Destroying Round Robin policy", this);
}
GPR_ASSERT(subchannel_list_ == nullptr);
GPR_ASSERT(latest_pending_subchannel_list_ == nullptr);
@ -131,68 +227,6 @@ RoundRobin::~RoundRobin() {
grpc_subchannel_index_unref();
}
/** Returns the index into p->subchannel_list->subchannels of the next
* subchannel in READY state, or p->subchannel_list->num_subchannels if no
* subchannel is READY.
*
* Note that this function does *not* update p->last_ready_subchannel_index.
* The caller must do that if it returns a pick. */
size_t RoundRobin::GetNextReadySubchannelIndexLocked() {
GPR_ASSERT(subchannel_list_ != nullptr);
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO,
"[RR %p] getting next ready subchannel (out of %" PRIuPTR
"), "
"last_ready_subchannel_index=%" PRIuPTR,
this, subchannel_list_->num_subchannels,
last_ready_subchannel_index_);
}
for (size_t i = 0; i < subchannel_list_->num_subchannels; ++i) {
const size_t index = (i + last_ready_subchannel_index_ + 1) %
subchannel_list_->num_subchannels;
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(
GPR_DEBUG,
"[RR %p] checking subchannel %p, subchannel_list %p, index %" PRIuPTR
": state=%s",
this, subchannel_list_->subchannels[index].subchannel,
subchannel_list_, index,
grpc_connectivity_state_name(
subchannel_list_->subchannels[index].curr_connectivity_state));
}
if (subchannel_list_->subchannels[index].curr_connectivity_state ==
GRPC_CHANNEL_READY) {
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG,
"[RR %p] found next ready subchannel (%p) at index %" PRIuPTR
" of subchannel_list %p",
this, subchannel_list_->subchannels[index].subchannel, index,
subchannel_list_);
}
return index;
}
}
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] no subchannels in ready state", this);
}
return subchannel_list_->num_subchannels;
}
// Sets last_ready_subchannel_index_ to last_ready_index.
void RoundRobin::UpdateLastReadySubchannelIndexLocked(size_t last_ready_index) {
GPR_ASSERT(last_ready_index < subchannel_list_->num_subchannels);
last_ready_subchannel_index_ = last_ready_index;
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG,
"[RR %p] setting last_ready_subchannel_index=%" PRIuPTR
" (SC %p, CSC %p)",
this, last_ready_index,
subchannel_list_->subchannels[last_ready_index].subchannel,
subchannel_list_->subchannels[last_ready_index]
.connected_subchannel.get());
}
}
void RoundRobin::HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) {
PickState* pick;
while ((pick = pending_picks_) != nullptr) {
@ -207,7 +241,7 @@ void RoundRobin::HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) {
void RoundRobin::ShutdownLocked() {
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] Shutting down", this);
gpr_log(GPR_INFO, "[RR %p] Shutting down", this);
}
shutdown_ = true;
PickState* pick;
@ -218,16 +252,8 @@ void RoundRobin::ShutdownLocked() {
}
grpc_connectivity_state_set(&state_tracker_, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "rr_shutdown");
if (subchannel_list_ != nullptr) {
grpc_lb_subchannel_list_shutdown_and_unref(subchannel_list_,
"sl_shutdown_rr_shutdown");
subchannel_list_ = nullptr;
}
if (latest_pending_subchannel_list_ != nullptr) {
grpc_lb_subchannel_list_shutdown_and_unref(
latest_pending_subchannel_list_, "sl_shutdown_pending_rr_shutdown");
latest_pending_subchannel_list_ = nullptr;
}
subchannel_list_.reset();
latest_pending_subchannel_list_.reset();
TryReresolutionLocked(&grpc_lb_round_robin_trace, GRPC_ERROR_CANCELLED);
GRPC_ERROR_UNREF(error);
}
@ -273,34 +299,9 @@ void RoundRobin::CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
GRPC_ERROR_UNREF(error);
}
void RoundRobin::SubchannelListRefForConnectivityWatch(
grpc_lb_subchannel_list* subchannel_list, const char* reason) {
// TODO(roth): We currently track this ref manually. Once the new
// ClosureRef API is ready and the subchannel_list code has been
// converted to a C++ API, find a way to hold the RefCountedPtr<>
// somewhere (maybe in the subchannel_data object) instead of doing
// this manually.
auto self = Ref(DEBUG_LOCATION, reason);
self.release();
grpc_lb_subchannel_list_ref(subchannel_list, reason);
}
void RoundRobin::SubchannelListUnrefForConnectivityWatch(
grpc_lb_subchannel_list* subchannel_list, const char* reason) {
Unref(DEBUG_LOCATION, reason);
grpc_lb_subchannel_list_unref(subchannel_list, reason);
}
void RoundRobin::StartPickingLocked() {
started_picking_ = true;
for (size_t i = 0; i < subchannel_list_->num_subchannels; i++) {
if (subchannel_list_->subchannels[i].subchannel != nullptr) {
SubchannelListRefForConnectivityWatch(subchannel_list_,
"connectivity_watch");
grpc_lb_subchannel_data_start_connectivity_watch(
&subchannel_list_->subchannels[i]);
}
}
subchannel_list_->StartWatchingLocked();
}
void RoundRobin::ExitIdleLocked() {
@ -309,34 +310,48 @@ void RoundRobin::ExitIdleLocked() {
}
}
bool RoundRobin::PickLocked(PickState* pick) {
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] Trying to pick (shutdown: %d)", this,
shutdown_);
}
GPR_ASSERT(!shutdown_);
if (subchannel_list_ != nullptr) {
const size_t next_ready_index = GetNextReadySubchannelIndexLocked();
if (next_ready_index < subchannel_list_->num_subchannels) {
bool RoundRobin::DoPickLocked(PickState* pick) {
const size_t next_ready_index =
subchannel_list_->GetNextReadySubchannelIndexLocked();
if (next_ready_index < subchannel_list_->num_subchannels()) {
/* readily available, report right away */
grpc_lb_subchannel_data* sd =
&subchannel_list_->subchannels[next_ready_index];
pick->connected_subchannel = sd->connected_subchannel;
RoundRobinSubchannelData* sd =
subchannel_list_->subchannel(next_ready_index);
GPR_ASSERT(sd->connected_subchannel() != nullptr);
pick->connected_subchannel = sd->connected_subchannel()->Ref();
if (pick->user_data != nullptr) {
*pick->user_data = sd->user_data;
*pick->user_data = sd->user_data();
}
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(
GPR_DEBUG,
gpr_log(GPR_INFO,
"[RR %p] Picked target <-- Subchannel %p (connected %p) (sl %p, "
"index %" PRIuPTR ")",
this, sd->subchannel, pick->connected_subchannel.get(),
sd->subchannel_list, next_ready_index);
this, sd->subchannel(), pick->connected_subchannel.get(),
sd->subchannel_list(), next_ready_index);
}
/* only advance the last picked pointer if the selection was used */
UpdateLastReadySubchannelIndexLocked(next_ready_index);
subchannel_list_->UpdateLastReadySubchannelIndexLocked(next_ready_index);
return true;
}
return false;
}
void RoundRobin::DrainPendingPicksLocked() {
PickState* pick;
while ((pick = pending_picks_)) {
pending_picks_ = pick->next;
GPR_ASSERT(DoPickLocked(pick));
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
}
}
bool RoundRobin::PickLocked(PickState* pick) {
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO, "[RR %p] Trying to pick (shutdown: %d)", this, shutdown_);
}
GPR_ASSERT(!shutdown_);
if (subchannel_list_ != nullptr) {
if (DoPickLocked(pick)) return true;
}
/* no pick currently available. Save for later in list of pending picks */
if (!started_picking_) {
@ -347,36 +362,62 @@ bool RoundRobin::PickLocked(PickState* pick) {
return false;
}
void UpdateStateCountersLocked(grpc_lb_subchannel_data* sd) {
grpc_lb_subchannel_list* subchannel_list = sd->subchannel_list;
GPR_ASSERT(sd->prev_connectivity_state != GRPC_CHANNEL_SHUTDOWN);
GPR_ASSERT(sd->curr_connectivity_state != GRPC_CHANNEL_SHUTDOWN);
if (sd->prev_connectivity_state == GRPC_CHANNEL_READY) {
GPR_ASSERT(subchannel_list->num_ready > 0);
--subchannel_list->num_ready;
} else if (sd->prev_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
GPR_ASSERT(subchannel_list->num_transient_failures > 0);
--subchannel_list->num_transient_failures;
} else if (sd->prev_connectivity_state == GRPC_CHANNEL_IDLE) {
GPR_ASSERT(subchannel_list->num_idle > 0);
--subchannel_list->num_idle;
}
sd->prev_connectivity_state = sd->curr_connectivity_state;
if (sd->curr_connectivity_state == GRPC_CHANNEL_READY) {
++subchannel_list->num_ready;
} else if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
++subchannel_list->num_transient_failures;
} else if (sd->curr_connectivity_state == GRPC_CHANNEL_IDLE) {
++subchannel_list->num_idle;
void RoundRobin::RoundRobinSubchannelList::StartWatchingLocked() {
if (num_subchannels() == 0) return;
// Check current state of each subchannel synchronously, since any
// subchannel already used by some other channel may have a non-IDLE
// state.
for (size_t i = 0; i < num_subchannels(); ++i) {
grpc_error* error = GRPC_ERROR_NONE;
grpc_connectivity_state state =
subchannel(i)->CheckConnectivityStateLocked(&error);
if (state != GRPC_CHANNEL_IDLE) {
subchannel(i)->UpdateConnectivityStateLocked(state, error);
}
}
// Now set the LB policy's state based on the subchannels' states.
UpdateRoundRobinStateFromSubchannelStateCountsLocked();
// Start connectivity watch for each subchannel.
for (size_t i = 0; i < num_subchannels(); i++) {
if (subchannel(i)->subchannel() != nullptr) {
subchannel(i)->StartConnectivityWatchLocked();
}
}
}
/** Sets the policy's connectivity status based on that of the passed-in \a sd
* (the grpc_lb_subchannel_data associated with the updated subchannel) and the
* subchannel list \a sd belongs to (sd->subchannel_list). \a error will be used
* only if the policy transitions to state TRANSIENT_FAILURE. */
void RoundRobin::UpdateConnectivityStatusLocked(grpc_lb_subchannel_data* sd,
grpc_error* error) {
void RoundRobin::RoundRobinSubchannelList::UpdateStateCountersLocked(
grpc_connectivity_state old_state, grpc_connectivity_state new_state,
grpc_error* transient_failure_error) {
GPR_ASSERT(old_state != GRPC_CHANNEL_SHUTDOWN);
GPR_ASSERT(new_state != GRPC_CHANNEL_SHUTDOWN);
if (old_state == GRPC_CHANNEL_READY) {
GPR_ASSERT(num_ready_ > 0);
--num_ready_;
} else if (old_state == GRPC_CHANNEL_CONNECTING) {
GPR_ASSERT(num_connecting_ > 0);
--num_connecting_;
} else if (old_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
GPR_ASSERT(num_transient_failure_ > 0);
--num_transient_failure_;
}
if (new_state == GRPC_CHANNEL_READY) {
++num_ready_;
} else if (new_state == GRPC_CHANNEL_CONNECTING) {
++num_connecting_;
} else if (new_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
++num_transient_failure_;
}
GRPC_ERROR_UNREF(last_transient_failure_error_);
last_transient_failure_error_ = transient_failure_error;
}
// Sets the RR policy's connectivity state based on the current
// subchannel list.
void RoundRobin::RoundRobinSubchannelList::
MaybeUpdateRoundRobinConnectivityStateLocked() {
RoundRobin* p = static_cast<RoundRobin*>(policy());
// Only set connectivity state if this is the current subchannel list.
if (p->subchannel_list_.get() != this) return;
/* In priority order. The first rule to match terminates the search (ie, if we
* are on rule n, all previous rules were unfulfilled).
*
@ -391,155 +432,151 @@ void RoundRobin::UpdateConnectivityStatusLocked(grpc_lb_subchannel_data* sd,
* CHECK: subchannel_list->num_transient_failures ==
* subchannel_list->num_subchannels.
*/
grpc_lb_subchannel_list* subchannel_list = sd->subchannel_list;
GPR_ASSERT(sd->curr_connectivity_state != GRPC_CHANNEL_IDLE);
if (subchannel_list->num_ready > 0) {
if (num_ready_ > 0) {
/* 1) READY */
grpc_connectivity_state_set(&state_tracker_, GRPC_CHANNEL_READY,
grpc_connectivity_state_set(&p->state_tracker_, GRPC_CHANNEL_READY,
GRPC_ERROR_NONE, "rr_ready");
} else if (sd->curr_connectivity_state == GRPC_CHANNEL_CONNECTING) {
} else if (num_connecting_ > 0) {
/* 2) CONNECTING */
grpc_connectivity_state_set(&state_tracker_, GRPC_CHANNEL_CONNECTING,
grpc_connectivity_state_set(&p->state_tracker_, GRPC_CHANNEL_CONNECTING,
GRPC_ERROR_NONE, "rr_connecting");
} else if (subchannel_list->num_transient_failures ==
subchannel_list->num_subchannels) {
} else if (num_transient_failure_ == num_subchannels()) {
/* 3) TRANSIENT_FAILURE */
grpc_connectivity_state_set(&state_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error),
grpc_connectivity_state_set(&p->state_tracker_,
GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(last_transient_failure_error_),
"rr_exhausted_subchannels");
}
GRPC_ERROR_UNREF(error);
}
void RoundRobin::RoundRobinSubchannelList::
    UpdateRoundRobinStateFromSubchannelStateCountsLocked() {
  RoundRobin* p = static_cast<RoundRobin*>(policy());
  if (num_ready_ > 0) {
    if (p->subchannel_list_.get() != this) {
      // Promote this list to p->subchannel_list_.
      // This list must be p->latest_pending_subchannel_list_, because
      // any previous update would have been shut down already and
      // therefore we would not be receiving a notification for them.
      GPR_ASSERT(p->latest_pending_subchannel_list_.get() == this);
      GPR_ASSERT(!shutting_down());
      if (grpc_lb_round_robin_trace.enabled()) {
        const size_t old_num_subchannels =
            p->subchannel_list_ != nullptr
                ? p->subchannel_list_->num_subchannels()
                : 0;
        gpr_log(GPR_INFO,
                "[RR %p] phasing out subchannel list %p (size %" PRIuPTR
                ") in favor of %p (size %" PRIuPTR ")",
                p, p->subchannel_list_.get(), old_num_subchannels, this,
                num_subchannels());
      }
      p->subchannel_list_ = std::move(p->latest_pending_subchannel_list_);
    }
    // Drain pending picks.
    p->DrainPendingPicksLocked();
  }
  // Update the RR policy's connectivity state if needed.
  MaybeUpdateRoundRobinConnectivityStateLocked();
}
void RoundRobin::RoundRobinSubchannelData::UpdateConnectivityStateLocked(
grpc_connectivity_state connectivity_state, grpc_error* error) {
RoundRobin* p = static_cast<RoundRobin*>(subchannel_list()->policy());
if (grpc_lb_round_robin_trace.enabled()) {
    gpr_log(
        GPR_INFO,
"[RR %p] connectivity changed for subchannel %p, subchannel_list %p "
"(index %" PRIuPTR " of %" PRIuPTR "): prev_state=%s new_state=%s",
p, subchannel(), subchannel_list(), Index(),
subchannel_list()->num_subchannels(),
grpc_connectivity_state_name(last_connectivity_state_),
grpc_connectivity_state_name(connectivity_state));
}
subchannel_list()->UpdateStateCountersLocked(last_connectivity_state_,
connectivity_state, error);
last_connectivity_state_ = connectivity_state;
}
void RoundRobin::RoundRobinSubchannelData::ProcessConnectivityChangeLocked(
grpc_connectivity_state connectivity_state, grpc_error* error) {
RoundRobin* p = static_cast<RoundRobin*>(subchannel_list()->policy());
GPR_ASSERT(subchannel() != nullptr);
// If the new state is TRANSIENT_FAILURE, re-resolve.
// Only do this if we've started watching, not at startup time.
// Otherwise, if the subchannel was already in state TRANSIENT_FAILURE
// when the subchannel list was created, we'd wind up in a constant
// loop of re-resolution.
if (connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO,
"[RR %p] Subchannel %p has gone into TRANSIENT_FAILURE. "
"Requesting re-resolution",
              p, subchannel());
    }
    p->TryReresolutionLocked(&grpc_lb_round_robin_trace, GRPC_ERROR_NONE);
  }
// Update state counters.
UpdateConnectivityStateLocked(connectivity_state, error);
// Update overall state and renew notification.
subchannel_list()->UpdateRoundRobinStateFromSubchannelStateCountsLocked();
RenewConnectivityWatchLocked();
}
/** Returns the index into the subchannel list of the next subchannel in
 * READY state, or num_subchannels() if no subchannel is READY.
 *
 * Note that this function does *not* update last_ready_index_.
 * The caller must do that if it uses the result for a pick. */
size_t
RoundRobin::RoundRobinSubchannelList::GetNextReadySubchannelIndexLocked() {
  if (grpc_lb_round_robin_trace.enabled()) {
    gpr_log(GPR_INFO,
            "[RR %p] getting next ready subchannel (out of %" PRIuPTR
            "), last_ready_index=%" PRIuPTR,
            policy(), num_subchannels(), last_ready_index_);
  }
  for (size_t i = 0; i < num_subchannels(); ++i) {
    const size_t index = (i + last_ready_index_ + 1) % num_subchannels();
    if (grpc_lb_round_robin_trace.enabled()) {
      gpr_log(
          GPR_INFO,
          "[RR %p] checking subchannel %p, subchannel_list %p, index %" PRIuPTR
          ": state=%s",
          policy(), subchannel(index)->subchannel(), this, index,
          grpc_connectivity_state_name(
              subchannel(index)->connectivity_state()));
    }
    if (subchannel(index)->connectivity_state() == GRPC_CHANNEL_READY) {
      if (grpc_lb_round_robin_trace.enabled()) {
        gpr_log(GPR_INFO,
                "[RR %p] found next ready subchannel (%p) at index %" PRIuPTR
                " of subchannel_list %p",
                policy(), subchannel(index)->subchannel(), index, this);
      }
      return index;
    }
  }
  if (grpc_lb_round_robin_trace.enabled()) {
    gpr_log(GPR_INFO, "[RR %p] no subchannels in ready state", this);
  }
  return num_subchannels();
}
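// Illustrative usage sketch (editorial addition, not part of the original
// source): a caller treats a return value of num_subchannels() as "no READY
// subchannel" and must record the chosen index itself, mirroring
// PingOneLocked() below. PickConnectedSubchannelExample() is a hypothetical
// helper name used only for this sketch.
ConnectedSubchannel* RoundRobin::PickConnectedSubchannelExample() {
  const size_t next_ready_index =
      subchannel_list_->GetNextReadySubchannelIndexLocked();
  if (next_ready_index == subchannel_list_->num_subchannels()) return nullptr;
  subchannel_list_->UpdateLastReadySubchannelIndexLocked(next_ready_index);
  return subchannel_list_->subchannel(next_ready_index)->connected_subchannel();
}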
// Sets last_ready_index_ to last_ready_index.
void RoundRobin::RoundRobinSubchannelList::UpdateLastReadySubchannelIndexLocked(
size_t last_ready_index) {
GPR_ASSERT(last_ready_index < num_subchannels());
last_ready_index_ = last_ready_index;
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO,
"[RR %p] setting last_ready_subchannel_index=%" PRIuPTR
" (SC %p, CSC %p)",
policy(), last_ready_index,
subchannel(last_ready_index)->subchannel(),
subchannel(last_ready_index)->connected_subchannel());
}
}
grpc_connectivity_state RoundRobin::CheckConnectivityLocked(
@ -555,11 +592,12 @@ void RoundRobin::NotifyOnStateChangeLocked(grpc_connectivity_state* current,
void RoundRobin::PingOneLocked(grpc_closure* on_initiate,
grpc_closure* on_ack) {
const size_t next_ready_index =
subchannel_list_->GetNextReadySubchannelIndexLocked();
if (next_ready_index < subchannel_list_->num_subchannels()) {
RoundRobinSubchannelData* selected =
subchannel_list_->subchannel(next_ready_index);
selected->connected_subchannel()->Ping(on_initiate, on_ack);
} else {
GRPC_CLOSURE_SCHED(on_initiate, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Round Robin not connected"));
@ -582,80 +620,37 @@ void RoundRobin::UpdateLocked(const grpc_channel_args& args) {
}
return;
}
  grpc_lb_addresses* addresses =
      static_cast<grpc_lb_addresses*>(arg->value.pointer.p);
  if (grpc_lb_round_robin_trace.enabled()) {
    gpr_log(GPR_INFO, "[RR %p] received update with %" PRIuPTR " addresses",
            this, addresses->num_addresses);
  }
  // Replace latest_pending_subchannel_list_.
  if (latest_pending_subchannel_list_ != nullptr) {
    if (grpc_lb_round_robin_trace.enabled()) {
      gpr_log(GPR_INFO,
              "[RR %p] Shutting down previous pending subchannel list %p", this,
              latest_pending_subchannel_list_.get());
    }
  }
  latest_pending_subchannel_list_ = MakeOrphanable<RoundRobinSubchannelList>(
      this, &grpc_lb_round_robin_trace, addresses, combiner(),
      client_channel_factory(), args);
  // If we haven't started picking yet or the new list is empty,
  // immediately promote the new list to the current list.
  if (!started_picking_ ||
      latest_pending_subchannel_list_->num_subchannels() == 0) {
    if (latest_pending_subchannel_list_->num_subchannels() == 0) {
      grpc_connectivity_state_set(
          &state_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE,
          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"),
          "rr_update_empty");
    }
    subchannel_list_ = std::move(latest_pending_subchannel_list_);
  } else {
    // If we've started picking, start watching the new list.
    latest_pending_subchannel_list_->StartWatchingLocked();
}
}
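// Worked example (editorial note): if an update arrives after the policy has
// started picking, the new RoundRobinSubchannelList is held as
// latest_pending_subchannel_list_ and watched in the background; it is only
// promoted to subchannel_list_ (and pending picks drained) once one of its
// subchannels reports READY -- see
// UpdateRoundRobinStateFromSubchannelStateCountsLocked() above. If the policy
// has not started picking, or the new list is empty, the swap happens
// immediately instead.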

@ -1,253 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/port_platform.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include "src/core/ext/filters/client_channel/lb_policy/subchannel_list.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/transport/connectivity_state.h"
void grpc_lb_subchannel_data_unref_subchannel(grpc_lb_subchannel_data* sd,
const char* reason) {
if (sd->subchannel != nullptr) {
if (sd->subchannel_list->tracer->enabled()) {
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): unreffing subchannel",
sd->subchannel_list->tracer->name(), sd->subchannel_list->policy,
sd->subchannel_list,
static_cast<size_t>(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel);
}
GRPC_SUBCHANNEL_UNREF(sd->subchannel, reason);
sd->subchannel = nullptr;
sd->connected_subchannel.reset();
if (sd->user_data != nullptr) {
GPR_ASSERT(sd->user_data_vtable != nullptr);
sd->user_data_vtable->destroy(sd->user_data);
sd->user_data = nullptr;
}
}
}
void grpc_lb_subchannel_data_start_connectivity_watch(
grpc_lb_subchannel_data* sd) {
if (sd->subchannel_list->tracer->enabled()) {
gpr_log(
GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): requesting connectivity change "
"notification (from %s)",
sd->subchannel_list->tracer->name(), sd->subchannel_list->policy,
sd->subchannel_list,
static_cast<size_t>(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel,
grpc_connectivity_state_name(sd->pending_connectivity_state_unsafe));
}
sd->connectivity_notification_pending = true;
grpc_subchannel_notify_on_state_change(
sd->subchannel, sd->subchannel_list->policy->interested_parties(),
&sd->pending_connectivity_state_unsafe,
&sd->connectivity_changed_closure);
}
void grpc_lb_subchannel_data_stop_connectivity_watch(
grpc_lb_subchannel_data* sd) {
if (sd->subchannel_list->tracer->enabled()) {
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): stopping connectivity watch",
sd->subchannel_list->tracer->name(), sd->subchannel_list->policy,
sd->subchannel_list,
static_cast<size_t>(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel);
}
GPR_ASSERT(sd->connectivity_notification_pending);
sd->connectivity_notification_pending = false;
}
grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
grpc_core::LoadBalancingPolicy* p, grpc_core::TraceFlag* tracer,
const grpc_lb_addresses* addresses, grpc_combiner* combiner,
grpc_client_channel_factory* client_channel_factory,
const grpc_channel_args& args, grpc_iomgr_cb_func connectivity_changed_cb) {
grpc_lb_subchannel_list* subchannel_list =
static_cast<grpc_lb_subchannel_list*>(
gpr_zalloc(sizeof(*subchannel_list)));
if (tracer->enabled()) {
gpr_log(GPR_DEBUG,
"[%s %p] Creating subchannel list %p for %" PRIuPTR " subchannels",
tracer->name(), p, subchannel_list, addresses->num_addresses);
}
subchannel_list->policy = p;
subchannel_list->tracer = tracer;
gpr_ref_init(&subchannel_list->refcount, 1);
subchannel_list->subchannels = static_cast<grpc_lb_subchannel_data*>(
gpr_zalloc(sizeof(grpc_lb_subchannel_data) * addresses->num_addresses));
// We need to remove the LB addresses in order to be able to compare the
// subchannel keys of subchannels from a different batch of addresses.
static const char* keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
GRPC_ARG_LB_ADDRESSES};
// Create a subchannel for each address.
grpc_subchannel_args sc_args;
size_t subchannel_index = 0;
for (size_t i = 0; i < addresses->num_addresses; i++) {
// If there were any balancer, we would have chosen grpclb policy instead.
GPR_ASSERT(!addresses->addresses[i].is_balancer);
memset(&sc_args, 0, sizeof(grpc_subchannel_args));
grpc_arg addr_arg =
grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
grpc_channel_args* new_args = grpc_channel_args_copy_and_add_and_remove(
&args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg, 1);
gpr_free(addr_arg.value.string);
sc_args.args = new_args;
grpc_subchannel* subchannel = grpc_client_channel_factory_create_subchannel(
client_channel_factory, &sc_args);
grpc_channel_args_destroy(new_args);
if (subchannel == nullptr) {
// Subchannel could not be created.
if (tracer->enabled()) {
char* address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
gpr_log(GPR_DEBUG,
"[%s %p] could not create subchannel for address uri %s, "
"ignoring",
tracer->name(), subchannel_list->policy, address_uri);
gpr_free(address_uri);
}
continue;
}
if (tracer->enabled()) {
char* address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR
": Created subchannel %p for address uri %s",
tracer->name(), p, subchannel_list, subchannel_index, subchannel,
address_uri);
gpr_free(address_uri);
}
grpc_lb_subchannel_data* sd =
&subchannel_list->subchannels[subchannel_index++];
sd->subchannel_list = subchannel_list;
sd->subchannel = subchannel;
GRPC_CLOSURE_INIT(&sd->connectivity_changed_closure,
connectivity_changed_cb, sd,
grpc_combiner_scheduler(combiner));
// We assume that the current state is IDLE. If not, we'll get a
// callback telling us that.
sd->prev_connectivity_state = GRPC_CHANNEL_IDLE;
sd->curr_connectivity_state = GRPC_CHANNEL_IDLE;
sd->pending_connectivity_state_unsafe = GRPC_CHANNEL_IDLE;
sd->user_data_vtable = addresses->user_data_vtable;
if (sd->user_data_vtable != nullptr) {
sd->user_data =
sd->user_data_vtable->copy(addresses->addresses[i].user_data);
}
}
subchannel_list->num_subchannels = subchannel_index;
subchannel_list->num_idle = subchannel_index;
return subchannel_list;
}
static void subchannel_list_destroy(grpc_lb_subchannel_list* subchannel_list) {
if (subchannel_list->tracer->enabled()) {
gpr_log(GPR_DEBUG, "[%s %p] Destroying subchannel_list %p",
subchannel_list->tracer->name(), subchannel_list->policy,
subchannel_list);
}
for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
grpc_lb_subchannel_data* sd = &subchannel_list->subchannels[i];
grpc_lb_subchannel_data_unref_subchannel(sd, "subchannel_list_destroy");
}
gpr_free(subchannel_list->subchannels);
gpr_free(subchannel_list);
}
void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list* subchannel_list,
const char* reason) {
gpr_ref_non_zero(&subchannel_list->refcount);
if (subchannel_list->tracer->enabled()) {
const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
gpr_log(GPR_DEBUG, "[%s %p] subchannel_list %p REF %lu->%lu (%s)",
subchannel_list->tracer->name(), subchannel_list->policy,
subchannel_list, static_cast<unsigned long>(count - 1),
static_cast<unsigned long>(count), reason);
}
}
void grpc_lb_subchannel_list_unref(grpc_lb_subchannel_list* subchannel_list,
const char* reason) {
const bool done = gpr_unref(&subchannel_list->refcount);
if (subchannel_list->tracer->enabled()) {
const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
gpr_log(GPR_DEBUG, "[%s %p] subchannel_list %p UNREF %lu->%lu (%s)",
subchannel_list->tracer->name(), subchannel_list->policy,
subchannel_list, static_cast<unsigned long>(count + 1),
static_cast<unsigned long>(count), reason);
}
if (done) {
subchannel_list_destroy(subchannel_list);
}
}
static void subchannel_data_cancel_connectivity_watch(
grpc_lb_subchannel_data* sd, const char* reason) {
if (sd->subchannel_list->tracer->enabled()) {
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): canceling connectivity watch (%s)",
sd->subchannel_list->tracer->name(), sd->subchannel_list->policy,
sd->subchannel_list,
static_cast<size_t>(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel, reason);
}
grpc_subchannel_notify_on_state_change(sd->subchannel, nullptr, nullptr,
&sd->connectivity_changed_closure);
}
void grpc_lb_subchannel_list_shutdown_and_unref(
grpc_lb_subchannel_list* subchannel_list, const char* reason) {
if (subchannel_list->tracer->enabled()) {
gpr_log(GPR_DEBUG, "[%s %p] Shutting down subchannel_list %p (%s)",
subchannel_list->tracer->name(), subchannel_list->policy,
subchannel_list, reason);
}
GPR_ASSERT(!subchannel_list->shutting_down);
subchannel_list->shutting_down = true;
for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
grpc_lb_subchannel_data* sd = &subchannel_list->subchannels[i];
// If there's a pending notification for this subchannel, cancel it;
// the callback is responsible for unreffing the subchannel.
// Otherwise, unref the subchannel directly.
if (sd->connectivity_notification_pending) {
subchannel_data_cancel_connectivity_watch(sd, reason);
} else if (sd->subchannel != nullptr) {
grpc_lb_subchannel_data_unref_subchannel(sd, reason);
}
}
grpc_lb_subchannel_list_unref(subchannel_list, reason);
}

@ -21,116 +21,516 @@
#include <grpc/support/port_platform.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/abstract.h"
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/transport/connectivity_state.h"
// TODO(roth): This code is intended to be shared between pick_first and
// round_robin. However, the interface needs more work to provide clean
// encapsulation. For example, the structs here have some fields that are
// only used in one of the two (e.g., the state counters in
// grpc_lb_subchannel_list and the prev_connectivity_state field in
// grpc_lb_subchannel_data are only used in round_robin, and the
// checking_subchannel field in grpc_lb_subchannel_list is only used by
// pick_first). Also, there is probably some code duplication between the
// connectivity state notification callback code in both pick_first and
// round_robin that could be refactored and moved here. In a future PR,
// need to clean this up.
typedef struct grpc_lb_subchannel_list grpc_lb_subchannel_list;
typedef struct {
/** backpointer to owning subchannel list */
grpc_lb_subchannel_list* subchannel_list;
/** subchannel itself */
grpc_subchannel* subchannel;
grpc_core::RefCountedPtr<grpc_core::ConnectedSubchannel> connected_subchannel;
/** Is a connectivity notification pending? */
bool connectivity_notification_pending;
/** notification that connectivity has changed on subchannel */
grpc_closure connectivity_changed_closure;
/** previous and current connectivity states. Updated by \a
* \a connectivity_changed_closure based on
* \a pending_connectivity_state_unsafe. */
grpc_connectivity_state prev_connectivity_state;
grpc_connectivity_state curr_connectivity_state;
/** connectivity state to be updated by
* grpc_subchannel_notify_on_state_change(), not guarded by
* the combiner. To be copied to \a curr_connectivity_state by
* \a connectivity_changed_closure. */
grpc_connectivity_state pending_connectivity_state_unsafe;
/** the subchannel's target user data */
void* user_data;
/** vtable to operate over \a user_data */
const grpc_lb_user_data_vtable* user_data_vtable;
} grpc_lb_subchannel_data;
/// Unrefs the subchannel contained in sd.
void grpc_lb_subchannel_data_unref_subchannel(grpc_lb_subchannel_data* sd,
const char* reason);
/// Starts watching the connectivity state of the subchannel.
/// The connectivity_changed_cb callback must invoke either
/// grpc_lb_subchannel_data_stop_connectivity_watch() or again call
/// grpc_lb_subchannel_data_start_connectivity_watch().
void grpc_lb_subchannel_data_start_connectivity_watch(
grpc_lb_subchannel_data* sd);
/// Stops watching the connectivity state of the subchannel.
void grpc_lb_subchannel_data_stop_connectivity_watch(
grpc_lb_subchannel_data* sd);
struct grpc_lb_subchannel_list {
/** backpointer to owning policy */
grpc_core::LoadBalancingPolicy* policy;
grpc_core::TraceFlag* tracer;
/** all our subchannels */
size_t num_subchannels;
grpc_lb_subchannel_data* subchannels;
/** Index into subchannels of the one we're currently checking.
* Used when connecting to subchannels serially instead of in parallel. */
// TODO(roth): When we have time, we can probably make this go away
// and compute the index dynamically by subtracting
// subchannel_list->subchannels from the subchannel_data pointer.
size_t checking_subchannel;
/** how many subchannels are in state READY */
size_t num_ready;
/** how many subchannels are in state TRANSIENT_FAILURE */
size_t num_transient_failures;
/** how many subchannels are in state IDLE */
size_t num_idle;
/** There will be one ref for each entry in subchannels for which there is a
* pending connectivity state watcher callback. */
gpr_refcount refcount;
/** Is this list shutting down? This may be true due to the shutdown of the
* policy itself or because a newer update has arrived while this one hadn't
* finished processing. */
bool shutting_down;
};
// Code for maintaining a list of subchannels within an LB policy.
//
// To use this, callers must create their own subclasses, like so:
/*
class MySubchannelList; // Forward declaration.
class MySubchannelData
: public SubchannelData<MySubchannelList, MySubchannelData> {
public:
void ProcessConnectivityChangeLocked(
grpc_connectivity_state connectivity_state, grpc_error* error) override {
// ...code to handle connectivity changes...
}
};
class MySubchannelList
: public SubchannelList<MySubchannelList, MySubchannelData> {
};
*/
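// A hedged wiring sketch (editorial addition; MySubchannelList and
// MySubchannelData mirror the illustrative names above, and my_tracer is a
// hypothetical TraceFlag). MySubchannelData must also expose a constructor
// forwarding to the protected SubchannelData constructor declared below.
/*
class MySubchannelList
    : public SubchannelList<MySubchannelList, MySubchannelData> {
 public:
  MySubchannelList(LoadBalancingPolicy* policy, TraceFlag* tracer,
                   const grpc_lb_addresses* addresses, grpc_combiner* combiner,
                   grpc_client_channel_factory* client_channel_factory,
                   const grpc_channel_args& args)
      : SubchannelList(policy, tracer, addresses, combiner,
                       client_channel_factory, args) {}
};
// In the owning policy, typically when a new address list arrives:
auto list = MakeOrphanable<MySubchannelList>(
    this, &my_tracer, addresses, combiner(), client_channel_factory(), args);
for (size_t i = 0; i < list->num_subchannels(); ++i) {
  if (list->subchannel(i)->subchannel() != nullptr) {
    list->subchannel(i)->StartConnectivityWatchLocked();
  }
}
*/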
// All methods with a Locked() suffix must be called from within the
// client_channel combiner.
namespace grpc_core {
// Stores data for a particular subchannel in a subchannel list.
// Callers must create a subclass that implements the
// ProcessConnectivityChangeLocked() method.
template <typename SubchannelListType, typename SubchannelDataType>
class SubchannelData {
public:
// Returns a pointer to the subchannel list containing this object.
SubchannelListType* subchannel_list() const { return subchannel_list_; }
// Returns the index into the subchannel list of this object.
size_t Index() const {
return static_cast<size_t>(static_cast<const SubchannelDataType*>(this) -
subchannel_list_->subchannel(0));
}
// Returns a pointer to the subchannel.
grpc_subchannel* subchannel() const { return subchannel_; }
// Returns the connected subchannel. Will be null if the subchannel
// is not connected.
ConnectedSubchannel* connected_subchannel() const {
return connected_subchannel_.get();
}
// Synchronously checks the subchannel's connectivity state.
// Must not be called while there is a connectivity notification
// pending (i.e., between calling StartConnectivityWatchLocked() or
// RenewConnectivityWatchLocked() and the resulting invocation of
// ProcessConnectivityChangeLocked()).
grpc_connectivity_state CheckConnectivityStateLocked(grpc_error** error) {
GPR_ASSERT(!connectivity_notification_pending_);
pending_connectivity_state_unsafe_ =
grpc_subchannel_check_connectivity(subchannel(), error);
UpdateConnectedSubchannelLocked();
return pending_connectivity_state_unsafe_;
}
// Unrefs the subchannel. May be used if an individual subchannel is
// no longer needed even though the subchannel list as a whole is not
// being unreffed.
virtual void UnrefSubchannelLocked(const char* reason);
// Starts watching the connectivity state of the subchannel.
// ProcessConnectivityChangeLocked() will be called when the
// connectivity state changes.
void StartConnectivityWatchLocked();
// Renews watching the connectivity state of the subchannel.
void RenewConnectivityWatchLocked();
// Stops watching the connectivity state of the subchannel.
void StopConnectivityWatchLocked();
// Cancels watching the connectivity state of the subchannel.
// Must be called only while there is a connectivity notification
// pending (i.e., between calling StartConnectivityWatchLocked() or
// RenewConnectivityWatchLocked() and the resulting invocation of
// ProcessConnectivityChangeLocked()).
// From within ProcessConnectivityChangeLocked(), use
// StopConnectivityWatchLocked() instead.
void CancelConnectivityWatchLocked(const char* reason);
// Cancels any pending connectivity watch and unrefs the subchannel.
void ShutdownLocked();
GRPC_ABSTRACT_BASE_CLASS
protected:
SubchannelData(SubchannelListType* subchannel_list,
const grpc_lb_user_data_vtable* user_data_vtable,
const grpc_lb_address& address, grpc_subchannel* subchannel,
grpc_combiner* combiner);
virtual ~SubchannelData();
// After StartConnectivityWatchLocked() or RenewConnectivityWatchLocked()
// is called, this method will be invoked when the subchannel's connectivity
// state changes.
// Implementations must invoke either RenewConnectivityWatchLocked() or
// StopConnectivityWatchLocked() before returning.
virtual void ProcessConnectivityChangeLocked(
grpc_connectivity_state connectivity_state,
grpc_error* error) GRPC_ABSTRACT;
private:
// Updates connected_subchannel_ based on pending_connectivity_state_unsafe_.
// Returns true if the connectivity state should be reported.
bool UpdateConnectedSubchannelLocked();
static void OnConnectivityChangedLocked(void* arg, grpc_error* error);
// Backpointer to owning subchannel list. Not owned.
SubchannelListType* subchannel_list_;
// The subchannel and connected subchannel.
grpc_subchannel* subchannel_;
RefCountedPtr<ConnectedSubchannel> connected_subchannel_;
// Notification that connectivity has changed on subchannel.
grpc_closure connectivity_changed_closure_;
// Is a connectivity notification pending?
bool connectivity_notification_pending_ = false;
// Connectivity state to be updated by
// grpc_subchannel_notify_on_state_change(), not guarded by
// the combiner.
grpc_connectivity_state pending_connectivity_state_unsafe_;
};
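// Hedged sketch (editorial addition) of a ProcessConnectivityChangeLocked()
// override: the subclass owns the error ref it is handed and must finish by
// either renewing or stopping the watch, per the contract documented above.
// MySubchannelData is the illustrative name from the usage comment.
/*
void MySubchannelData::ProcessConnectivityChangeLocked(
    grpc_connectivity_state connectivity_state, grpc_error* error) {
  if (connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
    // ...update the policy's aggregated state, request re-resolution, etc...
  }
  GRPC_ERROR_UNREF(error);  // or hand the ref off to policy bookkeeping
  RenewConnectivityWatchLocked();
}
*/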
// A list of subchannels.
template <typename SubchannelListType, typename SubchannelDataType>
class SubchannelList
: public InternallyRefCountedWithTracing<SubchannelListType> {
public:
typedef InlinedVector<SubchannelDataType, 10> SubchannelVector;
// The number of subchannels in the list.
size_t num_subchannels() const { return subchannels_.size(); }
// The data for the subchannel at a particular index.
SubchannelDataType* subchannel(size_t index) { return &subchannels_[index]; }
// Returns true if the subchannel list is shutting down.
bool shutting_down() const { return shutting_down_; }
// Accessors.
LoadBalancingPolicy* policy() const { return policy_; }
TraceFlag* tracer() const { return tracer_; }
// Note: Caller must ensure that this is invoked inside of the combiner.
void Orphan() override {
ShutdownLocked();
InternallyRefCountedWithTracing<SubchannelListType>::Unref(DEBUG_LOCATION,
"shutdown");
}
GRPC_ABSTRACT_BASE_CLASS
protected:
SubchannelList(LoadBalancingPolicy* policy, TraceFlag* tracer,
const grpc_lb_addresses* addresses, grpc_combiner* combiner,
grpc_client_channel_factory* client_channel_factory,
const grpc_channel_args& args);
virtual ~SubchannelList();
private:
// So New() can call our private ctor.
template <typename T, typename... Args>
friend T* New(Args&&... args);
// For accessing Ref() and Unref().
friend class SubchannelData<SubchannelListType, SubchannelDataType>;
void ShutdownLocked();
// Backpointer to owning policy.
LoadBalancingPolicy* policy_;
TraceFlag* tracer_;
grpc_combiner* combiner_;
// The list of subchannels.
SubchannelVector subchannels_;
// Is this list shutting down? This may be true due to the shutdown of the
// policy itself or because a newer update has arrived while this one hadn't
// finished processing.
bool shutting_down_ = false;
};
//
// implementation -- no user-serviceable parts below
//
//
// SubchannelData
//
template <typename SubchannelListType, typename SubchannelDataType>
SubchannelData<SubchannelListType, SubchannelDataType>::SubchannelData(
SubchannelListType* subchannel_list,
const grpc_lb_user_data_vtable* user_data_vtable,
const grpc_lb_address& address, grpc_subchannel* subchannel,
grpc_combiner* combiner)
: subchannel_list_(subchannel_list),
subchannel_(subchannel),
// We assume that the current state is IDLE. If not, we'll get a
// callback telling us that.
pending_connectivity_state_unsafe_(GRPC_CHANNEL_IDLE) {
GRPC_CLOSURE_INIT(
&connectivity_changed_closure_,
(&SubchannelData<SubchannelListType,
SubchannelDataType>::OnConnectivityChangedLocked),
this, grpc_combiner_scheduler(combiner));
}
template <typename SubchannelListType, typename SubchannelDataType>
SubchannelData<SubchannelListType, SubchannelDataType>::~SubchannelData() {
UnrefSubchannelLocked("subchannel_data_destroy");
}
template <typename SubchannelListType, typename SubchannelDataType>
void SubchannelData<SubchannelListType, SubchannelDataType>::
UnrefSubchannelLocked(const char* reason) {
if (subchannel_ != nullptr) {
if (subchannel_list_->tracer()->enabled()) {
gpr_log(GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): unreffing subchannel",
subchannel_list_->tracer()->name(), subchannel_list_->policy(),
subchannel_list_, Index(), subchannel_list_->num_subchannels(),
subchannel_);
}
GRPC_SUBCHANNEL_UNREF(subchannel_, reason);
subchannel_ = nullptr;
connected_subchannel_.reset();
}
}
template <typename SubchannelListType, typename SubchannelDataType>
void SubchannelData<SubchannelListType,
SubchannelDataType>::StartConnectivityWatchLocked() {
if (subchannel_list_->tracer()->enabled()) {
gpr_log(GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): starting watch: requesting connectivity change "
"notification (from %s)",
subchannel_list_->tracer()->name(), subchannel_list_->policy(),
subchannel_list_, Index(), subchannel_list_->num_subchannels(),
subchannel_,
grpc_connectivity_state_name(pending_connectivity_state_unsafe_));
}
GPR_ASSERT(!connectivity_notification_pending_);
connectivity_notification_pending_ = true;
subchannel_list()->Ref(DEBUG_LOCATION, "connectivity_watch").release();
grpc_subchannel_notify_on_state_change(
subchannel_, subchannel_list_->policy()->interested_parties(),
&pending_connectivity_state_unsafe_, &connectivity_changed_closure_);
}
template <typename SubchannelListType, typename SubchannelDataType>
void SubchannelData<SubchannelListType,
SubchannelDataType>::RenewConnectivityWatchLocked() {
if (subchannel_list_->tracer()->enabled()) {
gpr_log(GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): renewing watch: requesting connectivity change "
"notification (from %s)",
subchannel_list_->tracer()->name(), subchannel_list_->policy(),
subchannel_list_, Index(), subchannel_list_->num_subchannels(),
subchannel_,
grpc_connectivity_state_name(pending_connectivity_state_unsafe_));
}
GPR_ASSERT(connectivity_notification_pending_);
grpc_subchannel_notify_on_state_change(
subchannel_, subchannel_list_->policy()->interested_parties(),
&pending_connectivity_state_unsafe_, &connectivity_changed_closure_);
}
template <typename SubchannelListType, typename SubchannelDataType>
void SubchannelData<SubchannelListType,
SubchannelDataType>::StopConnectivityWatchLocked() {
if (subchannel_list_->tracer()->enabled()) {
gpr_log(GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): stopping connectivity watch",
subchannel_list_->tracer()->name(), subchannel_list_->policy(),
subchannel_list_, Index(), subchannel_list_->num_subchannels(),
subchannel_);
}
GPR_ASSERT(connectivity_notification_pending_);
connectivity_notification_pending_ = false;
subchannel_list()->Unref(DEBUG_LOCATION, "connectivity_watch");
}
template <typename SubchannelListType, typename SubchannelDataType>
void SubchannelData<SubchannelListType, SubchannelDataType>::
CancelConnectivityWatchLocked(const char* reason) {
if (subchannel_list_->tracer()->enabled()) {
gpr_log(GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): canceling connectivity watch (%s)",
subchannel_list_->tracer()->name(), subchannel_list_->policy(),
subchannel_list_, Index(), subchannel_list_->num_subchannels(),
subchannel_, reason);
}
GPR_ASSERT(connectivity_notification_pending_);
grpc_subchannel_notify_on_state_change(subchannel_, nullptr, nullptr,
&connectivity_changed_closure_);
}
template <typename SubchannelListType, typename SubchannelDataType>
bool SubchannelData<SubchannelListType,
SubchannelDataType>::UpdateConnectedSubchannelLocked() {
// If the subchannel is READY, take a ref to the connected subchannel.
if (pending_connectivity_state_unsafe_ == GRPC_CHANNEL_READY) {
connected_subchannel_ =
grpc_subchannel_get_connected_subchannel(subchannel_);
// If the subchannel became disconnected between the time that READY
// was reported and the time we got here (e.g., between when a
// notification callback is scheduled and when it was actually run in
// the combiner), then the connected subchannel may have disappeared out
// from under us. In that case, we don't actually want to consider the
// subchannel to be in state READY. Instead, we use IDLE as the
// basis for any future connectivity watch; this is the one state that
// the subchannel will never transition back into, so this ensures
// that we will get a notification for the next state, even if that state
// is READY again (e.g., if the subchannel has transitioned back to
// READY before the next watch gets requested).
if (connected_subchannel_ == nullptr) {
if (subchannel_list_->tracer()->enabled()) {
gpr_log(GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): state is READY but connected subchannel is "
"null; moving to state IDLE",
subchannel_list_->tracer()->name(), subchannel_list_->policy(),
subchannel_list_, Index(), subchannel_list_->num_subchannels(),
subchannel_);
}
pending_connectivity_state_unsafe_ = GRPC_CHANNEL_IDLE;
return false;
}
} else {
// For any state other than READY, unref the connected subchannel.
connected_subchannel_.reset();
}
return true;
}
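// Example of the race handled above (editorial note): the watcher reports
// READY, but the transport disconnects before the closure runs in the
// combiner; grpc_subchannel_get_connected_subchannel() then returns null, the
// pending state is rewritten to IDLE, and OnConnectivityChangedLocked() below
// simply renews the watch instead of reporting a READY edge that no longer
// exists.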
template <typename SubchannelListType, typename SubchannelDataType>
void SubchannelData<SubchannelListType, SubchannelDataType>::
OnConnectivityChangedLocked(void* arg, grpc_error* error) {
SubchannelData* sd = static_cast<SubchannelData*>(arg);
if (sd->subchannel_list_->tracer()->enabled()) {
gpr_log(
GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): connectivity changed: state=%s, error=%s, "
"shutting_down=%d",
sd->subchannel_list_->tracer()->name(), sd->subchannel_list_->policy(),
sd->subchannel_list_, sd->Index(),
sd->subchannel_list_->num_subchannels(), sd->subchannel_,
grpc_connectivity_state_name(sd->pending_connectivity_state_unsafe_),
grpc_error_string(error), sd->subchannel_list_->shutting_down());
}
// If shutting down, unref subchannel and stop watching.
if (sd->subchannel_list_->shutting_down() || error == GRPC_ERROR_CANCELLED) {
sd->UnrefSubchannelLocked("connectivity_shutdown");
sd->StopConnectivityWatchLocked();
return;
}
// Get or release ref to connected subchannel.
if (!sd->UpdateConnectedSubchannelLocked()) {
// We don't want to report this connectivity state, so renew the watch.
sd->RenewConnectivityWatchLocked();
return;
}
// Call the subclass's ProcessConnectivityChangeLocked() method.
sd->ProcessConnectivityChangeLocked(sd->pending_connectivity_state_unsafe_,
GRPC_ERROR_REF(error));
}
template <typename SubchannelListType, typename SubchannelDataType>
void SubchannelData<SubchannelListType, SubchannelDataType>::ShutdownLocked() {
// If there's a pending notification for this subchannel, cancel it;
// the callback is responsible for unreffing the subchannel.
// Otherwise, unref the subchannel directly.
if (connectivity_notification_pending_) {
CancelConnectivityWatchLocked("shutdown");
} else if (subchannel_ != nullptr) {
UnrefSubchannelLocked("shutdown");
}
}
//
// SubchannelList
//
template <typename SubchannelListType, typename SubchannelDataType>
SubchannelList<SubchannelListType, SubchannelDataType>::SubchannelList(
LoadBalancingPolicy* policy, TraceFlag* tracer,
const grpc_lb_addresses* addresses, grpc_combiner* combiner,
grpc_client_channel_factory* client_channel_factory,
    const grpc_channel_args& args)
: InternallyRefCountedWithTracing<SubchannelListType>(tracer),
policy_(policy),
tracer_(tracer),
combiner_(GRPC_COMBINER_REF(combiner, "subchannel_list")) {
if (tracer_->enabled()) {
gpr_log(GPR_INFO,
"[%s %p] Creating subchannel list %p for %" PRIuPTR " subchannels",
tracer_->name(), policy, this, addresses->num_addresses);
}
subchannels_.reserve(addresses->num_addresses);
// We need to remove the LB addresses in order to be able to compare the
// subchannel keys of subchannels from a different batch of addresses.
static const char* keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
GRPC_ARG_LB_ADDRESSES};
// Create a subchannel for each address.
grpc_subchannel_args sc_args;
for (size_t i = 0; i < addresses->num_addresses; i++) {
// If there were any balancer addresses, we would have chosen the grpclb
// policy instead.
GPR_ASSERT(!addresses->addresses[i].is_balancer);
memset(&sc_args, 0, sizeof(grpc_subchannel_args));
grpc_arg addr_arg =
grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
grpc_channel_args* new_args = grpc_channel_args_copy_and_add_and_remove(
&args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg, 1);
gpr_free(addr_arg.value.string);
sc_args.args = new_args;
grpc_subchannel* subchannel = grpc_client_channel_factory_create_subchannel(
client_channel_factory, &sc_args);
grpc_channel_args_destroy(new_args);
if (subchannel == nullptr) {
// Subchannel could not be created.
if (tracer_->enabled()) {
char* address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
gpr_log(GPR_INFO,
"[%s %p] could not create subchannel for address uri %s, "
"ignoring",
tracer_->name(), policy_, address_uri);
gpr_free(address_uri);
}
continue;
}
if (tracer_->enabled()) {
char* address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
gpr_log(GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR
": Created subchannel %p for address uri %s",
tracer_->name(), policy_, this, subchannels_.size(), subchannel,
address_uri);
gpr_free(address_uri);
}
subchannels_.emplace_back(static_cast<SubchannelListType*>(this),
addresses->user_data_vtable,
addresses->addresses[i], subchannel, combiner);
}
}
template <typename SubchannelListType, typename SubchannelDataType>
SubchannelList<SubchannelListType, SubchannelDataType>::~SubchannelList() {
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "[%s %p] Destroying subchannel_list %p", tracer_->name(),
policy_, this);
}
GRPC_COMBINER_UNREF(combiner_, "subchannel_list");
}
template <typename SubchannelListType, typename SubchannelDataType>
void SubchannelList<SubchannelListType, SubchannelDataType>::ShutdownLocked() {
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "[%s %p] Shutting down subchannel_list %p",
tracer_->name(), policy_, this);
}
GPR_ASSERT(!shutting_down_);
shutting_down_ = true;
for (size_t i = 0; i < subchannels_.size(); i++) {
SubchannelDataType* sd = &subchannels_[i];
sd->ShutdownLocked();
}
}
} // namespace grpc_core
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_SUBCHANNEL_LIST_H */

@ -60,6 +60,10 @@ class ClientChannelMethodParams : public RefCounted<ClientChannelMethodParams> {
template <typename T, typename... Args>
friend T* grpc_core::New(Args&&... args);
// So Delete() can call our private dtor.
template <typename T>
friend void grpc_core::Delete(T*);
ClientChannelMethodParams() {}
virtual ~ClientChannelMethodParams() {}

@ -105,6 +105,10 @@ class Resolver : public InternallyRefCountedWithTracing<Resolver> {
GRPC_ABSTRACT_BASE_CLASS
protected:
// So Delete() can access our protected dtor.
template <typename T>
friend void Delete(T*);
/// Does NOT take ownership of the reference to \a combiner.
// TODO(roth): Once we have a C++-like interface for combiners, this
// API should change to take a RefCountedPtr<>, so that we always take

@ -363,6 +363,15 @@ void AresDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
}
void AresDnsResolver::MaybeStartResolvingLocked() {
// If there is an existing timer, the time it fires is the earliest time we
// can start the next resolution.
if (have_next_resolution_timer_) {
// TODO(dgq): remove the following two lines once Pick First stops
// discarding subchannels after selecting.
++resolved_version_;
MaybeFinishNextLocked();
return;
}
if (last_resolution_timestamp_ >= 0) {
const grpc_millis earliest_next_resolution =
last_resolution_timestamp_ + min_time_between_resolutions_;
@ -375,7 +384,6 @@ void AresDnsResolver::MaybeStartResolvingLocked() {
"In cooldown from last resolution (from %" PRId64
" ms ago). Will resolve again in %" PRId64 " ms",
last_resolution_ago, ms_until_next_resolution);
if (!have_next_resolution_timer_) {
have_next_resolution_timer_ = true;
// TODO(roth): We currently deal with this ref manually. Once the
// new closure API is done, find a way to track this ref with the timer
@ -385,7 +393,6 @@ void AresDnsResolver::MaybeStartResolvingLocked() {
self.release();
grpc_timer_init(&next_resolution_timer_, ms_until_next_resolution,
&on_next_resolution_);
}
// TODO(dgq): remove the following two lines once Pick First stops
// discarding subchannels after selecting.
++resolved_version_;
@ -397,6 +404,7 @@ void AresDnsResolver::MaybeStartResolvingLocked() {
}
void AresDnsResolver::StartResolvingLocked() {
gpr_log(GPR_DEBUG, "Start resolving.");
// TODO(roth): We currently deal with this ref manually. Once the
// new closure API is done, find a way to track this ref with the timer
// callback as part of the type system.

@ -153,7 +153,10 @@ static void grpc_ares_request_unref(grpc_ares_request* r) {
/* If there are no pending queries, invoke on_done callback and destroy the
request */
if (gpr_unref(&r->pending_queries)) {
grpc_cares_wrapper_address_sorting_sort(*(r->lb_addrs_out));
grpc_lb_addresses* lb_addrs = *(r->lb_addrs_out);
if (lb_addrs != nullptr) {
grpc_cares_wrapper_address_sorting_sort(lb_addrs);
}
GRPC_CLOSURE_SCHED(r->on_done, r->error);
gpr_mu_destroy(&r->mu);
grpc_ares_ev_driver_destroy(r->ev_driver);

@ -236,6 +236,15 @@ void NativeDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
}
void NativeDnsResolver::MaybeStartResolvingLocked() {
// If there is an existing timer, the time it fires is the earliest time we
// can start the next resolution.
if (have_next_resolution_timer_) {
// TODO(dgq): remove the following two lines once Pick First stops
// discarding subchannels after selecting.
++resolved_version_;
MaybeFinishNextLocked();
return;
}
if (last_resolution_timestamp_ >= 0) {
const grpc_millis earliest_next_resolution =
last_resolution_timestamp_ + min_time_between_resolutions_;
@ -248,7 +257,6 @@ void NativeDnsResolver::MaybeStartResolvingLocked() {
"In cooldown from last resolution (from %" PRId64
" ms ago). Will resolve again in %" PRId64 " ms",
last_resolution_ago, ms_until_next_resolution);
if (!have_next_resolution_timer_) {
have_next_resolution_timer_ = true;
// TODO(roth): We currently deal with this ref manually. Once the
// new closure API is done, find a way to track this ref with the timer
@ -258,7 +266,6 @@ void NativeDnsResolver::MaybeStartResolvingLocked() {
self.release();
grpc_timer_init(&next_resolution_timer_, ms_until_next_resolution,
&on_next_resolution_);
}
// TODO(dgq): remove the following two lines once Pick First stops
// discarding subchannels after selecting.
++resolved_version_;
@ -270,6 +277,7 @@ void NativeDnsResolver::MaybeStartResolvingLocked() {
}
void NativeDnsResolver::StartResolvingLocked() {
gpr_log(GPR_DEBUG, "Start resolving.");
// TODO(roth): We currently deal with this ref manually. Once the
// new closure API is done, find a way to track this ref with the timer
// callback as part of the type system.

@ -42,6 +42,10 @@ class ServerRetryThrottleData : public RefCounted<ServerRetryThrottleData> {
intptr_t milli_token_ratio() const { return milli_token_ratio_; }
private:
// So Delete() can call our private dtor.
template <typename T>
friend void grpc_core::Delete(T*);
~ServerRetryThrottleData();
void GetReplacementThrottleDataIfNeeded(

@ -234,7 +234,7 @@ static void finish_send_message(grpc_call_element* elem) {
static_cast<float>(before_size);
GPR_ASSERT(grpc_message_compression_algorithm_name(
calld->message_compression_algorithm, &algo_name));
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"Compressed[%s] %" PRIuPTR " bytes vs. %" PRIuPTR
" bytes (%.2f%% savings)",
algo_name, before_size, after_size, 100 * savings_ratio);
@ -246,7 +246,7 @@ static void finish_send_message(grpc_call_element* elem) {
const char* algo_name;
GPR_ASSERT(grpc_message_compression_algorithm_name(
calld->message_compression_algorithm, &algo_name));
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"Algorithm '%s' enabled but decided not to compress. Input size: "
"%" PRIuPTR,
algo_name, calld->slices.length);

@ -807,7 +807,7 @@ static const char* write_state_name(grpc_chttp2_write_state st) {
static void set_write_state(grpc_chttp2_transport* t,
grpc_chttp2_write_state st, const char* reason) {
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_DEBUG, "W:%p %s state %s -> %s [%s]", t,
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "W:%p %s state %s -> %s [%s]", t,
t->is_client ? "CLIENT" : "SERVER",
write_state_name(t->write_state),
write_state_name(st), reason));
@ -1072,7 +1072,7 @@ void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t,
uint32_t goaway_error,
grpc_slice goaway_text) {
// GRPC_CHTTP2_IF_TRACING(
// gpr_log(GPR_DEBUG, "got goaway [%d]: %s", goaway_error, msg));
// gpr_log(GPR_INFO, "got goaway [%d]: %s", goaway_error, msg));
// Discard the error from a previous goaway frame (if any)
if (t->goaway_error != GRPC_ERROR_NONE) {
@ -1118,7 +1118,7 @@ static void maybe_start_some_streams(grpc_chttp2_transport* t) {
grpc_chttp2_list_pop_waiting_for_concurrency(t, &s)) {
/* safe since we can't (legally) be parsing this stream yet */
GRPC_CHTTP2_IF_TRACING(gpr_log(
GPR_DEBUG, "HTTP:%s: Allocating new grpc_chttp2_stream %p to id %d",
GPR_INFO, "HTTP:%s: Allocating new grpc_chttp2_stream %p to id %d",
t->is_client ? "CLI" : "SVR", s, t->next_stream_id));
GPR_ASSERT(s->id == 0);
@ -1183,7 +1183,7 @@ void grpc_chttp2_complete_closure_step(grpc_chttp2_transport* t,
if (grpc_http_trace.enabled()) {
const char* errstr = grpc_error_string(error);
gpr_log(
GPR_DEBUG,
GPR_INFO,
"complete_closure_step: t=%p %p refs=%d flags=0x%04x desc=%s err=%s "
"write_state=%s",
t, closure,
@ -1336,7 +1336,7 @@ static void perform_stream_op_locked(void* stream_op,
if (grpc_http_trace.enabled()) {
char* str = grpc_transport_stream_op_batch_string(op);
gpr_log(GPR_DEBUG, "perform_stream_op_locked: %s; on_complete = %p", str,
gpr_log(GPR_INFO, "perform_stream_op_locked: %s; on_complete = %p", str,
op->on_complete);
gpr_free(str);
if (op->send_initial_metadata) {
@ -1638,7 +1638,7 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
if (grpc_http_trace.enabled()) {
char* str = grpc_transport_stream_op_batch_string(op);
gpr_log(GPR_DEBUG, "perform_stream_op[s=%p]: %s", s, str);
gpr_log(GPR_INFO, "perform_stream_op[s=%p]: %s", s, str);
gpr_free(str);
}
@ -2529,7 +2529,7 @@ static void schedule_bdp_ping_locked(grpc_chttp2_transport* t) {
static void start_bdp_ping_locked(void* tp, grpc_error* error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
if (grpc_http_trace.enabled()) {
gpr_log(GPR_DEBUG, "%s: Start BDP ping err=%s", t->peer_string,
gpr_log(GPR_INFO, "%s: Start BDP ping err=%s", t->peer_string,
grpc_error_string(error));
}
/* Reset the keepalive ping timer */
@ -2542,7 +2542,7 @@ static void start_bdp_ping_locked(void* tp, grpc_error* error) {
static void finish_bdp_ping_locked(void* tp, grpc_error* error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
if (grpc_http_trace.enabled()) {
gpr_log(GPR_DEBUG, "%s: Complete BDP ping err=%s", t->peer_string,
gpr_log(GPR_INFO, "%s: Complete BDP ping err=%s", t->peer_string,
grpc_error_string(error));
}
if (error != GRPC_ERROR_NONE) {
@ -2716,8 +2716,7 @@ static void keepalive_watchdog_fired_locked(void* arg, grpc_error* error) {
static void connectivity_state_set(grpc_chttp2_transport* t,
grpc_connectivity_state state,
grpc_error* error, const char* reason) {
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_DEBUG, "set connectivity_state=%d", state));
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "set connectivity_state=%d", state));
grpc_connectivity_state_set(&t->channel_callback.state_tracker, state, error,
reason);
}
@ -2984,7 +2983,7 @@ static void benign_reclaimer_locked(void* arg, grpc_error* error) {
/* Channel with no active streams: send a goaway to try and make it
* disconnect cleanly */
if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "HTTP2: %s - send goaway to free memory",
gpr_log(GPR_INFO, "HTTP2: %s - send goaway to free memory",
t->peer_string);
}
send_goaway(t,
@ -2992,7 +2991,7 @@ static void benign_reclaimer_locked(void* arg, grpc_error* error) {
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Buffers full"),
GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_ENHANCE_YOUR_CALM));
} else if (error == GRPC_ERROR_NONE && grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"HTTP2: %s - skip benign reclamation, there are still %" PRIdPTR
" streams",
t->peer_string, grpc_chttp2_stream_map_size(&t->stream_map));
@ -3013,7 +3012,7 @@ static void destructive_reclaimer_locked(void* arg, grpc_error* error) {
grpc_chttp2_stream* s = static_cast<grpc_chttp2_stream*>(
grpc_chttp2_stream_map_rand(&t->stream_map));
if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "HTTP2: %s - abandon stream id %d", t->peer_string,
gpr_log(GPR_INFO, "HTTP2: %s - abandon stream id %d", t->peer_string,
s->id);
}
grpc_chttp2_cancel_stream(

@ -217,14 +217,14 @@ grpc_error* grpc_chttp2_settings_parser_parse(void* p, grpc_chttp2_transport* t,
t->initial_window_update += static_cast<int64_t>(parser->value) -
parser->incoming_settings[id];
if (grpc_http_trace.enabled() || grpc_flowctl_trace.enabled()) {
gpr_log(GPR_DEBUG, "%p[%s] adding %d for initial_window change",
t, t->is_client ? "cli" : "svr",
gpr_log(GPR_INFO, "%p[%s] adding %d for initial_window change", t,
t->is_client ? "cli" : "svr",
static_cast<int>(t->initial_window_update));
}
}
parser->incoming_settings[id] = parser->value;
if (grpc_http_trace.enabled()) {
gpr_log(GPR_DEBUG, "CHTTP2:%s:%s: got setting %s = %d",
gpr_log(GPR_INFO, "CHTTP2:%s:%s: got setting %s = %d",
t->is_client ? "CLI" : "SVR", t->peer_string, sp->name,
parser->value);
}

@ -470,7 +470,7 @@ static void hpack_enc(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
v = grpc_slice_to_c_string(GRPC_MDVALUE(elem));
}
gpr_log(
GPR_DEBUG,
GPR_INFO,
"Encode: '%s: %s', elem_interned=%d [%d], k_interned=%d, v_interned=%d",
k, v, GRPC_MDELEM_IS_INTERNED(elem), GRPC_MDELEM_STORAGE(elem),
grpc_slice_is_interned(GRPC_MDKEY(elem)),
@ -654,7 +654,7 @@ void grpc_chttp2_hpack_compressor_set_max_table_size(
}
c->advertise_table_size_change = 1;
if (grpc_http_trace.enabled()) {
gpr_log(GPR_DEBUG, "set max table size from encoder to %d", max_table_size);
gpr_log(GPR_INFO, "set max table size from encoder to %d", max_table_size);
}
}

@ -633,7 +633,7 @@ static grpc_error* on_hdr(grpc_chttp2_hpack_parser* p, grpc_mdelem md,
v = grpc_slice_to_c_string(GRPC_MDVALUE(md));
}
gpr_log(
GPR_DEBUG,
GPR_INFO,
"Decode: '%s: %s', elem_interned=%d [%d], k_interned=%d, v_interned=%d",
k, v, GRPC_MDELEM_IS_INTERNED(md), GRPC_MDELEM_STORAGE(md),
grpc_slice_is_interned(GRPC_MDKEY(md)),

@ -247,7 +247,7 @@ void grpc_chttp2_hptbl_set_max_bytes(grpc_chttp2_hptbl* tbl,
return;
}
if (grpc_http_trace.enabled()) {
gpr_log(GPR_DEBUG, "Update hpack parser max size to %d", max_bytes);
gpr_log(GPR_INFO, "Update hpack parser max size to %d", max_bytes);
}
while (tbl->mem_used > max_bytes) {
evict1(tbl);
@ -270,7 +270,7 @@ grpc_error* grpc_chttp2_hptbl_set_current_table_size(grpc_chttp2_hptbl* tbl,
return err;
}
if (grpc_http_trace.enabled()) {
gpr_log(GPR_DEBUG, "Update hpack parser table size to %d", bytes);
gpr_log(GPR_INFO, "Update hpack parser table size to %d", bytes);
}
while (tbl->mem_used > bytes) {
evict1(tbl);

@ -68,7 +68,7 @@ static bool stream_list_pop(grpc_chttp2_transport* t,
}
*stream = s;
if (s && grpc_trace_http2_stream_state.enabled()) {
gpr_log(GPR_DEBUG, "%p[%d][%s]: pop from %s", t, s->id,
gpr_log(GPR_INFO, "%p[%d][%s]: pop from %s", t, s->id,
t->is_client ? "cli" : "svr", stream_list_id_string(id));
}
return s != nullptr;
@ -90,7 +90,7 @@ static void stream_list_remove(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
t->lists[id].tail = s->links[id].prev;
}
if (grpc_trace_http2_stream_state.enabled()) {
gpr_log(GPR_DEBUG, "%p[%d][%s]: remove from %s", t, s->id,
gpr_log(GPR_INFO, "%p[%d][%s]: remove from %s", t, s->id,
t->is_client ? "cli" : "svr", stream_list_id_string(id));
}
}
@ -122,7 +122,7 @@ static void stream_list_add_tail(grpc_chttp2_transport* t,
t->lists[id].tail = s;
s->included[id] = 1;
if (grpc_trace_http2_stream_state.enabled()) {
gpr_log(GPR_DEBUG, "%p[%d][%s]: add to %s", t, s->id,
gpr_log(GPR_INFO, "%p[%d][%s]: add to %s", t, s->id,
t->is_client ? "cli" : "svr", stream_list_id_string(id));
}
}

@ -52,7 +52,7 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_INFLIGHT])) {
/* ping already in-flight: wait */
if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) {
gpr_log(GPR_DEBUG, "%s: Ping delayed [%p]: already pinging",
gpr_log(GPR_INFO, "%s: Ping delayed [%p]: already pinging",
t->is_client ? "CLIENT" : "SERVER", t->peer_string);
}
return;
@ -61,7 +61,7 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
t->ping_policy.max_pings_without_data != 0) {
/* need to receive something of substance before sending a ping again */
if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) {
gpr_log(GPR_DEBUG, "%s: Ping delayed [%p]: too many recent pings: %d/%d",
gpr_log(GPR_INFO, "%s: Ping delayed [%p]: too many recent pings: %d/%d",
t->is_client ? "CLIENT" : "SERVER", t->peer_string,
t->ping_state.pings_before_data_required,
t->ping_policy.max_pings_without_data);
@ -81,7 +81,7 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
if (next_allowed_ping > now) {
/* not enough elapsed time between successive pings */
if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"%s: Ping delayed [%p]: not enough time elapsed since last ping. "
" Last ping %f: Next ping %f: Now %f",
t->is_client ? "CLIENT" : "SERVER", t->peer_string,
@ -107,7 +107,7 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
GRPC_STATS_INC_HTTP2_PINGS_SENT();
t->ping_state.last_ping_sent_time = now;
if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) {
gpr_log(GPR_DEBUG, "%s: Ping sent [%p]: %d/%d",
gpr_log(GPR_INFO, "%s: Ping sent [%p]: %d/%d",
t->is_client ? "CLIENT" : "SERVER", t->peer_string,
t->ping_state.pings_before_data_required,
t->ping_policy.max_pings_without_data);
@ -401,7 +401,7 @@ class StreamWriteContext {
StreamWriteContext(WriteContext* write_context, grpc_chttp2_stream* s)
: write_context_(write_context), t_(write_context->transport()), s_(s) {
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_DEBUG, "W:%p %s[%d] im-(sent,send)=(%d,%d) announce=%d", t_,
gpr_log(GPR_INFO, "W:%p %s[%d] im-(sent,send)=(%d,%d) announce=%d", t_,
t_->is_client ? "CLIENT" : "SERVER", s->id,
s->sent_initial_metadata, s->send_initial_metadata != nullptr,
(int)(s->flow_control->local_window_delta() -

@ -125,12 +125,12 @@ static bool cancel_stream_locked(inproc_stream* s, grpc_error* error);
static void op_state_machine(void* arg, grpc_error* error);
static void ref_transport(inproc_transport* t) {
INPROC_LOG(GPR_DEBUG, "ref_transport %p", t);
INPROC_LOG(GPR_INFO, "ref_transport %p", t);
gpr_ref(&t->refs);
}
static void really_destroy_transport(inproc_transport* t) {
INPROC_LOG(GPR_DEBUG, "really_destroy_transport %p", t);
INPROC_LOG(GPR_INFO, "really_destroy_transport %p", t);
grpc_connectivity_state_destroy(&t->connectivity);
if (gpr_unref(&t->mu->refs)) {
gpr_free(t->mu);
@ -139,7 +139,7 @@ static void really_destroy_transport(inproc_transport* t) {
}
static void unref_transport(inproc_transport* t) {
INPROC_LOG(GPR_DEBUG, "unref_transport %p", t);
INPROC_LOG(GPR_INFO, "unref_transport %p", t);
if (gpr_unref(&t->refs)) {
really_destroy_transport(t);
}
@ -154,17 +154,17 @@ static void unref_transport(inproc_transport* t) {
#endif
static void ref_stream(inproc_stream* s, const char* reason) {
INPROC_LOG(GPR_DEBUG, "ref_stream %p %s", s, reason);
INPROC_LOG(GPR_INFO, "ref_stream %p %s", s, reason);
STREAM_REF(s->refs, reason);
}
static void unref_stream(inproc_stream* s, const char* reason) {
INPROC_LOG(GPR_DEBUG, "unref_stream %p %s", s, reason);
INPROC_LOG(GPR_INFO, "unref_stream %p %s", s, reason);
STREAM_UNREF(s->refs, reason);
}
static void really_destroy_stream(inproc_stream* s) {
INPROC_LOG(GPR_DEBUG, "really_destroy_stream %p", s);
INPROC_LOG(GPR_INFO, "really_destroy_stream %p", s);
GRPC_ERROR_UNREF(s->write_buffer_cancel_error);
GRPC_ERROR_UNREF(s->cancel_self_error);
@ -225,7 +225,7 @@ static grpc_error* fill_in_metadata(inproc_stream* s,
static int init_stream(grpc_transport* gt, grpc_stream* gs,
grpc_stream_refcount* refcount, const void* server_data,
gpr_arena* arena) {
INPROC_LOG(GPR_DEBUG, "init_stream %p %p %p", gt, gs, server_data);
INPROC_LOG(GPR_INFO, "init_stream %p %p %p", gt, gs, server_data);
inproc_transport* t = reinterpret_cast<inproc_transport*>(gt);
inproc_stream* s = reinterpret_cast<inproc_stream*>(gs);
s->arena = arena;
@ -282,8 +282,8 @@ static int init_stream(grpc_transport* gt, grpc_stream* gs,
// Pass the client-side stream address to the server-side for a ref
ref_stream(s, "inproc_init_stream:clt"); // ref it now on behalf of server
// side to avoid destruction
INPROC_LOG(GPR_DEBUG, "calling accept stream cb %p %p",
st->accept_stream_cb, st->accept_stream_data);
INPROC_LOG(GPR_INFO, "calling accept stream cb %p %p", st->accept_stream_cb,
st->accept_stream_data);
(*st->accept_stream_cb)(st->accept_stream_data, &st->base, (void*)s);
} else {
// This is the server-side and is being called through accept_stream_cb
@ -378,7 +378,7 @@ static void complete_if_batch_end_locked(inproc_stream* s, grpc_error* error,
int is_rtm = static_cast<int>(op == s->recv_trailing_md_op);
if ((is_sm + is_stm + is_rim + is_rm + is_rtm) == 1) {
INPROC_LOG(GPR_DEBUG, "%s %p %p %p", msg, s, op, error);
INPROC_LOG(GPR_INFO, "%s %p %p %p", msg, s, op, error);
GRPC_CLOSURE_SCHED(op->on_complete, GRPC_ERROR_REF(error));
}
}
@ -393,7 +393,7 @@ static void maybe_schedule_op_closure_locked(inproc_stream* s,
}
static void fail_helper_locked(inproc_stream* s, grpc_error* error) {
INPROC_LOG(GPR_DEBUG, "op_state_machine %p fail_helper", s);
INPROC_LOG(GPR_INFO, "op_state_machine %p fail_helper", s);
// If we're failing this side, we need to make sure that
// we also send or have already sent trailing metadata
if (!s->trailing_md_sent) {
@ -458,7 +458,7 @@ static void fail_helper_locked(inproc_stream* s, grpc_error* error) {
*s->recv_initial_md_op->payload->recv_initial_metadata
.trailing_metadata_available = true;
}
INPROC_LOG(GPR_DEBUG,
INPROC_LOG(GPR_INFO,
"fail_helper %p scheduling initial-metadata-ready %p %p", s,
error, err);
GRPC_CLOSURE_SCHED(s->recv_initial_md_op->payload->recv_initial_metadata
@ -472,7 +472,7 @@ static void fail_helper_locked(inproc_stream* s, grpc_error* error) {
s->recv_initial_md_op = nullptr;
}
if (s->recv_message_op) {
INPROC_LOG(GPR_DEBUG, "fail_helper %p scheduling message-ready %p", s,
INPROC_LOG(GPR_INFO, "fail_helper %p scheduling message-ready %p", s,
error);
GRPC_CLOSURE_SCHED(
s->recv_message_op->payload->recv_message.recv_message_ready,
@ -496,9 +496,8 @@ static void fail_helper_locked(inproc_stream* s, grpc_error* error) {
s->send_trailing_md_op = nullptr;
}
if (s->recv_trailing_md_op) {
INPROC_LOG(GPR_DEBUG,
"fail_helper %p scheduling trailing-md-on-complete %p", s,
error);
INPROC_LOG(GPR_INFO, "fail_helper %p scheduling trailing-md-on-complete %p",
s, error);
complete_if_batch_end_locked(
s, error, s->recv_trailing_md_op,
"fail_helper scheduling recv-trailing-metadata-on-complete");
@ -549,7 +548,7 @@ static void message_transfer_locked(inproc_stream* sender,
receiver->recv_stream.Init(&receiver->recv_message, 0);
receiver->recv_message_op->payload->recv_message.recv_message->reset(
receiver->recv_stream.get());
INPROC_LOG(GPR_DEBUG, "message_transfer_locked %p scheduling message-ready",
INPROC_LOG(GPR_INFO, "message_transfer_locked %p scheduling message-ready",
receiver);
GRPC_CLOSURE_SCHED(
receiver->recv_message_op->payload->recv_message.recv_message_ready,
@ -577,7 +576,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
bool needs_close = false;
INPROC_LOG(GPR_DEBUG, "op_state_machine %p", arg);
INPROC_LOG(GPR_INFO, "op_state_machine %p", arg);
inproc_stream* s = static_cast<inproc_stream*>(arg);
gpr_mu* mu = &s->t->mu->mu; // keep aside in case s gets closed
gpr_mu_lock(mu);
@ -626,7 +625,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
: &other->to_read_trailing_md_filled;
if (*destfilled || s->trailing_md_sent) {
// The buffer is already in use; that's an error!
INPROC_LOG(GPR_DEBUG, "Extra trailing metadata %p", s);
INPROC_LOG(GPR_INFO, "Extra trailing metadata %p", s);
new_err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Extra trailing metadata");
fail_helper_locked(s, GRPC_ERROR_REF(new_err));
goto done;
@ -639,7 +638,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
}
s->trailing_md_sent = true;
if (!s->t->is_client && s->trailing_md_recvd && s->recv_trailing_md_op) {
INPROC_LOG(GPR_DEBUG,
INPROC_LOG(GPR_INFO,
"op_state_machine %p scheduling trailing-md-on-complete", s);
GRPC_CLOSURE_SCHED(s->recv_trailing_md_op->on_complete,
GRPC_ERROR_NONE);
@ -658,7 +657,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
new_err =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already recvd initial md");
INPROC_LOG(
GPR_DEBUG,
GPR_INFO,
"op_state_machine %p scheduling on_complete errors for already "
"recvd initial md %p",
s, new_err);
@ -684,7 +683,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
}
grpc_metadata_batch_clear(&s->to_read_initial_md);
s->to_read_initial_md_filled = false;
INPROC_LOG(GPR_DEBUG,
INPROC_LOG(GPR_INFO,
"op_state_machine %p scheduling initial-metadata-ready %p", s,
new_err);
GRPC_CLOSURE_SCHED(s->recv_initial_md_op->payload->recv_initial_metadata
@ -696,7 +695,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
s->recv_initial_md_op = nullptr;
if (new_err != GRPC_ERROR_NONE) {
INPROC_LOG(GPR_DEBUG,
INPROC_LOG(GPR_INFO,
"op_state_machine %p scheduling on_complete errors2 %p", s,
new_err);
fail_helper_locked(s, GRPC_ERROR_REF(new_err));
@ -719,7 +718,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
new_err =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already recvd trailing md");
INPROC_LOG(
GPR_DEBUG,
GPR_INFO,
"op_state_machine %p scheduling on_complete errors for already "
"recvd trailing md %p",
s, new_err);
@ -729,7 +728,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
if (s->recv_message_op != nullptr) {
// This message needs to be wrapped up because it will never be
// satisfied
INPROC_LOG(GPR_DEBUG, "op_state_machine %p scheduling message-ready", s);
INPROC_LOG(GPR_INFO, "op_state_machine %p scheduling message-ready", s);
GRPC_CLOSURE_SCHED(
s->recv_message_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
@ -764,7 +763,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
// (If the server hasn't already sent its trailing md, it doesn't have
// a final status, so don't mark this op complete)
if (s->t->is_client || s->trailing_md_sent) {
INPROC_LOG(GPR_DEBUG,
INPROC_LOG(GPR_INFO,
"op_state_machine %p scheduling trailing-md-on-complete %p",
s, new_err);
GRPC_CLOSURE_SCHED(s->recv_trailing_md_op->on_complete,
@ -772,21 +771,21 @@ static void op_state_machine(void* arg, grpc_error* error) {
s->recv_trailing_md_op = nullptr;
needs_close = true;
} else {
INPROC_LOG(GPR_DEBUG,
INPROC_LOG(GPR_INFO,
"op_state_machine %p server needs to delay handling "
"trailing-md-on-complete %p",
s, new_err);
}
} else {
INPROC_LOG(
GPR_DEBUG,
GPR_INFO,
"op_state_machine %p has trailing md but not yet waiting for it", s);
}
}
if (s->trailing_md_recvd && s->recv_message_op) {
// No further message will come on this stream, so finish off the
// recv_message_op
INPROC_LOG(GPR_DEBUG, "op_state_machine %p scheduling message-ready", s);
INPROC_LOG(GPR_INFO, "op_state_machine %p scheduling message-ready", s);
GRPC_CLOSURE_SCHED(
s->recv_message_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
@ -810,7 +809,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
// Didn't get the item we wanted so we still need to get
// rescheduled
INPROC_LOG(
GPR_DEBUG, "op_state_machine %p still needs closure %p %p %p %p %p", s,
GPR_INFO, "op_state_machine %p still needs closure %p %p %p %p %p", s,
s->send_message_op, s->send_trailing_md_op, s->recv_initial_md_op,
s->recv_message_op, s->recv_trailing_md_op);
s->ops_needed = true;
@ -826,8 +825,7 @@ done:
static bool cancel_stream_locked(inproc_stream* s, grpc_error* error) {
bool ret = false; // was the cancel accepted
INPROC_LOG(GPR_DEBUG, "cancel_stream %p with %s", s,
grpc_error_string(error));
INPROC_LOG(GPR_INFO, "cancel_stream %p with %s", s, grpc_error_string(error));
if (s->cancel_self_error == GRPC_ERROR_NONE) {
ret = true;
s->cancel_self_error = GRPC_ERROR_REF(error);
@ -877,7 +875,7 @@ static bool cancel_stream_locked(inproc_stream* s, grpc_error* error) {
static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
grpc_transport_stream_op_batch* op) {
INPROC_LOG(GPR_DEBUG, "perform_stream_op %p %p %p", gt, gs, op);
INPROC_LOG(GPR_INFO, "perform_stream_op %p %p %p", gt, gs, op);
inproc_stream* s = reinterpret_cast<inproc_stream*>(gs);
gpr_mu* mu = &s->t->mu->mu; // save aside in case s gets closed
gpr_mu_lock(mu);
@ -907,7 +905,7 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
// already self-canceled so still give it an error
error = GRPC_ERROR_REF(s->cancel_self_error);
} else {
INPROC_LOG(GPR_DEBUG, "perform_stream_op %p %s%s%s%s%s%s%s", s,
INPROC_LOG(GPR_INFO, "perform_stream_op %p %s%s%s%s%s%s%s", s,
s->t->is_client ? "client" : "server",
op->send_initial_metadata ? " send_initial_metadata" : "",
op->send_message ? " send_message" : "",
@ -936,7 +934,7 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
: &other->to_read_initial_md_filled;
if (*destfilled || s->initial_md_sent) {
// The buffer is already in use; that's an error!
INPROC_LOG(GPR_DEBUG, "Extra initial metadata %p", s);
INPROC_LOG(GPR_INFO, "Extra initial metadata %p", s);
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Extra initial metadata");
} else {
if (!other || !other->closed) {
@ -1013,7 +1011,7 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
true;
}
INPROC_LOG(
GPR_DEBUG,
GPR_INFO,
"perform_stream_op error %p scheduling initial-metadata-ready %p",
s, error);
GRPC_CLOSURE_SCHED(
@ -1022,14 +1020,14 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
}
if (op->recv_message) {
INPROC_LOG(
GPR_DEBUG,
GPR_INFO,
"perform_stream_op error %p scheduling recv message-ready %p", s,
error);
GRPC_CLOSURE_SCHED(op->payload->recv_message.recv_message_ready,
GRPC_ERROR_REF(error));
}
}
INPROC_LOG(GPR_DEBUG, "perform_stream_op %p scheduling on_complete %p", s,
INPROC_LOG(GPR_INFO, "perform_stream_op %p scheduling on_complete %p", s,
error);
GRPC_CLOSURE_SCHED(on_complete, GRPC_ERROR_REF(error));
}
@ -1042,7 +1040,7 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
}
static void close_transport_locked(inproc_transport* t) {
INPROC_LOG(GPR_DEBUG, "close_transport %p %d", t, t->is_closed);
INPROC_LOG(GPR_INFO, "close_transport %p %d", t, t->is_closed);
grpc_connectivity_state_set(
&t->connectivity, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Closing transport."),
@ -1063,7 +1061,7 @@ static void close_transport_locked(inproc_transport* t) {
static void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) {
inproc_transport* t = reinterpret_cast<inproc_transport*>(gt);
INPROC_LOG(GPR_DEBUG, "perform_transport_op %p %p", t, op);
INPROC_LOG(GPR_INFO, "perform_transport_op %p %p", t, op);
gpr_mu_lock(&t->mu->mu);
if (op->on_connectivity_state_change) {
grpc_connectivity_state_notify_on_state_change(
@ -1096,7 +1094,7 @@ static void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) {
static void destroy_stream(grpc_transport* gt, grpc_stream* gs,
grpc_closure* then_schedule_closure) {
INPROC_LOG(GPR_DEBUG, "destroy_stream %p %p", gs, then_schedule_closure);
INPROC_LOG(GPR_INFO, "destroy_stream %p %p", gs, then_schedule_closure);
inproc_stream* s = reinterpret_cast<inproc_stream*>(gs);
s->closure_at_destroy = then_schedule_closure;
really_destroy_stream(s);
@ -1104,7 +1102,7 @@ static void destroy_stream(grpc_transport* gt, grpc_stream* gs,
static void destroy_transport(grpc_transport* gt) {
inproc_transport* t = reinterpret_cast<inproc_transport*>(gt);
INPROC_LOG(GPR_DEBUG, "destroy_transport %p", t);
INPROC_LOG(GPR_INFO, "destroy_transport %p", t);
gpr_mu_lock(&t->mu->mu);
close_transport_locked(t);
gpr_mu_unlock(&t->mu->mu);
@ -1165,7 +1163,7 @@ static void inproc_transports_create(grpc_transport** server_transport,
const grpc_channel_args* server_args,
grpc_transport** client_transport,
const grpc_channel_args* client_args) {
INPROC_LOG(GPR_DEBUG, "inproc_transports_create");
INPROC_LOG(GPR_INFO, "inproc_transports_create");
inproc_transport* st =
static_cast<inproc_transport*>(gpr_zalloc(sizeof(*st)));
inproc_transport* ct =

@ -411,3 +411,31 @@ grpc_arg grpc_channel_arg_pointer_create(
arg.value.pointer.vtable = vtable;
return arg;
}
char* grpc_channel_args_string(const grpc_channel_args* args) {
if (args == nullptr) return nullptr;
gpr_strvec v;
gpr_strvec_init(&v);
for (size_t i = 0; i < args->num_args; ++i) {
const grpc_arg& arg = args->args[i];
char* s;
switch (arg.type) {
case GRPC_ARG_INTEGER:
gpr_asprintf(&s, "%s=%d", arg.key, arg.value.integer);
break;
case GRPC_ARG_STRING:
gpr_asprintf(&s, "%s=%s", arg.key, arg.value.string);
break;
case GRPC_ARG_POINTER:
gpr_asprintf(&s, "%s=%p", arg.key, arg.value.pointer.p);
break;
default:
gpr_asprintf(&s, "arg with unknown type");
}
gpr_strvec_add(&v, s);
}
char* result =
gpr_strjoin_sep(const_cast<const char**>(v.strs), v.count, ", ", nullptr);
gpr_strvec_destroy(&v);
return result;
}

@ -124,4 +124,8 @@ grpc_arg grpc_channel_arg_integer_create(char* name, int value);
grpc_arg grpc_channel_arg_pointer_create(char* name, void* value,
const grpc_arg_pointer_vtable* vtable);
// Returns a string representing channel args in human-readable form.
// Caller takes ownership of result.
char* grpc_channel_args_string(const grpc_channel_args* args);
#endif /* GRPC_CORE_LIB_CHANNEL_CHANNEL_ARGS_H */
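The new grpc_channel_args_string() helper pairs naturally with the ownership note above. A minimal usage sketch (not part of the commit; the key and value are illustrative):

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>

#include "src/core/lib/channel/channel_args.h"

void log_channel_args_example() {
  grpc_arg arg =
      grpc_channel_arg_integer_create(const_cast<char*>("test.key"), 42);
  grpc_channel_args args = {1, &arg};
  char* str = grpc_channel_args_string(&args);
  gpr_log(GPR_INFO, "channel args: %s", str);
  gpr_free(str);  // the caller owns the returned string
}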

@ -22,11 +22,15 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/handshaker.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/timer.h"
grpc_core::TraceFlag grpc_handshaker_trace(false, "handshaker");
//
// grpc_handshaker
//
@ -52,6 +56,10 @@ void grpc_handshaker_do_handshake(grpc_handshaker* handshaker,
args);
}
const char* grpc_handshaker_name(grpc_handshaker* handshaker) {
return handshaker->vtable->name;
}
//
// grpc_handshake_manager
//
@ -127,6 +135,12 @@ static bool is_power_of_2(size_t n) { return (n & (n - 1)) == 0; }
void grpc_handshake_manager_add(grpc_handshake_manager* mgr,
grpc_handshaker* handshaker) {
if (grpc_handshaker_trace.enabled()) {
gpr_log(
GPR_INFO,
"handshake_manager %p: adding handshaker %s [%p] at index %" PRIuPTR,
mgr, grpc_handshaker_name(handshaker), handshaker, mgr->count);
}
gpr_mu_lock(&mgr->mu);
// To avoid allocating memory for each handshaker we add, we double
// the number of elements every time we need more.
@ -172,23 +186,56 @@ void grpc_handshake_manager_shutdown(grpc_handshake_manager* mgr,
GRPC_ERROR_UNREF(why);
}
static char* handshaker_args_string(grpc_handshaker_args* args) {
char* args_str = grpc_channel_args_string(args->args);
size_t num_args = args->args != nullptr ? args->args->num_args : 0;
size_t read_buffer_length =
args->read_buffer != nullptr ? args->read_buffer->length : 0;
char* str;
gpr_asprintf(&str,
"{endpoint=%p, args=%p {size=%" PRIuPTR
": %s}, read_buffer=%p (length=%" PRIuPTR "), exit_early=%d}",
args->endpoint, args->args, num_args, args_str,
args->read_buffer, read_buffer_length, args->exit_early);
gpr_free(args_str);
return str;
}
// Helper function to call either the next handshaker or the
// on_handshake_done callback.
// Returns true if we've scheduled the on_handshake_done callback.
static bool call_next_handshaker_locked(grpc_handshake_manager* mgr,
grpc_error* error) {
if (grpc_handshaker_trace.enabled()) {
char* args_str = handshaker_args_string(&mgr->args);
gpr_log(GPR_INFO,
"handshake_manager %p: error=%s shutdown=%d index=%" PRIuPTR
", args=%s",
mgr, grpc_error_string(error), mgr->shutdown, mgr->index, args_str);
gpr_free(args_str);
}
GPR_ASSERT(mgr->index <= mgr->count);
// If we got an error or we've been shut down or we're exiting early or
// we've finished the last handshaker, invoke the on_handshake_done
// callback. Otherwise, call the next handshaker.
if (error != GRPC_ERROR_NONE || mgr->shutdown || mgr->args.exit_early ||
mgr->index == mgr->count) {
if (grpc_handshaker_trace.enabled()) {
gpr_log(GPR_INFO, "handshake_manager %p: handshaking complete", mgr);
}
// Cancel deadline timer, since we're invoking the on_handshake_done
// callback now.
grpc_timer_cancel(&mgr->deadline_timer);
GRPC_CLOSURE_SCHED(&mgr->on_handshake_done, error);
mgr->shutdown = true;
} else {
if (grpc_handshaker_trace.enabled()) {
gpr_log(
GPR_INFO,
"handshake_manager %p: calling handshaker %s [%p] at index %" PRIuPTR,
mgr, grpc_handshaker_name(mgr->handshakers[mgr->index]),
mgr->handshakers[mgr->index], mgr->index);
}
grpc_handshaker_do_handshake(mgr->handshakers[mgr->index], mgr->acceptor,
&mgr->call_next_handshaker, &mgr->args);
}
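grpc_handshake_manager_add's comment notes that the handshaker array doubles whenever more space is needed, with is_power_of_2 detecting when the current count has filled the implicit capacity. A standalone sketch of that growth strategy (a simplified assumption, not the gRPC implementation):

#include <stddef.h>

#include <grpc/support/alloc.h>

static bool is_power_of_2(size_t n) { return (n & (n - 1)) == 0; }

// Capacity is implicitly the next power of two at or above `count`, so a
// reallocation is needed exactly when `count` hits a power of two.
static void add_item(void*** items, size_t* count, void* item) {
  if (*count == 0) {
    *items = static_cast<void**>(gpr_malloc(2 * sizeof(void*)));
  } else if (*count >= 2 && is_power_of_2(*count)) {
    *items = static_cast<void**>(
        gpr_realloc(*items, 2 * *count * sizeof(void*)));
  }
  (*items)[(*count)++] = item;
}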

@ -84,6 +84,9 @@ typedef struct {
grpc_tcp_server_acceptor* acceptor,
grpc_closure* on_handshake_done,
grpc_handshaker_args* args);
/// The name of the handshaker, for debugging purposes.
const char* name;
} grpc_handshaker_vtable;
/// Base struct. To subclass, make this the first member of the
@ -102,6 +105,7 @@ void grpc_handshaker_do_handshake(grpc_handshaker* handshaker,
grpc_tcp_server_acceptor* acceptor,
grpc_closure* on_handshake_done,
grpc_handshaker_args* args);
const char* grpc_handshaker_name(grpc_handshaker* handshaker);
///
/// grpc_handshake_manager
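With the new name field in grpc_handshaker_vtable, each handshaker supplies a label that grpc_handshaker_name() returns and the handshake manager's new trace logging prints. A hypothetical registration sketch (only do_handshake's parameter list appears in the hunk above; the other members, their order, and their signatures are assumptions):

#include "src/core/lib/channel/handshaker.h"

static void my_destroy(grpc_handshaker* handshaker) {}

static void my_shutdown(grpc_handshaker* handshaker, grpc_error* why) {
  GRPC_ERROR_UNREF(why);
}

static void my_do_handshake(grpc_handshaker* handshaker,
                            grpc_tcp_server_acceptor* acceptor,
                            grpc_closure* on_handshake_done,
                            grpc_handshaker_args* args) {
  // A no-op handshaker: report success immediately.
  GRPC_CLOSURE_SCHED(on_handshake_done, GRPC_ERROR_NONE);
}

static const grpc_handshaker_vtable my_handshaker_vtable = {
    my_destroy, my_shutdown, my_do_handshake, "my_handshaker"};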

@ -57,13 +57,22 @@ class TraceFlag {
const char* name() const { return name_; }
// The following define may be uncommented to ensure that the compiler
// deletes any "if (tracer.enabled()) {...}" codeblocks. This is useful to
// test the performance impact tracers have on the system.
//
// #define COMPILE_OUT_ALL_TRACERS_IN_OPT_BUILD
#ifdef COMPILE_OUT_ALL_TRACERS_IN_OPT_BUILD
bool enabled() { return false; }
#else
bool enabled() {
#ifdef GRPC_THREADSAFE_TRACER
return gpr_atm_no_barrier_load(&value_) != 0;
#else
return value_;
#endif
#endif // GRPC_THREADSAFE_TRACER
}
#endif // COMPILE_OUT_ALL_TRACERS_IN_OPT_BUILD
private:
friend void grpc_core::testing::grpc_tracer_enable_flag(TraceFlag* flag);
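The #ifdef above makes enabled() a constant false whenever COMPILE_OUT_ALL_TRACERS_IN_OPT_BUILD is defined, so blocks guarded on a tracer become dead code the optimizer can remove. For reference, the declare-and-guard pattern used by the call sites throughout this commit (flag and function names are illustrative):

#include <grpc/support/log.h>

#include "src/core/lib/debug/trace.h"

grpc_core::TraceFlag my_component_trace(false, "my_component");

void do_something(int value) {
  if (my_component_trace.enabled()) {
    // With the constant-false variant of enabled(), this block is eliminated.
    gpr_log(GPR_INFO, "doing something: %d", value);
  }
}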

@ -100,7 +100,7 @@ class InternallyRefCounted : public Orphanable {
void Unref() {
if (gpr_unref(&refs_)) {
Delete(this);
Delete(static_cast<Child*>(this));
}
}
@ -159,7 +159,7 @@ class InternallyRefCountedWithTracing : public Orphanable {
const char* reason) GRPC_MUST_USE_RESULT {
if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count);
gpr_log(GPR_DEBUG, "%s:%p %s:%d ref %" PRIdPTR " -> %" PRIdPTR " %s",
gpr_log(GPR_INFO, "%s:%p %s:%d ref %" PRIdPTR " -> %" PRIdPTR " %s",
trace_flag_->name(), this, location.file(), location.line(),
old_refs, old_refs + 1, reason);
}
@ -173,14 +173,14 @@ class InternallyRefCountedWithTracing : public Orphanable {
void Unref() {
if (gpr_unref(&refs_)) {
Delete(this);
Delete(static_cast<Child*>(this));
}
}
void Unref(const DebugLocation& location, const char* reason) {
if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count);
gpr_log(GPR_DEBUG, "%s:%p %s:%d unref %" PRIdPTR " -> %" PRIdPTR " %s",
gpr_log(GPR_INFO, "%s:%p %s:%d unref %" PRIdPTR " -> %" PRIdPTR " %s",
trace_flag_->name(), this, location.file(), location.line(),
old_refs, old_refs - 1, reason);
}

@ -54,7 +54,7 @@ class RefCounted {
// friend of this class.
void Unref() {
if (gpr_unref(&refs_)) {
Delete(this);
Delete(static_cast<Child*>(this));
}
}
@ -100,7 +100,7 @@ class RefCountedWithTracing {
const char* reason) GRPC_MUST_USE_RESULT {
if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count);
gpr_log(GPR_DEBUG, "%s:%p %s:%d ref %" PRIdPTR " -> %" PRIdPTR " %s",
gpr_log(GPR_INFO, "%s:%p %s:%d ref %" PRIdPTR " -> %" PRIdPTR " %s",
trace_flag_->name(), this, location.file(), location.line(),
old_refs, old_refs + 1, reason);
}
@ -114,14 +114,14 @@ class RefCountedWithTracing {
void Unref() {
if (gpr_unref(&refs_)) {
Delete(this);
Delete(static_cast<Child*>(this));
}
}
void Unref(const DebugLocation& location, const char* reason) {
if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count);
gpr_log(GPR_DEBUG, "%s:%p %s:%d unref %" PRIdPTR " -> %" PRIdPTR " %s",
gpr_log(GPR_INFO, "%s:%p %s:%d unref %" PRIdPTR " -> %" PRIdPTR " %s",
trace_flag_->name(), this, location.file(), location.line(),
old_refs, old_refs - 1, reason);
}
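The Delete(this) -> Delete(static_cast<Child*>(this)) changes in orphanable.h and ref_counted.h matter because these bases are parameterized on the derived type: deleting through Child* destroys the object as its actual type, with the correct destructor and size, without requiring a virtual destructor. A stripped-down sketch of the idea (an illustration of the pattern, not the gRPC classes):

// CRTP base: Unref() deletes through the derived type, so ~Foo() runs even
// though the base destructor is non-virtual.
template <typename Child>
class RefCountedBase {
 public:
  void Ref() { ++refs_; }
  void Unref() {
    if (--refs_ == 0) delete static_cast<Child*>(this);
  }

 protected:
  RefCountedBase() = default;
  ~RefCountedBase() = default;

 private:
  int refs_ = 1;
};

class Foo : public RefCountedBase<Foo> {
 public:
  ~Foo() { /* release Foo-specific resources */ }
};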

@ -64,7 +64,7 @@ void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
const char* reason) {
GPR_TIMER_SCOPE("call_combiner_start", 0);
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"==> grpc_call_combiner_start() [%p] closure=%p [" DEBUG_FMT_STR
"%s] error=%s",
call_combiner, closure DEBUG_FMT_ARGS, reason,
@ -73,7 +73,7 @@ void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
size_t prev_size = static_cast<size_t>(
gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)1));
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
gpr_log(GPR_INFO, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
prev_size + 1);
}
GRPC_STATS_INC_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS();
@ -82,7 +82,7 @@ void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
GPR_TIMER_MARK("call_combiner_initiate", 0);
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, " EXECUTING IMMEDIATELY");
gpr_log(GPR_INFO, " EXECUTING IMMEDIATELY");
}
// Queue was empty, so execute this closure immediately.
GRPC_CLOSURE_SCHED(closure, error);
@ -101,21 +101,21 @@ void grpc_call_combiner_stop(grpc_call_combiner* call_combiner DEBUG_ARGS,
const char* reason) {
GPR_TIMER_SCOPE("call_combiner_stop", 0);
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"==> grpc_call_combiner_stop() [%p] [" DEBUG_FMT_STR "%s]",
call_combiner DEBUG_FMT_ARGS, reason);
}
size_t prev_size = static_cast<size_t>(
gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)-1));
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
gpr_log(GPR_INFO, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
prev_size - 1);
}
GPR_ASSERT(prev_size >= 1);
if (prev_size > 1) {
while (true) {
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, " checking queue");
gpr_log(GPR_INFO, " checking queue");
}
bool empty;
grpc_closure* closure = reinterpret_cast<grpc_closure*>(
@ -124,19 +124,19 @@ void grpc_call_combiner_stop(grpc_call_combiner* call_combiner DEBUG_ARGS,
// This can happen either due to a race condition within the mpscq
// code or because of a race with grpc_call_combiner_start().
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, " queue returned no result; checking again");
gpr_log(GPR_INFO, " queue returned no result; checking again");
}
continue;
}
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, " EXECUTING FROM QUEUE: closure=%p error=%s",
gpr_log(GPR_INFO, " EXECUTING FROM QUEUE: closure=%p error=%s",
closure, grpc_error_string(closure->error_data.error));
}
GRPC_CLOSURE_SCHED(closure, closure->error_data.error);
break;
}
} else if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, " queue empty");
gpr_log(GPR_INFO, " queue empty");
}
}
@ -151,7 +151,7 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner,
// Otherwise, store the new closure.
if (original_error != GRPC_ERROR_NONE) {
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"call_combiner=%p: scheduling notify_on_cancel callback=%p "
"for pre-existing cancellation",
call_combiner, closure);
@ -162,7 +162,7 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner,
if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state,
(gpr_atm)closure)) {
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, "call_combiner=%p: setting notify_on_cancel=%p",
gpr_log(GPR_INFO, "call_combiner=%p: setting notify_on_cancel=%p",
call_combiner, closure);
}
// If we replaced an earlier closure, invoke the original
@ -171,7 +171,7 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner,
if (original_state != 0) {
closure = (grpc_closure*)original_state;
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"call_combiner=%p: scheduling old cancel callback=%p",
call_combiner, closure);
}
@ -199,7 +199,7 @@ void grpc_call_combiner_cancel(grpc_call_combiner* call_combiner,
if (original_state != 0) {
grpc_closure* notify_on_cancel = (grpc_closure*)original_state;
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"call_combiner=%p: scheduling notify_on_cancel callback=%p",
call_combiner, notify_on_cancel);
}

@ -253,8 +253,8 @@ inline void grpc_closure_run(grpc_closure* c, grpc_error* error) {
c->file_initiated = file;
c->line_initiated = line;
c->run = true;
GPR_ASSERT(c->cb != nullptr);
#endif
assert(c->cb);
c->scheduler->vtable->run(c, error);
} else {
GRPC_ERROR_UNREF(error);
@ -292,8 +292,8 @@ inline void grpc_closure_sched(grpc_closure* c, grpc_error* error) {
c->file_initiated = file;
c->line_initiated = line;
c->run = false;
GPR_ASSERT(c->cb != nullptr);
#endif
assert(c->cb);
c->scheduler->vtable->sched(c, error);
} else {
GRPC_ERROR_UNREF(error);
@ -330,8 +330,8 @@ inline void grpc_closure_list_sched(grpc_closure_list* list) {
c->file_initiated = file;
c->line_initiated = line;
c->run = false;
GPR_ASSERT(c->cb != nullptr);
#endif
assert(c->cb);
c->scheduler->vtable->sched(c, c->error_data.error);
c = next;
}

@ -83,12 +83,12 @@ grpc_combiner* grpc_combiner_create(void) {
grpc_closure_list_init(&lock->final_list);
GRPC_CLOSURE_INIT(&lock->offload, offload, lock,
grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p create", lock));
return lock;
}
static void really_destroy(grpc_combiner* lock) {
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p really_destroy", lock));
GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p really_destroy", lock));
GPR_ASSERT(gpr_atm_no_barrier_load(&lock->state) == 0);
gpr_mpscq_destroy(&lock->queue);
gpr_free(lock);
@ -97,7 +97,7 @@ static void really_destroy(grpc_combiner* lock) {
static void start_destroy(grpc_combiner* lock) {
gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -STATE_UNORPHANED);
GRPC_COMBINER_TRACE(gpr_log(
GPR_DEBUG, "C:%p really_destroy old_state=%" PRIdPTR, lock, old_state));
GPR_INFO, "C:%p really_destroy old_state=%" PRIdPTR, lock, old_state));
if (old_state == 1) {
really_destroy(lock);
}
@ -159,7 +159,7 @@ static void combiner_exec(grpc_closure* cl, grpc_error* error) {
GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS();
grpc_combiner* lock = COMBINER_FROM_CLOSURE_SCHEDULER(cl, scheduler);
gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
GRPC_COMBINER_TRACE(gpr_log(GPR_INFO,
"C:%p grpc_combiner_execute c=%p last=%" PRIdPTR,
lock, cl, last));
if (last == 1) {
@ -203,7 +203,7 @@ static void offload(void* arg, grpc_error* error) {
static void queue_offload(grpc_combiner* lock) {
GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED();
move_next();
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload", lock));
GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p queue_offload", lock));
GRPC_CLOSURE_SCHED(&lock->offload, GRPC_ERROR_NONE);
}
@ -218,7 +218,7 @@ bool grpc_combiner_continue_exec_ctx() {
bool contended =
gpr_atm_no_barrier_load(&lock->initiating_exec_ctx_or_null) == 0;
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
GRPC_COMBINER_TRACE(gpr_log(GPR_INFO,
"C:%p grpc_combiner_continue_exec_ctx "
"contended=%d "
"exec_ctx_ready_to_finish=%d "
@ -242,7 +242,7 @@ bool grpc_combiner_continue_exec_ctx() {
(gpr_atm_acq_load(&lock->state) >> 1) > 1) {
gpr_mpscq_node* n = gpr_mpscq_pop(&lock->queue);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p maybe_finish_one n=%p", lock, n));
gpr_log(GPR_INFO, "C:%p maybe_finish_one n=%p", lock, n));
if (n == nullptr) {
// queue is in an inconsistent state: use this as a cue that we should
// go off and do something else for a while (and come back later)
@ -266,7 +266,7 @@ bool grpc_combiner_continue_exec_ctx() {
while (c != nullptr) {
GPR_TIMER_SCOPE("combiner.exec_1final", 0);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p execute_final[%d] c=%p", lock, loops, c));
gpr_log(GPR_INFO, "C:%p execute_final[%d] c=%p", lock, loops, c));
grpc_closure* next = c->next_data.next;
grpc_error* error = c->error_data.error;
#ifndef NDEBUG
@ -284,7 +284,7 @@ bool grpc_combiner_continue_exec_ctx() {
gpr_atm old_state =
gpr_atm_full_fetch_add(&lock->state, -STATE_ELEM_COUNT_LOW_BIT);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p finish old_state=%" PRIdPTR, lock, old_state));
gpr_log(GPR_INFO, "C:%p finish old_state=%" PRIdPTR, lock, old_state));
// Define a macro to ease readability of the following switch statement.
#define OLD_STATE_WAS(orphaned, elem_count) \
(((orphaned) ? 0 : STATE_UNORPHANED) | \
@ -327,8 +327,8 @@ static void combiner_finally_exec(grpc_closure* closure, grpc_error* error) {
grpc_combiner* lock =
COMBINER_FROM_CLOSURE_SCHEDULER(closure, finally_scheduler);
GRPC_COMBINER_TRACE(gpr_log(
GPR_DEBUG, "C:%p grpc_combiner_execute_finally c=%p; ac=%p", lock,
closure, grpc_core::ExecCtx::Get()->combiner_data()->active_combiner));
GPR_INFO, "C:%p grpc_combiner_execute_finally c=%p; ac=%p", lock, closure,
grpc_core::ExecCtx::Get()->combiner_data()->active_combiner));
if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner != lock) {
GPR_TIMER_MARK("slowpath", 0);
GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(enqueue_finally, closure,

@ -658,7 +658,7 @@ static grpc_error* do_epoll_wait(grpc_pollset* ps, grpc_millis deadline) {
GRPC_STATS_INC_POLL_EVENTS_RETURNED(r);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "ps: %p poll got %d events", ps, r);
gpr_log(GPR_INFO, "ps: %p poll got %d events", ps, r);
}
gpr_atm_rel_store(&g_epoll_set.num_events, r);
@ -678,7 +678,7 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
pollset->begin_refs++;
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p BEGIN_STARTS:%p", pollset, worker);
gpr_log(GPR_INFO, "PS:%p BEGIN_STARTS:%p", pollset, worker);
}
if (pollset->seen_inactive) {
@ -697,7 +697,7 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
gpr_mu_lock(&neighborhood->mu);
gpr_mu_lock(&pollset->mu);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
gpr_log(GPR_INFO, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
pollset, worker, kick_state_string(worker->state),
is_reassigning);
}
@ -749,7 +749,7 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
gpr_cv_init(&worker->cv);
while (worker->state == UNKICKED && !pollset->shutting_down) {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d",
gpr_log(GPR_INFO, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d",
pollset, worker, kick_state_string(worker->state),
pollset->shutting_down);
}
@ -766,7 +766,7 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
}
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d "
"kicked_without_poller: %d",
pollset, worker, kick_state_string(worker->state),
@ -809,7 +809,7 @@ static bool check_neighborhood_for_available_poller(
if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
(gpr_atm)inspect_worker)) {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. choose next poller to be %p",
gpr_log(GPR_INFO, " .. choose next poller to be %p",
inspect_worker);
}
SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
@ -820,7 +820,7 @@ static bool check_neighborhood_for_available_poller(
}
} else {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. beaten to choose next poller");
gpr_log(GPR_INFO, " .. beaten to choose next poller");
}
}
// even if we didn't win the cas, there's a worker, we can stop
@ -838,7 +838,7 @@ static bool check_neighborhood_for_available_poller(
}
if (!found_worker) {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. mark pollset %p inactive", inspect);
gpr_log(GPR_INFO, " .. mark pollset %p inactive", inspect);
}
inspect->seen_inactive = true;
if (inspect == neighborhood->active_root) {
@ -858,7 +858,7 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
grpc_pollset_worker** worker_hdl) {
GPR_TIMER_SCOPE("end_worker", 0);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p END_WORKER:%p", pollset, worker);
gpr_log(GPR_INFO, "PS:%p END_WORKER:%p", pollset, worker);
}
if (worker_hdl != nullptr) *worker_hdl = nullptr;
/* Make sure we appear kicked */
@ -868,7 +868,7 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
if (worker->next != worker && worker->next->state == UNKICKED) {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. choose next poller to be peer %p", worker);
gpr_log(GPR_INFO, " .. choose next poller to be peer %p", worker);
}
GPR_ASSERT(worker->next->initialized_cv);
gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
@ -920,7 +920,7 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
gpr_cv_destroy(&worker->cv);
}
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. remove worker");
gpr_log(GPR_INFO, " .. remove worker");
}
if (EMPTIED == worker_remove(pollset, worker)) {
pollset_maybe_finish_shutdown(pollset);
@ -1022,7 +1022,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
pollset->kicked_without_poller = true;
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. kicked_without_poller");
gpr_log(GPR_INFO, " .. kicked_without_poller");
}
goto done;
}
@ -1030,14 +1030,14 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
if (root_worker->state == KICKED) {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. already kicked %p", root_worker);
gpr_log(GPR_INFO, " .. already kicked %p", root_worker);
}
SET_KICK_STATE(root_worker, KICKED);
goto done;
} else if (next_worker->state == KICKED) {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. already kicked %p", next_worker);
gpr_log(GPR_INFO, " .. already kicked %p", next_worker);
}
SET_KICK_STATE(next_worker, KICKED);
goto done;
@ -1048,7 +1048,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
&g_active_poller)) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. kicked %p", root_worker);
gpr_log(GPR_INFO, " .. kicked %p", root_worker);
}
SET_KICK_STATE(root_worker, KICKED);
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
@ -1056,7 +1056,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
} else if (next_worker->state == UNKICKED) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. kicked %p", next_worker);
gpr_log(GPR_INFO, " .. kicked %p", next_worker);
}
GPR_ASSERT(next_worker->initialized_cv);
SET_KICK_STATE(next_worker, KICKED);
@ -1066,7 +1066,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
if (root_worker->state != DESIGNATED_POLLER) {
if (grpc_polling_trace.enabled()) {
gpr_log(
GPR_DEBUG,
GPR_INFO,
" .. kicked root non-poller %p (initialized_cv=%d) (poller=%p)",
root_worker, root_worker->initialized_cv, next_worker);
}
@ -1079,7 +1079,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
} else {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. non-root poller %p (root=%p)", next_worker,
gpr_log(GPR_INFO, " .. non-root poller %p (root=%p)", next_worker,
root_worker);
}
SET_KICK_STATE(next_worker, KICKED);
@ -1095,7 +1095,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
} else {
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. kicked while waking up");
gpr_log(GPR_INFO, " .. kicked while waking up");
}
goto done;
}
@ -1105,14 +1105,14 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
if (specific_worker->state == KICKED) {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. specific worker already kicked");
gpr_log(GPR_INFO, " .. specific worker already kicked");
}
goto done;
} else if (gpr_tls_get(&g_current_thread_worker) ==
(intptr_t)specific_worker) {
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. mark %p kicked", specific_worker);
gpr_log(GPR_INFO, " .. mark %p kicked", specific_worker);
}
SET_KICK_STATE(specific_worker, KICKED);
goto done;
@ -1120,7 +1120,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
(grpc_pollset_worker*)gpr_atm_no_barrier_load(&g_active_poller)) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. kick active poller");
gpr_log(GPR_INFO, " .. kick active poller");
}
SET_KICK_STATE(specific_worker, KICKED);
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
@ -1128,7 +1128,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
} else if (specific_worker->initialized_cv) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. kick waiting worker");
gpr_log(GPR_INFO, " .. kick waiting worker");
}
SET_KICK_STATE(specific_worker, KICKED);
gpr_cv_signal(&specific_worker->cv);
@ -1136,7 +1136,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
} else {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. kick non-waiting worker");
gpr_log(GPR_INFO, " .. kick non-waiting worker");
}
SET_KICK_STATE(specific_worker, KICKED);
goto done;

@ -518,7 +518,7 @@ static grpc_error* pollable_add_fd(pollable* p, grpc_fd* fd) {
const int epfd = p->epfd;
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "add fd %p (%d) to pollable %p", fd, fd->fd, p);
gpr_log(GPR_INFO, "add fd %p (%d) to pollable %p", fd, fd->fd, p);
}
struct epoll_event ev_fd;
@ -560,7 +560,7 @@ static void pollset_global_shutdown(void) {
/* pollset->mu must be held while calling this function */
static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"PS:%p (pollable:%p) maybe_finish_shutdown sc=%p (target:!NULL) "
"rw=%p (target:NULL) cpsc=%d (target:0)",
pollset, pollset->active_pollable, pollset->shutdown_closure,
@ -585,14 +585,14 @@ static grpc_error* kick_one_worker(grpc_pollset_worker* specific_worker) {
GPR_ASSERT(specific_worker != nullptr);
if (specific_worker->kicked) {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p kicked_specific_but_already_kicked", p);
gpr_log(GPR_INFO, "PS:%p kicked_specific_but_already_kicked", p);
}
GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
return GRPC_ERROR_NONE;
}
if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p kicked_specific_but_awake", p);
gpr_log(GPR_INFO, "PS:%p kicked_specific_but_awake", p);
}
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
specific_worker->kicked = true;
@ -601,7 +601,7 @@ static grpc_error* kick_one_worker(grpc_pollset_worker* specific_worker) {
if (specific_worker == p->root_worker) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p kicked_specific_via_wakeup_fd", p);
gpr_log(GPR_INFO, "PS:%p kicked_specific_via_wakeup_fd", p);
}
specific_worker->kicked = true;
grpc_error* error = grpc_wakeup_fd_wakeup(&p->wakeup);
@ -610,7 +610,7 @@ static grpc_error* kick_one_worker(grpc_pollset_worker* specific_worker) {
if (specific_worker->initialized_cv) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p kicked_specific_via_cv", p);
gpr_log(GPR_INFO, "PS:%p kicked_specific_via_cv", p);
}
specific_worker->kicked = true;
gpr_cv_signal(&specific_worker->cv);
@ -626,7 +626,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
GPR_TIMER_SCOPE("pollset_kick", 0);
GRPC_STATS_INC_POLLSET_KICK();
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"PS:%p kick %p tls_pollset=%p tls_worker=%p pollset.root_worker=%p",
pollset, specific_worker,
(void*)gpr_tls_get(&g_current_thread_pollset),
@ -636,7 +636,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
if (pollset->root_worker == nullptr) {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p kicked_any_without_poller", pollset);
gpr_log(GPR_INFO, "PS:%p kicked_any_without_poller", pollset);
}
GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
pollset->kicked_without_poller = true;
@ -662,7 +662,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
}
} else {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p kicked_any_but_awake", pollset);
gpr_log(GPR_INFO, "PS:%p kicked_any_but_awake", pollset);
}
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
return GRPC_ERROR_NONE;
@ -784,7 +784,7 @@ static grpc_error* pollable_process_events(grpc_pollset* pollset,
void* data_ptr = ev->data.ptr;
if (1 & (intptr_t)data_ptr) {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p got pollset_wakeup %p", pollset, data_ptr);
gpr_log(GPR_INFO, "PS:%p got pollset_wakeup %p", pollset, data_ptr);
}
append_error(&error,
grpc_wakeup_fd_consume_wakeup(
@ -797,7 +797,7 @@ static grpc_error* pollable_process_events(grpc_pollset* pollset,
bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
bool write_ev = (ev->events & EPOLLOUT) != 0;
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"PS:%p got fd %p: cancel=%d read=%d "
"write=%d",
pollset, fd, cancel, read_ev, write_ev);
@ -827,7 +827,7 @@ static grpc_error* pollable_epoll(pollable* p, grpc_millis deadline) {
if (grpc_polling_trace.enabled()) {
char* desc = pollable_desc(p);
gpr_log(GPR_DEBUG, "POLLABLE:%p[%s] poll for %dms", p, desc, timeout);
gpr_log(GPR_INFO, "POLLABLE:%p[%s] poll for %dms", p, desc, timeout);
gpr_free(desc);
}
@ -846,7 +846,7 @@ static grpc_error* pollable_epoll(pollable* p, grpc_millis deadline) {
if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "POLLABLE:%p got %d events", p, r);
gpr_log(GPR_INFO, "POLLABLE:%p got %d events", p, r);
}
p->event_cursor = 0;
@ -917,7 +917,7 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
gpr_mu_unlock(&pollset->mu);
if (grpc_polling_trace.enabled() &&
worker->pollable_obj->root_worker != worker) {
gpr_log(GPR_DEBUG, "PS:%p wait %p w=%p for %dms", pollset,
gpr_log(GPR_INFO, "PS:%p wait %p w=%p for %dms", pollset,
worker->pollable_obj, worker,
poll_deadline_to_millis_timeout(deadline));
}
@ -925,19 +925,19 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->mu,
grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p timeout_wait %p w=%p", pollset,
gpr_log(GPR_INFO, "PS:%p timeout_wait %p w=%p", pollset,
worker->pollable_obj, worker);
}
do_poll = false;
} else if (worker->kicked) {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p wakeup %p w=%p", pollset,
gpr_log(GPR_INFO, "PS:%p wakeup %p w=%p", pollset,
worker->pollable_obj, worker);
}
do_poll = false;
} else if (grpc_polling_trace.enabled() &&
worker->pollable_obj->root_worker != worker) {
gpr_log(GPR_DEBUG, "PS:%p spurious_wakeup %p w=%p", pollset,
gpr_log(GPR_INFO, "PS:%p spurious_wakeup %p w=%p", pollset,
worker->pollable_obj, worker);
}
}
@ -1009,7 +1009,7 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
WORKER_PTR->originator = gettid();
#endif
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"PS:%p work hdl=%p worker=%p now=%" PRId64 " deadline=%" PRId64
" kwp=%d pollable=%p",
pollset, worker_hdl, WORKER_PTR, grpc_core::ExecCtx::Get()->Now(),
@ -1050,7 +1050,7 @@ static grpc_error* pollset_transition_pollable_from_empty_to_fd_locked(
static const char* err_desc = "pollset_transition_pollable_from_empty_to_fd";
grpc_error* error = GRPC_ERROR_NONE;
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"PS:%p add fd %p (%d); transition pollable from empty to fd",
pollset, fd, fd->fd);
}
@ -1067,7 +1067,7 @@ static grpc_error* pollset_transition_pollable_from_fd_to_multi_locked(
grpc_error* error = GRPC_ERROR_NONE;
if (grpc_polling_trace.enabled()) {
gpr_log(
GPR_DEBUG,
GPR_INFO,
"PS:%p add fd %p (%d); transition pollable from fd %p to multipoller",
pollset, and_add_fd, and_add_fd ? and_add_fd->fd : -1,
pollset->active_pollable->owner_fd);
@ -1137,7 +1137,7 @@ static grpc_error* pollset_as_multipollable_locked(grpc_pollset* pollset,
/* Any workers currently polling on this pollset must now be woken up so
* that they can pick up the new active_pollable */
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"PS:%p active pollable transition from empty to multi",
pollset);
}
@ -1224,7 +1224,7 @@ static void pollset_set_unref(grpc_pollset_set* pss) {
static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {
GPR_TIMER_SCOPE("pollset_set_add_fd", 0);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PSS:%p: add fd %p (%d)", pss, fd, fd->fd);
gpr_log(GPR_INFO, "PSS:%p: add fd %p (%d)", pss, fd, fd->fd);
}
grpc_error* error = GRPC_ERROR_NONE;
static const char* err_desc = "pollset_set_add_fd";
@ -1248,7 +1248,7 @@ static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {
static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {
GPR_TIMER_SCOPE("pollset_set_del_fd", 0);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PSS:%p: del fd %p", pss, fd);
gpr_log(GPR_INFO, "PSS:%p: del fd %p", pss, fd);
}
pss = pss_lock_adam(pss);
size_t i;
@ -1269,7 +1269,7 @@ static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {
static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
GPR_TIMER_SCOPE("pollset_set_del_pollset", 0);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PSS:%p: del pollset %p", pss, ps);
gpr_log(GPR_INFO, "PSS:%p: del pollset %p", pss, ps);
}
pss = pss_lock_adam(pss);
size_t i;
@ -1321,7 +1321,7 @@ static grpc_error* add_fds_to_pollsets(grpc_fd** fds, size_t fd_count,
static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
GPR_TIMER_SCOPE("pollset_set_add_pollset", 0);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PSS:%p: add pollset %p", pss, ps);
gpr_log(GPR_INFO, "PSS:%p: add pollset %p", pss, ps);
}
grpc_error* error = GRPC_ERROR_NONE;
static const char* err_desc = "pollset_set_add_pollset";
@ -1358,7 +1358,7 @@ static void pollset_set_add_pollset_set(grpc_pollset_set* a,
grpc_pollset_set* b) {
GPR_TIMER_SCOPE("pollset_set_add_pollset_set", 0);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PSS: merge (%p, %p)", a, b);
gpr_log(GPR_INFO, "PSS: merge (%p, %p)", a, b);
}
grpc_error* error = GRPC_ERROR_NONE;
static const char* err_desc = "pollset_set_add_fd";
@ -1392,7 +1392,7 @@ static void pollset_set_add_pollset_set(grpc_pollset_set* a,
GPR_SWAP(grpc_pollset_set*, a, b);
}
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PSS: parent %p to %p", b, a);
gpr_log(GPR_INFO, "PSS: parent %p to %p", b, a);
}
gpr_ref(&a->refs);
b->parent = a;

@ -292,7 +292,7 @@ static void pi_add_ref_dbg(polling_island* pi, const char* reason,
const char* file, int line) {
if (grpc_polling_trace.enabled()) {
gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"Add ref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
" (%s) - (%s, %d)",
pi, old_cnt, old_cnt + 1, reason, file, line);
@ -304,7 +304,7 @@ static void pi_unref_dbg(polling_island* pi, const char* reason,
const char* file, int line) {
if (grpc_polling_trace.enabled()) {
gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"Unref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
" (%s) - (%s, %d)",
pi, old_cnt, (old_cnt - 1), reason, file, line);

@ -983,7 +983,7 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
GRPC_SCHEDULING_END_BLOCKING_REGION;
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "%p poll=%d", pollset, r);
gpr_log(GPR_INFO, "%p poll=%d", pollset, r);
}
if (r < 0) {
@ -1007,7 +1007,7 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
} else {
if (pfds[0].revents & POLLIN_CHECK) {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "%p: got_wakeup", pollset);
gpr_log(GPR_INFO, "%p: got_wakeup", pollset);
}
work_combine_error(
&error, grpc_wakeup_fd_consume_wakeup(&worker.wakeup_fd->fd));
@ -1017,7 +1017,7 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
fd_end_poll(&watchers[i], 0, 0, nullptr);
} else {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "%p got_event: %d r:%d w:%d [%d]", pollset,
gpr_log(GPR_INFO, "%p got_event: %d r:%d w:%d [%d]", pollset,
pfds[i].fd, (pfds[i].revents & POLLIN_CHECK) != 0,
(pfds[i].revents & POLLOUT_CHECK) != 0, pfds[i].revents);
}

@ -48,7 +48,7 @@ grpc_core::DebugOnlyTraceFlag grpc_polling_api_trace(false, "polling_api");
// Polling API trace only enabled in debug builds
#define GRPC_POLLING_API_TRACE(format, ...) \
if (grpc_polling_api_trace.enabled()) { \
gpr_log(GPR_DEBUG, "(polling-api) " format, __VA_ARGS__); \
gpr_log(GPR_INFO, "(polling-api) " format, __VA_ARGS__); \
}
#else
#define GRPC_POLLING_API_TRACE(...)

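For reference, the GRPC_POLLING_API_TRACE macro above has to be invoked with a format string plus at least one trailing argument (an empty __VA_ARGS__ would leave a dangling comma), and the #else branch compiles it away outside debug builds. A purely illustrative call, assuming a grpc_pollset* pollset and a grpc_fd* fd in scope (hypothetical names, not a specific call site from this diff):

    // Illustrative only -- real call sites live throughout the iomgr polling code.
    GRPC_POLLING_API_TRACE("pollset_add_fd(%p, fd=%d)", pollset, fd->fd);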
@ -69,7 +69,7 @@ static size_t run_closures(grpc_closure_list list) {
gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c,
c->file_created, c->line_created);
#else
gpr_log(GPR_DEBUG, "EXECUTOR: run %p", c);
gpr_log(GPR_INFO, "EXECUTOR: run %p", c);
#endif
}
#ifndef NDEBUG
@ -150,7 +150,7 @@ static void executor_thread(void* arg) {
size_t subtract_depth = 0;
for (;;) {
if (executor_trace.enabled()) {
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step (sub_depth=%" PRIdPTR ")",
gpr_log(GPR_INFO, "EXECUTOR[%d]: step (sub_depth=%" PRIdPTR ")",
static_cast<int>(ts - g_thread_state), subtract_depth);
}
gpr_mu_lock(&ts->mu);
@ -161,7 +161,7 @@ static void executor_thread(void* arg) {
}
if (ts->shutdown) {
if (executor_trace.enabled()) {
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: shutdown",
gpr_log(GPR_INFO, "EXECUTOR[%d]: shutdown",
static_cast<int>(ts - g_thread_state));
}
gpr_mu_unlock(&ts->mu);
@ -172,7 +172,7 @@ static void executor_thread(void* arg) {
ts->elems = GRPC_CLOSURE_LIST_INIT;
gpr_mu_unlock(&ts->mu);
if (executor_trace.enabled()) {
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: execute",
gpr_log(GPR_INFO, "EXECUTOR[%d]: execute",
static_cast<int>(ts - g_thread_state));
}
@ -199,7 +199,7 @@ static void executor_push(grpc_closure* closure, grpc_error* error,
gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p (created %s:%d) inline",
closure, closure->file_created, closure->line_created);
#else
gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p inline", closure);
gpr_log(GPR_INFO, "EXECUTOR: schedule %p inline", closure);
#endif
}
grpc_closure_list_append(grpc_core::ExecCtx::Get()->closure_list(),
@ -225,7 +225,7 @@ static void executor_push(grpc_closure* closure, grpc_error* error,
closure, is_short ? "short" : "long", closure->file_created,
closure->line_created, static_cast<int>(ts - g_thread_state));
#else
gpr_log(GPR_DEBUG, "EXECUTOR: try to schedule %p (%s) to thread %d",
gpr_log(GPR_INFO, "EXECUTOR: try to schedule %p (%s) to thread %d",
closure, is_short ? "short" : "long",
(int)(ts - g_thread_state));
#endif

@ -74,7 +74,6 @@ void grpc_postfork_child() {
grpc_timer_manager_set_threading(true);
grpc_core::ExecCtx exec_ctx;
grpc_executor_set_threading(true);
grpc_core::ExecCtx::Get()->Flush();
}
}

@ -289,7 +289,7 @@ static bool rq_alloc(grpc_resource_quota* resource_quota) {
GRPC_RULIST_AWAITING_ALLOCATION))) {
gpr_mu_lock(&resource_user->mu);
if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"RQ: check allocation for user %p shutdown=%" PRIdPTR
" free_pool=%" PRId64,
resource_user, gpr_atm_no_barrier_load(&resource_user->shutdown),
@ -315,7 +315,7 @@ static bool rq_alloc(grpc_resource_quota* resource_quota) {
resource_quota->free_pool -= amt;
rq_update_estimate(resource_quota);
if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"RQ %s %s: grant alloc %" PRId64
" bytes; rq_free_pool -> %" PRId64,
resource_quota->name, resource_user->name, amt,
@ -323,7 +323,7 @@ static bool rq_alloc(grpc_resource_quota* resource_quota) {
}
} else if (grpc_resource_quota_trace.enabled() &&
resource_user->free_pool >= 0) {
gpr_log(GPR_DEBUG, "RQ %s %s: discard already satisfied alloc request",
gpr_log(GPR_INFO, "RQ %s %s: discard already satisfied alloc request",
resource_quota->name, resource_user->name);
}
if (resource_user->free_pool >= 0) {
@ -353,7 +353,7 @@ static bool rq_reclaim_from_per_user_free_pool(
resource_quota->free_pool += amt;
rq_update_estimate(resource_quota);
if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"RQ %s %s: reclaim_from_per_user_free_pool %" PRId64
" bytes; rq_free_pool -> %" PRId64,
resource_quota->name, resource_user->name, amt,
@ -376,9 +376,8 @@ static bool rq_reclaim(grpc_resource_quota* resource_quota, bool destructive) {
grpc_resource_user* resource_user = rulist_pop_head(resource_quota, list);
if (resource_user == nullptr) return false;
if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "RQ %s %s: initiate %s reclamation",
resource_quota->name, resource_user->name,
destructive ? "destructive" : "benign");
gpr_log(GPR_INFO, "RQ %s %s: initiate %s reclamation", resource_quota->name,
resource_user->name, destructive ? "destructive" : "benign");
}
resource_quota->reclaiming = true;
grpc_resource_quota_ref_internal(resource_quota);
@ -506,7 +505,7 @@ static void ru_post_destructive_reclaimer(void* ru, grpc_error* error) {
static void ru_shutdown(void* ru, grpc_error* error) {
if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "RU shutdown %p", ru);
gpr_log(GPR_INFO, "RU shutdown %p", ru);
}
grpc_resource_user* resource_user = static_cast<grpc_resource_user*>(ru);
gpr_mu_lock(&resource_user->mu);
@ -793,7 +792,7 @@ void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size,
resource_user->free_pool -= static_cast<int64_t>(size);
resource_user->outstanding_allocations += static_cast<int64_t>(size);
if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "RQ %s %s: alloc %" PRIdPTR "; free_pool -> %" PRId64,
gpr_log(GPR_INFO, "RQ %s %s: alloc %" PRIdPTR "; free_pool -> %" PRId64,
resource_user->resource_quota->name, resource_user->name, size,
resource_user->free_pool);
}
@ -816,7 +815,7 @@ void grpc_resource_user_free(grpc_resource_user* resource_user, size_t size) {
bool was_zero_or_negative = resource_user->free_pool <= 0;
resource_user->free_pool += static_cast<int64_t>(size);
if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "RQ %s %s: free %" PRIdPTR "; free_pool -> %" PRId64,
gpr_log(GPR_INFO, "RQ %s %s: free %" PRIdPTR "; free_pool -> %" PRId64,
resource_user->resource_quota->name, resource_user->name, size,
resource_user->free_pool);
}
@ -842,7 +841,7 @@ void grpc_resource_user_post_reclaimer(grpc_resource_user* resource_user,
void grpc_resource_user_finish_reclamation(grpc_resource_user* resource_user) {
if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "RQ %s %s: reclamation complete",
gpr_log(GPR_INFO, "RQ %s %s: reclamation complete",
resource_user->resource_quota->name, resource_user->name);
}
GRPC_CLOSURE_SCHED(

@ -66,7 +66,7 @@ static void on_alarm(void* acp, grpc_error* error) {
grpc_custom_tcp_connect* connect = socket->connector;
if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s",
gpr_log(GPR_INFO, "CLIENT_CONNECT: %s: on_alarm: error=%s",
connect->addr_name, str);
}
if (error == GRPC_ERROR_NONE) {
@ -136,7 +136,7 @@ static void tcp_connect(grpc_closure* closure, grpc_endpoint** ep,
connect->refs = 2;
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %p %s: asynchronously connecting",
gpr_log(GPR_INFO, "CLIENT_CONNECT: %p %s: asynchronously connecting",
socket, connect->addr_name);
}

@ -104,7 +104,7 @@ static void tc_on_alarm(void* acp, grpc_error* error) {
async_connect* ac = static_cast<async_connect*>(acp);
if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s", ac->addr_str,
gpr_log(GPR_INFO, "CLIENT_CONNECT: %s: on_alarm: error=%s", ac->addr_str,
str);
}
gpr_mu_lock(&ac->mu);
@ -141,8 +141,8 @@ static void on_writable(void* acp, grpc_error* error) {
if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_writable: error=%s",
ac->addr_str, str);
gpr_log(GPR_INFO, "CLIENT_CONNECT: %s: on_writable: error=%s", ac->addr_str,
str);
}
gpr_mu_lock(&ac->mu);
@ -325,7 +325,7 @@ void grpc_tcp_client_create_from_prepared_fd(
ac->channel_args = grpc_channel_args_copy(channel_args);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting fd %p",
gpr_log(GPR_INFO, "CLIENT_CONNECT: %s: asynchronously connecting fd %p",
ac->addr_str, fdobj);
}

@ -125,16 +125,16 @@ static void tcp_ref(custom_tcp_endpoint* tcp) { gpr_ref(&tcp->refcount); }
static void call_read_cb(custom_tcp_endpoint* tcp, grpc_error* error) {
grpc_closure* cb = tcp->read_cb;
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p call_cb %p %p:%p", tcp->socket, cb, cb->cb,
gpr_log(GPR_INFO, "TCP:%p call_cb %p %p:%p", tcp->socket, cb, cb->cb,
cb->cb_arg);
size_t i;
const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "read: error=%s", str);
gpr_log(GPR_INFO, "read: error=%s", str);
for (i = 0; i < tcp->read_slices->count; i++) {
char* dump = grpc_dump_slice(tcp->read_slices->slices[i],
GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "READ %p (peer=%s): %s", tcp, tcp->peer_string, dump);
gpr_log(GPR_INFO, "READ %p (peer=%s): %s", tcp, tcp->peer_string, dump);
gpr_free(dump);
}
}
@ -171,7 +171,7 @@ static void custom_read_callback(grpc_custom_socket* socket, size_t nread,
static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)tcpp;
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p read_allocation_done: %s", tcp->socket,
gpr_log(GPR_INFO, "TCP:%p read_allocation_done: %s", tcp->socket,
grpc_error_string(error));
}
if (error == GRPC_ERROR_NONE) {
@ -188,7 +188,7 @@ static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
}
if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "Initiating read on %p: error=%s", tcp->socket, str);
gpr_log(GPR_INFO, "Initiating read on %p: error=%s", tcp->socket, str);
}
}
@ -214,7 +214,7 @@ static void custom_write_callback(grpc_custom_socket* socket,
tcp->write_cb = nullptr;
if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "write complete on %p: error=%s", tcp->socket, str);
gpr_log(GPR_INFO, "write complete on %p: error=%s", tcp->socket, str);
}
TCP_UNREF(tcp, "write");
GRPC_CLOSURE_SCHED(cb, error);
@ -231,8 +231,8 @@ static void endpoint_write(grpc_endpoint* ep, grpc_slice_buffer* write_slices,
for (j = 0; j < write_slices->count; j++) {
char* data = grpc_dump_slice(write_slices->slices[j],
GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "WRITE %p (peer=%s): %s", tcp->socket,
tcp->peer_string, data);
gpr_log(GPR_INFO, "WRITE %p (peer=%s): %s", tcp->socket, tcp->peer_string,
data);
gpr_free(data);
}
}
@ -283,7 +283,7 @@ static void endpoint_shutdown(grpc_endpoint* ep, grpc_error* why) {
if (!tcp->shutting_down) {
if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(why);
gpr_log(GPR_DEBUG, "TCP %p shutdown why=%s", tcp->socket, str);
gpr_log(GPR_INFO, "TCP %p shutdown why=%s", tcp->socket, str);
}
tcp->shutting_down = true;
// GRPC_CLOSURE_SCHED(tcp->read_cb, GRPC_ERROR_REF(why));
@ -345,7 +345,7 @@ grpc_endpoint* custom_tcp_endpoint_create(grpc_custom_socket* socket,
grpc_core::ExecCtx exec_ctx;
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "Creating TCP endpoint %p", socket);
gpr_log(GPR_INFO, "Creating TCP endpoint %p", socket);
}
memset(tcp, 0, sizeof(custom_tcp_endpoint));
socket->refs++;

@ -120,7 +120,7 @@ static void tcp_drop_uncovered_then_handle_write(void* arg /* grpc_tcp */,
static void done_poller(void* bp, grpc_error* error_ignored) {
backup_poller* p = static_cast<backup_poller*>(bp);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p destroy", p);
gpr_log(GPR_INFO, "BACKUP_POLLER:%p destroy", p);
}
grpc_pollset_destroy(BACKUP_POLLER_POLLSET(p));
gpr_free(p);
@ -129,7 +129,7 @@ static void done_poller(void* bp, grpc_error* error_ignored) {
static void run_poller(void* bp, grpc_error* error_ignored) {
backup_poller* p = static_cast<backup_poller*>(bp);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p run", p);
gpr_log(GPR_INFO, "BACKUP_POLLER:%p run", p);
}
gpr_mu_lock(p->pollset_mu);
grpc_millis deadline = grpc_core::ExecCtx::Get()->Now() + 10 * GPR_MS_PER_SEC;
@ -145,18 +145,18 @@ static void run_poller(void* bp, grpc_error* error_ignored) {
gpr_mu_lock(p->pollset_mu);
bool cas_ok = gpr_atm_full_cas(&g_backup_poller, (gpr_atm)p, 0);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p done cas_ok=%d", p, cas_ok);
gpr_log(GPR_INFO, "BACKUP_POLLER:%p done cas_ok=%d", p, cas_ok);
}
gpr_mu_unlock(p->pollset_mu);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p shutdown", p);
gpr_log(GPR_INFO, "BACKUP_POLLER:%p shutdown", p);
}
grpc_pollset_shutdown(BACKUP_POLLER_POLLSET(p),
GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p,
grpc_schedule_on_exec_ctx));
} else {
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p reschedule", p);
gpr_log(GPR_INFO, "BACKUP_POLLER:%p reschedule", p);
}
GRPC_CLOSURE_SCHED(&p->run_poller, GRPC_ERROR_NONE);
}
@ -167,7 +167,7 @@ static void drop_uncovered(grpc_tcp* tcp) {
gpr_atm old_count =
gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, -1);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p uncover cnt %d->%d", p,
gpr_log(GPR_INFO, "BACKUP_POLLER:%p uncover cnt %d->%d", p,
static_cast<int>(old_count), static_cast<int>(old_count) - 1);
}
GPR_ASSERT(old_count != 1);
@ -178,7 +178,7 @@ static void cover_self(grpc_tcp* tcp) {
gpr_atm old_count =
gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, 2);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER: cover cnt %d->%d",
gpr_log(GPR_INFO, "BACKUP_POLLER: cover cnt %d->%d",
static_cast<int>(old_count), 2 + static_cast<int>(old_count));
}
if (old_count == 0) {
@ -186,7 +186,7 @@ static void cover_self(grpc_tcp* tcp) {
p = static_cast<backup_poller*>(
gpr_zalloc(sizeof(*p) + grpc_pollset_size()));
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p create", p);
gpr_log(GPR_INFO, "BACKUP_POLLER:%p create", p);
}
grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
gpr_atm_rel_store(&g_backup_poller, (gpr_atm)p);
@ -201,7 +201,7 @@ static void cover_self(grpc_tcp* tcp) {
}
}
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p add %p", p, tcp);
gpr_log(GPR_INFO, "BACKUP_POLLER:%p add %p", p, tcp);
}
grpc_pollset_add_fd(BACKUP_POLLER_POLLSET(p), tcp->em_fd);
if (old_count != 0) {
@ -211,7 +211,7 @@ static void cover_self(grpc_tcp* tcp) {
static void notify_on_read(grpc_tcp* tcp) {
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p notify_on_read", tcp);
gpr_log(GPR_INFO, "TCP:%p notify_on_read", tcp);
}
GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
grpc_schedule_on_exec_ctx);
@ -220,7 +220,7 @@ static void notify_on_read(grpc_tcp* tcp) {
static void notify_on_write(grpc_tcp* tcp) {
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p notify_on_write", tcp);
gpr_log(GPR_INFO, "TCP:%p notify_on_write", tcp);
}
cover_self(tcp);
GRPC_CLOSURE_INIT(&tcp->write_done_closure,
@ -231,7 +231,7 @@ static void notify_on_write(grpc_tcp* tcp) {
static void tcp_drop_uncovered_then_handle_write(void* arg, grpc_error* error) {
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p got_write: %s", arg, grpc_error_string(error));
gpr_log(GPR_INFO, "TCP:%p got_write: %s", arg, grpc_error_string(error));
}
drop_uncovered(static_cast<grpc_tcp*>(arg));
tcp_handle_write(arg, error);
@ -351,15 +351,15 @@ static void call_read_cb(grpc_tcp* tcp, grpc_error* error) {
grpc_closure* cb = tcp->read_cb;
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
gpr_log(GPR_INFO, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
size_t i;
const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "read: error=%s", str);
gpr_log(GPR_INFO, "read: error=%s", str);
for (i = 0; i < tcp->incoming_buffer->count; i++) {
char* dump = grpc_dump_slice(tcp->incoming_buffer->slices[i],
GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "READ %p (peer=%s): %s", tcp, tcp->peer_string, dump);
gpr_log(GPR_INFO, "READ %p (peer=%s): %s", tcp, tcp->peer_string, dump);
gpr_free(dump);
}
}
@ -371,7 +371,7 @@ static void call_read_cb(grpc_tcp* tcp, grpc_error* error) {
#define MAX_READ_IOVEC 4
static void tcp_do_read(grpc_tcp* tcp) {
GPR_TIMER_SCOPE("tcp_continue_read", 0);
GPR_TIMER_SCOPE("tcp_do_read", 0);
struct msghdr msg;
struct iovec iov[MAX_READ_IOVEC];
ssize_t read_bytes;
@ -441,7 +441,7 @@ static void tcp_do_read(grpc_tcp* tcp) {
static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
grpc_tcp* tcp = static_cast<grpc_tcp*>(tcpp);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p read_allocation_done: %s", tcp,
gpr_log(GPR_INFO, "TCP:%p read_allocation_done: %s", tcp,
grpc_error_string(error));
}
if (error != GRPC_ERROR_NONE) {
@ -459,13 +459,13 @@ static void tcp_continue_read(grpc_tcp* tcp) {
if (tcp->incoming_buffer->length < target_read_size &&
tcp->incoming_buffer->count < MAX_READ_IOVEC) {
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p alloc_slices", tcp);
gpr_log(GPR_INFO, "TCP:%p alloc_slices", tcp);
}
grpc_resource_user_alloc_slices(&tcp->slice_allocator, target_read_size, 1,
tcp->incoming_buffer);
} else {
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p do_read", tcp);
gpr_log(GPR_INFO, "TCP:%p do_read", tcp);
}
tcp_do_read(tcp);
}
@ -475,7 +475,7 @@ static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error) {
grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
GPR_ASSERT(!tcp->finished_edge);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p got_read: %s", tcp, grpc_error_string(error));
gpr_log(GPR_INFO, "TCP:%p got_read: %s", tcp, grpc_error_string(error));
}
if (error != GRPC_ERROR_NONE) {
@ -618,7 +618,7 @@ static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error) {
if (!tcp_flush(tcp, &error)) {
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "write: delayed");
gpr_log(GPR_INFO, "write: delayed");
}
notify_on_write(tcp);
} else {
@ -626,7 +626,7 @@ static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error) {
tcp->write_cb = nullptr;
if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "write: %s", str);
gpr_log(GPR_INFO, "write: %s", str);
}
GRPC_CLOSURE_RUN(cb, error);
@ -646,7 +646,7 @@ static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
for (i = 0; i < buf->count; i++) {
char* data =
grpc_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data);
gpr_log(GPR_INFO, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data);
gpr_free(data);
}
}
@ -668,13 +668,13 @@ static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
TCP_REF(tcp, "write");
tcp->write_cb = cb;
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "write: delayed");
gpr_log(GPR_INFO, "write: delayed");
}
notify_on_write(tcp);
} else {
if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "write: %s", str);
gpr_log(GPR_INFO, "write: %s", str);
}
GRPC_CLOSURE_SCHED(cb, error);
}

@ -222,10 +222,10 @@ static void finish_accept(grpc_tcp_listener* sp, grpc_custom_socket* socket) {
}
if (grpc_tcp_trace.enabled()) {
if (peer_name_string) {
gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p accepted connection: %s",
gpr_log(GPR_INFO, "SERVER_CONNECT: %p accepted connection: %s",
sp->server, peer_name_string);
} else {
gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p accepted connection", sp->server);
gpr_log(GPR_INFO, "SERVER_CONNECT: %p accepted connection", sp->server);
}
}
ep = custom_tcp_endpoint_create(socket, sp->server->resource_quota,
@ -377,10 +377,10 @@ static grpc_error* tcp_server_add_port(grpc_tcp_server* s,
grpc_sockaddr_to_string(&port_string, addr, 0);
const char* str = grpc_error_string(error);
if (port_string) {
gpr_log(GPR_DEBUG, "SERVER %p add_port %s error=%s", s, port_string, str);
gpr_log(GPR_INFO, "SERVER %p add_port %s error=%s", s, port_string, str);
gpr_free(port_string);
} else {
gpr_log(GPR_DEBUG, "SERVER %p add_port error=%s", s, str);
gpr_log(GPR_INFO, "SERVER %p add_port error=%s", s, str);
}
}
@ -419,7 +419,7 @@ static void tcp_server_start(grpc_tcp_server* server, grpc_pollset** pollsets,
(void)pollset_count;
GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "SERVER_START %p", server);
gpr_log(GPR_INFO, "SERVER_START %p", server);
}
GPR_ASSERT(on_accept_cb);
GPR_ASSERT(!server->on_accept_cb);

@ -228,7 +228,7 @@ static void on_read(void* arg, grpc_error* err) {
gpr_asprintf(&name, "tcp-server-connection:%s", addr_str);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "SERVER_CONNECT: incoming connection: %s", addr_str);
gpr_log(GPR_INFO, "SERVER_CONNECT: incoming connection: %s", addr_str);
}
grpc_fd* fdobj = grpc_fd_create(fd, name);

@ -204,6 +204,9 @@ static grpc_error* uv_socket_init_helper(uv_socket_t* uv_socket, int domain) {
uv_socket->write_buffers = nullptr;
uv_socket->read_len = 0;
uv_tcp_nodelay(uv_socket->handle, 1);
// Node uses a garbage collector to call destructors, so we don't
// want to hold the uv loop open with active gRPC objects.
uv_unref((uv_handle_t*)uv_socket->handle);
uv_socket->pending_connection = false;
uv_socket->accept_socket = nullptr;
uv_socket->accept_error = GRPC_ERROR_NONE;

@ -74,12 +74,28 @@ static grpc_error* set_dualstack(SOCKET sock) {
: GRPC_WSA_ERROR(WSAGetLastError(), "setsockopt(IPV6_V6ONLY)");
}
static grpc_error* enable_loopback_fast_path(SOCKET sock) {
int status;
uint32_t param = 1;
DWORD ret;
status = WSAIoctl(sock, /*SIO_LOOPBACK_FAST_PATH==*/_WSAIOW(IOC_VENDOR, 16),
&param, sizeof(param), NULL, 0, &ret, 0, 0);
if (status == SOCKET_ERROR) {
status = WSAGetLastError();
}
return status == 0 || status == WSAEOPNOTSUPP
? GRPC_ERROR_NONE
: GRPC_WSA_ERROR(status, "WSAIoctl(SIO_LOOPBACK_FAST_PATH)");
}
grpc_error* grpc_tcp_prepare_socket(SOCKET sock) {
grpc_error* err;
err = set_non_block(sock);
if (err != GRPC_ERROR_NONE) return err;
err = set_dualstack(sock);
if (err != GRPC_ERROR_NONE) return err;
err = enable_loopback_fast_path(sock);
if (err != GRPC_ERROR_NONE) return err;
return GRPC_ERROR_NONE;
}

@ -362,7 +362,7 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline,
#endif
if (grpc_timer_trace.enabled()) {
gpr_log(GPR_DEBUG, "TIMER %p: SET %" PRId64 " now %" PRId64 " call %p[%p]",
gpr_log(GPR_INFO, "TIMER %p: SET %" PRId64 " now %" PRId64 " call %p[%p]",
timer, deadline, grpc_core::ExecCtx::Get()->Now(), closure,
closure->cb);
}
@ -398,7 +398,7 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline,
list_join(&shard->list, timer);
}
if (grpc_timer_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
" .. add to shard %d with queue_deadline_cap=%" PRId64
" => is_first_timer=%s",
static_cast<int>(shard - g_shards), shard->queue_deadline_cap,
@ -420,7 +420,7 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline,
if (is_first_timer) {
gpr_mu_lock(&g_shared_mutables.mu);
if (grpc_timer_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. old shard min_deadline=%" PRId64,
gpr_log(GPR_INFO, " .. old shard min_deadline=%" PRId64,
shard->min_deadline);
}
if (deadline < shard->min_deadline) {
@ -460,7 +460,7 @@ static void timer_cancel(grpc_timer* timer) {
timer_shard* shard = &g_shards[GPR_HASH_POINTER(timer, g_num_shards)];
gpr_mu_lock(&shard->mu);
if (grpc_timer_trace.enabled()) {
gpr_log(GPR_DEBUG, "TIMER %p: CANCEL pending=%s", timer,
gpr_log(GPR_INFO, "TIMER %p: CANCEL pending=%s", timer,
timer->pending ? "true" : "false");
}
@ -501,7 +501,7 @@ static int refill_heap(timer_shard* shard, grpc_millis now) {
static_cast<grpc_millis>(deadline_delta * 1000.0));
if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. shard[%d]->queue_deadline_cap --> %" PRId64,
gpr_log(GPR_INFO, " .. shard[%d]->queue_deadline_cap --> %" PRId64,
static_cast<int>(shard - g_shards), shard->queue_deadline_cap);
}
for (timer = shard->list.next; timer != &shard->list; timer = next) {
@ -509,7 +509,7 @@ static int refill_heap(timer_shard* shard, grpc_millis now) {
if (timer->deadline < shard->queue_deadline_cap) {
if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. add timer with deadline %" PRId64 " to heap",
gpr_log(GPR_INFO, " .. add timer with deadline %" PRId64 " to heap",
timer->deadline);
}
list_remove(timer);
@ -526,7 +526,7 @@ static grpc_timer* pop_one(timer_shard* shard, grpc_millis now) {
grpc_timer* timer;
for (;;) {
if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. shard[%d]: heap_empty=%s",
gpr_log(GPR_INFO, " .. shard[%d]: heap_empty=%s",
static_cast<int>(shard - g_shards),
grpc_timer_heap_is_empty(&shard->heap) ? "true" : "false");
}
@ -536,13 +536,13 @@ static grpc_timer* pop_one(timer_shard* shard, grpc_millis now) {
}
timer = grpc_timer_heap_top(&shard->heap);
if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
" .. check top timer deadline=%" PRId64 " now=%" PRId64,
timer->deadline, now);
}
if (timer->deadline > now) return nullptr;
if (grpc_timer_trace.enabled()) {
gpr_log(GPR_DEBUG, "TIMER %p: FIRE %" PRId64 "ms late via %s scheduler",
gpr_log(GPR_INFO, "TIMER %p: FIRE %" PRId64 "ms late via %s scheduler",
timer, now - timer->deadline,
timer->closure->scheduler->vtable->name);
}
@ -566,7 +566,7 @@ static size_t pop_timers(timer_shard* shard, grpc_millis now,
*new_min_deadline = compute_min_deadline(shard);
gpr_mu_unlock(&shard->mu);
if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. shard[%d] popped %" PRIdPTR,
gpr_log(GPR_INFO, " .. shard[%d] popped %" PRIdPTR,
static_cast<int>(shard - g_shards), n);
}
return n;
@ -599,7 +599,7 @@ static grpc_timer_check_result run_some_expired_timers(grpc_millis now,
result = GRPC_TIMERS_CHECKED_AND_EMPTY;
if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. shard[%d]->min_deadline = %" PRId64,
gpr_log(GPR_INFO, " .. shard[%d]->min_deadline = %" PRId64,
static_cast<int>(g_shard_queue[0] - g_shards),
g_shard_queue[0]->min_deadline);
}
@ -617,7 +617,7 @@ static grpc_timer_check_result run_some_expired_timers(grpc_millis now,
}
if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
" .. result --> %d"
", shard[%d]->min_deadline %" PRId64 " --> %" PRId64
", now=%" PRId64,
@ -681,7 +681,7 @@ static grpc_timer_check_result timer_check(grpc_millis* next) {
*next = GPR_MIN(*next, min_timer);
}
if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "TIMER CHECK SKIP: now=%" PRId64 " min_timer=%" PRId64,
gpr_log(GPR_INFO, "TIMER CHECK SKIP: now=%" PRId64 " min_timer=%" PRId64,
now, min_timer);
}
return GRPC_TIMERS_CHECKED_AND_EMPTY;
@ -701,16 +701,15 @@ static grpc_timer_check_result timer_check(grpc_millis* next) {
gpr_asprintf(&next_str, "%" PRId64, *next);
}
#if GPR_ARCH_64
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"TIMER CHECK BEGIN: now=%" PRId64 " next=%s tls_min=%" PRId64
" glob_min=%" PRId64,
now, next_str, min_timer,
gpr_atm_no_barrier_load(
static_cast<gpr_atm*>(&g_shared_mutables.min_timer)));
#else
gpr_log(GPR_DEBUG,
"TIMER CHECK BEGIN: now=%" PRId64 " next=%s min=%" PRId64, now,
next_str, min_timer);
gpr_log(GPR_INFO, "TIMER CHECK BEGIN: now=%" PRId64 " next=%s min=%" PRId64,
now, next_str, min_timer);
#endif
gpr_free(next_str);
}
@ -725,7 +724,7 @@ static grpc_timer_check_result timer_check(grpc_millis* next) {
} else {
gpr_asprintf(&next_str, "%" PRId64, *next);
}
gpr_log(GPR_DEBUG, "TIMER CHECK END: r=%d; next=%s", r, next_str);
gpr_log(GPR_INFO, "TIMER CHECK END: r=%d; next=%s", r, next_str);
gpr_free(next_str);
}
return r;

@ -82,7 +82,7 @@ static void start_timer_thread_and_unlock(void) {
++g_thread_count;
gpr_mu_unlock(&g_mu);
if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "Spawn timer thread");
gpr_log(GPR_INFO, "Spawn timer thread");
}
completed_thread* ct =
static_cast<completed_thread*>(gpr_malloc(sizeof(*ct)));
@ -108,7 +108,7 @@ static void run_some_timers() {
// waiter so that the next deadline is not missed
if (!g_has_timed_waiter) {
if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "kick untimed waiter");
gpr_log(GPR_INFO, "kick untimed waiter");
}
gpr_cv_signal(&g_cv_wait);
}
@ -116,7 +116,7 @@ static void run_some_timers() {
}
// without our lock, flush the exec_ctx
if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "flush exec_ctx");
gpr_log(GPR_INFO, "flush exec_ctx");
}
grpc_core::ExecCtx::Get()->Flush();
gpr_mu_lock(&g_mu);
@ -172,7 +172,7 @@ static bool wait_until(grpc_millis next) {
if (grpc_timer_check_trace.enabled()) {
grpc_millis wait_time = next - grpc_core::ExecCtx::Get()->Now();
gpr_log(GPR_DEBUG, "sleep for a %" PRId64 " milliseconds", wait_time);
gpr_log(GPR_INFO, "sleep for a %" PRId64 " milliseconds", wait_time);
}
} else { // g_timed_waiter == true && next >= g_timed_waiter_deadline
next = GRPC_MILLIS_INF_FUTURE;
@ -180,14 +180,14 @@ static bool wait_until(grpc_millis next) {
}
if (grpc_timer_check_trace.enabled() && next == GRPC_MILLIS_INF_FUTURE) {
gpr_log(GPR_DEBUG, "sleep until kicked");
gpr_log(GPR_INFO, "sleep until kicked");
}
gpr_cv_wait(&g_cv_wait, &g_mu,
grpc_millis_to_timespec(next, GPR_CLOCK_MONOTONIC));
if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "wait ended: was_timed:%d kicked:%d",
gpr_log(GPR_INFO, "wait ended: was_timed:%d kicked:%d",
my_timed_waiter_generation == g_timed_waiter_generation,
g_kicked);
}
@ -232,7 +232,7 @@ static void timer_main_loop() {
         Consequently, we can just sleep forever here and be happy about the
         saved wakeup cycles. */
if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "timers not checked: expect another thread to");
gpr_log(GPR_INFO, "timers not checked: expect another thread to");
}
next = GRPC_MILLIS_INF_FUTURE;
/* fall through */
@ -258,7 +258,7 @@ static void timer_thread_cleanup(completed_thread* ct) {
g_completed_threads = ct;
gpr_mu_unlock(&g_mu);
if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "End timer thread");
gpr_log(GPR_INFO, "End timer thread");
}
}
@ -300,18 +300,18 @@ void grpc_timer_manager_init(void) {
static void stop_threads(void) {
gpr_mu_lock(&g_mu);
if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "stop timer threads: threaded=%d", g_threaded);
gpr_log(GPR_INFO, "stop timer threads: threaded=%d", g_threaded);
}
if (g_threaded) {
g_threaded = false;
gpr_cv_broadcast(&g_cv_wait);
if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "num timer threads: %d", g_thread_count);
gpr_log(GPR_INFO, "num timer threads: %d", g_thread_count);
}
while (g_thread_count > 0) {
gpr_cv_wait(&g_cv_shutdown, &g_mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "num timer threads: %d", g_thread_count);
gpr_log(GPR_INFO, "num timer threads: %d", g_thread_count);
}
gc_completed_threads();
}

@ -52,6 +52,9 @@ static void timer_start(grpc_custom_timer* t) {
uv_timer->data = t;
t->timer = (void*)uv_timer;
uv_timer_start(uv_timer, run_expired_timer, t->timeout_ms, 0);
// Node uses a garbage collector to call destructors, so we don't
// want to hold the uv loop open with active gRPC objects.
uv_unref((uv_handle_t*)uv_timer);
}
static void timer_stop(grpc_custom_timer* t) {

@ -27,6 +27,7 @@
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

@ -45,6 +45,7 @@ typedef enum {
#define GRPC_CHANNEL_CREDENTIALS_TYPE_SSL "Ssl"
#define GRPC_CHANNEL_CREDENTIALS_TYPE_FAKE_TRANSPORT_SECURITY \
"FakeTransportSecurity"
#define GRPC_CHANNEL_CREDENTIALS_TYPE_GOOGLE_DEFAULT "GoogleDefault"
#define GRPC_CALL_CREDENTIALS_TYPE_OAUTH2 "Oauth2"
#define GRPC_CALL_CREDENTIALS_TYPE_JWT "Jwt"

@ -26,12 +26,15 @@
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/env.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/http/httpcli.h"
#include "src/core/lib/http/parser.h"
#include "src/core/lib/iomgr/load_file.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/security/credentials/alts/alts_credentials.h"
#include "src/core/lib/security/credentials/google_default/google_default_credentials.h"
#include "src/core/lib/security/credentials/jwt/jwt_credentials.h"
#include "src/core/lib/security/credentials/oauth2/oauth2_credentials.h"
@ -45,8 +48,8 @@
/* -- Default credentials. -- */
static grpc_channel_credentials* default_credentials = nullptr;
static int compute_engine_detection_done = 0;
static grpc_channel_credentials* g_default_credentials = nullptr;
static int g_compute_engine_detection_done = 0;
static gpr_mu g_state_mu;
static gpr_mu* g_polling_mu;
static gpr_once g_once = GPR_ONCE_INIT;
@ -60,6 +63,52 @@ typedef struct {
grpc_http_response response;
} compute_engine_detector;
static void google_default_credentials_destruct(
grpc_channel_credentials* creds) {
grpc_google_default_channel_credentials* c =
reinterpret_cast<grpc_google_default_channel_credentials*>(creds);
grpc_channel_credentials_unref(c->alts_creds);
grpc_channel_credentials_unref(c->ssl_creds);
}
static grpc_security_status google_default_create_security_connector(
grpc_channel_credentials* creds, grpc_call_credentials* call_creds,
const char* target, const grpc_channel_args* args,
grpc_channel_security_connector** sc, grpc_channel_args** new_args) {
grpc_google_default_channel_credentials* c =
reinterpret_cast<grpc_google_default_channel_credentials*>(creds);
bool is_grpclb_load_balancer = grpc_channel_arg_get_bool(
grpc_channel_args_find(args, GRPC_ARG_ADDRESS_IS_GRPCLB_LOAD_BALANCER),
false);
bool is_backend_from_grpclb_load_balancer = grpc_channel_arg_get_bool(
grpc_channel_args_find(
args, GRPC_ARG_ADDRESS_IS_BACKEND_FROM_GRPCLB_LOAD_BALANCER),
false);
bool use_alts =
is_grpclb_load_balancer || is_backend_from_grpclb_load_balancer;
grpc_security_status status = GRPC_SECURITY_ERROR;
status = use_alts ? c->alts_creds->vtable->create_security_connector(
c->alts_creds, call_creds, target, args, sc, new_args)
: c->ssl_creds->vtable->create_security_connector(
c->ssl_creds, call_creds, target, args, sc, new_args);
/* grpclb-specific channel args are removed from the channel args set
 * to ensure backends and fallback addresses will have the same set of channel
* args. By doing that, it guarantees the connections to backends will not be
* torn down and re-connected when switching in and out of fallback mode.
*/
static const char* args_to_remove[] = {
GRPC_ARG_ADDRESS_IS_GRPCLB_LOAD_BALANCER,
GRPC_ARG_ADDRESS_IS_BACKEND_FROM_GRPCLB_LOAD_BALANCER,
};
*new_args = grpc_channel_args_copy_and_add_and_remove(
args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), nullptr, 0);
return status;
}
static grpc_channel_credentials_vtable google_default_credentials_vtable = {
google_default_credentials_destruct,
google_default_create_security_connector, nullptr};
static void on_compute_engine_detection_http_response(void* user_data,
grpc_error* error) {
compute_engine_detector* detector =
@ -234,8 +283,8 @@ grpc_channel_credentials* grpc_google_default_credentials_create(void) {
gpr_mu_lock(&g_state_mu);
if (default_credentials != nullptr) {
result = grpc_channel_credentials_ref(default_credentials);
if (g_default_credentials != nullptr) {
result = grpc_channel_credentials_ref(g_default_credentials);
goto end;
}
@ -253,9 +302,9 @@ grpc_channel_credentials* grpc_google_default_credentials_create(void) {
/* At last try to see if we're on compute engine (do the detection only once
since it requires a network test). */
if (!compute_engine_detection_done) {
if (!g_compute_engine_detection_done) {
int need_compute_engine_creds = is_stack_running_on_compute_engine();
compute_engine_detection_done = 1;
g_compute_engine_detection_done = 1;
if (need_compute_engine_creds) {
call_creds = grpc_google_compute_engine_credentials_create(nullptr);
if (call_creds == nullptr) {
@ -269,18 +318,25 @@ grpc_channel_credentials* grpc_google_default_credentials_create(void) {
end:
if (result == nullptr) {
if (call_creds != nullptr) {
/* Blend with default ssl credentials and add a global reference so that
it
can be cached and re-served. */
grpc_channel_credentials* ssl_creds =
grpc_ssl_credentials_create(nullptr, nullptr, nullptr);
default_credentials = grpc_channel_credentials_ref(
grpc_composite_channel_credentials_create(ssl_creds, call_creds,
nullptr));
GPR_ASSERT(default_credentials != nullptr);
grpc_channel_credentials_unref(ssl_creds);
/* Create google default credentials. */
auto creds = static_cast<grpc_google_default_channel_credentials*>(
gpr_zalloc(sizeof(grpc_google_default_channel_credentials)));
creds->base.vtable = &google_default_credentials_vtable;
creds->base.type = GRPC_CHANNEL_CREDENTIALS_TYPE_GOOGLE_DEFAULT;
gpr_ref_init(&creds->base.refcount, 1);
creds->ssl_creds = grpc_ssl_credentials_create(nullptr, nullptr, nullptr);
GPR_ASSERT(creds->ssl_creds != nullptr);
grpc_alts_credentials_options* options =
grpc_alts_credentials_client_options_create();
creds->alts_creds = grpc_alts_credentials_create(options);
grpc_alts_credentials_options_destroy(options);
/* Add a global reference so that it can be cached and re-served. */
g_default_credentials = grpc_composite_channel_credentials_create(
&creds->base, call_creds, nullptr);
GPR_ASSERT(g_default_credentials != nullptr);
grpc_channel_credentials_unref(&creds->base);
grpc_call_credentials_unref(call_creds);
result = default_credentials;
result = grpc_channel_credentials_ref(g_default_credentials);
} else {
gpr_log(GPR_ERROR, "Could not create google default credentials.");
}
@ -299,11 +355,11 @@ void grpc_flush_cached_google_default_credentials(void) {
grpc_core::ExecCtx exec_ctx;
gpr_once_init(&g_once, init_default_credentials);
gpr_mu_lock(&g_state_mu);
if (default_credentials != nullptr) {
grpc_channel_credentials_unref(default_credentials);
default_credentials = nullptr;
if (g_default_credentials != nullptr) {
grpc_channel_credentials_unref(g_default_credentials);
g_default_credentials = nullptr;
}
compute_engine_detection_done = 0;
g_compute_engine_detection_done = 0;
gpr_mu_unlock(&g_state_mu);
}

@ -39,6 +39,12 @@
"/" GRPC_GOOGLE_WELL_KNOWN_CREDENTIALS_FILE
#endif
typedef struct {
grpc_channel_credentials base;
grpc_channel_credentials* alts_creds;
grpc_channel_credentials* ssl_creds;
} grpc_google_default_channel_credentials;
void grpc_flush_cached_google_default_credentials(void);
#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_GOOGLE_DEFAULT_GOOGLE_DEFAULT_CREDENTIALS_H \

@ -786,23 +786,26 @@ static void ssl_server_add_handshakers(grpc_server_security_connector* sc,
tsi_create_adapter_handshaker(tsi_hs), &sc->base));
}
static int ssl_host_matches_name(const tsi_peer* peer, const char* peer_name) {
int grpc_ssl_host_matches_name(const tsi_peer* peer, const char* peer_name) {
char* allocated_name = nullptr;
int r;
if (strchr(peer_name, ':') != nullptr) {
char* ignored_port;
gpr_split_host_port(peer_name, &allocated_name, &ignored_port);
gpr_free(ignored_port);
peer_name = allocated_name;
if (!peer_name) return 0;
}
// IPv6 zone-id should not be included in comparisons.
char* const zone_id = strchr(allocated_name, '%');
if (zone_id != nullptr) *zone_id = '\0';
r = tsi_ssl_peer_matches_name(peer, peer_name);
gpr_free(allocated_name);
return r;
}
grpc_auth_context* tsi_ssl_peer_to_auth_context(const tsi_peer* peer) {
grpc_auth_context* grpc_ssl_peer_to_auth_context(const tsi_peer* peer) {
size_t i;
grpc_auth_context* ctx = nullptr;
const char* peer_identity_property_name = nullptr;
@ -859,14 +862,14 @@ static grpc_error* ssl_check_peer(grpc_security_connector* sc,
}
/* Check the peer name if specified. */
if (peer_name != nullptr && !ssl_host_matches_name(peer, peer_name)) {
if (peer_name != nullptr && !grpc_ssl_host_matches_name(peer, peer_name)) {
char* msg;
gpr_asprintf(&msg, "Peer name %s is not in peer certificate", peer_name);
grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return error;
}
*auth_context = tsi_ssl_peer_to_auth_context(peer);
*auth_context = grpc_ssl_peer_to_auth_context(peer);
return GRPC_ERROR_NONE;
}
@ -924,7 +927,7 @@ static void add_shallow_auth_property_to_peer(tsi_peer* peer,
tsi_prop->value.length = prop->value_length;
}
tsi_peer tsi_shallow_peer_from_ssl_auth_context(
tsi_peer grpc_shallow_peer_from_ssl_auth_context(
const grpc_auth_context* auth_context) {
size_t max_num_props = 0;
grpc_auth_property_iterator it;
@ -955,7 +958,7 @@ tsi_peer tsi_shallow_peer_from_ssl_auth_context(
return peer;
}
void tsi_shallow_peer_destruct(tsi_peer* peer) {
void grpc_shallow_peer_destruct(tsi_peer* peer) {
if (peer->properties != nullptr) gpr_free(peer->properties);
}
@ -967,8 +970,8 @@ static bool ssl_channel_check_call_host(grpc_channel_security_connector* sc,
grpc_ssl_channel_security_connector* c =
reinterpret_cast<grpc_ssl_channel_security_connector*>(sc);
grpc_security_status status = GRPC_SECURITY_ERROR;
tsi_peer peer = tsi_shallow_peer_from_ssl_auth_context(auth_context);
if (ssl_host_matches_name(&peer, host)) status = GRPC_SECURITY_OK;
tsi_peer peer = grpc_shallow_peer_from_ssl_auth_context(auth_context);
if (grpc_ssl_host_matches_name(&peer, host)) status = GRPC_SECURITY_OK;
/* If the target name was overridden, then the original target_name was
'checked' transitively during the previous peer check at the end of the
handshake. */
@ -980,7 +983,7 @@ static bool ssl_channel_check_call_host(grpc_channel_security_connector* sc,
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"call host does not match SSL server name");
}
tsi_shallow_peer_destruct(&peer);
grpc_shallow_peer_destruct(&peer);
return true;
}

@ -239,10 +239,11 @@ const tsi_peer_property* tsi_peer_get_property_by_name(const tsi_peer* peer,
const char* name);
/* Exposed for testing only. */
grpc_auth_context* tsi_ssl_peer_to_auth_context(const tsi_peer* peer);
tsi_peer tsi_shallow_peer_from_ssl_auth_context(
grpc_auth_context* grpc_ssl_peer_to_auth_context(const tsi_peer* peer);
tsi_peer grpc_shallow_peer_from_ssl_auth_context(
const grpc_auth_context* auth_context);
void tsi_shallow_peer_destruct(tsi_peer* peer);
void grpc_shallow_peer_destruct(tsi_peer* peer);
int grpc_ssl_host_matches_name(const tsi_peer* peer, const char* peer_name);
/* --- Default SSL Root Store. --- */
namespace grpc_core {

@ -133,7 +133,7 @@ static void call_read_cb(secure_endpoint* ep, grpc_error* error) {
for (i = 0; i < ep->read_buffer->count; i++) {
char* data = grpc_dump_slice(ep->read_buffer->slices[i],
GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "READ %p: %s", ep, data);
gpr_log(GPR_INFO, "READ %p: %s", ep, data);
gpr_free(data);
}
}
@ -269,7 +269,7 @@ static void endpoint_write(grpc_endpoint* secure_ep, grpc_slice_buffer* slices,
for (i = 0; i < slices->count; i++) {
char* data =
grpc_dump_slice(slices->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "WRITE %p: %s", ep, data);
gpr_log(GPR_INFO, "WRITE %p: %s", ep, data);
gpr_free(data);
}
}

@ -232,6 +232,10 @@ static grpc_error* on_handshake_next_done_locked(
const unsigned char* bytes_to_send, size_t bytes_to_send_size,
tsi_handshaker_result* handshaker_result) {
grpc_error* error = GRPC_ERROR_NONE;
// Handshaker was shutdown.
if (h->shutdown) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshaker shutdown");
}
// Read more if we need to.
if (result == TSI_INCOMPLETE_DATA) {
GPR_ASSERT(bytes_to_send_size == 0);
@ -406,7 +410,7 @@ static void security_handshaker_do_handshake(grpc_handshaker* handshaker,
static const grpc_handshaker_vtable security_handshaker_vtable = {
security_handshaker_destroy, security_handshaker_shutdown,
security_handshaker_do_handshake};
security_handshaker_do_handshake, "security"};
static grpc_handshaker* security_handshaker_create(
tsi_handshaker* handshaker, grpc_security_connector* connector) {
@ -456,7 +460,7 @@ static void fail_handshaker_do_handshake(grpc_handshaker* handshaker,
static const grpc_handshaker_vtable fail_handshaker_vtable = {
fail_handshaker_destroy, fail_handshaker_shutdown,
fail_handshaker_do_handshake};
fail_handshaker_do_handshake, "security_fail"};
static grpc_handshaker* fail_handshaker_create() {
grpc_handshaker* h = static_cast<grpc_handshaker*>(gpr_malloc(sizeof(*h)));

@ -81,6 +81,10 @@ class SliceHashTable : public RefCounted<SliceHashTable<T>> {
template <typename T2, typename... Args>
friend T2* New(Args&&... args);
// So Delete() can call our private dtor.
template <typename T2>
friend void Delete(T2*);
SliceHashTable(size_t num_entries, Entry* entries, ValueCmp value_cmp);
virtual ~SliceHashTable();

@ -65,6 +65,10 @@ class SliceWeakHashTable : public RefCounted<SliceWeakHashTable<T, Size>> {
template <typename T2, typename... Args>
friend T2* New(Args&&... args);
// So Delete() can call our private dtor.
template <typename T2>
friend void Delete(T2*);
SliceWeakHashTable() = default;
~SliceWeakHashTable() = default;

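Both hunks above add the same two-line befriending so that grpc_core::Delete() can invoke a private destructor, mirroring the existing friendship that already lets grpc_core::New() reach the private constructor. A rough, self-contained sketch of the pattern (hypothetical Cache class with local New/Delete stand-ins; not gRPC code):

    #include <utility>

    // Stand-ins for grpc_core::New / grpc_core::Delete (assumptions for this sketch).
    template <typename T, typename... Args>
    T* New(Args&&... args) { return new T(std::forward<Args>(args)...); }
    template <typename T>
    void Delete(T* p) { delete p; }

    class Cache {
     private:
      // So New() can call our private ctor.
      template <typename T2, typename... Args>
      friend T2* New(Args&&... args);
      // So Delete() can call our private dtor.
      template <typename T2>
      friend void Delete(T2*);
      Cache() = default;
      ~Cache() = default;
    };

    int main() {
      Cache* c = New<Cache>();  // allowed: New is a friend, may call the private ctor
      Delete(c);                // allowed: Delete is a friend, may call the private dtor
    }

Without the second friend declaration, the delete inside Delete() would fail to compile because the destructor is private, which is exactly the gap these hunks close.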
@ -610,7 +610,7 @@ grpc_call_error grpc_call_cancel(grpc_call* call, void* reserved) {
// This is called via the call combiner to start sending a batch down
// the filter stack.
static void execute_batch_in_call_combiner(void* arg, grpc_error* ignored) {
GPR_TIMER_SCOPE("execute_batch", 0);
GPR_TIMER_SCOPE("execute_batch_in_call_combiner", 0);
grpc_transport_stream_op_batch* batch =
static_cast<grpc_transport_stream_op_batch*>(arg);
grpc_call* call = static_cast<grpc_call*>(batch->handler_private.extra_arg);
@ -747,10 +747,10 @@ static void get_final_status(
status[i] = unpack_received_status(gpr_atm_acq_load(&call->status[i]));
}
if (grpc_call_error_trace.enabled()) {
gpr_log(GPR_DEBUG, "get_final_status %s", call->is_client ? "CLI" : "SVR");
gpr_log(GPR_INFO, "get_final_status %s", call->is_client ? "CLI" : "SVR");
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
if (status[i].is_set) {
gpr_log(GPR_DEBUG, " %d: %s", i, grpc_error_string(status[i].error));
gpr_log(GPR_INFO, " %d: %s", i, grpc_error_string(status[i].error));
}
}
}
@ -1539,7 +1539,7 @@ static void free_no_op_completion(void* p, grpc_cq_completion* completion) {
static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
size_t nops, void* notify_tag,
int is_notify_tag_closure) {
GPR_TIMER_SCOPE("grpc_call_start_batch", 0);
GPR_TIMER_SCOPE("call_start_batch", 0);
size_t i;
const grpc_op* op;

@ -1161,6 +1161,22 @@ static void listener_destroy_done(void* s, grpc_error* error) {
gpr_mu_unlock(&server->mu_global);
}
/*
 - Kills all pending requests-for-incoming-RPC-calls (i.e. the requests made via
 grpc_server_request_call and grpc_server_request_registered_call will now be
 cancelled). See 'kill_pending_work_locked()'
 - Shuts down the listeners (i.e. the server will no longer listen on the port
 for new incoming channels).
 - Iterates through all channels on the server and sends a shutdown msg (see
 'channel_broadcaster_shutdown()' for details) to the clients via the
 transport layer. The transport layer then guarantees the following:
 -- Sends shutdown to the client (e.g. the HTTP2 transport sends GOAWAY)
 -- If the server has outstanding calls that are in progress, the
 connection is NOT closed until the server is done with all those calls
 -- Once there are no more calls in progress, the channel is closed
 */
void grpc_server_shutdown_and_notify(grpc_server* server,
grpc_completion_queue* cq, void* tag) {
listener* l;

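The comment block added above documents what grpc_server_shutdown_and_notify() guarantees. As orientation for callers, a minimal shutdown sequence might look like the sketch below; it assumes a running grpc_server* server, a grpc_completion_queue* cq previously registered with that server, and an arbitrary caller-chosen tag (all assumptions of the sketch, not part of this diff):

    // Begin graceful shutdown and ask for a notification on cq.
    void* shutdown_tag = reinterpret_cast<void*>(1);
    grpc_server_shutdown_and_notify(server, cq, shutdown_tag);
    // Optionally force-cancel calls still in flight instead of waiting for them:
    // grpc_server_cancel_all_calls(server);
    grpc_event ev = grpc_completion_queue_pluck(
        cq, shutdown_tag, gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
    GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);  // shutdown has completed
    grpc_server_destroy(server);
    grpc_completion_queue_shutdown(cq);

Only after the notification tag is plucked have the guarantees listed above (pending requests cancelled, listeners closed, channels sent their shutdown message) taken effect.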
@ -25,4 +25,4 @@
const char* grpc_version_string(void) { return "6.0.0-dev"; }
const char* grpc_g_stands_for(void) { return "glorious"; }
const char* grpc_g_stands_for(void) { return "gloriosa"; }

@ -47,7 +47,7 @@ grpc_millis BdpEstimator::CompletePing() {
double bw = dt > 0 ? (static_cast<double>(accumulator_) / dt) : 0;
int start_inter_ping_delay = inter_ping_delay_;
if (grpc_bdp_estimator_trace.enabled()) {
gpr_log(GPR_DEBUG,
gpr_log(GPR_INFO,
"bdp[%s]:complete acc=%" PRId64 " est=%" PRId64
" dt=%lf bw=%lfMbs bw_est=%lfMbs",
name_, accumulator_, estimate_, dt, bw / 125000.0,
@ -58,7 +58,7 @@ grpc_millis BdpEstimator::CompletePing() {
estimate_ = GPR_MAX(accumulator_, estimate_ * 2);
bw_est_ = bw;
if (grpc_bdp_estimator_trace.enabled()) {
gpr_log(GPR_DEBUG, "bdp[%s]: estimate increased to %" PRId64, name_,
gpr_log(GPR_INFO, "bdp[%s]: estimate increased to %" PRId64, name_,
estimate_);
}
inter_ping_delay_ /= 2; // if the ping estimate changes,
@ -75,7 +75,7 @@ grpc_millis BdpEstimator::CompletePing() {
if (start_inter_ping_delay != inter_ping_delay_) {
stable_estimate_count_ = 0;
if (grpc_bdp_estimator_trace.enabled()) {
gpr_log(GPR_DEBUG, "bdp[%s]:update_inter_time to %dms", name_,
gpr_log(GPR_INFO, "bdp[%s]:update_inter_time to %dms", name_,
inter_ping_delay_);
}
}

@ -50,7 +50,7 @@ class BdpEstimator {
// transport (but not necessarily started)
void SchedulePing() {
if (grpc_bdp_estimator_trace.enabled()) {
gpr_log(GPR_DEBUG, "bdp[%s]:sched acc=%" PRId64 " est=%" PRId64, name_,
gpr_log(GPR_INFO, "bdp[%s]:sched acc=%" PRId64 " est=%" PRId64, name_,
accumulator_, estimate_);
}
GPR_ASSERT(ping_state_ == PingState::UNSCHEDULED);
@ -63,7 +63,7 @@ class BdpEstimator {
// the ping is on the wire
void StartPing() {
if (grpc_bdp_estimator_trace.enabled()) {
gpr_log(GPR_DEBUG, "bdp[%s]:start acc=%" PRId64 " est=%" PRId64, name_,
gpr_log(GPR_INFO, "bdp[%s]:start acc=%" PRId64 " est=%" PRId64, name_,
accumulator_, estimate_);
}
GPR_ASSERT(ping_state_ == PingState::SCHEDULED);

@ -78,7 +78,7 @@ grpc_connectivity_state grpc_connectivity_state_check(
grpc_connectivity_state cur = static_cast<grpc_connectivity_state>(
gpr_atm_no_barrier_load(&tracker->current_state_atm));
if (grpc_connectivity_state_trace.enabled()) {
gpr_log(GPR_DEBUG, "CONWATCH: %p %s: get %s", tracker, tracker->name,
gpr_log(GPR_INFO, "CONWATCH: %p %s: get %s", tracker, tracker->name,
grpc_connectivity_state_name(cur));
}
return cur;
@ -89,7 +89,7 @@ grpc_connectivity_state grpc_connectivity_state_get(
grpc_connectivity_state cur = static_cast<grpc_connectivity_state>(
gpr_atm_no_barrier_load(&tracker->current_state_atm));
if (grpc_connectivity_state_trace.enabled()) {
gpr_log(GPR_DEBUG, "CONWATCH: %p %s: get %s", tracker, tracker->name,
gpr_log(GPR_INFO, "CONWATCH: %p %s: get %s", tracker, tracker->name,
grpc_connectivity_state_name(cur));
}
if (error != nullptr) {
@ -110,10 +110,10 @@ bool grpc_connectivity_state_notify_on_state_change(
gpr_atm_no_barrier_load(&tracker->current_state_atm));
if (grpc_connectivity_state_trace.enabled()) {
if (current == nullptr) {
gpr_log(GPR_DEBUG, "CONWATCH: %p %s: unsubscribe notify=%p", tracker,
gpr_log(GPR_INFO, "CONWATCH: %p %s: unsubscribe notify=%p", tracker,
tracker->name, notify);
} else {
gpr_log(GPR_DEBUG, "CONWATCH: %p %s: from %s [cur=%s] notify=%p", tracker,
gpr_log(GPR_INFO, "CONWATCH: %p %s: from %s [cur=%s] notify=%p", tracker,
tracker->name, grpc_connectivity_state_name(*current),
grpc_connectivity_state_name(cur), notify);
}
@ -161,7 +161,7 @@ void grpc_connectivity_state_set(grpc_connectivity_state_tracker* tracker,
grpc_connectivity_state_watcher* w;
if (grpc_connectivity_state_trace.enabled()) {
const char* error_string = grpc_error_string(error);
gpr_log(GPR_DEBUG, "SET: %p %s: %s --> %s [%s] error=%p %s", tracker,
gpr_log(GPR_INFO, "SET: %p %s: %s --> %s [%s] error=%p %s", tracker,
tracker->name, grpc_connectivity_state_name(cur),
grpc_connectivity_state_name(state), reason, error, error_string);
}
@ -187,8 +187,7 @@ void grpc_connectivity_state_set(grpc_connectivity_state_tracker* tracker,
*w->current = state;
tracker->watchers = w->next;
if (grpc_connectivity_state_trace.enabled()) {
gpr_log(GPR_DEBUG, "NOTIFY: %p %s: %p", tracker, tracker->name,
w->notify);
gpr_log(GPR_INFO, "NOTIFY: %p %s: %p", tracker, tracker->name, w->notify);
}
GRPC_CLOSURE_SCHED(w->notify, GRPC_ERROR_REF(tracker->current_error));
gpr_free(w);

@ -69,6 +69,10 @@ class SslSessionLRUCache : public grpc_core::RefCounted<SslSessionLRUCache> {
template <typename T, typename... Args>
friend T* grpc_core::New(Args&&... args);
// So Delete() can call our private dtor.
template <typename T>
friend void grpc_core::Delete(T*);
class Node;
explicit SslSessionLRUCache(size_t capacity);

@ -120,12 +120,14 @@ typedef struct {
/* --- Library Initialization. ---*/
static gpr_once g_init_openssl_once = GPR_ONCE_INIT;
static gpr_mu* g_openssl_mutexes = nullptr;
static int g_ssl_ctx_ex_factory_index = -1;
static const unsigned char kSslSessionIdContext[] = {'g', 'r', 'p', 'c'};
#if OPENSSL_VERSION_NUMBER < 0x10100000
static gpr_mu* g_openssl_mutexes = nullptr;
static void openssl_locking_cb(int mode, int type, const char* file,
int line) GRPC_UNUSED;
static unsigned long openssl_thread_id_cb(void) GRPC_UNUSED;
static const unsigned char kSslSessionIdContext[] = {'g', 'r', 'p', 'c'};
static void openssl_locking_cb(int mode, int type, const char* file, int line) {
if (mode & CRYPTO_LOCK) {
@ -138,22 +140,27 @@ static void openssl_locking_cb(int mode, int type, const char* file, int line) {
static unsigned long openssl_thread_id_cb(void) {
return static_cast<unsigned long>(gpr_thd_currentid());
}
#endif
static void init_openssl(void) {
int i;
int num_locks;
SSL_library_init();
SSL_load_error_strings();
OpenSSL_add_all_algorithms();
num_locks = CRYPTO_num_locks();
#if OPENSSL_VERSION_NUMBER < 0x10100000
if (!CRYPTO_get_locking_callback()) {
int num_locks = CRYPTO_num_locks();
GPR_ASSERT(num_locks > 0);
g_openssl_mutexes = static_cast<gpr_mu*>(
gpr_malloc(static_cast<size_t>(num_locks) * sizeof(gpr_mu)));
for (i = 0; i < CRYPTO_num_locks(); i++) {
for (int i = 0; i < num_locks; i++) {
gpr_mu_init(&g_openssl_mutexes[i]);
}
CRYPTO_set_locking_callback(openssl_locking_cb);
CRYPTO_set_id_callback(openssl_thread_id_cb);
} else {
gpr_log(GPR_INFO, "OpenSSL callback has already been set.");
}
#endif
g_ssl_ctx_ex_factory_index =
SSL_CTX_get_ex_new_index(0, nullptr, nullptr, nullptr, nullptr);
GPR_ASSERT(g_ssl_ctx_ex_factory_index != -1);

Some files were not shown because too many files have changed in this diff.
