Merge github.com:grpc/grpc into execp

pull/12580/head
Craig Tiller 8 years ago
commit 5e5b73ccf0
  1. BUILD (8)
  2. CMakeLists.txt (184)
  3. Makefile (188)
  4. bazel/grpc_build_system.bzl (16)
  5. config.m4 (2)
  6. doc/environment_variables.md (1)
  7. examples/cpp/helloworld/greeter_async_client.cc (12)
  8. examples/cpp/helloworld/greeter_async_client2.cc (14)
  9. grpc.gemspec (1)
  10. include/grpc++/generic/generic_stub.h (7)
  11. include/grpc++/impl/codegen/async_stream.h (136)
  12. include/grpc++/impl/codegen/async_unary_call.h (41)
  13. include/grpc++/support/channel_arguments.h (6)
  14. include/grpc/compression.h (4)
  15. include/grpc/impl/codegen/grpc_types.h (19)
  16. package.xml (5)
  17. setup.py (2)
  18. src/compiler/cpp_generator.cc (578)
  19. src/compiler/python_generator.cc (4)
  20. src/core/ext/census/base_resources.c (28)
  21. src/core/ext/filters/client_channel/client_channel.c (13)
  22. src/core/ext/filters/client_channel/client_channel_factory.c (2)
  23. src/core/ext/filters/client_channel/client_channel_plugin.c (4)
  24. src/core/ext/filters/client_channel/http_proxy.c (6)
  25. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c (204)
  26. src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c (2)
  27. src/core/ext/filters/client_channel/lb_policy_factory.c (4)
  28. src/core/ext/filters/client_channel/lb_policy_factory.h (2)
  29. src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c (4)
  30. src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c (2)
  31. src/core/ext/filters/client_channel/subchannel.c (2)
  32. src/core/ext/filters/http/message_compress/message_compress_filter.c (4)
  33. src/core/ext/filters/load_reporting/server_load_reporting_plugin.c (3)
  34. src/core/ext/transport/chttp2/client/insecure/channel_create.c (2)
  35. src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c (2)
  36. src/core/ext/transport/chttp2/transport/chttp2_plugin.c (1)
  37. src/core/ext/transport/chttp2/transport/chttp2_transport.c (116)
  38. src/core/ext/transport/chttp2/transport/chttp2_transport.h (1)
  39. src/core/ext/transport/chttp2/transport/frame_ping.c (2)
  40. src/core/ext/transport/chttp2/transport/internal.h (4)
  41. src/core/ext/transport/chttp2/transport/parsing.c (7)
  42. src/core/ext/transport/chttp2/transport/stream_lists.c (52)
  43. src/core/ext/transport/chttp2/transport/writing.c (38)
  44. src/core/ext/transport/inproc/inproc_transport.c (4)
  45. src/core/lib/channel/channel_args.c (12)
  46. src/core/lib/channel/channel_stack.h (2)
  47. src/core/lib/compression/compression.c (4)
  48. src/core/lib/debug/stats_data.c (214)
  49. src/core/lib/debug/stats_data.h (65)
  50. src/core/lib/debug/stats_data.yaml (30)
  51. src/core/lib/debug/stats_data_bq_schema.sql (6)
  52. src/core/lib/debug/trace.h (2)
  53. src/core/lib/http/httpcli_security_connector.c (3)
  54. src/core/lib/iomgr/closure.c (18)
  55. src/core/lib/iomgr/error.c (2)
  56. src/core/lib/iomgr/ev_epoll1_linux.c (91)
  57. src/core/lib/iomgr/ev_epollex_linux.c (204)
  58. src/core/lib/iomgr/ev_epollsig_linux.c (5)
  59. src/core/lib/iomgr/ev_poll_posix.c (46)
  60. src/core/lib/iomgr/ev_posix.c (4)
  61. src/core/lib/iomgr/ev_posix.h (2)
  62. src/core/lib/iomgr/pollset.h (2)
  63. src/core/lib/iomgr/pollset_windows.c (6)
  64. src/core/lib/iomgr/resolve_address_posix.c (2)
  65. src/core/lib/security/credentials/google_default/google_default_credentials.c (3)
  66. src/core/lib/security/transport/security_connector.c (22)
  67. src/core/lib/security/transport/security_handshaker.c (2)
  68. src/core/lib/support/log_linux.c (2)
  69. src/core/lib/support/string.c (2)
  70. src/core/lib/surface/call.c (79)
  71. src/core/lib/surface/call.h (2)
  72. src/core/lib/surface/channel.c (2)
  73. src/core/lib/surface/completion_queue.c (30)
  74. src/core/lib/transport/transport_op_string.c (2)
  75. src/core/tsi/fake_transport_security.c (3)
  76. src/core/tsi/ssl_transport_security.c (113)
  77. src/core/tsi/ssl_transport_security.h (37)
  78. src/core/tsi/transport_security.h (10)
  79. src/core/tsi/transport_security_grpc.c (8)
  80. src/core/tsi/transport_security_grpc.h (3)
  81. src/cpp/client/client_context.cc (2)
  82. src/cpp/client/generic_stub.cc (23)
  83. src/cpp/common/channel_arguments.cc (4)
  84. src/cpp/server/server_context.cc (2)
  85. src/csharp/Grpc.Auth/Grpc.Auth.csproj (1)
  86. src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj (1)
  87. src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj (3)
  88. src/csharp/Grpc.Core/Grpc.Core.csproj (3)
  89. src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj (1)
  90. src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj (1)
  91. src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj (2)
  92. src/csharp/Grpc.Examples/Grpc.Examples.csproj (1)
  93. src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj (2)
  94. src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj (1)
  95. src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj (2)
  96. src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj (2)
  97. src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj (2)
  98. src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj (2)
  99. src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj (6)
  100. src/csharp/Grpc.Microbenchmarks/Grpc.Microbenchmarks.csproj (2)
  101. Some files were not shown because too many files have changed in this diff.

@@ -574,6 +574,8 @@ grpc_cc_library(
"src/core/lib/compression/compression.c",
"src/core/lib/compression/message_compress.c",
"src/core/lib/compression/stream_compression.c",
"src/core/lib/debug/stats.c",
"src/core/lib/debug/stats_data.c",
"src/core/lib/http/format_request.c",
"src/core/lib/http/httpcli.c",
"src/core/lib/http/parser.c",
@@ -690,8 +692,6 @@ grpc_cc_library(
"src/core/lib/transport/timeout_encoding.c",
"src/core/lib/transport/transport.c",
"src/core/lib/transport/transport_op_string.c",
"src/core/lib/debug/stats.c",
"src/core/lib/debug/stats_data.c",
],
hdrs = [
"src/core/lib/channel/channel_args.h",
@@ -705,6 +705,8 @@ grpc_cc_library(
"src/core/lib/compression/algorithm_metadata.h",
"src/core/lib/compression/message_compress.h",
"src/core/lib/compression/stream_compression.h",
"src/core/lib/debug/stats.h",
"src/core/lib/debug/stats_data.h",
"src/core/lib/http/format_request.h",
"src/core/lib/http/httpcli.h",
"src/core/lib/http/parser.h",
@@ -807,8 +809,6 @@ grpc_cc_library(
"src/core/lib/transport/timeout_encoding.h",
"src/core/lib/transport/transport.h",
"src/core/lib/transport/transport_impl.h",
"src/core/lib/debug/stats.h",
"src/core/lib/debug/stats_data.h",
],
external_deps = [
"zlib",

@@ -761,6 +761,18 @@ add_dependencies(buildtests_cxx thread_stress_test)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx writes_per_rpc_test)
endif()
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx resolver_component_test_unsecure)
endif()
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx resolver_component_test)
endif()
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx resolver_component_tests_runner_invoker_unsecure)
endif()
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx resolver_component_tests_runner_invoker)
endif()
add_custom_target(buildtests
DEPENDS buildtests_c buildtests_cxx)
@@ -14114,6 +14126,178 @@ target_link_libraries(inproc_nosec_test
gpr
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_executable(resolver_component_test_unsecure
test/cpp/naming/resolver_component_test.cc
third_party/googletest/googletest/src/gtest-all.cc
third_party/googletest/googlemock/src/gmock-all.cc
)
target_include_directories(resolver_component_test_unsecure
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CARES_INCLUDE_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
PRIVATE third_party/googletest/googletest/include
PRIVATE third_party/googletest/googletest
PRIVATE third_party/googletest/googlemock/include
PRIVATE third_party/googletest/googlemock
PRIVATE ${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(resolver_component_test_unsecure
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
grpc++_test_util_unsecure
grpc_test_util_unsecure
gpr_test_util
grpc++_unsecure
grpc_unsecure
gpr
grpc++_test_config
${_gRPC_GFLAGS_LIBRARIES}
)
endif()
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_executable(resolver_component_test
test/cpp/naming/resolver_component_test.cc
third_party/googletest/googletest/src/gtest-all.cc
third_party/googletest/googlemock/src/gmock-all.cc
)
target_include_directories(resolver_component_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CARES_INCLUDE_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
PRIVATE third_party/googletest/googletest/include
PRIVATE third_party/googletest/googletest
PRIVATE third_party/googletest/googlemock/include
PRIVATE third_party/googletest/googlemock
PRIVATE ${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(resolver_component_test
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
grpc++_test_util
grpc_test_util
gpr_test_util
grpc++
grpc
gpr
grpc++_test_config
${_gRPC_GFLAGS_LIBRARIES}
)
endif()
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_executable(resolver_component_tests_runner_invoker_unsecure
test/cpp/naming/resolver_component_tests_runner_invoker.cc
third_party/googletest/googletest/src/gtest-all.cc
third_party/googletest/googlemock/src/gmock-all.cc
)
target_include_directories(resolver_component_tests_runner_invoker_unsecure
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CARES_INCLUDE_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
PRIVATE third_party/googletest/googletest/include
PRIVATE third_party/googletest/googletest
PRIVATE third_party/googletest/googlemock/include
PRIVATE third_party/googletest/googlemock
PRIVATE ${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(resolver_component_tests_runner_invoker_unsecure
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
grpc++_test_util
grpc_test_util
gpr_test_util
grpc++
grpc
gpr
grpc++_test_config
${_gRPC_GFLAGS_LIBRARIES}
)
endif()
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_executable(resolver_component_tests_runner_invoker
test/cpp/naming/resolver_component_tests_runner_invoker.cc
third_party/googletest/googletest/src/gtest-all.cc
third_party/googletest/googlemock/src/gmock-all.cc
)
target_include_directories(resolver_component_tests_runner_invoker
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CARES_INCLUDE_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
PRIVATE third_party/googletest/googletest/include
PRIVATE third_party/googletest/googletest
PRIVATE third_party/googletest/googlemock/include
PRIVATE third_party/googletest/googlemock
PRIVATE ${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(resolver_component_tests_runner_invoker
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
grpc++_test_util
grpc_test_util
gpr_test_util
grpc++
grpc
gpr
grpc++_test_config
${_gRPC_GFLAGS_LIBRARIES}
)
endif()
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)

@@ -1266,6 +1266,10 @@ h2_sockpair+trace_nosec_test: $(BINDIR)/$(CONFIG)/h2_sockpair+trace_nosec_test
h2_sockpair_1byte_nosec_test: $(BINDIR)/$(CONFIG)/h2_sockpair_1byte_nosec_test
h2_uds_nosec_test: $(BINDIR)/$(CONFIG)/h2_uds_nosec_test
inproc_nosec_test: $(BINDIR)/$(CONFIG)/inproc_nosec_test
resolver_component_test_unsecure: $(BINDIR)/$(CONFIG)/resolver_component_test_unsecure
resolver_component_test: $(BINDIR)/$(CONFIG)/resolver_component_test
resolver_component_tests_runner_invoker_unsecure: $(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker_unsecure
resolver_component_tests_runner_invoker: $(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker
api_fuzzer_one_entry: $(BINDIR)/$(CONFIG)/api_fuzzer_one_entry
client_fuzzer_one_entry: $(BINDIR)/$(CONFIG)/client_fuzzer_one_entry
hpack_parser_fuzzer_test_one_entry: $(BINDIR)/$(CONFIG)/hpack_parser_fuzzer_test_one_entry
@@ -1652,6 +1656,10 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/boringssl_x509_test \
$(BINDIR)/$(CONFIG)/boringssl_tab_test \
$(BINDIR)/$(CONFIG)/boringssl_v3name_test \
$(BINDIR)/$(CONFIG)/resolver_component_test_unsecure \
$(BINDIR)/$(CONFIG)/resolver_component_test \
$(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker_unsecure \
$(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker \
else
buildtests_cxx: privatelibs_cxx \
@@ -1730,6 +1738,10 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/thread_manager_test \
$(BINDIR)/$(CONFIG)/thread_stress_test \
$(BINDIR)/$(CONFIG)/writes_per_rpc_test \
$(BINDIR)/$(CONFIG)/resolver_component_test_unsecure \
$(BINDIR)/$(CONFIG)/resolver_component_test \
$(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker_unsecure \
$(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker \
endif
@@ -2141,6 +2153,10 @@ test_cxx: buildtests_cxx
$(Q) $(BINDIR)/$(CONFIG)/thread_stress_test || ( echo test thread_stress_test failed ; exit 1 )
$(E) "[RUN] Testing writes_per_rpc_test"
$(Q) $(BINDIR)/$(CONFIG)/writes_per_rpc_test || ( echo test writes_per_rpc_test failed ; exit 1 )
$(E) "[RUN] Testing resolver_component_tests_runner_invoker_unsecure"
$(Q) $(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker_unsecure || ( echo test resolver_component_tests_runner_invoker_unsecure failed ; exit 1 )
$(E) "[RUN] Testing resolver_component_tests_runner_invoker"
$(Q) $(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker || ( echo test resolver_component_tests_runner_invoker failed ; exit 1 )
flaky_test_cxx: buildtests_cxx
@@ -19480,6 +19496,178 @@ ifneq ($(NO_DEPS),true)
endif
RESOLVER_COMPONENT_TEST_UNSECURE_SRC = \
test/cpp/naming/resolver_component_test.cc \
RESOLVER_COMPONENT_TEST_UNSECURE_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(RESOLVER_COMPONENT_TEST_UNSECURE_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/resolver_component_test_unsecure: openssl_dep_error
else
ifeq ($(NO_PROTOBUF),true)
# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
$(BINDIR)/$(CONFIG)/resolver_component_test_unsecure: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/resolver_component_test_unsecure: $(PROTOBUF_DEP) $(RESOLVER_COMPONENT_TEST_UNSECURE_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(RESOLVER_COMPONENT_TEST_UNSECURE_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/resolver_component_test_unsecure
endif
endif
$(OBJDIR)/$(CONFIG)/test/cpp/naming/resolver_component_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
deps_resolver_component_test_unsecure: $(RESOLVER_COMPONENT_TEST_UNSECURE_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(RESOLVER_COMPONENT_TEST_UNSECURE_OBJS:.o=.dep)
endif
endif
RESOLVER_COMPONENT_TEST_SRC = \
test/cpp/naming/resolver_component_test.cc \
RESOLVER_COMPONENT_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(RESOLVER_COMPONENT_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/resolver_component_test: openssl_dep_error
else
ifeq ($(NO_PROTOBUF),true)
# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
$(BINDIR)/$(CONFIG)/resolver_component_test: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/resolver_component_test: $(PROTOBUF_DEP) $(RESOLVER_COMPONENT_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(RESOLVER_COMPONENT_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/resolver_component_test
endif
endif
$(OBJDIR)/$(CONFIG)/test/cpp/naming/resolver_component_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
deps_resolver_component_test: $(RESOLVER_COMPONENT_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(RESOLVER_COMPONENT_TEST_OBJS:.o=.dep)
endif
endif
RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_UNSECURE_SRC = \
test/cpp/naming/resolver_component_tests_runner_invoker.cc \
RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_UNSECURE_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_UNSECURE_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker_unsecure: openssl_dep_error
else
ifeq ($(NO_PROTOBUF),true)
# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
$(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker_unsecure: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker_unsecure: $(PROTOBUF_DEP) $(RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_UNSECURE_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_UNSECURE_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker_unsecure
endif
endif
$(OBJDIR)/$(CONFIG)/test/cpp/naming/resolver_component_tests_runner_invoker.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
deps_resolver_component_tests_runner_invoker_unsecure: $(RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_UNSECURE_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_UNSECURE_OBJS:.o=.dep)
endif
endif
RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_SRC = \
test/cpp/naming/resolver_component_tests_runner_invoker.cc \
RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker: openssl_dep_error
else
ifeq ($(NO_PROTOBUF),true)
# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
$(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker: $(PROTOBUF_DEP) $(RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker
endif
endif
$(OBJDIR)/$(CONFIG)/test/cpp/naming/resolver_component_tests_runner_invoker.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
deps_resolver_component_tests_runner_invoker: $(RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_OBJS:.o=.dep)
endif
endif
API_FUZZER_ONE_ENTRY_SRC = \
test/core/end2end/fuzzers/api_fuzzer.c \
test/core/util/one_corpus_entry_fuzzer.c \

@@ -106,6 +106,22 @@ def grpc_sh_test(name, srcs, args = [], data = []):
args = args,
data = data)
def grpc_sh_binary(name, srcs, data = []):
native.sh_test(
name = name,
srcs = srcs,
data = data)
def grpc_py_binary(name, srcs, data = [], deps = []):
if name == "test_dns_server":
# TODO: allow running test_dns_server in oss bazel test suite
deps = []
native.py_binary(
name = name,
srcs = srcs,
data = data,
deps = deps)
def grpc_package(name, visibility = "private", features = []):
if visibility == "tests":
visibility = ["//test:__subpackages__"]

@@ -12,7 +12,7 @@ if test "$PHP_GRPC" != "no"; then
LIBS="-lpthread $LIBS"
CFLAGS="-Wall -Werror -Wno-parentheses-equality -Wno-unused-value -std=c11"
CXXFLAGS="-std=c++11"
CXXFLAGS="-std=c++11 -fno-exceptions -fno-rtti"
GRPC_SHARED_LIBADD="-lpthread $GRPC_SHARED_LIBADD"
PHP_REQUIRE_CXX()
PHP_ADD_LIBRARY(pthread)

@@ -50,6 +50,7 @@ some configuration as environment variables that can be set.
- channel_stack_builder - traces information about channel stacks being built
- executor - traces grpc's internal thread pool ('the executor')
- http - traces state in the http2 transport engine
- http2_stream_state - traces all http2 stream state mutations.
- http1 - traces HTTP/1.x operations performed by gRPC
- inproc - traces the in-process transport
- flowctl - traces http2 flow control
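Any of the tracers above is enabled through the environment documented in this file; for example, starting a gRPC process with GRPC_TRACE=http2_stream_state,http and GRPC_VERBOSITY=DEBUG turns on the new stream-state tracer alongside the existing http tracer.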

@@ -60,11 +60,15 @@ class GreeterClient {
// Storage for the status of the RPC upon completion.
Status status;
// stub_->AsyncSayHello() performs the RPC call, returning an instance we
// store in "rpc". Because we are using the asynchronous API, we need to
// hold on to the "rpc" instance in order to get updates on the ongoing RPC.
// stub_->PrepareAsyncSayHello() creates an RPC object, returning
// an instance to store in "call" but does not actually start the RPC
// Because we are using the asynchronous API, we need to hold on to
// the "call" instance in order to get updates on the ongoing RPC.
std::unique_ptr<ClientAsyncResponseReader<HelloReply> > rpc(
stub_->AsyncSayHello(&context, request, &cq));
stub_->PrepareAsyncSayHello(&context, request, &cq));
// StartCall initiates the RPC call
rpc->StartCall();
// Request that, upon completion of the RPC, "reply" be updated with the
// server's response; "status" with the indication of whether the operation
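Putting the pieces together, the full two-phase unary flow from this example file looks roughly like this (a trimmed sketch of GreeterClient::SayHello; error handling omitted):

    CompletionQueue cq;
    ClientContext context;
    HelloRequest request;
    request.set_name("world");
    HelloReply reply;
    Status status;

    // Phase 1: PrepareAsyncSayHello only creates and configures the call
    // object; nothing is sent on the wire yet.
    std::unique_ptr<ClientAsyncResponseReader<HelloReply> > rpc(
        stub_->PrepareAsyncSayHello(&context, request, &cq));

    // Phase 2: StartCall sends the initial metadata and buffered request.
    rpc->StartCall();

    // Ask for reply/status on completion, keyed by an arbitrary tag.
    rpc->Finish(&reply, &status, (void*)1);

    // Block until the tag comes back, then inspect the result.
    void* got_tag;
    bool ok = false;
    cq.Next(&got_tag, &ok);
    if (ok && got_tag == (void*)1 && status.ok()) {
      std::cout << reply.message() << std::endl;
    }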

@@ -49,11 +49,15 @@ class GreeterClient {
// Call object to store rpc data
AsyncClientCall* call = new AsyncClientCall;
// stub_->AsyncSayHello() performs the RPC call, returning an instance to
// store in "call". Because we are using the asynchronous API, we need to
// hold on to the "call" instance in order to get updates on the ongoing RPC.
call->response_reader = stub_->AsyncSayHello(&call->context, request, &cq_);
// stub_->PrepareAsyncSayHello() creates an RPC object, returning
// an instance to store in "call" but does not actually start the RPC
// Because we are using the asynchronous API, we need to hold on to
// the "call" instance in order to get updates on the ongoing RPC.
call->response_reader =
stub_->PrepareAsyncSayHello(&call->context, request, &cq_);
// StartCall initiates the RPC call
call->response_reader->StartCall();
// Request that, upon completion of the RPC, "reply" be updated with the
// server's response; "status" with the indication of whether the operation

@@ -29,6 +29,7 @@ Gem::Specification.new do |s|
s.add_dependency 'google-protobuf', '~> 3.1'
s.add_dependency 'googleauth', '~> 0.5.1'
s.add_dependency 'googleapis-common-protos-types', '~> 1.0.0'
s.add_development_dependency 'bundler', '~> 1.9'
s.add_development_dependency 'facter', '~> 2.4'

@@ -44,6 +44,13 @@ class GenericStub final {
ClientContext* context, const grpc::string& method, CompletionQueue* cq,
void* tag);
/// Setup a call to a named method \a method using \a context, but don't
/// start it. Let it be started explicitly with StartCall and a tag.
/// The return value only indicates whether or not registration of the call
/// succeeded (i.e. the call won't proceed if the return value is nullptr).
std::unique_ptr<GenericClientAsyncReaderWriter> PrepareCall(
ClientContext* context, const grpc::string& method, CompletionQueue* cq);
private:
std::shared_ptr<ChannelInterface> channel_;
};
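A hedged sketch of driving PrepareCall (the fully qualified method string and tag value are illustrative; the channel and completion queue are assumed to exist elsewhere):

    grpc::GenericStub generic_stub(channel);
    grpc::ClientContext context;

    // Set up, but do not start, a generic bidi call to the named method.
    std::unique_ptr<grpc::GenericClientAsyncReaderWriter> call =
        generic_stub.PrepareCall(&context, "/helloworld.Greeter/SayHello", &cq);

    // Per the contract above, nothing happens on the wire until StartCall;
    // its completion is reported on the queue with the given tag.
    call->StartCall(reinterpret_cast<void*>(1));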

@@ -35,6 +35,11 @@ class ClientAsyncStreamingInterface {
public:
virtual ~ClientAsyncStreamingInterface() {}
/// Start the call that was set up by the constructor, but only if the
/// constructor was invoked through the "Prepare" API which doesn't actually
/// start the call
virtual void StartCall(void* tag) = 0;
/// Request notification of the reading of the initial metadata. Completion
/// will be notified by \a tag on the associated completion queue.
/// This call is optional, but if it is used, it cannot be used concurrently
@@ -156,20 +161,22 @@ class ClientAsyncReaderInterface : public ClientAsyncStreamingInterface,
template <class R>
class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
public:
/// Create a stream and write the first request out.
/// Create a stream object.
/// Write the first request out if \a start is set.
/// \a tag will be notified on \a cq when the call has been started and
/// \a request has been written out.
/// \a request has been written out. If \a start is not set, \a tag must be
/// nullptr and the actual call must be initiated by StartCall
/// Note that \a context will be used to fill in custom initial metadata
/// used to send to the server when starting the call.
template <class W>
static ClientAsyncReader* Create(ChannelInterface* channel,
CompletionQueue* cq, const RpcMethod& method,
ClientContext* context, const W& request,
void* tag) {
bool start, void* tag) {
Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncReader)))
ClientAsyncReader(call, context, request, tag);
ClientAsyncReader(call, context, request, start, tag);
}
// always allocated against a call arena, no memory free required
@@ -177,6 +184,12 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
assert(size == sizeof(ClientAsyncReader));
}
void StartCall(void* tag) override {
assert(!started_);
started_ = true;
StartCallInternal(tag);
}
/// See the \a ClientAsyncStreamingInterface.ReadInitialMetadata
/// method for semantics.
///
@@ -186,6 +199,7 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
/// calling code can access the received metadata through the
/// \a ClientContext.
void ReadInitialMetadata(void* tag) override {
assert(started_);
GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
meta_ops_.set_output_tag(tag);
@@ -194,6 +208,7 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
}
void Read(R* msg, void* tag) override {
assert(started_);
read_ops_.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
read_ops_.RecvInitialMetadata(context_);
@@ -208,6 +223,7 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
/// - the \a ClientContext associated with this call is updated with
/// possible initial and trailing metadata received from the server.
void Finish(Status* status, void* tag) override {
assert(started_);
finish_ops_.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
finish_ops_.RecvInitialMetadata(context_);
@@ -219,19 +235,28 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
private:
template <class W>
ClientAsyncReader(Call call, ClientContext* context, const W& request,
void* tag)
: context_(context), call_(call) {
init_ops_.set_output_tag(tag);
init_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
bool start, void* tag)
: context_(context), call_(call), started_(start) {
// TODO(ctiller): don't assert
GPR_CODEGEN_ASSERT(init_ops_.SendMessage(request).ok());
init_ops_.ClientSendClose();
if (start) {
StartCallInternal(tag);
} else {
assert(tag == nullptr);
}
}
void StartCallInternal(void* tag) {
init_ops_.SendInitialMetadata(context_->send_initial_metadata_,
context_->initial_metadata_flags());
init_ops_.set_output_tag(tag);
call_.PerformOps(&init_ops_);
}
ClientContext* context_;
Call call_;
bool started_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, CallOpClientSendClose>
init_ops_;
CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
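In generated code this start flag is driven by which stub method was called; a hedged sketch for a hypothetical server-streaming method ListFeatures (names and tag values illustrative):

    // PrepareAsyncListFeatures creates the reader with start == false,
    // so nothing is sent until StartCall.
    std::unique_ptr<grpc::ClientAsyncReader<Feature>> reader(
        stub_->PrepareAsyncListFeatures(&context, request, &cq));
    reader->StartCall(reinterpret_cast<void*>(1));

    // Each subsequent operation asserts started_; issuing Read before
    // StartCall would trip the assert added above.
    Feature feature;
    reader->Read(&feature, reinterpret_cast<void*>(2));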
@@ -257,9 +282,12 @@ class ClientAsyncWriterInterface : public ClientAsyncStreamingInterface,
template <class W>
class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
public:
/// Create a stream and write the first request out.
/// Create a stream object.
/// Start the RPC if \a start is set
/// \a tag will be notified on \a cq when the call has been started (i.e.
/// initial metadata sent) and \a request has been written out.
/// If \a start is not set, \a tag must be nullptr and the actual call
/// must be initiated by StartCall
/// Note that \a context will be used to fill in custom initial metadata
/// used to send to the server when starting the call.
/// \a response will be filled in with the single expected response
@@ -269,11 +297,11 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
static ClientAsyncWriter* Create(ChannelInterface* channel,
CompletionQueue* cq, const RpcMethod& method,
ClientContext* context, R* response,
void* tag) {
bool start, void* tag) {
Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncWriter)))
ClientAsyncWriter(call, context, response, tag);
ClientAsyncWriter(call, context, response, start, tag);
}
// always allocated against a call arena, no memory free required
@@ -281,6 +309,12 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
assert(size == sizeof(ClientAsyncWriter));
}
void StartCall(void* tag) override {
assert(!started_);
started_ = true;
StartCallInternal(tag);
}
/// See the \a ClientAsyncStreamingInterface.ReadInitialMetadata method for
/// semantics.
///
@@ -289,6 +323,7 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
/// associated with this call is updated, and the calling code can access
/// the received metadata through the \a ClientContext.
void ReadInitialMetadata(void* tag) override {
assert(started_);
GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
meta_ops_.set_output_tag(tag);
@@ -297,6 +332,7 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
}
void Write(const W& msg, void* tag) override {
assert(started_);
write_ops_.set_output_tag(tag);
// TODO(ctiller): don't assert
GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok());
@@ -304,6 +340,7 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
}
void Write(const W& msg, WriteOptions options, void* tag) override {
assert(started_);
write_ops_.set_output_tag(tag);
if (options.is_last_message()) {
options.set_buffer_hint();
@@ -315,6 +352,7 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
}
void WritesDone(void* tag) override {
assert(started_);
write_ops_.set_output_tag(tag);
write_ops_.ClientSendClose();
call_.PerformOps(&write_ops_);
@@ -328,6 +366,7 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
/// - attempts to fill in the \a response parameter passed to this class's
/// constructor with the server's response message.
void Finish(Status* status, void* tag) override {
assert(started_);
finish_ops_.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
finish_ops_.RecvInitialMetadata(context_);
@@ -338,25 +377,32 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
private:
template <class R>
ClientAsyncWriter(Call call, ClientContext* context, R* response, void* tag)
: context_(context), call_(call) {
ClientAsyncWriter(Call call, ClientContext* context, R* response, bool start,
void* tag)
: context_(context), call_(call), started_(start) {
finish_ops_.RecvMessage(response);
finish_ops_.AllowNoMessage();
// if corked bit is set in context, we buffer up the initial metadata to
// coalesce with later message to be sent. No op is performed.
if (context_->initial_metadata_corked_) {
write_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
if (start) {
StartCallInternal(tag);
} else {
assert(tag == nullptr);
}
}
void StartCallInternal(void* tag) {
write_ops_.SendInitialMetadata(context_->send_initial_metadata_,
context_->initial_metadata_flags());
// if corked bit is set in context, we just keep the initial metadata
// buffered up to coalesce with later message send. No op is performed.
if (!context_->initial_metadata_corked_) {
write_ops_.set_output_tag(tag);
write_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
call_.PerformOps(&write_ops_);
}
}
ClientContext* context_;
Call call_;
bool started_;
CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, CallOpClientSendClose>
write_ops_;
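The writer follows the same two-phase contract; a hedged sketch with a hypothetical client-streaming method RecordRoute (tag values illustrative). When the context has set_initial_metadata_corked(true), StartCallInternal above buffers the metadata instead of performing an op, and it is flushed together with the first Write:

    RouteSummary summary;
    std::unique_ptr<grpc::ClientAsyncWriter<Point>> writer(
        stub_->PrepareAsyncRecordRoute(&context, &summary, &cq));
    writer->StartCall(reinterpret_cast<void*>(1));    // initial metadata out
    writer->Write(point, reinterpret_cast<void*>(2)); // one tag per Write
    writer->WritesDone(reinterpret_cast<void*>(3));   // client half-close
    grpc::Status status;
    writer->Finish(&status, reinterpret_cast<void*>(4)); // fills 'summary'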
@@ -388,20 +434,23 @@ template <class W, class R>
class ClientAsyncReaderWriter final
: public ClientAsyncReaderWriterInterface<W, R> {
public:
/// Create a stream and write the first request out.
/// Create a stream object.
/// Start the RPC request if \a start is set.
/// \a tag will be notified on \a cq when the call has been started (i.e.
/// initial metadata sent).
/// initial metadata sent). If \a start is not set, \a tag must be
/// nullptr and the actual call must be initiated by StartCall
/// Note that \a context will be used to fill in custom initial metadata
/// used to send to the server when starting the call.
static ClientAsyncReaderWriter* Create(ChannelInterface* channel,
CompletionQueue* cq,
const RpcMethod& method,
ClientContext* context, void* tag) {
ClientContext* context, bool start,
void* tag) {
Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncReaderWriter)))
ClientAsyncReaderWriter(call, context, tag);
ClientAsyncReaderWriter(call, context, start, tag);
}
// always allocated against a call arena, no memory free required
@@ -409,6 +458,12 @@ class ClientAsyncReaderWriter final
assert(size == sizeof(ClientAsyncReaderWriter));
}
void StartCall(void* tag) override {
assert(!started_);
started_ = true;
StartCallInternal(tag);
}
/// See the \a ClientAsyncStreamingInterface.ReadInitialMetadata method
/// for semantics of this method.
///
@@ -417,6 +472,7 @@ class ClientAsyncReaderWriter final
/// is updated with it, and then the receiving initial metadata can
/// be accessed through this \a ClientContext.
void ReadInitialMetadata(void* tag) override {
assert(started_);
GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
meta_ops_.set_output_tag(tag);
@@ -425,6 +481,7 @@ class ClientAsyncReaderWriter final
}
void Read(R* msg, void* tag) override {
assert(started_);
read_ops_.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
read_ops_.RecvInitialMetadata(context_);
@@ -434,6 +491,7 @@ class ClientAsyncReaderWriter final
}
void Write(const W& msg, void* tag) override {
assert(started_);
write_ops_.set_output_tag(tag);
// TODO(ctiller): don't assert
GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok());
@@ -441,6 +499,7 @@ class ClientAsyncReaderWriter final
}
void Write(const W& msg, WriteOptions options, void* tag) override {
assert(started_);
write_ops_.set_output_tag(tag);
if (options.is_last_message()) {
options.set_buffer_hint();
@@ -452,6 +511,7 @@ class ClientAsyncReaderWriter final
}
void WritesDone(void* tag) override {
assert(started_);
write_ops_.set_output_tag(tag);
write_ops_.ClientSendClose();
call_.PerformOps(&write_ops_);
@@ -462,6 +522,7 @@ class ClientAsyncReaderWriter final
/// - the \a ClientContext associated with this call is updated with
/// possible initial and trailing metadata sent from the server.
void Finish(Status* status, void* tag) override {
assert(started_);
finish_ops_.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
finish_ops_.RecvInitialMetadata(context_);
@@ -471,23 +532,30 @@ class ClientAsyncReaderWriter final
}
private:
ClientAsyncReaderWriter(Call call, ClientContext* context, void* tag)
: context_(context), call_(call) {
if (context_->initial_metadata_corked_) {
// if corked bit is set in context, we buffer up the initial metadata to
// coalesce with later message to be sent. No op is performed.
write_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
ClientAsyncReaderWriter(Call call, ClientContext* context, bool start,
void* tag)
: context_(context), call_(call), started_(start) {
if (start) {
StartCallInternal(tag);
} else {
assert(tag == nullptr);
}
}
void StartCallInternal(void* tag) {
write_ops_.SendInitialMetadata(context_->send_initial_metadata_,
context_->initial_metadata_flags());
// if corked bit is set in context, we just keep the initial metadata
// buffered up to coalesce with later message send. No op is performed.
if (!context_->initial_metadata_corked_) {
write_ops_.set_output_tag(tag);
write_ops_.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
call_.PerformOps(&write_ops_);
}
}
ClientContext* context_;
Call call_;
bool started_;
CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> read_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, CallOpClientSendClose>

@@ -32,13 +32,18 @@ namespace grpc {
class CompletionQueue;
extern CoreCodegenInterface* g_core_codegen_interface;
/// An interface relevant for async client side unary RPCS (which send
/// An interface relevant for async client side unary RPCs (which send
/// one request message to a server and receive one response message).
template <class R>
class ClientAsyncResponseReaderInterface {
public:
virtual ~ClientAsyncResponseReaderInterface() {}
/// Start the call that was set up by the constructor, but only if the
/// constructor was invoked through the "Prepare" API which doesn't actually
/// start the call
virtual void StartCall() = 0;
/// Request notification of the reading of initial metadata. Completion
/// will be notified by \a tag on the associated completion queue.
/// This call is optional, but if it is used, it cannot be used concurrently
@@ -70,9 +75,10 @@ template <class R>
class ClientAsyncResponseReader final
: public ClientAsyncResponseReaderInterface<R> {
public:
/// Start a call and write the request out.
/// Start a call and write the request out if \a start is set.
/// \a tag will be notified on \a cq when the call has been started (i.e.
/// initial metadata sent) and \a request has been written out.
/// If \a start is not set, the actual call must be initiated by StartCall
/// Note that \a context will be used to fill in custom initial metadata
/// used to send to the server when starting the call.
template <class W>
@@ -80,11 +86,11 @@ class ClientAsyncResponseReader final
CompletionQueue* cq,
const RpcMethod& method,
ClientContext* context,
const W& request) {
const W& request, bool start) {
Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncResponseReader)))
ClientAsyncResponseReader(call, context, request);
ClientAsyncResponseReader(call, context, request, start);
}
// always allocated against a call arena, no memory free required
@@ -92,13 +98,20 @@ class ClientAsyncResponseReader final
assert(size == sizeof(ClientAsyncResponseReader));
}
void StartCall() override {
assert(!started_);
started_ = true;
StartCallInternal();
}
/// See \a ClientAsyncResponseReaderInterface::ReadInitialMetadata for
/// semantics.
///
/// Side effect:
/// - the \a ClientContext associated with this call is updated with
/// possible initial and trailing metadata sent from the server.
void ReadInitialMetadata(void* tag) {
void ReadInitialMetadata(void* tag) override {
assert(started_);
GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
meta_buf.set_output_tag(tag);
@@ -111,7 +124,8 @@ class ClientAsyncResponseReader final
/// Side effect:
/// - the \a ClientContext associated with this call is updated with
/// possible initial and trailing metadata sent from the server.
void Finish(R* msg, Status* status, void* tag) {
void Finish(R* msg, Status* status, void* tag) override {
assert(started_);
finish_buf.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
finish_buf.RecvInitialMetadata(context_);
@@ -125,15 +139,22 @@ class ClientAsyncResponseReader final
private:
ClientContext* const context_;
Call call_;
bool started_;
template <class W>
ClientAsyncResponseReader(Call call, ClientContext* context, const W& request)
: context_(context), call_(call) {
init_buf.SendInitialMetadata(context->send_initial_metadata_,
context->initial_metadata_flags());
ClientAsyncResponseReader(Call call, ClientContext* context, const W& request,
bool start)
: context_(context), call_(call), started_(start) {
// Bind the metadata at time of StartCallInternal but set up the rest here
// TODO(ctiller): don't assert
GPR_CODEGEN_ASSERT(init_buf.SendMessage(request).ok());
init_buf.ClientSendClose();
if (start) StartCallInternal();
}
void StartCallInternal() {
init_buf.SendInitialMetadata(context_->send_initial_metadata_,
context_->initial_metadata_flags());
call_.PerformOps(&init_buf);
}

@@ -64,12 +64,6 @@ class ChannelArguments {
/// Set the compression algorithm for the channel.
void SetCompressionAlgorithm(grpc_compression_algorithm algorithm);
/// Set the grpclb fallback timeout (in ms) for the channel. If this amount
/// of time has passed but we have not gotten any non-empty \a serverlist from
/// the balancer, we will fall back to use the backend address(es) returned by
/// the resolver.
void SetGrpclbFallbackTimeout(int fallback_timeout);
/// Set the socket mutator for the channel.
void SetSocketMutator(grpc_socket_mutator* mutator);

@@ -44,13 +44,13 @@ int grpc_stream_compression_algorithm_parse(
* algorithm. Note that \a name is statically allocated and must *not* be freed.
* Returns 1 upon success, 0 otherwise. */
GRPCAPI int grpc_compression_algorithm_name(
grpc_compression_algorithm algorithm, char **name);
grpc_compression_algorithm algorithm, const char **name);
/** Updates \a name with the encoding name corresponding to a valid \a
* algorithm. Note that \a name is statically allocated and must *not* be freed.
* Returns 1 upon success, 0 otherwise. */
GRPCAPI int grpc_stream_compression_algorithm_name(
grpc_stream_compression_algorithm algorithm, char **name);
grpc_stream_compression_algorithm algorithm, const char **name);
/** Returns the compression algorithm corresponding to \a level for the
* compression algorithms encoded in the \a accepted_encodings bitset.
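With the const-qualified signature, a caller receives a pointer to the algorithm's statically allocated name; a minimal sketch:

    #include <grpc/compression.h>
    #include <stdio.h>

    void print_gzip_name(void) {
      const char *name = NULL;
      if (grpc_compression_algorithm_name(GRPC_COMPRESS_GZIP, &name)) {
        printf("algorithm: %s\n", name); /* static storage; do not free */
      }
    }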

@@ -188,9 +188,14 @@ typedef struct {
#define GRPC_ARG_HTTP2_MAX_FRAME_SIZE "grpc.http2.max_frame_size"
/** Should BDP probing be performed? */
#define GRPC_ARG_HTTP2_BDP_PROBE "grpc.http2.bdp_probe"
/** Minimum time (in milliseconds) between successive ping frames being sent */
#define GRPC_ARG_HTTP2_MIN_TIME_BETWEEN_PINGS_MS \
/** Minimum time between sending successive ping frames without receiving any
data frame. Int valued, milliseconds. */
#define GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS \
"grpc.http2.min_time_between_pings_ms"
/** Minimum allowed time between receiving successive ping frames without
sending any data frame. Int valued, milliseconds */
#define GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS \
"grpc.http2.min_ping_interval_without_data_ms"
/** Channel arg to override the http2 :scheme header */
#define GRPC_ARG_HTTP2_SCHEME "grpc.http2_scheme"
/** How many pings can we send before needing to send a data frame or header
@@ -202,10 +207,6 @@ typedef struct {
closing the transport? (0 indicates that the server can bear an infinite
number of misbehaving pings) */
#define GRPC_ARG_HTTP2_MAX_PING_STRIKES "grpc.http2.max_ping_strikes"
/** Minimum allowed time between two pings without sending any data frame. Int
valued, seconds */
#define GRPC_ARG_HTTP2_MIN_PING_INTERVAL_WITHOUT_DATA_MS \
"grpc.http2.min_ping_interval_without_data_ms"
/** How much data are we willing to queue up per stream if
GRPC_WRITE_BUFFER_HINT is set? This is an upper bound */
#define GRPC_ARG_HTTP2_WRITE_BUFFER_SIZE "grpc.http2.write_buffer_size"
@@ -287,11 +288,7 @@ typedef struct {
"grpc.experimental.tcp_max_read_chunk_size"
/* Timeout in milliseconds to use for calls to the grpclb load balancer.
If 0 or unset, the balancer calls will have no deadline. */
#define GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS "grpc.grpclb_call_timeout_ms"
/* Timeout in milliseconds to wait for the serverlist from the grpclb load
balancer before using fallback backend addresses from the resolver.
If 0, fallback will never be used. */
#define GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS "grpc.grpclb_fallback_timeout_ms"
#define GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS "grpc.grpclb_timeout_ms"
/** If non-zero, grpc server's cronet compression workaround will be enabled */
#define GRPC_ARG_WORKAROUND_CRONET_COMPRESSION \
"grpc.workaround.cronet_compression"

@@ -10,7 +10,7 @@
<email>grpc-packages@google.com</email>
<active>yes</active>
</lead>
<date>2017-05-22</date>
<date>2017-08-24</date>
<time>16:06:07</time>
<version>
<release>1.7.0dev</release>
@@ -25,6 +25,9 @@
- Channel are now by default persistent #11878
- Some bug fixes from 1.4 branch #12109, #12123
- Fixed hang bug when fork() was used #11814
- License changed to Apache 2.0
- Added support for php_namespace option in codegen plugin #11886
- Updated gRPC C Core library version 1.6
</notes>
<contents>
<dir baseinstalldir="/" name="/">

@@ -70,7 +70,7 @@ CLASSIFIERS = [
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: Apache Software License',
],
]
# Environment variable to determine whether or not the Cython extension should
# *use* Cython or use the generated C files. Note that this requires the C files

@@ -165,25 +165,37 @@ void PrintHeaderClientMethodInterfaces(
(*vars)["Request"] = method->input_type_name();
(*vars)["Response"] = method->output_type_name();
struct {
grpc::string prefix;
grpc::string method_params; // extra arguments to method
grpc::string raw_args; // extra arguments to raw version of method
} async_prefixes[] = {{"Async", ", void* tag", ", tag"},
{"PrepareAsync", "", ""}};
if (is_public) {
if (method->NoStreaming()) {
printer->Print(
*vars,
"virtual ::grpc::Status $Method$(::grpc::ClientContext* context, "
"const $Request$& request, $Response$* response) = 0;\n");
printer->Print(*vars,
"std::unique_ptr< "
"::grpc::ClientAsyncResponseReaderInterface< $Response$>> "
"Async$Method$(::grpc::ClientContext* context, "
"const $Request$& request, "
"::grpc::CompletionQueue* cq) {\n");
printer->Indent();
printer->Print(*vars,
"return std::unique_ptr< "
"::grpc::ClientAsyncResponseReaderInterface< $Response$>>("
"Async$Method$Raw(context, request, cq));\n");
printer->Outdent();
printer->Print("}\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
printer->Print(
*vars,
"std::unique_ptr< "
"::grpc::ClientAsyncResponseReaderInterface< $Response$>> "
"$AsyncPrefix$$Method$(::grpc::ClientContext* context, "
"const $Request$& request, "
"::grpc::CompletionQueue* cq) {\n");
printer->Indent();
printer->Print(
*vars,
"return std::unique_ptr< "
"::grpc::ClientAsyncResponseReaderInterface< $Response$>>("
"$AsyncPrefix$$Method$Raw(context, request, cq));\n");
printer->Outdent();
printer->Print("}\n");
}
} else if (ClientOnlyStreaming(method)) {
printer->Print(
*vars,
@@ -197,19 +209,26 @@ void PrintHeaderClientMethodInterfaces(
"($Method$Raw(context, response));\n");
printer->Outdent();
printer->Print("}\n");
printer->Print(
*vars,
"std::unique_ptr< ::grpc::ClientAsyncWriterInterface< $Request$>>"
" Async$Method$(::grpc::ClientContext* context, $Response$* "
"response, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Indent();
printer->Print(*vars,
"return std::unique_ptr< "
"::grpc::ClientAsyncWriterInterface< $Request$>>("
"Async$Method$Raw(context, response, cq, tag));\n");
printer->Outdent();
printer->Print("}\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
(*vars)["AsyncMethodParams"] = async_prefix.method_params;
(*vars)["AsyncRawArgs"] = async_prefix.raw_args;
printer->Print(
*vars,
"std::unique_ptr< ::grpc::ClientAsyncWriterInterface< $Request$>>"
" $AsyncPrefix$$Method$(::grpc::ClientContext* context, "
"$Response$* "
"response, "
"::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
printer->Indent();
printer->Print(*vars,
"return std::unique_ptr< "
"::grpc::ClientAsyncWriterInterface< $Request$>>("
"$AsyncPrefix$$Method$Raw(context, response, "
"cq$AsyncRawArgs$));\n");
printer->Outdent();
printer->Print("}\n");
}
} else if (ServerOnlyStreaming(method)) {
printer->Print(
*vars,
@@ -223,19 +242,25 @@ void PrintHeaderClientMethodInterfaces(
"($Method$Raw(context, request));\n");
printer->Outdent();
printer->Print("}\n");
printer->Print(
*vars,
"std::unique_ptr< ::grpc::ClientAsyncReaderInterface< $Response$>> "
"Async$Method$("
"::grpc::ClientContext* context, const $Request$& request, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Indent();
printer->Print(*vars,
"return std::unique_ptr< "
"::grpc::ClientAsyncReaderInterface< $Response$>>("
"Async$Method$Raw(context, request, cq, tag));\n");
printer->Outdent();
printer->Print("}\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
(*vars)["AsyncMethodParams"] = async_prefix.method_params;
(*vars)["AsyncRawArgs"] = async_prefix.raw_args;
printer->Print(
*vars,
"std::unique_ptr< ::grpc::ClientAsyncReaderInterface< $Response$>> "
"$AsyncPrefix$$Method$("
"::grpc::ClientContext* context, const $Request$& request, "
"::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
printer->Indent();
printer->Print(
*vars,
"return std::unique_ptr< "
"::grpc::ClientAsyncReaderInterface< $Response$>>("
"$AsyncPrefix$$Method$Raw(context, request, cq$AsyncRawArgs$));\n");
printer->Outdent();
printer->Print("}\n");
}
} else if (method->BidiStreaming()) {
printer->Print(*vars,
"std::unique_ptr< ::grpc::ClientReaderWriterInterface< "
@@ -249,61 +274,83 @@ void PrintHeaderClientMethodInterfaces(
"$Method$Raw(context));\n");
printer->Outdent();
printer->Print("}\n");
printer->Print(
*vars,
"std::unique_ptr< "
"::grpc::ClientAsyncReaderWriterInterface< $Request$, $Response$>> "
"Async$Method$(::grpc::ClientContext* context, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Indent();
printer->Print(
*vars,
"return std::unique_ptr< "
"::grpc::ClientAsyncReaderWriterInterface< $Request$, $Response$>>("
"Async$Method$Raw(context, cq, tag));\n");
printer->Outdent();
printer->Print("}\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
(*vars)["AsyncMethodParams"] = async_prefix.method_params;
(*vars)["AsyncRawArgs"] = async_prefix.raw_args;
printer->Print(
*vars,
"std::unique_ptr< "
"::grpc::ClientAsyncReaderWriterInterface< $Request$, $Response$>> "
"$AsyncPrefix$$Method$(::grpc::ClientContext* context, "
"::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
printer->Indent();
printer->Print(
*vars,
"return std::unique_ptr< "
"::grpc::ClientAsyncReaderWriterInterface< $Request$, $Response$>>("
"$AsyncPrefix$$Method$Raw(context, cq$AsyncRawArgs$));\n");
printer->Outdent();
printer->Print("}\n");
}
}
} else {
if (method->NoStreaming()) {
printer->Print(
*vars,
"virtual ::grpc::ClientAsyncResponseReaderInterface< $Response$>* "
"Async$Method$Raw(::grpc::ClientContext* context, "
"const $Request$& request, "
"::grpc::CompletionQueue* cq) = 0;\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
printer->Print(
*vars,
"virtual ::grpc::ClientAsyncResponseReaderInterface< $Response$>* "
"$AsyncPrefix$$Method$Raw(::grpc::ClientContext* context, "
"const $Request$& request, "
"::grpc::CompletionQueue* cq) = 0;\n");
}
} else if (ClientOnlyStreaming(method)) {
printer->Print(
*vars,
"virtual ::grpc::ClientWriterInterface< $Request$>*"
" $Method$Raw("
"::grpc::ClientContext* context, $Response$* response) = 0;\n");
printer->Print(*vars,
"virtual ::grpc::ClientAsyncWriterInterface< $Request$>*"
" Async$Method$Raw(::grpc::ClientContext* context, "
"$Response$* response, "
"::grpc::CompletionQueue* cq, void* tag) = 0;\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
(*vars)["AsyncMethodParams"] = async_prefix.method_params;
printer->Print(
*vars,
"virtual ::grpc::ClientAsyncWriterInterface< $Request$>*"
" $AsyncPrefix$$Method$Raw(::grpc::ClientContext* context, "
"$Response$* response, "
"::grpc::CompletionQueue* cq$AsyncMethodParams$) = 0;\n");
}
} else if (ServerOnlyStreaming(method)) {
printer->Print(
*vars,
"virtual ::grpc::ClientReaderInterface< $Response$>* $Method$Raw("
"::grpc::ClientContext* context, const $Request$& request) = 0;\n");
printer->Print(
*vars,
"virtual ::grpc::ClientAsyncReaderInterface< $Response$>* "
"Async$Method$Raw("
"::grpc::ClientContext* context, const $Request$& request, "
"::grpc::CompletionQueue* cq, void* tag) = 0;\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
(*vars)["AsyncMethodParams"] = async_prefix.method_params;
printer->Print(
*vars,
"virtual ::grpc::ClientAsyncReaderInterface< $Response$>* "
"$AsyncPrefix$$Method$Raw("
"::grpc::ClientContext* context, const $Request$& request, "
"::grpc::CompletionQueue* cq$AsyncMethodParams$) = 0;\n");
}
} else if (method->BidiStreaming()) {
printer->Print(*vars,
"virtual ::grpc::ClientReaderWriterInterface< $Request$, "
"$Response$>* "
"$Method$Raw(::grpc::ClientContext* context) = 0;\n");
printer->Print(*vars,
"virtual ::grpc::ClientAsyncReaderWriterInterface< "
"$Request$, $Response$>* "
"Async$Method$Raw(::grpc::ClientContext* context, "
"::grpc::CompletionQueue* cq, void* tag) = 0;\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
(*vars)["AsyncMethodParams"] = async_prefix.method_params;
printer->Print(
*vars,
"virtual ::grpc::ClientAsyncReaderWriterInterface< "
"$Request$, $Response$>* "
"$AsyncPrefix$$Method$Raw(::grpc::ClientContext* context, "
"::grpc::CompletionQueue* cq$AsyncMethodParams$) = 0;\n");
}
}
}
}
@ -315,25 +362,35 @@ void PrintHeaderClientMethod(grpc_generator::Printer *printer,
(*vars)["Method"] = method->name();
(*vars)["Request"] = method->input_type_name();
(*vars)["Response"] = method->output_type_name();
struct {
grpc::string prefix;
grpc::string method_params; // extra arguments to method
grpc::string raw_args; // extra arguments to raw version of method
} async_prefixes[] = {{"Async", ", void* tag", ", tag"},
{"PrepareAsync", "", ""}};
if (is_public) {
if (method->NoStreaming()) {
printer->Print(
*vars,
"::grpc::Status $Method$(::grpc::ClientContext* context, "
"const $Request$& request, $Response$* response) override;\n");
printer->Print(
*vars,
"std::unique_ptr< ::grpc::ClientAsyncResponseReader< $Response$>> "
"Async$Method$(::grpc::ClientContext* context, "
"const $Request$& request, "
"::grpc::CompletionQueue* cq) {\n");
printer->Indent();
printer->Print(*vars,
"return std::unique_ptr< "
"::grpc::ClientAsyncResponseReader< $Response$>>("
"Async$Method$Raw(context, request, cq));\n");
printer->Outdent();
printer->Print("}\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
printer->Print(
*vars,
"std::unique_ptr< ::grpc::ClientAsyncResponseReader< $Response$>> "
"$AsyncPrefix$$Method$(::grpc::ClientContext* context, "
"const $Request$& request, "
"::grpc::CompletionQueue* cq) {\n");
printer->Indent();
printer->Print(*vars,
"return std::unique_ptr< "
"::grpc::ClientAsyncResponseReader< $Response$>>("
"$AsyncPrefix$$Method$Raw(context, request, cq));\n");
printer->Outdent();
printer->Print("}\n");
}
} else if (ClientOnlyStreaming(method)) {
printer->Print(
*vars,
@ -346,18 +403,24 @@ void PrintHeaderClientMethod(grpc_generator::Printer *printer,
"($Method$Raw(context, response));\n");
printer->Outdent();
printer->Print("}\n");
printer->Print(*vars,
"std::unique_ptr< ::grpc::ClientAsyncWriter< $Request$>>"
" Async$Method$(::grpc::ClientContext* context, "
"$Response$* response, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Indent();
printer->Print(
*vars,
"return std::unique_ptr< ::grpc::ClientAsyncWriter< $Request$>>("
"Async$Method$Raw(context, response, cq, tag));\n");
printer->Outdent();
printer->Print("}\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
(*vars)["AsyncMethodParams"] = async_prefix.method_params;
(*vars)["AsyncRawArgs"] = async_prefix.raw_args;
printer->Print(*vars,
"std::unique_ptr< ::grpc::ClientAsyncWriter< $Request$>>"
" $AsyncPrefix$$Method$(::grpc::ClientContext* context, "
"$Response$* response, "
"::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
printer->Indent();
printer->Print(
*vars,
"return std::unique_ptr< ::grpc::ClientAsyncWriter< $Request$>>("
"$AsyncPrefix$$Method$Raw(context, response, "
"cq$AsyncRawArgs$));\n");
printer->Outdent();
printer->Print("}\n");
}
} else if (ServerOnlyStreaming(method)) {
printer->Print(
*vars,
@ -371,19 +434,24 @@ void PrintHeaderClientMethod(grpc_generator::Printer *printer,
"($Method$Raw(context, request));\n");
printer->Outdent();
printer->Print("}\n");
printer->Print(
*vars,
"std::unique_ptr< ::grpc::ClientAsyncReader< $Response$>> "
"Async$Method$("
"::grpc::ClientContext* context, const $Request$& request, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Indent();
printer->Print(
*vars,
"return std::unique_ptr< ::grpc::ClientAsyncReader< $Response$>>("
"Async$Method$Raw(context, request, cq, tag));\n");
printer->Outdent();
printer->Print("}\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
(*vars)["AsyncMethodParams"] = async_prefix.method_params;
(*vars)["AsyncRawArgs"] = async_prefix.raw_args;
printer->Print(
*vars,
"std::unique_ptr< ::grpc::ClientAsyncReader< $Response$>> "
"$AsyncPrefix$$Method$("
"::grpc::ClientContext* context, const $Request$& request, "
"::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
printer->Indent();
printer->Print(
*vars,
"return std::unique_ptr< ::grpc::ClientAsyncReader< $Response$>>("
"$AsyncPrefix$$Method$Raw(context, request, cq$AsyncRawArgs$));\n");
printer->Outdent();
printer->Print("}\n");
}
} else if (method->BidiStreaming()) {
printer->Print(
*vars,
@ -396,53 +464,80 @@ void PrintHeaderClientMethod(grpc_generator::Printer *printer,
"$Method$Raw(context));\n");
printer->Outdent();
printer->Print("}\n");
printer->Print(*vars,
"std::unique_ptr< ::grpc::ClientAsyncReaderWriter< "
"$Request$, $Response$>> "
"Async$Method$(::grpc::ClientContext* context, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Indent();
printer->Print(*vars,
"return std::unique_ptr< "
"::grpc::ClientAsyncReaderWriter< $Request$, $Response$>>("
"Async$Method$Raw(context, cq, tag));\n");
printer->Outdent();
printer->Print("}\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
(*vars)["AsyncMethodParams"] = async_prefix.method_params;
(*vars)["AsyncRawArgs"] = async_prefix.raw_args;
printer->Print(*vars,
"std::unique_ptr< ::grpc::ClientAsyncReaderWriter< "
"$Request$, $Response$>> "
"$AsyncPrefix$$Method$(::grpc::ClientContext* context, "
"::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
printer->Indent();
printer->Print(
*vars,
"return std::unique_ptr< "
"::grpc::ClientAsyncReaderWriter< $Request$, $Response$>>("
"$AsyncPrefix$$Method$Raw(context, cq$AsyncRawArgs$));\n");
printer->Outdent();
printer->Print("}\n");
}
}
} else {
if (method->NoStreaming()) {
printer->Print(*vars,
"::grpc::ClientAsyncResponseReader< $Response$>* "
"Async$Method$Raw(::grpc::ClientContext* context, "
"const $Request$& request, "
"::grpc::CompletionQueue* cq) override;\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
printer->Print(
*vars,
"::grpc::ClientAsyncResponseReader< $Response$>* "
"$AsyncPrefix$$Method$Raw(::grpc::ClientContext* context, "
"const $Request$& request, "
"::grpc::CompletionQueue* cq) override;\n");
}
} else if (ClientOnlyStreaming(method)) {
printer->Print(*vars,
"::grpc::ClientWriter< $Request$>* $Method$Raw("
"::grpc::ClientContext* context, $Response$* response) "
"override;\n");
printer->Print(*vars,
"::grpc::ClientAsyncWriter< $Request$>* Async$Method$Raw("
"::grpc::ClientContext* context, $Response$* response, "
"::grpc::CompletionQueue* cq, void* tag) override;\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
(*vars)["AsyncMethodParams"] = async_prefix.method_params;
(*vars)["AsyncRawArgs"] = async_prefix.raw_args;
printer->Print(
*vars,
"::grpc::ClientAsyncWriter< $Request$>* $AsyncPrefix$$Method$Raw("
"::grpc::ClientContext* context, $Response$* response, "
"::grpc::CompletionQueue* cq$AsyncMethodParams$) override;\n");
}
} else if (ServerOnlyStreaming(method)) {
printer->Print(*vars,
"::grpc::ClientReader< $Response$>* $Method$Raw("
"::grpc::ClientContext* context, const $Request$& request)"
" override;\n");
printer->Print(
*vars,
"::grpc::ClientAsyncReader< $Response$>* Async$Method$Raw("
"::grpc::ClientContext* context, const $Request$& request, "
"::grpc::CompletionQueue* cq, void* tag) override;\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
(*vars)["AsyncMethodParams"] = async_prefix.method_params;
(*vars)["AsyncRawArgs"] = async_prefix.raw_args;
printer->Print(
*vars,
"::grpc::ClientAsyncReader< $Response$>* $AsyncPrefix$$Method$Raw("
"::grpc::ClientContext* context, const $Request$& request, "
"::grpc::CompletionQueue* cq$AsyncMethodParams$) override;\n");
}
} else if (method->BidiStreaming()) {
printer->Print(*vars,
"::grpc::ClientReaderWriter< $Request$, $Response$>* "
"$Method$Raw(::grpc::ClientContext* context) override;\n");
printer->Print(*vars,
"::grpc::ClientAsyncReaderWriter< $Request$, $Response$>* "
"Async$Method$Raw(::grpc::ClientContext* context, "
"::grpc::CompletionQueue* cq, void* tag) override;\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
(*vars)["AsyncMethodParams"] = async_prefix.method_params;
(*vars)["AsyncRawArgs"] = async_prefix.raw_args;
printer->Print(
*vars,
"::grpc::ClientAsyncReaderWriter< $Request$, $Response$>* "
"$AsyncPrefix$$Method$Raw(::grpc::ClientContext* context, "
"::grpc::CompletionQueue* cq$AsyncMethodParams$) override;\n");
}
}
}
}
@ -1077,6 +1172,13 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
(*vars)["Method"] = method->name();
(*vars)["Request"] = method->input_type_name();
(*vars)["Response"] = method->output_type_name();
struct {
grpc::string prefix;
grpc::string start; // bool literal expressed as string
grpc::string method_params; // extra arguments to method
grpc::string create_args; // extra arguments to creator
} async_prefixes[] = {{"Async", "true", ", void* tag", ", tag"},
{"PrepareAsync", "false", "", ", nullptr"}};
if (method->NoStreaming()) {
printer->Print(*vars,
"::grpc::Status $ns$$Service$::Stub::$Method$("
@ -1087,19 +1189,23 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"rpcmethod_$Method$_, "
"context, request, response);\n"
"}\n\n");
printer->Print(
*vars,
"::grpc::ClientAsyncResponseReader< $Response$>* "
"$ns$$Service$::Stub::Async$Method$Raw(::grpc::ClientContext* context, "
"const $Request$& request, "
"::grpc::CompletionQueue* cq) {\n");
printer->Print(*vars,
" return "
"::grpc::ClientAsyncResponseReader< $Response$>::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, request);\n"
"}\n\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
(*vars)["AsyncStart"] = async_prefix.start;
printer->Print(*vars,
"::grpc::ClientAsyncResponseReader< $Response$>* "
"$ns$$Service$::Stub::$AsyncPrefix$$Method$Raw(::grpc::"
"ClientContext* context, "
"const $Request$& request, "
"::grpc::CompletionQueue* cq) {\n");
printer->Print(*vars,
" return "
"::grpc::ClientAsyncResponseReader< $Response$>::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, request, $AsyncStart$);\n"
"}\n\n");
}
} else if (ClientOnlyStreaming(method)) {
printer->Print(*vars,
"::grpc::ClientWriter< $Request$>* "
@ -1111,17 +1217,23 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"rpcmethod_$Method$_, "
"context, response);\n"
"}\n\n");
printer->Print(*vars,
"::grpc::ClientAsyncWriter< $Request$>* "
"$ns$$Service$::Stub::Async$Method$Raw("
"::grpc::ClientContext* context, $Response$* response, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Print(*vars,
" return ::grpc::ClientAsyncWriter< $Request$>::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, response, tag);\n"
"}\n\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
(*vars)["AsyncStart"] = async_prefix.start;
(*vars)["AsyncMethodParams"] = async_prefix.method_params;
(*vars)["AsyncCreateArgs"] = async_prefix.create_args;
printer->Print(*vars,
"::grpc::ClientAsyncWriter< $Request$>* "
"$ns$$Service$::Stub::$AsyncPrefix$$Method$Raw("
"::grpc::ClientContext* context, $Response$* response, "
"::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
printer->Print(*vars,
" return ::grpc::ClientAsyncWriter< $Request$>::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, response, $AsyncStart$$AsyncCreateArgs$);\n"
"}\n\n");
}
} else if (ServerOnlyStreaming(method)) {
printer->Print(
*vars,
@ -1134,17 +1246,24 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"rpcmethod_$Method$_, "
"context, request);\n"
"}\n\n");
printer->Print(*vars,
"::grpc::ClientAsyncReader< $Response$>* "
"$ns$$Service$::Stub::Async$Method$Raw("
"::grpc::ClientContext* context, const $Request$& request, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Print(*vars,
" return ::grpc::ClientAsyncReader< $Response$>::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, request, tag);\n"
"}\n\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
(*vars)["AsyncStart"] = async_prefix.start;
(*vars)["AsyncMethodParams"] = async_prefix.method_params;
(*vars)["AsyncCreateArgs"] = async_prefix.create_args;
printer->Print(
*vars,
"::grpc::ClientAsyncReader< $Response$>* "
"$ns$$Service$::Stub::$AsyncPrefix$$Method$Raw("
"::grpc::ClientContext* context, const $Request$& request, "
"::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
printer->Print(*vars,
" return ::grpc::ClientAsyncReader< $Response$>::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, request, $AsyncStart$$AsyncCreateArgs$);\n"
"}\n\n");
}
} else if (method->BidiStreaming()) {
printer->Print(
*vars,
@ -1157,19 +1276,25 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
"rpcmethod_$Method$_, "
"context);\n"
"}\n\n");
printer->Print(
*vars,
"::grpc::ClientAsyncReaderWriter< $Request$, $Response$>* "
"$ns$$Service$::Stub::Async$Method$Raw(::grpc::ClientContext* context, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Print(
*vars,
" return "
"::grpc::ClientAsyncReaderWriter< $Request$, $Response$>::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, tag);\n"
"}\n\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
(*vars)["AsyncStart"] = async_prefix.start;
(*vars)["AsyncMethodParams"] = async_prefix.method_params;
(*vars)["AsyncCreateArgs"] = async_prefix.create_args;
printer->Print(*vars,
"::grpc::ClientAsyncReaderWriter< $Request$, $Response$>* "
"$ns$$Service$::Stub::$AsyncPrefix$$Method$Raw(::grpc::"
"ClientContext* context, "
"::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
printer->Print(
*vars,
" return "
"::grpc::ClientAsyncReaderWriter< $Request$, $Response$>::Create("
"channel_.get(), cq, "
"rpcmethod_$Method$_, "
"context, $AsyncStart$$AsyncCreateArgs$);\n"
"}\n\n");
}
}
}
@ -1460,50 +1585,79 @@ void PrintMockClientMethods(grpc_generator::Printer *printer,
(*vars)["Request"] = method->input_type_name();
(*vars)["Response"] = method->output_type_name();
struct {
grpc::string prefix;
grpc::string method_params; // extra arguments to method
int extra_method_param_count;
} async_prefixes[] = {{"Async", ", void* tag", 1}, {"PrepareAsync", "", 0}};
if (method->NoStreaming()) {
printer->Print(
*vars,
"MOCK_METHOD3($Method$, ::grpc::Status(::grpc::ClientContext* context, "
"const $Request$& request, $Response$* response));\n");
printer->Print(*vars,
"MOCK_METHOD3(Async$Method$Raw, "
"::grpc::ClientAsyncResponseReaderInterface< $Response$>*"
"(::grpc::ClientContext* context, const $Request$& request, "
"::grpc::CompletionQueue* cq));\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
printer->Print(
*vars,
"MOCK_METHOD3($AsyncPrefix$$Method$Raw, "
"::grpc::ClientAsyncResponseReaderInterface< $Response$>*"
"(::grpc::ClientContext* context, const $Request$& request, "
"::grpc::CompletionQueue* cq));\n");
}
} else if (ClientOnlyStreaming(method)) {
printer->Print(
*vars,
"MOCK_METHOD2($Method$Raw, "
"::grpc::ClientWriterInterface< $Request$>*"
"(::grpc::ClientContext* context, $Response$* response));\n");
printer->Print(*vars,
"MOCK_METHOD4(Async$Method$Raw, "
"::grpc::ClientAsyncWriterInterface< $Request$>*"
"(::grpc::ClientContext* context, $Response$* response, "
"::grpc::CompletionQueue* cq, void* tag));\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
(*vars)["AsyncMethodParams"] = async_prefix.method_params;
(*vars)["MockArgs"] =
std::to_string(3 + async_prefix.extra_method_param_count);
printer->Print(*vars,
"MOCK_METHOD$MockArgs$($AsyncPrefix$$Method$Raw, "
"::grpc::ClientAsyncWriterInterface< $Request$>*"
"(::grpc::ClientContext* context, $Response$* response, "
"::grpc::CompletionQueue* cq$AsyncMethodParams$));\n");
}
} else if (ServerOnlyStreaming(method)) {
printer->Print(
*vars,
"MOCK_METHOD2($Method$Raw, "
"::grpc::ClientReaderInterface< $Response$>*"
"(::grpc::ClientContext* context, const $Request$& request));\n");
printer->Print(*vars,
"MOCK_METHOD4(Async$Method$Raw, "
"::grpc::ClientAsyncReaderInterface< $Response$>*"
"(::grpc::ClientContext* context, const $Request$& request, "
"::grpc::CompletionQueue* cq, void* tag));\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
(*vars)["AsyncMethodParams"] = async_prefix.method_params;
(*vars)["MockArgs"] =
std::to_string(3 + async_prefix.extra_method_param_count);
printer->Print(
*vars,
"MOCK_METHOD$MockArgs$($AsyncPrefix$$Method$Raw, "
"::grpc::ClientAsyncReaderInterface< $Response$>*"
"(::grpc::ClientContext* context, const $Request$& request, "
"::grpc::CompletionQueue* cq$AsyncMethodParams$));\n");
}
} else if (method->BidiStreaming()) {
printer->Print(
*vars,
"MOCK_METHOD1($Method$Raw, "
"::grpc::ClientReaderWriterInterface< $Request$, $Response$>*"
"(::grpc::ClientContext* context));\n");
printer->Print(
*vars,
"MOCK_METHOD3(Async$Method$Raw, "
"::grpc::ClientAsyncReaderWriterInterface<$Request$, $Response$>*"
"(::grpc::ClientContext* context, ::grpc::CompletionQueue* cq, "
"void* tag));\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
(*vars)["AsyncMethodParams"] = async_prefix.method_params;
(*vars)["MockArgs"] =
std::to_string(2 + async_prefix.extra_method_param_count);
printer->Print(
*vars,
"MOCK_METHOD$MockArgs$($AsyncPrefix$$Method$Raw, "
"::grpc::ClientAsyncReaderWriterInterface<$Request$, $Response$>*"
"(::grpc::ClientContext* context, ::grpc::CompletionQueue* cq"
"$AsyncMethodParams$));\n");
}
}
}

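Note on the cpp_generator hunks above: every client async method is now emitted twice, once per entry in the async_prefixes table. The "Async" variant keeps the old behaviour (the call starts immediately, and the streaming overloads keep their void* tag), while the new "PrepareAsync" variant passes start = false down to Create(), so the returned reader/writer does not start the call right away (it is started later, via StartCall() in the final API). A sketch of what the generated header could look like for a hypothetical unary method SayHello (method and message names are illustrative, not from this diff):

std::unique_ptr< ::grpc::ClientAsyncResponseReader< HelloReply>>
AsyncSayHello(::grpc::ClientContext* context, const HelloRequest& request,
              ::grpc::CompletionQueue* cq) {
  // Starts the call immediately (start == true in the .cc template).
  return std::unique_ptr< ::grpc::ClientAsyncResponseReader< HelloReply>>(
      AsyncSayHelloRaw(context, request, cq));
}
std::unique_ptr< ::grpc::ClientAsyncResponseReader< HelloReply>>
PrepareAsyncSayHello(::grpc::ClientContext* context,
                     const HelloRequest& request,
                     ::grpc::CompletionQueue* cq) {
  // Only prepares the call (start == false); it is started explicitly.
  return std::unique_ptr< ::grpc::ClientAsyncResponseReader< HelloReply>>(
      PrepareAsyncSayHelloRaw(context, request, cq));
}

The mock generator follows the same table: MOCK_METHOD$MockArgs$ adds extra_method_param_count to the base argument count, so the Async overloads (which keep the void* tag) and the PrepareAsync overloads (which drop it) each expand to the right MOCK_METHODn macro.
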
@ -767,9 +767,9 @@ bool PythonGrpcGenerator::Generate(const FileDescriptor* file,
ProtoBufFile pbfile(file);
PrivateGenerator generator(config_, &pbfile);
if (parameter == "grpc_2_0") {
if (parameter == "" || parameter == "grpc_2_0") {
return GenerateGrpc(context, generator, pb2_grpc_file_name, true);
} else if (parameter == "") {
} else if (parameter == "grpc_1_0") {
return GenerateGrpc(context, generator, pb2_grpc_file_name, true) &&
GenerateGrpc(context, generator, pb2_file_name, false);
} else {

@ -37,20 +37,20 @@
void define_base_resources() {
google_census_Resource_BasicUnit numerator =
google_census_Resource_BasicUnit_SECS;
resource r = {"client_rpc_latency", // name
"Client RPC latency in seconds", // description
0, // prefix
1, // n_numerators
&numerator, // numerators
0, // n_denominators
NULL}; // denominators
resource r = {(char *)"client_rpc_latency", // name
(char *)"Client RPC latency in seconds", // description
0, // prefix
1, // n_numerators
&numerator, // numerators
0, // n_denominators
NULL}; // denominators
define_resource(&r);
r = (resource){"server_rpc_latency", // name
"Server RPC latency in seconds", // description
0, // prefix
1, // n_numerators
&numerator, // numerators
0, // n_denominators
NULL}; // denominators
r = (resource){(char *)"server_rpc_latency", // name
(char *)"Server RPC latency in seconds", // description
0, // prefix
1, // n_numerators
&numerator, // numerators
0, // n_denominators
NULL}; // denominators
define_resource(&r);
}

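The (char *) casts here, and the matching casts threaded through the channel-arg call sites in the hunks below, all silence the same class of diagnostic: these fields and grpc_arg keys are declared as plain char * even though they are only ever read, while a C string literal is a const char array, so passing one where char * is expected is ill-formed in C++ and is flagged by -Wwrite-strings in C. A minimal illustration with a simplified type:

struct resource_like {
  char *name; /* declared non-const, but never written through */
};

void cast_demo(void) {
  /* "client_rpc_latency" is a const char array; the explicit cast is
     what lets it initialize a char * field without a diagnostic. */
  struct resource_like r = {(char *)"client_rpc_latency"};
  (void)r;
}
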
@ -375,7 +375,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
}
// Extract the following fields from the resolver result, if non-NULL.
bool lb_policy_updated = false;
char *lb_policy_name = NULL;
char *lb_policy_name_dup = NULL;
bool lb_policy_name_changed = false;
grpc_lb_policy *new_lb_policy = NULL;
char *service_config_json = NULL;
@ -383,6 +383,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
grpc_slice_hash_table *method_params_table = NULL;
if (chand->resolver_result != NULL) {
// Find LB policy name.
const char *lb_policy_name = NULL;
const grpc_arg *channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
if (channel_arg != NULL) {
@ -473,7 +474,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
// Before we clean up, save a copy of lb_policy_name, since it might
// be pointing to data inside chand->resolver_result.
// The copy will be saved in chand->lb_policy_name below.
lb_policy_name = gpr_strdup(lb_policy_name);
lb_policy_name_dup = gpr_strdup(lb_policy_name);
grpc_channel_args_destroy(exec_ctx, chand->resolver_result);
chand->resolver_result = NULL;
}
@ -481,8 +482,8 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_DEBUG,
"chand=%p: resolver result: lb_policy_name=\"%s\"%s, "
"service_config=\"%s\"",
chand, lb_policy_name, lb_policy_name_changed ? " (changed)" : "",
service_config_json);
chand, lb_policy_name_dup,
lb_policy_name_changed ? " (changed)" : "", service_config_json);
}
// Now swap out fields in chand. Note that the new values may still
// be NULL if (e.g.) the resolver failed to return results or the
@ -490,9 +491,9 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
//
// First, swap out the data used by cc_get_channel_info().
gpr_mu_lock(&chand->info_mu);
if (lb_policy_name != NULL) {
if (lb_policy_name_dup != NULL) {
gpr_free(chand->info_lb_policy_name);
chand->info_lb_policy_name = lb_policy_name;
chand->info_lb_policy_name = lb_policy_name_dup;
}
if (service_config_json != NULL) {
gpr_free(chand->info_service_config_json);

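The lb_policy_name -> lb_policy_name_dup split above makes the ownership explicit: the inner const char *lb_policy_name borrows from chand->resolver_result, so a gpr_strdup copy must be taken before grpc_channel_args_destroy runs. The same pattern in isolation, with standard strdup/free standing in for gpr_strdup/gpr_free (illustrative only):

#include <stdlib.h>
#include <string.h>

void dup_before_destroy_demo(void) {
  char *resolver_result = strdup("round_robin");     /* owned container */
  const char *lb_policy_name = resolver_result;      /* borrowed pointer */
  char *lb_policy_name_dup = strdup(lb_policy_name); /* owned copy */
  free(resolver_result); /* lb_policy_name dangles from here on... */
  /* ...but the dup is independent and can be stashed in chand: */
  free(lb_policy_name_dup);
}
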
@ -63,6 +63,6 @@ static const grpc_arg_pointer_vtable factory_arg_vtable = {
grpc_arg grpc_client_channel_factory_create_channel_arg(
grpc_client_channel_factory* factory) {
return grpc_channel_arg_pointer_create(GRPC_ARG_CLIENT_CHANNEL_FACTORY,
return grpc_channel_arg_pointer_create((char*)GRPC_ARG_CLIENT_CHANNEL_FACTORY,
factory, &factory_arg_vtable);
}

@ -54,8 +54,8 @@ static bool set_default_host_if_unset(grpc_exec_ctx *exec_ctx,
char *default_authority = grpc_get_default_authority(
exec_ctx, grpc_channel_stack_builder_get_target(builder));
if (default_authority != NULL) {
grpc_arg arg = grpc_channel_arg_string_create(GRPC_ARG_DEFAULT_AUTHORITY,
default_authority);
grpc_arg arg = grpc_channel_arg_string_create(
(char *)GRPC_ARG_DEFAULT_AUTHORITY, default_authority);
grpc_channel_args *new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
grpc_channel_stack_builder_set_channel_arguments(exec_ctx, builder,
new_args);

@ -157,7 +157,7 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
}
grpc_arg args_to_add[2];
args_to_add[0] = grpc_channel_arg_string_create(
GRPC_ARG_HTTP_CONNECT_SERVER,
(char*)GRPC_ARG_HTTP_CONNECT_SERVER,
uri->path[0] == '/' ? uri->path + 1 : uri->path);
if (user_cred != NULL) {
/* Use base64 encoding for user credentials as stated in RFC 7617 */
@ -166,8 +166,8 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
char* header;
gpr_asprintf(&header, "Proxy-Authorization:Basic %s", encoded_user_cred);
gpr_free(encoded_user_cred);
args_to_add[1] =
grpc_channel_arg_string_create(GRPC_ARG_HTTP_CONNECT_HEADERS, header);
args_to_add[1] = grpc_channel_arg_string_create(
(char*)GRPC_ARG_HTTP_CONNECT_HEADERS, header);
*new_args = grpc_channel_args_copy_and_add(args, args_to_add, 2);
gpr_free(header);
} else {

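For reference, the header assembled by the gpr_asprintf call above follows RFC 7617 Basic authentication. Worked example with the RFC's sample credentials Aladdin:open sesame, which base64-encode to QWxhZGRpbjpvcGVuIHNlc2FtZQ==, so the value added under GRPC_ARG_HTTP_CONNECT_HEADERS would read:

Proxy-Authorization:Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
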
@ -123,7 +123,6 @@
#define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
#define GRPC_GRPCLB_RECONNECT_JITTER 0.2
#define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000
grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false, "glb");
@ -300,10 +299,6 @@ typedef struct glb_lb_policy {
/** timeout in milliseconds for the LB call. 0 means no deadline. */
int lb_call_timeout_ms;
/** timeout in milliseconds for before using fallback backend addresses.
* 0 means not using fallback. */
int lb_fallback_timeout_ms;
/** for communicating with the LB server */
grpc_channel *lb_channel;
@ -330,9 +325,6 @@ typedef struct glb_lb_policy {
* Otherwise, we delegate to the RR policy. */
size_t serverlist_index;
/** stores the backend addresses from the resolver */
grpc_lb_addresses *fallback_backend_addresses;
/** list of picks that are waiting on RR's policy connectivity */
pending_pick *pending_picks;
@ -353,9 +345,6 @@ typedef struct glb_lb_policy {
/** is \a lb_call_retry_timer active? */
bool retry_timer_active;
/** is \a lb_fallback_timer active? */
bool fallback_timer_active;
/** called upon changes to the LB channel's connectivity. */
grpc_closure lb_channel_on_connectivity_changed;
@ -378,9 +367,6 @@ typedef struct glb_lb_policy {
/* LB call retry timer callback. */
grpc_closure lb_on_call_retry;
/* LB fallback timer callback. */
grpc_closure lb_on_fallback;
grpc_call *lb_call; /* streaming call to the LB server, */
grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
@ -404,9 +390,6 @@ typedef struct glb_lb_policy {
/** LB call retry timer */
grpc_timer lb_call_retry_timer;
/** LB fallback timer */
grpc_timer lb_fallback_timer;
bool initial_request_sent;
bool seen_initial_response;
@ -553,32 +536,6 @@ static grpc_lb_addresses *process_serverlist_locked(
return lb_addresses;
}
/* Returns the backend addresses extracted from the given addresses */
static grpc_lb_addresses *extract_backend_addresses_locked(
grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses) {
/* first pass: count the number of backend addresses */
size_t num_backends = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (!addresses->addresses[i].is_balancer) {
++num_backends;
}
}
/* second pass: actually populate the addresses and (empty) LB tokens */
grpc_lb_addresses *backend_addresses =
grpc_lb_addresses_create(num_backends, &lb_token_vtable);
size_t num_copied = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) continue;
const grpc_resolved_address *addr = &addresses->addresses[i].address;
grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr,
addr->len, false /* is_balancer */,
NULL /* balancer_name */,
(void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
++num_copied;
}
return backend_addresses;
}
static void update_lb_connectivity_status_locked(
grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
grpc_connectivity_state rr_state, grpc_error *rr_state_error) {
@ -646,38 +603,35 @@ static bool pick_from_internal_rr_locked(
grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
const grpc_lb_policy_pick_args *pick_args, bool force_async,
grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
// Check for drops if we are not using fallback backend addresses.
if (glb_policy->serverlist != NULL) {
// Look at the index into the serverlist to see if we should drop this call.
grpc_grpclb_server *server =
glb_policy->serverlist->servers[glb_policy->serverlist_index++];
if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
glb_policy->serverlist_index = 0; // Wrap-around.
// Look at the index into the serverlist to see if we should drop this call.
grpc_grpclb_server *server =
glb_policy->serverlist->servers[glb_policy->serverlist_index++];
if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
glb_policy->serverlist_index = 0; // Wrap-around.
}
if (server->drop) {
// Not using the RR policy, so unref it.
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
(intptr_t)wc_arg->rr_policy);
}
if (server->drop) {
// Not using the RR policy, so unref it.
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
(intptr_t)wc_arg->rr_policy);
}
GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
// Update client load reporting stats to indicate the number of
// dropped calls. Note that we have to do this here instead of in
// the client_load_reporting filter, because we do not create a
// subchannel call (and therefore no client_load_reporting filter)
// for dropped calls.
grpc_grpclb_client_stats_add_call_dropped_locked(
server->load_balance_token, wc_arg->client_stats);
grpc_grpclb_client_stats_unref(wc_arg->client_stats);
if (force_async) {
GPR_ASSERT(wc_arg->wrapped_closure != NULL);
GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
gpr_free(wc_arg->free_when_done);
return false;
}
GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
// Update client load reporting stats to indicate the number of
// dropped calls. Note that we have to do this here instead of in
// the client_load_reporting filter, because we do not create a
// subchannel call (and therefore no client_load_reporting filter)
// for dropped calls.
grpc_grpclb_client_stats_add_call_dropped_locked(server->load_balance_token,
wc_arg->client_stats);
grpc_grpclb_client_stats_unref(wc_arg->client_stats);
if (force_async) {
GPR_ASSERT(wc_arg->wrapped_closure != NULL);
GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
gpr_free(wc_arg->free_when_done);
return true;
return false;
}
gpr_free(wc_arg->free_when_done);
return true;
}
// Pick via the RR policy.
const bool pick_done = grpc_lb_policy_pick_locked(
@ -715,18 +669,8 @@ static bool pick_from_internal_rr_locked(
static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
grpc_lb_addresses *addresses;
if (glb_policy->serverlist != NULL) {
GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist);
} else {
// If rr_handover_locked() is invoked when we haven't received any
// serverlist from the balancer, we use the fallback backends returned by
// the resolver. Note that the fallback backend list may be empty, in which
// case the new round_robin policy will keep the requested picks pending.
GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
}
grpc_lb_addresses *addresses =
process_serverlist_locked(exec_ctx, glb_policy->serverlist);
GPR_ASSERT(addresses != NULL);
grpc_lb_policy_args *args = (grpc_lb_policy_args *)gpr_zalloc(sizeof(*args));
args->client_channel_factory = glb_policy->cc_factory;
@ -832,6 +776,8 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
/* glb_policy->rr_policy may be NULL (initial handover) */
static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
GPR_ASSERT(glb_policy->serverlist != NULL &&
glb_policy->serverlist->num_servers > 0);
if (glb_policy->shutting_down) return;
grpc_lb_policy_args *args = lb_policy_args_create(exec_ctx, glb_policy);
GPR_ASSERT(args != NULL);
@ -980,9 +926,6 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
if (glb_policy->serverlist != NULL) {
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
if (glb_policy->fallback_backend_addresses != NULL) {
grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
}
grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
grpc_subchannel_index_unref();
if (glb_policy->pending_update_args != NULL) {
@ -1124,28 +1067,10 @@ static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_UNREF(error);
}
static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy);
static void start_picking_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
/* start a timer to fall back */
if (glb_policy->lb_fallback_timeout_ms > 0 &&
glb_policy->serverlist == NULL) {
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec deadline = gpr_time_add(
now,
gpr_time_from_millis(glb_policy->lb_fallback_timeout_ms, GPR_TIMESPAN));
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
glb_policy->fallback_timer_active = true;
grpc_timer_init(exec_ctx, &glb_policy->lb_fallback_timer, deadline,
&glb_policy->lb_on_fallback, now);
}
glb_policy->started_picking = true;
gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
query_for_backends_locked(exec_ctx, glb_policy);
@ -1600,15 +1525,6 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (glb_policy->serverlist != NULL) {
/* dispose of the old serverlist */
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
} else {
/* or dispose of the fallback */
grpc_lb_addresses_destroy(exec_ctx,
glb_policy->fallback_backend_addresses);
glb_policy->fallback_backend_addresses = NULL;
if (glb_policy->fallback_timer_active) {
grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
glb_policy->fallback_timer_active = false;
}
}
/* and update the copy in the glb_lb_policy instance. This
* serverlist instance will be destroyed either upon the next
@ -1619,7 +1535,9 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
} else {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Received empty server list, ignoring.");
gpr_log(GPR_INFO,
"Received empty server list. Picks will stay pending until "
"a response with > 0 servers is received");
}
grpc_grpclb_destroy_serverlist(serverlist);
}
@ -1642,6 +1560,9 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
&glb_policy->lb_on_response_received); /* loop */
GPR_ASSERT(GRPC_CALL_OK == call_error);
} else {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"lb_on_response_received_locked_shutdown");
}
} else { /* empty payload: call cancelled. */
/* dispose of the "lb_on_response_received_locked" weak ref taken in
@ -1666,26 +1587,6 @@ static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
}
static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
/* If we receive a serverlist after the timer fires but before this callback
* actually runs, don't do anything. */
if (glb_policy->serverlist != NULL) return;
glb_policy->fallback_timer_active = false;
if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Falling back to use backends from resolver (grpclb %p)",
(void *)glb_policy);
}
GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
rr_handover_locked(exec_ctx, glb_policy);
}
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"grpclb_fallback_timer");
}
static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
@ -1806,17 +1707,6 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
&glb_policy->lb_channel_connectivity,
&glb_policy->lb_channel_on_connectivity_changed, NULL);
}
// Propagate update to fallback_backend_addresses if a non-empty serverlist
// hasn't been received from the balancer.
if (glb_policy->serverlist == NULL) {
grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
glb_policy->fallback_backend_addresses =
extract_backend_addresses_locked(exec_ctx, addresses);
if (glb_policy->rr_policy != NULL) {
rr_handover_locked(exec_ctx, glb_policy);
}
}
}
// Invoked as part of the update process. It continues watching the LB channel
@ -1899,7 +1789,13 @@ static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args) {
/* Count the number of gRPC-LB addresses. There must be at least one. */
/* Count the number of gRPC-LB addresses. There must be at least one.
* TODO(roth): For now, we ignore non-balancer addresses, but in the
* future, we may change the behavior such that we fall back to using
* the non-balancer addresses if we cannot reach any balancers. In the
* fallback case, we should use the LB policy indicated by
* GRPC_ARG_LB_POLICY_NAME (although if that specifies grpclb or is
* unset, we should default to pick_first). */
const grpc_arg *arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
@ -1935,24 +1831,14 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
glb_policy->lb_call_timeout_ms =
grpc_channel_arg_get_integer(arg, (grpc_integer_options){0, 0, INT_MAX});
arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS);
glb_policy->lb_fallback_timeout_ms = grpc_channel_arg_get_integer(
arg, (grpc_integer_options){GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0,
INT_MAX});
// Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
// since we use this to trigger the client_load_reporting filter.
grpc_arg new_arg =
grpc_channel_arg_string_create(GRPC_ARG_LB_POLICY_NAME, "grpclb");
grpc_arg new_arg = grpc_channel_arg_string_create(
(char *)GRPC_ARG_LB_POLICY_NAME, (char *)"grpclb");
static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
/* Extract the backend addresses (may be empty) from the resolver for
* fallback. */
glb_policy->fallback_backend_addresses =
extract_backend_addresses_locked(exec_ctx, addresses);
/* Create a client channel over them to communicate with a LB service */
glb_policy->response_generator =
grpc_fake_resolver_response_generator_create();

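The grpclb hunks above remove the fallback-to-resolver-backends machinery (fallback timer, fallback_backend_addresses and friends), so pick_from_internal_rr_locked once again assumes a non-NULL serverlist: it walks the list with a wrap-around cursor and, when the balancer marked an entry as a drop, completes the pick with no subchannel while bumping the per-token dropped-call counter. The retained walk, reduced to a skeleton (illustrative types, not the real grpc_grpclb_server):

#include <stdbool.h>
#include <stddef.h>

typedef struct { bool drop; } server_entry;

/* Advance the round-robin cursor; report whether the chosen entry is a
   drop, in which case the caller finishes the pick without creating a
   subchannel call and records the drop for load reporting. */
bool next_pick_is_drop(const server_entry *servers, size_t num_servers,
                       size_t *cursor) {
  const server_entry *server = &servers[(*cursor)++];
  if (*cursor == num_servers) *cursor = 0; /* wrap-around */
  return server->drop;
}
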
@ -589,7 +589,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
// Dispose of outdated subchannel lists.
if (sd->subchannel_list != p->subchannel_list &&
sd->subchannel_list != p->latest_pending_subchannel_list) {
char *reason = NULL;
const char *reason = NULL;
if (sd->subchannel_list->shutting_down) {
reason = "sl_outdated_straggler";
rr_subchannel_list_unref(exec_ctx, sd->subchannel_list, reason);

@ -56,7 +56,7 @@ grpc_lb_addresses* grpc_lb_addresses_copy(const grpc_lb_addresses* addresses) {
}
void grpc_lb_addresses_set_address(grpc_lb_addresses* addresses, size_t index,
const void* address, size_t address_len,
void* address, size_t address_len,
bool is_balancer, const char* balancer_name,
void* user_data) {
GPR_ASSERT(index < addresses->num_addresses);
@ -141,7 +141,7 @@ static const grpc_arg_pointer_vtable lb_addresses_arg_vtable = {
grpc_arg grpc_lb_addresses_create_channel_arg(
const grpc_lb_addresses* addresses) {
return grpc_channel_arg_pointer_create(
GRPC_ARG_LB_ADDRESSES, (void*)addresses, &lb_addresses_arg_vtable);
(char*)GRPC_ARG_LB_ADDRESSES, (void*)addresses, &lb_addresses_arg_vtable);
}
grpc_lb_addresses* grpc_lb_addresses_find_channel_arg(

@ -73,7 +73,7 @@ grpc_lb_addresses *grpc_lb_addresses_copy(const grpc_lb_addresses *addresses);
* \a address is a socket address of length \a address_len.
* Takes ownership of \a balancer_name. */
void grpc_lb_addresses_set_address(grpc_lb_addresses *addresses, size_t index,
const void *address, size_t address_len,
void *address, size_t address_len,
bool is_balancer, const char *balancer_name,
void *user_data);

@ -249,7 +249,7 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
service_config_string);
args_to_remove[num_args_to_remove++] = GRPC_ARG_SERVICE_CONFIG;
new_args[num_args_to_add++] = grpc_channel_arg_string_create(
GRPC_ARG_SERVICE_CONFIG, service_config_string);
(char *)GRPC_ARG_SERVICE_CONFIG, service_config_string);
service_config = grpc_service_config_create(service_config_string);
if (service_config != NULL) {
const char *lb_policy_name =
@ -257,7 +257,7 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (lb_policy_name != NULL) {
args_to_remove[num_args_to_remove++] = GRPC_ARG_LB_POLICY_NAME;
new_args[num_args_to_add++] = grpc_channel_arg_string_create(
GRPC_ARG_LB_POLICY_NAME, (char *)lb_policy_name);
(char *)GRPC_ARG_LB_POLICY_NAME, (char *)lb_policy_name);
}
}
}

@ -210,7 +210,7 @@ grpc_arg grpc_fake_resolver_response_generator_arg(
grpc_fake_resolver_response_generator* generator) {
grpc_arg arg;
arg.type = GRPC_ARG_POINTER;
arg.key = GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR;
arg.key = (char*)GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR;
arg.value.pointer.p = generator;
arg.value.pointer.vtable = &response_generator_arg_vtable;
return arg;

@ -811,6 +811,6 @@ const char *grpc_get_subchannel_address_uri_arg(const grpc_channel_args *args) {
grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address *addr) {
return grpc_channel_arg_string_create(
GRPC_ARG_SUBCHANNEL_ADDRESS,
(char *)GRPC_ARG_SUBCHANNEL_ADDRESS,
addr->len > 0 ? grpc_sockaddr_to_uri(addr) : gpr_strdup(""));
}

@ -244,7 +244,7 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
&calld->slices, &tmp);
if (did_compress) {
if (GRPC_TRACER_ON(grpc_compression_trace)) {
char *algo_name;
const char *algo_name;
const size_t before_size = calld->slices.length;
const size_t after_size = tmp.length;
const float savings_ratio = 1.0f - (float)after_size / (float)before_size;
@ -258,7 +258,7 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
send_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
} else {
if (GRPC_TRACER_ON(grpc_compression_trace)) {
char *algo_name;
const char *algo_name;
GPR_ASSERT(grpc_compression_algorithm_name(calld->compression_algorithm,
&algo_name));
gpr_log(GPR_DEBUG,

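The const change above matches the updated grpc_compression_algorithm_name signature, which now types the returned name as const char *, encoding that callers receive a pointer to static storage they must not modify or free. Usage stays a one-liner (a sketch against the public compression API):

#include <grpc/compression.h>
#include <grpc/support/log.h>

void name_demo(void) {
  const char *algo_name;
  GPR_ASSERT(
      grpc_compression_algorithm_name(GRPC_COMPRESS_GZIP, &algo_name));
  /* algo_name points at a static string such as "gzip"; the const
     qualifier documents that it is read-only. */
}
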
@ -55,7 +55,8 @@ static bool maybe_add_server_load_reporting_filter(
}
grpc_arg grpc_load_reporting_enable_arg() {
return grpc_channel_arg_integer_create(GRPC_ARG_ENABLE_LOAD_REPORTING, 1);
return grpc_channel_arg_integer_create((char *)GRPC_ARG_ENABLE_LOAD_REPORTING,
1);
}
/* Plugin registration */

@ -55,7 +55,7 @@ static grpc_channel *client_channel_factory_create_channel(
}
// Add channel arg containing the server URI.
grpc_arg arg = grpc_channel_arg_string_create(
GRPC_ARG_SERVER_URI,
(char *)GRPC_ARG_SERVER_URI,
grpc_resolver_factory_add_default_prefix_if_needed(exec_ctx, target));
const char *to_remove[] = {GRPC_ARG_SERVER_URI};
grpc_channel_args *new_args =

@ -42,7 +42,7 @@ grpc_channel *grpc_insecure_channel_create_from_fd(
(target, fd, args));
grpc_arg default_authority_arg = grpc_channel_arg_string_create(
GRPC_ARG_DEFAULT_AUTHORITY, "test.authority");
(char *)GRPC_ARG_DEFAULT_AUTHORITY, (char *)"test.authority");
grpc_channel_args *final_args =
grpc_channel_args_copy_and_add(args, &default_authority_arg, 1);

@ -23,6 +23,7 @@
void grpc_chttp2_plugin_init(void) {
grpc_register_tracer(&grpc_http_trace);
grpc_register_tracer(&grpc_flowctl_trace);
grpc_register_tracer(&grpc_trace_http2_stream_state);
#ifndef NDEBUG
grpc_register_tracer(&grpc_trace_chttp2_refcount);
#endif

@ -64,6 +64,11 @@
#define DEFAULT_KEEPALIVE_PERMIT_WITHOUT_CALLS false
#define KEEPALIVE_TIME_BACKOFF_MULTIPLIER 2
#define DEFAULT_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS 300000 /* 5 minutes */
#define DEFAULT_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS 300000 /* 5 minutes */
#define DEFAULT_MAX_PINGS_BETWEEN_DATA 0 /* unlimited */
#define DEFAULT_MAX_PING_STRIKES 2
static int g_default_client_keepalive_time_ms =
DEFAULT_CLIENT_KEEPALIVE_TIME_MS;
static int g_default_client_keepalive_timeout_ms =
@ -75,6 +80,13 @@ static int g_default_server_keepalive_timeout_ms =
static bool g_default_keepalive_permit_without_calls =
DEFAULT_KEEPALIVE_PERMIT_WITHOUT_CALLS;
static int g_default_min_sent_ping_interval_without_data_ms =
DEFAULT_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS;
static int g_default_min_recv_ping_interval_without_data_ms =
DEFAULT_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS;
static int g_default_max_pings_without_data = DEFAULT_MAX_PINGS_BETWEEN_DATA;
static int g_default_max_ping_strikes = DEFAULT_MAX_PING_STRIKES;
#define MAX_CLIENT_STREAM_ID 0x7fffffffu
grpc_tracer_flag grpc_http_trace = GRPC_TRACER_INITIALIZER(false, "http");
grpc_tracer_flag grpc_flowctl_trace = GRPC_TRACER_INITIALIZER(false, "flowctl");
@ -152,11 +164,6 @@ static void send_ping_locked(
static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error);
#define DEFAULT_MIN_TIME_BETWEEN_PINGS_MS 0
#define DEFAULT_MAX_PINGS_BETWEEN_DATA 3
#define DEFAULT_MAX_PING_STRIKES 2
#define DEFAULT_MIN_PING_INTERVAL_WITHOUT_DATA_MS 300000 /* 5 minutes */
/** keepalive-relevant functions */
static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
@ -363,12 +370,12 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA, 1);
t->ping_policy = (grpc_chttp2_repeated_ping_policy){
.max_pings_without_data = DEFAULT_MAX_PINGS_BETWEEN_DATA,
.min_time_between_pings =
gpr_time_from_millis(DEFAULT_MIN_TIME_BETWEEN_PINGS_MS, GPR_TIMESPAN),
.max_ping_strikes = DEFAULT_MAX_PING_STRIKES,
.min_ping_interval_without_data = gpr_time_from_millis(
DEFAULT_MIN_PING_INTERVAL_WITHOUT_DATA_MS, GPR_TIMESPAN),
.max_pings_without_data = g_default_max_pings_without_data,
.min_sent_ping_interval_without_data = gpr_time_from_millis(
g_default_min_sent_ping_interval_without_data_ms, GPR_TIMESPAN),
.max_ping_strikes = g_default_max_ping_strikes,
.min_recv_ping_interval_without_data = gpr_time_from_millis(
g_default_min_recv_ping_interval_without_data_ms, GPR_TIMESPAN),
};
/* Keepalive setting */
@ -428,29 +435,37 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA)) {
t->ping_policy.max_pings_without_data = grpc_channel_arg_get_integer(
&channel_args->args[i],
(grpc_integer_options){DEFAULT_MAX_PINGS_BETWEEN_DATA, 0, INT_MAX});
(grpc_integer_options){g_default_max_pings_without_data, 0,
INT_MAX});
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_MAX_PING_STRIKES)) {
t->ping_policy.max_ping_strikes = grpc_channel_arg_get_integer(
&channel_args->args[i],
(grpc_integer_options){DEFAULT_MAX_PING_STRIKES, 0, INT_MAX});
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_MIN_TIME_BETWEEN_PINGS_MS)) {
t->ping_policy.min_time_between_pings = gpr_time_from_millis(
grpc_channel_arg_get_integer(
&channel_args->args[i],
(grpc_integer_options){DEFAULT_MIN_TIME_BETWEEN_PINGS_MS, 0,
INT_MAX}),
GPR_TIMESPAN);
(grpc_integer_options){g_default_max_ping_strikes, 0, INT_MAX});
} else if (0 ==
strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_MIN_PING_INTERVAL_WITHOUT_DATA_MS)) {
t->ping_policy.min_ping_interval_without_data = gpr_time_from_millis(
grpc_channel_arg_get_integer(
&channel_args->args[i],
(grpc_integer_options){
DEFAULT_MIN_PING_INTERVAL_WITHOUT_DATA_MS, 0, INT_MAX}),
GPR_TIMESPAN);
strcmp(
channel_args->args[i].key,
GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS)) {
t->ping_policy.min_sent_ping_interval_without_data =
gpr_time_from_millis(
grpc_channel_arg_get_integer(
&channel_args->args[i],
(grpc_integer_options){
g_default_min_sent_ping_interval_without_data_ms, 0,
INT_MAX}),
GPR_TIMESPAN);
} else if (0 ==
strcmp(
channel_args->args[i].key,
GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS)) {
t->ping_policy.min_recv_ping_interval_without_data =
gpr_time_from_millis(
grpc_channel_arg_get_integer(
&channel_args->args[i],
(grpc_integer_options){
g_default_min_recv_ping_interval_without_data_ms, 0,
INT_MAX}),
GPR_TIMESPAN);
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_WRITE_BUFFER_SIZE)) {
t->write_buffer_size = (uint32_t)grpc_channel_arg_get_integer(
@ -557,8 +572,8 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
}
t->ping_state.pings_before_data_required =
t->ping_policy.max_pings_without_data;
/* No pings allowed before receiving a header or data frame. */
t->ping_state.pings_before_data_required = 0;
t->ping_state.is_delayed_ping_timer_set = false;
t->ping_recv_state.last_ping_recv_time = gpr_inf_past(GPR_CLOCK_MONOTONIC);
@ -625,6 +640,9 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx,
connectivity_state_set(exec_ctx, t, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "close_transport");
grpc_endpoint_shutdown(exec_ctx, t->ep, GRPC_ERROR_REF(error));
if (t->ping_state.is_delayed_ping_timer_set) {
grpc_timer_cancel(exec_ctx, &t->ping_state.delayed_ping_timer);
}
switch (t->keepalive_state) {
case GRPC_CHTTP2_KEEPALIVE_STATE_WAITING:
grpc_timer_cancel(exec_ctx, &t->keepalive_ping_timer);
@ -1729,8 +1747,10 @@ static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
t->ping_state.is_delayed_ping_timer_set = false;
grpc_chttp2_initiate_write(exec_ctx, t,
GRPC_CHTTP2_INITIATE_WRITE_RETRY_SEND_PING);
if (error == GRPC_ERROR_NONE) {
grpc_chttp2_initiate_write(exec_ctx, t,
GRPC_CHTTP2_INITIATE_WRITE_RETRY_SEND_PING);
}
}
void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
@ -2631,6 +2651,36 @@ void grpc_chttp2_config_default_keepalive_args(grpc_channel_args *args,
&args->args[i],
(grpc_integer_options){g_default_keepalive_permit_without_calls,
0, 1});
} else if (0 ==
strcmp(args->args[i].key, GRPC_ARG_HTTP2_MAX_PING_STRIKES)) {
g_default_max_ping_strikes = grpc_channel_arg_get_integer(
&args->args[i],
(grpc_integer_options){g_default_max_ping_strikes, 0, INT_MAX});
} else if (0 == strcmp(args->args[i].key,
GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA)) {
g_default_max_pings_without_data = grpc_channel_arg_get_integer(
&args->args[i], (grpc_integer_options){
g_default_max_pings_without_data, 0, INT_MAX});
} else if (0 ==
strcmp(
args->args[i].key,
GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS)) {
g_default_min_sent_ping_interval_without_data_ms =
grpc_channel_arg_get_integer(
&args->args[i],
(grpc_integer_options){
g_default_min_sent_ping_interval_without_data_ms, 0,
INT_MAX});
} else if (0 ==
strcmp(
args->args[i].key,
GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS)) {
g_default_min_recv_ping_interval_without_data_ms =
grpc_channel_arg_get_integer(
&args->args[i],
(grpc_integer_options){
g_default_min_recv_ping_interval_without_data_ms, 0,
INT_MAX});
}
}
}

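Taken together, the chttp2_transport.c hunks replace the compile-time ping constants (DEFAULT_MIN_TIME_BETWEEN_PINGS_MS and friends) with split min_sent_* / min_recv_* knobs that are configurable per channel in init_transport and process-wide via grpc_chttp2_config_default_keepalive_args. A sketch of how a client channel might set the per-channel keys (the key macros appear in this diff; the values and helper function are illustrative):

#include <grpc/grpc.h>

grpc_channel *channel_with_ping_policy(const char *target) {
  grpc_arg ping_args[2];
  ping_args[0].type = GRPC_ARG_INTEGER;
  ping_args[0].key = (char *)GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA;
  ping_args[0].value.integer = 0; /* 0 == unlimited, per this diff */
  ping_args[1].type = GRPC_ARG_INTEGER;
  ping_args[1].key =
      (char *)GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS;
  ping_args[1].value.integer = 10 * 1000; /* one data-less ping per 10s */
  grpc_channel_args args = {2, ping_args};
  return grpc_insecure_channel_create(target, &args, NULL);
}
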
@ -25,6 +25,7 @@
extern grpc_tracer_flag grpc_http_trace;
extern grpc_tracer_flag grpc_flowctl_trace;
extern grpc_tracer_flag grpc_trace_http2_stream_state;
#ifndef NDEBUG
extern grpc_tracer_flag grpc_trace_chttp2_refcount;

@ -92,7 +92,7 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec next_allowed_ping =
gpr_time_add(t->ping_recv_state.last_ping_recv_time,
t->ping_policy.min_ping_interval_without_data);
t->ping_policy.min_recv_ping_interval_without_data);
if (t->keepalive_permit_without_calls == 0 &&
grpc_chttp2_stream_map_size(&t->stream_map) == 0) {

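The send side (writing.c, below) and the receive side (frame_ping.c, above) now share the same throttle shape: the previous ping's timestamp plus the matching minimum interval gives next_allowed_ping, and anything earlier is rejected (on the receive side an early ping counts toward max_ping_strikes). The comparison in plain milliseconds rather than gpr_timespec (sketch):

#include <stdbool.h>

bool ping_is_too_early(long long now_ms, long long last_ping_ms,
                       long long min_interval_ms) {
  long long next_allowed_ping_ms = last_ping_ms + min_interval_ms;
  return now_ms < next_allowed_ping_ms; /* too early: delay or strike */
}
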
@ -112,10 +112,10 @@ typedef struct {
} grpc_chttp2_ping_queue;
typedef struct {
gpr_timespec min_time_between_pings;
int max_pings_without_data;
int max_ping_strikes;
gpr_timespec min_ping_interval_without_data;
gpr_timespec min_sent_ping_interval_without_data;
gpr_timespec min_recv_ping_interval_without_data;
} grpc_chttp2_repeated_ping_policy;
typedef struct {

@ -383,6 +383,9 @@ error_handler:
/* t->parser = grpc_chttp2_data_parser_parse;*/
t->parser = grpc_chttp2_data_parser_parse;
t->parser_data = &s->data_parser;
t->ping_state.pings_before_data_required =
t->ping_policy.max_pings_without_data;
t->ping_state.last_ping_sent_time = gpr_inf_past(GPR_CLOCK_MONOTONIC);
return GRPC_ERROR_NONE;
} else if (grpc_error_get_int(err, GRPC_ERROR_INT_STREAM_ID, NULL)) {
/* handle stream errors by closing the stream */
@ -559,6 +562,10 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
(t->incoming_frame_flags & GRPC_CHTTP2_DATA_FLAG_END_STREAM) != 0;
}
t->ping_state.pings_before_data_required =
t->ping_policy.max_pings_without_data;
t->ping_state.last_ping_sent_time = gpr_inf_past(GPR_CLOCK_MONOTONIC);
/* could be a new grpc_chttp2_stream or an existing grpc_chttp2_stream */
s = grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
if (s == NULL) {

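These parsing.c additions are the other half of the init_transport change above ("No pings allowed before receiving a header or data frame"): the data-less ping budget starts at zero and is refilled to max_pings_without_data only when a DATA or HEADERS frame actually arrives, while maybe_initiate_ping (writing.c) draws it down one ping at a time. The refill, sketched with an illustrative struct:

typedef struct {
  int pings_before_data_required; /* remaining data-less pings allowed */
} ping_state_sketch;

void on_data_or_headers_frame(ping_state_sketch *ps,
                              int max_pings_without_data) {
  /* Receiving substantive traffic re-arms the budget the write path
     decrements per ping sent. */
  ps->pings_before_data_required = max_pings_without_data;
}
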
@ -20,6 +20,27 @@
#include <grpc/support/log.h>
static char *stream_list_id_string(grpc_chttp2_stream_list_id id) {
switch (id) {
case GRPC_CHTTP2_LIST_WRITABLE:
return "writable";
case GRPC_CHTTP2_LIST_WRITING:
return "writing";
case GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT:
return "stalled_by_transport";
case GRPC_CHTTP2_LIST_STALLED_BY_STREAM:
return "stalled_by_stream";
case GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY:
return "waiting_for_concurrency";
case STREAM_LIST_COUNT:
GPR_UNREACHABLE_CODE(return "unknown");
}
GPR_UNREACHABLE_CODE(return "unknown");
}
grpc_tracer_flag grpc_trace_http2_stream_state =
GRPC_TRACER_INITIALIZER(false, "http2_stream_state");
/* core list management */
static bool stream_list_empty(grpc_chttp2_transport *t,
@ -44,6 +65,10 @@ static bool stream_list_pop(grpc_chttp2_transport *t,
s->included[id] = 0;
}
*stream = s;
if (s && GRPC_TRACER_ON(grpc_trace_http2_stream_state)) {
gpr_log(GPR_DEBUG, "%p[%d][%s]: pop from %s", t, s->id,
t->is_client ? "cli" : "svr", stream_list_id_string(id));
}
return s != 0;
}
@ -62,6 +87,10 @@ static void stream_list_remove(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
} else {
t->lists[id].tail = s->links[id].prev;
}
if (GRPC_TRACER_ON(grpc_trace_http2_stream_state)) {
gpr_log(GPR_DEBUG, "%p[%d][%s]: remove from %s", t, s->id,
t->is_client ? "cli" : "svr", stream_list_id_string(id));
}
}
static bool stream_list_maybe_remove(grpc_chttp2_transport *t,
@ -90,6 +119,10 @@ static void stream_list_add_tail(grpc_chttp2_transport *t,
}
t->lists[id].tail = s;
s->included[id] = 1;
if (GRPC_TRACER_ON(grpc_trace_http2_stream_state)) {
gpr_log(GPR_DEBUG, "%p[%d][%s]: add to %s", t, s->id,
t->is_client ? "cli" : "svr", stream_list_id_string(id));
}
}
static bool stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
@ -150,17 +183,12 @@ void grpc_chttp2_list_remove_waiting_for_concurrency(grpc_chttp2_transport *t,
void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
GRPC_FLOW_CONTROL_IF_TRACING(
gpr_log(GPR_DEBUG, "stream %u stalled by transport", s->id));
stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
bool grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
bool ret = stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
GRPC_FLOW_CONTROL_IF_TRACING(if (ret) gpr_log(
GPR_DEBUG, "stream %u un-stalled by transport", (*s)->id));
return ret;
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
@ -170,23 +198,15 @@ void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
void grpc_chttp2_list_add_stalled_by_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
GRPC_FLOW_CONTROL_IF_TRACING(
gpr_log(GPR_DEBUG, "stream %u stalled by stream", s->id));
stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
}
bool grpc_chttp2_list_pop_stalled_by_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
bool ret = stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
GRPC_FLOW_CONTROL_IF_TRACING(
if (ret) gpr_log(GPR_DEBUG, "stream %u un-stalled by stream", (*s)->id));
return ret;
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
}
bool grpc_chttp2_list_remove_stalled_by_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
bool ret = stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
GRPC_FLOW_CONTROL_IF_TRACING(
if (ret) gpr_log(GPR_DEBUG, "stream %u un-stalled by stream", s->id));
return ret;
return stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
}
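
The flow-control specific "stalled/un-stalled" logs above are folded into one uniform tracer: grpc_trace_http2_stream_state now gates a pop/remove/add log at every list transition. A usage sketch; the GRPC_TRACE=http2_stream_state environment variable (with GRPC_VERBOSITY=debug) is the usual route, and grpc_tracer_set_enabled from <grpc/grpc.h> should work programmatically, assuming the new flag is registered like the existing "http" tracers:

#include <grpc/grpc.h>

int main(void) {
  grpc_init();
  grpc_tracer_set_enabled("http2_stream_state", 1);
  /* ... exercise a channel; every stream-list transition now logs
     "%p[%d][cli|svr]: add to <list>" and friends at DEBUG ... */
  grpc_shutdown();
  return 0;
}
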

@ -68,7 +68,7 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
}
if (t->ping_state.pings_before_data_required == 0 &&
t->ping_policy.max_pings_without_data != 0) {
/* need to send something of substance before sending a ping again */
/* need to receive something of substance before sending a ping again */
if (GRPC_TRACER_ON(grpc_http_trace) ||
GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
gpr_log(GPR_DEBUG, "Ping delayed [%p]: too many recent pings: %d/%d",
@ -78,11 +78,18 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
return;
}
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec elapsed = gpr_time_sub(now, t->ping_state.last_ping_sent_time);
/*gpr_log(GPR_DEBUG, "elapsed:%d.%09d min:%d.%09d", (int)elapsed.tv_sec,
elapsed.tv_nsec, (int)t->ping_policy.min_time_between_pings.tv_sec,
(int)t->ping_policy.min_time_between_pings.tv_nsec);*/
if (gpr_time_cmp(elapsed, t->ping_policy.min_time_between_pings) < 0) {
gpr_timespec next_allowed_ping =
gpr_time_add(t->ping_state.last_ping_sent_time,
t->ping_policy.min_sent_ping_interval_without_data);
if (t->keepalive_permit_without_calls == 0 &&
grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
next_allowed_ping = gpr_time_add(t->ping_recv_state.last_ping_recv_time,
gpr_time_from_seconds(7200, GPR_TIMESPAN));
}
/* gpr_log(GPR_DEBUG, "next_allowed_ping:%d.%09d now:%d.%09d",
(int)next_allowed_ping.tv_sec, (int)next_allowed_ping.tv_nsec,
(int)now.tv_sec, (int)now.tv_nsec); */
if (gpr_time_cmp(next_allowed_ping, now) > 0) {
/* not enough elapsed time between successive pings */
if (GRPC_TRACER_ON(grpc_http_trace) ||
GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
@ -93,9 +100,7 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
if (!t->ping_state.is_delayed_ping_timer_set) {
t->ping_state.is_delayed_ping_timer_set = true;
grpc_timer_init(exec_ctx, &t->ping_state.delayed_ping_timer,
gpr_time_add(t->ping_state.last_ping_sent_time,
t->ping_policy.min_time_between_pings),
&t->retry_initiate_ping_locked,
next_allowed_ping, &t->retry_initiate_ping_locked,
gpr_now(GPR_CLOCK_MONOTONIC));
}
return;
@ -119,6 +124,12 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
grpc_chttp2_ping_create(false, pq->inflight_id));
GRPC_STATS_INC_HTTP2_PINGS_SENT(exec_ctx);
t->ping_state.last_ping_sent_time = now;
if (GRPC_TRACER_ON(grpc_http_trace) ||
GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
gpr_log(GPR_DEBUG, "Ping sent [%p]: %d/%d", t->peer_string,
t->ping_state.pings_before_data_required,
t->ping_policy.max_pings_without_data);
}
t->ping_state.pings_before_data_required -=
(t->ping_state.pings_before_data_required != 0);
}
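
The hunks above and below change ping pacing in two ways: the throttle now computes an absolute next_allowed_ping instead of comparing an elapsed delta, and the per-write resets of pings_before_data_required disappear from begin_write (the budget is re-armed by the parser instead, as sketched earlier), while servers push last_ping_recv_time to the infinite past whenever they write, presumably so a client ping arriving right after server data is not counted as too frequent. A sketch of the send-side rule, with illustrative names:

static const double kNoCallsPingIntervalSec = 7200.0; /* the constant above */

typedef struct {
  double last_ping_sent;            /* seconds, monotonic */
  double last_ping_recv;            /* seconds, monotonic */
  double min_sent_interval_no_data; /* min_sent_ping_interval_without_data */
  int keepalive_permit_without_calls;
  int open_streams;                 /* grpc_chttp2_stream_map_size(...) */
} throttle_sketch;

static int ping_allowed_now(const throttle_sketch *t, double now) {
  double next_allowed = t->last_ping_sent + t->min_sent_interval_no_data;
  if (t->keepalive_permit_without_calls == 0 && t->open_streams == 0) {
    next_allowed = t->last_ping_recv + kNoCallsPingIntervalSec;
  }
  /* when this returns 0, the delayed-ping timer is armed for next_allowed */
  return now >= next_allowed;
}
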
@ -257,8 +268,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
.stats = &s->stats.outgoing};
grpc_chttp2_encode_header(exec_ctx, &t->hpack_compressor, NULL, 0,
s->send_initial_metadata, &hopt, &t->outbuf);
t->ping_state.pings_before_data_required =
t->ping_policy.max_pings_without_data;
now_writing = true;
if (!t->is_client) {
t->ping_recv_state.last_ping_recv_time =
gpr_inf_past(GPR_CLOCK_MONOTONIC);
@ -297,8 +307,6 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
grpc_slice_buffer_add(
&t->outbuf, grpc_chttp2_window_update_create(s->id, stream_announce,
&s->stats.outgoing));
t->ping_state.pings_before_data_required =
t->ping_policy.max_pings_without_data;
if (!t->is_client) {
t->ping_recv_state.last_ping_recv_time =
gpr_inf_past(GPR_CLOCK_MONOTONIC);
@ -375,8 +383,6 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
send_bytes);
s->sending_bytes += send_bytes;
}
t->ping_state.pings_before_data_required =
t->ping_policy.max_pings_without_data;
if (!t->is_client) {
t->ping_recv_state.last_ping_recv_time =
gpr_inf_past(GPR_CLOCK_MONOTONIC);
@ -487,8 +493,6 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
grpc_slice_buffer_add(
&t->outbuf, grpc_chttp2_window_update_create(0, transport_announce,
&throwaway_stats));
t->ping_state.pings_before_data_required =
t->ping_policy.max_pings_without_data;
if (!t->is_client) {
t->ping_recv_state.last_ping_recv_time =
gpr_inf_past(GPR_CLOCK_MONOTONIC);

@ -1263,8 +1263,8 @@ grpc_channel *grpc_inproc_channel_create(grpc_server *server,
grpc_arg default_authority_arg;
default_authority_arg.type = GRPC_ARG_STRING;
default_authority_arg.key = GRPC_ARG_DEFAULT_AUTHORITY;
default_authority_arg.value.string = "inproc.authority";
default_authority_arg.key = (char *)GRPC_ARG_DEFAULT_AUTHORITY;
default_authority_arg.value.string = (char *)"inproc.authority";
grpc_channel_args *client_args =
grpc_channel_args_copy_and_add(args, &default_authority_arg, 1);

@ -243,7 +243,7 @@ grpc_channel_args *grpc_channel_args_set_compression_algorithm(
GPR_ASSERT(algorithm < GRPC_COMPRESS_ALGORITHMS_COUNT);
grpc_arg tmp;
tmp.type = GRPC_ARG_INTEGER;
tmp.key = GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
tmp.key = (char *)GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
tmp.value.integer = algorithm;
return grpc_channel_args_copy_and_add(a, &tmp, 1);
}
@ -253,7 +253,7 @@ grpc_channel_args *grpc_channel_args_set_stream_compression_algorithm(
GPR_ASSERT(algorithm < GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT);
grpc_arg tmp;
tmp.type = GRPC_ARG_INTEGER;
tmp.key = GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
tmp.key = (char *)GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
tmp.value.integer = algorithm;
return grpc_channel_args_copy_and_add(a, &tmp, 1);
}
@ -308,7 +308,7 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
if (grpc_channel_args_get_compression_algorithm(*a) == algorithm &&
state == 0) {
char *algo_name = NULL;
const char *algo_name = NULL;
GPR_ASSERT(grpc_compression_algorithm_name(algorithm, &algo_name) != 0);
gpr_log(GPR_ERROR,
"Tried to disable default compression algorithm '%s'. The "
@ -324,7 +324,7 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
/* create a new arg */
grpc_arg tmp;
tmp.type = GRPC_ARG_INTEGER;
tmp.key = GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
tmp.key = (char *)GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
/* all enabled by default */
tmp.value.integer = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1;
if (state != 0) {
@ -349,7 +349,7 @@ grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state(
if (grpc_channel_args_get_stream_compression_algorithm(*a) == algorithm &&
state == 0) {
char *algo_name = NULL;
const char *algo_name = NULL;
GPR_ASSERT(grpc_stream_compression_algorithm_name(algorithm, &algo_name) !=
0);
gpr_log(GPR_ERROR,
@ -366,7 +366,7 @@ grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state(
/* create a new arg */
grpc_arg tmp;
tmp.type = GRPC_ARG_INTEGER;
tmp.key = GRPC_STREAM_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
tmp.key = (char *)GRPC_STREAM_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
/* all enabled by default */
tmp.value.integer = (1u << GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) - 1;
if (state != 0) {
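
The casts above exist because grpc_arg.key (and value.string) are declared as plain char * while the channel-arg keys are const string literals; this is const-correctness at the call sites, not a behavior change. A tiny helper that would centralize the cast, illustrative only and not a gRPC API:

#include <string.h>
#include <grpc/grpc.h>

static grpc_arg integer_arg(const char *key, int value) {
  grpc_arg a;
  memset(&a, 0, sizeof(a));
  a.type = GRPC_ARG_INTEGER;
  a.key = (char *)key; /* grpc_arg.key stays non-const for compatibility */
  a.value.integer = value;
  return a;
}
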

@ -281,7 +281,7 @@ grpc_channel_stack *grpc_channel_stack_from_top_element(
/* Given the top element of a call stack, get the call stack itself */
grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem);
void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
void grpc_call_log_op(const char *file, int line, gpr_log_severity severity,
grpc_call_element *elem,
grpc_transport_stream_op_batch *op);

@ -60,7 +60,7 @@ int grpc_stream_compression_algorithm_parse(
}
int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
char **name) {
const char **name) {
GRPC_API_TRACE("grpc_compression_algorithm_parse(algorithm=%d, name=%p)", 2,
((int)algorithm, name));
switch (algorithm) {
@ -80,7 +80,7 @@ int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
}
int grpc_stream_compression_algorithm_name(
grpc_stream_compression_algorithm algorithm, char **name) {
grpc_stream_compression_algorithm algorithm, const char **name) {
GRPC_API_TRACE(
"grpc_stream_compression_algorithm_parse(algorithm=%d, name=%p)", 2,
((int)algorithm, name));
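
Callers of the two name functions now bind the result through a pointer to const; the returned strings are compile-time literals, so this is a const-correctness fix with no behavior change. Usage sketch:

#include <stdio.h>
#include <grpc/compression.h>

void print_algorithm_name(grpc_compression_algorithm alg) {
  const char *name = NULL;
  if (grpc_compression_algorithm_name(alg, &name)) {
    printf("compression algorithm: %s\n", name);
  }
}
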

@ -31,6 +31,12 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
"server_channels_created",
"syscall_poll",
"syscall_wait",
"pollset_kick",
"pollset_kicked_without_poller",
"pollset_kicked_again",
"pollset_kick_wakeup_fd",
"pollset_kick_wakeup_cv",
"pollset_kick_own_thread",
"histogram_slow_lookups",
"syscall_write",
"syscall_read",
@ -93,6 +99,18 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of client subchannels created", "Number of server channels created",
"Number of polling syscalls (epoll_wait, poll, etc) made by this process",
"Number of sleeping syscalls made by this process",
"How many polling wakeups were performed by the process (only valid for "
"epoll1 right now)",
"How many times was a polling wakeup requested without an active poller "
"(only valid for epoll1 right now)",
"How many times was the same polling worker awoken repeatedly before "
"waking up (only valid for epoll1 right now)",
"How many times was an eventfd used as the wakeup vector for a polling "
"wakeup (only valid for epoll1 right now)",
"How many times was a condition variable used as the wakeup vector for a "
"polling wakeup (only valid for epoll1 right now)",
"How many times could a polling wakeup be satisfied by keeping the waking "
"thread awake? (only valid for epoll1 right now)",
"Number of times histogram increments went through the slow (binary "
"search) path",
"Number of write syscalls (or equivalent - eg sendmsg) made by this "
@ -160,6 +178,8 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"outstanding requests)",
};
const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = {
"call_initial_size",
"poll_events_returned",
"tcp_write_size",
"tcp_write_iov_size",
"tcp_read_size",
@ -174,6 +194,8 @@ const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = {
"server_cqs_checked",
};
const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
"Initial size of the grpc_call arena created at call start",
"How many events are called for each syscall_poll",
"Number of bytes offered to each syscall_write",
"Number of byte segments offered to each syscall_write",
"Number of bytes received by each syscall_read",
@ -189,6 +211,42 @@ const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
"requested the incoming call",
};
const int grpc_stats_table_0[65] = {
0, 1, 2, 3, 4, 5, 7, 9, 11, 14,
17, 21, 26, 32, 39, 47, 57, 68, 82, 98,
117, 140, 167, 199, 238, 284, 339, 404, 482, 575,
685, 816, 972, 1158, 1380, 1644, 1959, 2334, 2780, 3312,
3945, 4699, 5597, 6667, 7941, 9459, 11267, 13420, 15984, 19038,
22676, 27009, 32169, 38315, 45635, 54353, 64737, 77104, 91834, 109378,
130273, 155159, 184799, 220100, 262144};
const uint8_t grpc_stats_table_1[124] = {
0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6,
7, 7, 7, 8, 9, 9, 10, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 22, 23, 24,
24, 25, 25, 26, 26, 26, 27, 27, 28, 29, 29, 30, 30, 30, 31, 31, 32, 33,
33, 34, 34, 34, 35, 35, 36, 37, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41,
42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 50, 50,
51, 51, 52, 52, 53, 53, 54, 54, 55, 55, 56, 56, 57, 57, 58, 58};
const int grpc_stats_table_2[129] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 30,
32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60,
63, 66, 69, 72, 75, 78, 81, 84, 87, 90, 94, 98, 102, 106, 110,
114, 118, 122, 126, 131, 136, 141, 146, 151, 156, 162, 168, 174, 180, 186,
192, 199, 206, 213, 220, 228, 236, 244, 252, 260, 269, 278, 287, 297, 307,
317, 327, 338, 349, 360, 372, 384, 396, 409, 422, 436, 450, 464, 479, 494,
510, 526, 543, 560, 578, 596, 615, 634, 654, 674, 695, 717, 739, 762, 785,
809, 834, 859, 885, 912, 939, 967, 996, 1024};
const uint8_t grpc_stats_table_3[166] = {
0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 16,
17, 17, 18, 19, 19, 20, 21, 21, 22, 23, 23, 24, 25, 25, 26, 26, 27, 27, 28,
28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 36, 36, 37, 38, 39,
40, 40, 41, 42, 42, 43, 44, 44, 45, 46, 46, 47, 48, 48, 49, 49, 50, 50, 51,
51, 52, 52, 53, 53, 54, 54, 55, 56, 57, 58, 59, 59, 60, 61, 62, 63, 63, 64,
65, 65, 66, 67, 67, 68, 69, 69, 70, 71, 71, 72, 72, 73, 73, 74, 75, 75, 76,
76, 77, 78, 79, 79, 80, 81, 82, 83, 84, 85, 85, 86, 87, 88, 88, 89, 90, 90,
91, 92, 92, 93, 94, 94, 95, 95, 96, 97, 97, 98, 98, 99};
const int grpc_stats_table_4[65] = {
0, 1, 2, 3, 4, 6, 8, 11,
15, 20, 26, 34, 44, 57, 73, 94,
121, 155, 199, 255, 327, 419, 537, 688,
@ -198,27 +256,78 @@ const int grpc_stats_table_0[65] = {
326126, 417200, 533707, 682750, 873414, 1117323, 1429345, 1828502,
2339127, 2992348, 3827987, 4896985, 6264509, 8013925, 10251880, 13114801,
16777216};
const uint8_t grpc_stats_table_1[87] = {
const uint8_t grpc_stats_table_5[87] = {
0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11,
11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23,
24, 25, 25, 26, 27, 27, 28, 29, 29, 30, 31, 31, 32, 33, 34, 34, 35, 36,
36, 37, 38, 39, 39, 40, 41, 41, 42, 43, 44, 44, 45, 45, 46, 47, 48, 48,
49, 50, 51, 51, 52, 53, 53, 54, 55, 56, 56, 57, 58, 58, 59};
const int grpc_stats_table_2[65] = {
const int grpc_stats_table_6[65] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
14, 16, 18, 20, 22, 24, 27, 30, 33, 36, 39, 43, 47,
51, 56, 61, 66, 72, 78, 85, 92, 100, 109, 118, 128, 139,
151, 164, 178, 193, 209, 226, 244, 264, 285, 308, 333, 359, 387,
418, 451, 486, 524, 565, 609, 656, 707, 762, 821, 884, 952, 1024};
const uint8_t grpc_stats_table_3[102] = {
const uint8_t grpc_stats_table_7[102] = {
0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14,
14, 15, 15, 16, 16, 17, 17, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23,
23, 24, 24, 24, 25, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32,
32, 33, 33, 34, 35, 35, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41,
42, 42, 43, 44, 44, 45, 46, 46, 47, 48, 48, 49, 49, 50, 50, 51, 51};
const int grpc_stats_table_4[9] = {0, 1, 2, 4, 7, 13, 23, 39, 64};
const uint8_t grpc_stats_table_5[9] = {0, 0, 1, 2, 2, 3, 4, 4, 5};
const int grpc_stats_table_8[9] = {0, 1, 2, 4, 7, 13, 23, 39, 64};
const uint8_t grpc_stats_table_9[9] = {0, 0, 1, 2, 2, 3, 4, 4, 5};
void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 262144);
if (value < 6) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
value);
return;
}
union {
double dbl;
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
if (_val.uint < 4651092515166879744ull) {
int bucket =
grpc_stats_table_1[((_val.uint - 4618441417868443648ull) >> 49)] + 6;
_bkt.dbl = grpc_stats_table_0[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
bucket);
return;
}
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_0, 64));
}
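
The generated fast path above leans on IEEE-754: a double's bit pattern is monotone in its value, so (bits - BIAS) >> SHIFT is a cheap log-scale index into a small byte table, corrected by one compare against the true bucket boundary. The slow path (binary search) only runs when the value exceeds the fast-path ceiling. A sketch of the trick with placeholder constants; the real BIAS/SHIFT/tables are generated from stats_data.yaml:

#include <stdint.h>

static int fast_log_bucket(int value, const uint8_t *idx_table,
                           const int *boundaries, uint64_t bias, int shift,
                           int first_fast_bucket) {
  union {
    double dbl;
    uint64_t bits;
  } v, b;
  v.dbl = value; /* exact: values are clamped well below 2^53 */
  int bucket = idx_table[(v.bits - bias) >> shift] + first_fast_bucket;
  b.dbl = boundaries[bucket];
  bucket -= (v.bits < b.bits); /* step back if the estimate overshot */
  return bucket;
}
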
void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 1024);
if (value < 29) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, value);
return;
}
union {
double dbl;
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
if (_val.uint < 4642789003353915392ull) {
int bucket =
grpc_stats_table_3[((_val.uint - 4628855992006737920ull) >> 47)] + 29;
_bkt.dbl = grpc_stats_table_2[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, bucket);
return;
}
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_2, 128));
}
void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 16777216);
if (value < 5) {
@ -233,8 +342,8 @@ void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
_val.dbl = value;
if (_val.uint < 4683743612465315840ull) {
int bucket =
grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
_bkt.dbl = grpc_stats_table_0[bucket];
grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
_bkt.dbl = grpc_stats_table_4[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
bucket);
@ -242,7 +351,7 @@ void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
}
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_0, 64));
(exec_ctx), value, grpc_stats_table_4, 64));
}
void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 1024);
@ -258,8 +367,8 @@ void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) {
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_2[bucket];
grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_6[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, bucket);
@ -267,7 +376,7 @@ void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) {
}
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_2, 64));
(exec_ctx), value, grpc_stats_table_6, 64));
}
void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 16777216);
@ -283,8 +392,8 @@ void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) {
_val.dbl = value;
if (_val.uint < 4683743612465315840ull) {
int bucket =
grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
_bkt.dbl = grpc_stats_table_0[bucket];
grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
_bkt.dbl = grpc_stats_table_4[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
bucket);
@ -292,7 +401,7 @@ void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) {
}
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_0, 64));
(exec_ctx), value, grpc_stats_table_4, 64));
}
void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 16777216);
@ -308,8 +417,8 @@ void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) {
_val.dbl = value;
if (_val.uint < 4683743612465315840ull) {
int bucket =
grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
_bkt.dbl = grpc_stats_table_0[bucket];
grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
_bkt.dbl = grpc_stats_table_4[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
bucket);
@ -317,7 +426,7 @@ void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) {
}
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_0, 64));
(exec_ctx), value, grpc_stats_table_4, 64));
}
void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx,
int value) {
@ -334,8 +443,8 @@ void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx,
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_2[bucket];
grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_6[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE, bucket);
@ -344,7 +453,7 @@ void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx,
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_2, 64));
(exec_ctx), value, grpc_stats_table_6, 64));
}
void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx,
int value) {
@ -361,8 +470,8 @@ void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx,
_val.dbl = value;
if (_val.uint < 4683743612465315840ull) {
int bucket =
grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
_bkt.dbl = grpc_stats_table_0[bucket];
grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
_bkt.dbl = grpc_stats_table_4[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, bucket);
@ -371,7 +480,7 @@ void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx,
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_0, 64));
(exec_ctx), value, grpc_stats_table_4, 64));
}
void grpc_stats_inc_http2_send_initial_metadata_per_write(
grpc_exec_ctx *exec_ctx, int value) {
@ -389,8 +498,8 @@ void grpc_stats_inc_http2_send_initial_metadata_per_write(
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_2[bucket];
grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_6[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
@ -399,7 +508,7 @@ void grpc_stats_inc_http2_send_initial_metadata_per_write(
}
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_2,
grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_6,
64));
}
void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
@ -417,8 +526,8 @@ void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_2[bucket];
grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_6[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, bucket);
@ -427,7 +536,7 @@ void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_2, 64));
(exec_ctx), value, grpc_stats_table_6, 64));
}
void grpc_stats_inc_http2_send_trailing_metadata_per_write(
grpc_exec_ctx *exec_ctx, int value) {
@ -445,8 +554,8 @@ void grpc_stats_inc_http2_send_trailing_metadata_per_write(
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_2[bucket];
grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_6[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
@ -455,7 +564,7 @@ void grpc_stats_inc_http2_send_trailing_metadata_per_write(
}
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_2,
grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_6,
64));
}
void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
@ -473,8 +582,8 @@ void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_2[bucket];
grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_6[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, bucket);
@ -483,7 +592,7 @@ void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_2, 64));
(exec_ctx), value, grpc_stats_table_6, 64));
}
void grpc_stats_inc_executor_closures_per_wakeup(grpc_exec_ctx *exec_ctx,
int value) {
@ -500,8 +609,8 @@ void grpc_stats_inc_executor_closures_per_wakeup(grpc_exec_ctx *exec_ctx,
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_2[bucket];
grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_6[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP, bucket);
@ -510,7 +619,7 @@ void grpc_stats_inc_executor_closures_per_wakeup(grpc_exec_ctx *exec_ctx,
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_2, 64));
(exec_ctx), value, grpc_stats_table_6, 64));
}
void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 64);
@ -526,8 +635,8 @@ void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) {
_val.dbl = value;
if (_val.uint < 4625196817309499392ull) {
int bucket =
grpc_stats_table_5[((_val.uint - 4613937818241073152ull) >> 51)] + 3;
_bkt.dbl = grpc_stats_table_4[bucket];
grpc_stats_table_9[((_val.uint - 4613937818241073152ull) >> 51)] + 3;
_bkt.dbl = grpc_stats_table_8[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, bucket);
@ -535,18 +644,21 @@ void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) {
}
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_4, 8));
(exec_ctx), value, grpc_stats_table_8, 8));
}
const int grpc_stats_histo_buckets[12] = {64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 8};
const int grpc_stats_histo_start[12] = {0, 64, 128, 192, 256, 320,
384, 448, 512, 576, 640, 704};
const int *const grpc_stats_histo_bucket_boundaries[12] = {
grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0,
grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0,
grpc_stats_table_2, grpc_stats_table_2, grpc_stats_table_2,
grpc_stats_table_2, grpc_stats_table_2, grpc_stats_table_4};
void (*const grpc_stats_inc_histogram[12])(grpc_exec_ctx *exec_ctx, int x) = {
const int grpc_stats_histo_buckets[14] = {64, 128, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 8};
const int grpc_stats_histo_start[14] = {0, 64, 192, 256, 320, 384, 448,
512, 576, 640, 704, 768, 832, 896};
const int *const grpc_stats_histo_bucket_boundaries[14] = {
grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_4,
grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_4,
grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_6,
grpc_stats_table_6, grpc_stats_table_6, grpc_stats_table_6,
grpc_stats_table_6, grpc_stats_table_8};
void (*const grpc_stats_inc_histogram[14])(grpc_exec_ctx *exec_ctx, int x) = {
grpc_stats_inc_call_initial_size,
grpc_stats_inc_poll_events_returned,
grpc_stats_inc_tcp_write_size,
grpc_stats_inc_tcp_write_iov_size,
grpc_stats_inc_tcp_read_size,

@ -33,6 +33,12 @@ typedef enum {
GRPC_STATS_COUNTER_SERVER_CHANNELS_CREATED,
GRPC_STATS_COUNTER_SYSCALL_POLL,
GRPC_STATS_COUNTER_SYSCALL_WAIT,
GRPC_STATS_COUNTER_POLLSET_KICK,
GRPC_STATS_COUNTER_POLLSET_KICKED_WITHOUT_POLLER,
GRPC_STATS_COUNTER_POLLSET_KICKED_AGAIN,
GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_FD,
GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_CV,
GRPC_STATS_COUNTER_POLLSET_KICK_OWN_THREAD,
GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS,
GRPC_STATS_COUNTER_SYSCALL_WRITE,
GRPC_STATS_COUNTER_SYSCALL_READ,
@ -92,6 +98,8 @@ typedef enum {
extern const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT];
extern const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT];
typedef enum {
GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED,
GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE,
GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
@ -109,31 +117,35 @@ typedef enum {
extern const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT];
extern const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT];
typedef enum {
GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE_FIRST_SLOT = 0,
GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE_FIRST_SLOT = 0,
GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED_FIRST_SLOT = 64,
GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED_BUCKETS = 128,
GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE_FIRST_SLOT = 192,
GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE_FIRST_SLOT = 64,
GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE_FIRST_SLOT = 256,
GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_FIRST_SLOT = 128,
GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_FIRST_SLOT = 320,
GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_FIRST_SLOT = 192,
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_FIRST_SLOT = 384,
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE_FIRST_SLOT = 256,
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE_FIRST_SLOT = 448,
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_FIRST_SLOT = 320,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_FIRST_SLOT = 512,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE_FIRST_SLOT = 384,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE_FIRST_SLOT = 576,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE_FIRST_SLOT = 448,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE_FIRST_SLOT = 640,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_FIRST_SLOT = 512,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_FIRST_SLOT = 704,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_FIRST_SLOT = 576,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_FIRST_SLOT = 768,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP_FIRST_SLOT = 640,
GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP_FIRST_SLOT = 832,
GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_FIRST_SLOT = 704,
GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_FIRST_SLOT = 896,
GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_BUCKETS = 8,
GRPC_STATS_HISTOGRAM_BUCKETS = 712
GRPC_STATS_HISTOGRAM_BUCKETS = 904
} grpc_stats_histogram_constants;
#define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED)
@ -152,6 +164,19 @@ typedef enum {
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_POLL)
#define GRPC_STATS_INC_SYSCALL_WAIT(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WAIT)
#define GRPC_STATS_INC_POLLSET_KICK(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK)
#define GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_POLLSET_KICKED_WITHOUT_POLLER)
#define GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICKED_AGAIN)
#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_FD)
#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_CV)
#define GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK_OWN_THREAD)
#define GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS)
#define GRPC_STATS_INC_SYSCALL_WRITE(exec_ctx) \
@ -322,6 +347,12 @@ typedef enum {
#define GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED)
#define GRPC_STATS_INC_CALL_INITIAL_SIZE(exec_ctx, value) \
grpc_stats_inc_call_initial_size((exec_ctx), (int)(value))
void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int x);
#define GRPC_STATS_INC_POLL_EVENTS_RETURNED(exec_ctx, value) \
grpc_stats_inc_poll_events_returned((exec_ctx), (int)(value))
void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int x);
#define GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, value) \
grpc_stats_inc_tcp_write_size((exec_ctx), (int)(value))
void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int x);
@ -364,10 +395,10 @@ void grpc_stats_inc_executor_closures_per_wakeup(grpc_exec_ctx *exec_ctx,
#define GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, value) \
grpc_stats_inc_server_cqs_checked((exec_ctx), (int)(value))
void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int x);
extern const int grpc_stats_histo_buckets[12];
extern const int grpc_stats_histo_start[12];
extern const int *const grpc_stats_histo_bucket_boundaries[12];
extern void (*const grpc_stats_inc_histogram[12])(grpc_exec_ctx *exec_ctx,
extern const int grpc_stats_histo_buckets[14];
extern const int grpc_stats_histo_start[14];
extern const int *const grpc_stats_histo_bucket_boundaries[14];
extern void (*const grpc_stats_inc_histogram[14])(grpc_exec_ctx *exec_ctx,
int x);
#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */
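
A usage sketch for the parallel tables above: each histogram occupies a contiguous run of the flat 904-slot space, located by grpc_stats_histo_start and sized by grpc_stats_histo_buckets. The `snapshot` array here is a hypothetical aggregated copy of those slots, not a gRPC API; assumes "src/core/lib/debug/stats_data.h" is included:

#include <stdio.h>

void dump_histogram(const long *snapshot, int h) {
  int first = grpc_stats_histo_start[h];
  int buckets = grpc_stats_histo_buckets[h];
  const int *bounds = grpc_stats_histo_bucket_boundaries[h];
  for (int i = 0; i < buckets; i++) {
    printf("%s[>=%d]: %ld\n", grpc_stats_histogram_name[h], bounds[i],
           snapshot[first + i]);
  }
}
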

@ -20,6 +20,10 @@
doc: Number of client side calls created by this process
- counter: server_calls_created
doc: Number of server side calls created by this process
- histogram: call_initial_size
max: 262144
buckets: 64
doc: Initial size of the grpc_call arena created at call start
- counter: cqs_created
doc: Number of completion queues created
- counter: client_channels_created
@ -33,6 +37,32 @@
doc: Number of polling syscalls (epoll_wait, poll, etc) made by this process
- counter: syscall_wait
doc: Number of sleeping syscalls made by this process
- histogram: poll_events_returned
max: 1024
buckets: 128
doc: How many events are called for each syscall_poll
- counter: pollset_kick
doc: How many polling wakeups were performed by the process
(only valid for epoll1 right now)
- counter: pollset_kicked_without_poller
doc: How many times was a polling wakeup requested without an active poller
(only valid for epoll1 right now)
- counter: pollset_kicked_again
doc: How many times was the same polling worker awoken repeatedly before
waking up
(only valid for epoll1 right now)
- counter: pollset_kick_wakeup_fd
doc: How many times was an eventfd used as the wakeup vector for a polling
wakeup
(only valid for epoll1 right now)
- counter: pollset_kick_wakeup_cv
doc: How many times was a condition variable used as the wakeup vector for a
polling wakeup
(only valid for epoll1 right now)
- counter: pollset_kick_own_thread
doc: How many times could a polling wakeup be satisfied by keeping the waking
thread awake?
(only valid for epoll1 right now)
# stats system
- counter: histogram_slow_lookups
doc: Number of times histogram increments went through the slow

@ -6,6 +6,12 @@ client_subchannels_created_per_iteration:FLOAT,
server_channels_created_per_iteration:FLOAT,
syscall_poll_per_iteration:FLOAT,
syscall_wait_per_iteration:FLOAT,
pollset_kick_per_iteration:FLOAT,
pollset_kicked_without_poller_per_iteration:FLOAT,
pollset_kicked_again_per_iteration:FLOAT,
pollset_kick_wakeup_fd_per_iteration:FLOAT,
pollset_kick_wakeup_cv_per_iteration:FLOAT,
pollset_kick_own_thread_per_iteration:FLOAT,
histogram_slow_lookups_per_iteration:FLOAT,
syscall_write_per_iteration:FLOAT,
syscall_read_per_iteration:FLOAT,

@ -35,7 +35,7 @@ typedef struct {
#else
bool value;
#endif
char *name;
const char *name;
} grpc_tracer_flag;
#ifdef GRPC_THREADSAFE_TRACER

@ -43,7 +43,8 @@ static void httpcli_ssl_destroy(grpc_exec_ctx *exec_ctx,
grpc_httpcli_ssl_channel_security_connector *c =
(grpc_httpcli_ssl_channel_security_connector *)sc;
if (c->handshaker_factory != NULL) {
tsi_ssl_client_handshaker_factory_destroy(c->handshaker_factory);
tsi_ssl_client_handshaker_factory_unref(c->handshaker_factory);
c->handshaker_factory = NULL;
}
if (c->secure_peer_name != NULL) gpr_free(c->secure_peer_name);
gpr_free(sc);

@ -167,7 +167,14 @@ void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c,
GPR_TIMER_BEGIN("grpc_closure_sched", 0);
if (c != NULL) {
#ifndef NDEBUG
GPR_ASSERT(!c->scheduled);
if (c->scheduled) {
gpr_log(GPR_ERROR,
"Closure already scheduled. (closure: %p, created: [%s:%d], "
"previously scheduled at: [%s: %d] run?: %s",
c, c->file_created, c->line_created, c->file_initiated,
c->line_initiated, c->run ? "true" : "false");
abort();
}
c->scheduled = true;
c->file_initiated = file;
c->line_initiated = line;
@ -191,7 +198,14 @@ void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
while (c != NULL) {
grpc_closure *next = c->next_data.next;
#ifndef NDEBUG
GPR_ASSERT(!c->scheduled);
if (c->scheduled) {
gpr_log(GPR_ERROR,
"Closure already scheduled. (closure: %p, created: [%s:%d], "
"previously scheduled at: [%s: %d] run?: %s",
c, c->file_created, c->line_created, c->file_initiated,
c->line_initiated, c->run ? "true" : "false");
abort();
}
c->scheduled = true;
c->file_initiated = file;
c->line_initiated = line;
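
In debug builds the bare GPR_ASSERT is replaced with a report of where the closure was created and where it was last scheduled before aborting, turning a silent assert into an actionable double-scheduling diagnostic. The pattern, reduced to a standalone sketch with illustrative names:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  bool scheduled;
  const char *file_created;   int line_created;
  const char *file_initiated; int line_initiated;
} dbg_closure; /* stand-in for grpc_closure's #ifndef NDEBUG fields */

#define DBG_SCHED(c)                                                  \
  do {                                                                \
    if ((c)->scheduled) {                                             \
      fprintf(stderr, "double schedule: created %s:%d, last %s:%d\n", \
              (c)->file_created, (c)->line_created,                   \
              (c)->file_initiated, (c)->line_initiated);              \
      abort();                                                        \
    }                                                                 \
    (c)->scheduled = true;                                            \
    (c)->file_initiated = __FILE__;                                   \
    (c)->line_initiated = __LINE__;                                   \
  } while (0)
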

@ -641,7 +641,7 @@ static char *key_time(grpc_error_times which) {
static char *fmt_time(gpr_timespec tm) {
char *out;
char *pfx = "!!";
const char *pfx = "!!";
switch (tm.clock_type) {
case GPR_CLOCK_MONOTONIC:
pfx = "@monotonic:";

@ -130,9 +130,9 @@ static void fd_global_shutdown(void);
* Pollset Declarations
*/
typedef enum { UNKICKED, KICKED, DESIGNATED_POLLER } kick_state_t;
typedef enum { UNKICKED, KICKED, DESIGNATED_POLLER } kick_state;
static const char *kick_state_string(kick_state_t st) {
static const char *kick_state_string(kick_state st) {
switch (st) {
case UNKICKED:
return "UNKICKED";
@ -145,7 +145,7 @@ static const char *kick_state_string(kick_state_t st) {
}
struct grpc_pollset_worker {
kick_state_t kick_state;
kick_state state;
int kick_state_mutator; // which line of code last changed kick state
bool initialized_cv;
grpc_pollset_worker *next;
@ -154,9 +154,9 @@ struct grpc_pollset_worker {
grpc_closure_list schedule_on_end_work;
};
#define SET_KICK_STATE(worker, state) \
#define SET_KICK_STATE(worker, kick_state) \
do { \
(worker)->kick_state = (state); \
(worker)->state = (kick_state); \
(worker)->kick_state_mutator = __LINE__; \
} while (false)
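
The paired renames above (type kick_state_t to kick_state, member kick_state to state, macro parameter state to kick_state) keep the macro hygienic: the preprocessor substitutes parameters everywhere in the body, including after ->, so the member and the parameter must not share a name. Illustrative:

/* Had both been named `state`: */
#define BROKEN_SET(worker, state) ((worker)->state = (state))
/* BROKEN_SET(w, KICKED) expands to ((w)->KICKED = (KICKED)),
   destroying the member access. */
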
@ -502,22 +502,27 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
gpr_mu_destroy(&pollset->mu);
}
static grpc_error *pollset_kick_all(grpc_pollset *pollset) {
static grpc_error *pollset_kick_all(grpc_exec_ctx *exec_ctx,
grpc_pollset *pollset) {
GPR_TIMER_BEGIN("pollset_kick_all", 0);
grpc_error *error = GRPC_ERROR_NONE;
if (pollset->root_worker != NULL) {
grpc_pollset_worker *worker = pollset->root_worker;
do {
switch (worker->kick_state) {
GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
switch (worker->state) {
case KICKED:
GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
break;
case UNKICKED:
SET_KICK_STATE(worker, KICKED);
if (worker->initialized_cv) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
gpr_cv_signal(&worker->cv);
}
break;
case DESIGNATED_POLLER:
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
SET_KICK_STATE(worker, KICKED);
append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
"pollset_kick_all");
@ -550,7 +555,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
GPR_ASSERT(!pollset->shutting_down);
pollset->shutdown_closure = closure;
pollset->shutting_down = true;
GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(exec_ctx, pollset));
pollset_maybe_finish_shutdown(exec_ctx, pollset);
GPR_TIMER_END("pollset_shutdown", 0);
}
@ -646,6 +651,8 @@ static grpc_error *do_epoll_wait(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
GRPC_STATS_INC_POLL_EVENTS_RETURNED(exec_ctx, r);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "ps: %p poll got %d events", ps, r);
}
@ -688,7 +695,7 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
gpr_mu_lock(&pollset->mu);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
pollset, worker, kick_state_string(worker->kick_state),
pollset, worker, kick_state_string(worker->state),
is_reassigning);
}
if (pollset->seen_inactive) {
@ -708,12 +715,12 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
at this point is if it were "kicked specifically". Since the worker has
not added itself to the pollset yet (by calling worker_insert()), it is
not visible in the "kick any" path yet */
if (worker->kick_state == UNKICKED) {
if (worker->state == UNKICKED) {
pollset->seen_inactive = false;
if (neighborhood->active_root == NULL) {
neighborhood->active_root = pollset->next = pollset->prev = pollset;
/* Make this the designated poller if there isn't one already */
if (worker->kick_state == UNKICKED &&
if (worker->state == UNKICKED &&
gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
SET_KICK_STATE(worker, DESIGNATED_POLLER);
}
@ -733,19 +740,19 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
worker_insert(pollset, worker);
pollset->begin_refs--;
if (worker->kick_state == UNKICKED && !pollset->kicked_without_poller) {
if (worker->state == UNKICKED && !pollset->kicked_without_poller) {
GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
worker->initialized_cv = true;
gpr_cv_init(&worker->cv);
while (worker->kick_state == UNKICKED && !pollset->shutting_down) {
while (worker->state == UNKICKED && !pollset->shutting_down) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d",
pollset, worker, kick_state_string(worker->kick_state),
pollset, worker, kick_state_string(worker->state),
pollset->shutting_down);
}
if (gpr_cv_wait(&worker->cv, &pollset->mu, deadline) &&
worker->kick_state == UNKICKED) {
worker->state == UNKICKED) {
/* If gpr_cv_wait returns true (i.e., a timeout), pretend that the worker
received a kick */
SET_KICK_STATE(worker, KICKED);
@ -758,7 +765,7 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
gpr_log(GPR_ERROR,
"PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d "
"kicked_without_poller: %d",
pollset, worker, kick_state_string(worker->kick_state),
pollset, worker, kick_state_string(worker->state),
pollset->shutting_down, pollset->kicked_without_poller);
}
@ -778,11 +785,11 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
}
GPR_TIMER_END("begin_worker", 0);
return worker->kick_state == DESIGNATED_POLLER && !pollset->shutting_down;
return worker->state == DESIGNATED_POLLER && !pollset->shutting_down;
}
static bool check_neighborhood_for_available_poller(
pollset_neighborhood *neighborhood) {
grpc_exec_ctx *exec_ctx, pollset_neighborhood *neighborhood) {
GPR_TIMER_BEGIN("check_neighborhood_for_available_poller", 0);
bool found_worker = false;
do {
@ -795,7 +802,7 @@ static bool check_neighborhood_for_available_poller(
grpc_pollset_worker *inspect_worker = inspect->root_worker;
if (inspect_worker != NULL) {
do {
switch (inspect_worker->kick_state) {
switch (inspect_worker->state) {
case UNKICKED:
if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
(gpr_atm)inspect_worker)) {
@ -806,6 +813,7 @@ static bool check_neighborhood_for_available_poller(
SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
if (inspect_worker->initialized_cv) {
GPR_TIMER_MARK("signal worker", 0);
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
gpr_cv_signal(&inspect_worker->cv);
}
} else {
@ -858,13 +866,14 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure_list_move(&worker->schedule_on_end_work,
&exec_ctx->closure_list);
if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
if (worker->next != worker && worker->next->kick_state == UNKICKED) {
if (worker->next != worker && worker->next->state == UNKICKED) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, " .. choose next poller to be peer %p", worker);
}
GPR_ASSERT(worker->next->initialized_cv);
gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
SET_KICK_STATE(worker->next, DESIGNATED_POLLER);
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
gpr_cv_signal(&worker->next->cv);
if (grpc_exec_ctx_has_work(exec_ctx)) {
gpr_mu_unlock(&pollset->mu);
@ -883,7 +892,8 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
&g_neighborhoods[(poller_neighborhood_idx + i) %
g_num_neighborhoods];
if (gpr_mu_trylock(&neighborhood->mu)) {
found_worker = check_neighborhood_for_available_poller(neighborhood);
found_worker =
check_neighborhood_for_available_poller(exec_ctx, neighborhood);
gpr_mu_unlock(&neighborhood->mu);
scan_state[i] = true;
} else {
@ -896,7 +906,8 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
&g_neighborhoods[(poller_neighborhood_idx + i) %
g_num_neighborhoods];
gpr_mu_lock(&neighborhood->mu);
found_worker = check_neighborhood_for_available_poller(neighborhood);
found_worker =
check_neighborhood_for_available_poller(exec_ctx, neighborhood);
gpr_mu_unlock(&neighborhood->mu);
}
grpc_exec_ctx_flush(exec_ctx);
@ -978,9 +989,10 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
return error;
}
static grpc_error *pollset_kick(grpc_pollset *pollset,
static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) {
GPR_TIMER_BEGIN("pollset_kick", 0);
GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
grpc_error *ret_err = GRPC_ERROR_NONE;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_strvec log;
@ -993,14 +1005,14 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
gpr_strvec_add(&log, tmp);
if (pollset->root_worker != NULL) {
gpr_asprintf(&tmp, " {kick_state=%s next=%p {kick_state=%s}}",
kick_state_string(pollset->root_worker->kick_state),
kick_state_string(pollset->root_worker->state),
pollset->root_worker->next,
kick_state_string(pollset->root_worker->next->kick_state));
kick_state_string(pollset->root_worker->next->state));
gpr_strvec_add(&log, tmp);
}
if (specific_worker != NULL) {
gpr_asprintf(&tmp, " worker_kick_state=%s",
kick_state_string(specific_worker->kick_state));
kick_state_string(specific_worker->state));
gpr_strvec_add(&log, tmp);
}
tmp = gpr_strvec_flatten(&log, NULL);
@ -1013,6 +1025,7 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
grpc_pollset_worker *root_worker = pollset->root_worker;
if (root_worker == NULL) {
GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx);
pollset->kicked_without_poller = true;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kicked_without_poller");
@ -1020,13 +1033,15 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
goto done;
}
grpc_pollset_worker *next_worker = root_worker->next;
if (root_worker->kick_state == KICKED) {
if (root_worker->state == KICKED) {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. already kicked %p", root_worker);
}
SET_KICK_STATE(root_worker, KICKED);
goto done;
} else if (next_worker->kick_state == KICKED) {
} else if (next_worker->state == KICKED) {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. already kicked %p", next_worker);
}
@ -1037,13 +1052,15 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
// there is no next worker
root_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load(
&g_active_poller)) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kicked %p", root_worker);
}
SET_KICK_STATE(root_worker, KICKED);
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
goto done;
} else if (next_worker->kick_state == UNKICKED) {
} else if (next_worker->state == UNKICKED) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kicked %p", next_worker);
}
@ -1051,8 +1068,8 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
SET_KICK_STATE(next_worker, KICKED);
gpr_cv_signal(&next_worker->cv);
goto done;
} else if (next_worker->kick_state == DESIGNATED_POLLER) {
if (root_worker->kick_state != DESIGNATED_POLLER) {
} else if (next_worker->state == DESIGNATED_POLLER) {
if (root_worker->state != DESIGNATED_POLLER) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(
GPR_ERROR,
@ -1061,10 +1078,12 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
}
SET_KICK_STATE(root_worker, KICKED);
if (root_worker->initialized_cv) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
gpr_cv_signal(&root_worker->cv);
}
goto done;
} else {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. non-root poller %p (root=%p)", next_worker,
root_worker);
@ -1074,11 +1093,13 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
goto done;
}
} else {
GPR_ASSERT(next_worker->kick_state == KICKED);
GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
GPR_ASSERT(next_worker->state == KICKED);
SET_KICK_STATE(next_worker, KICKED);
goto done;
}
} else {
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kicked while waking up");
}
@ -1088,13 +1109,14 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
GPR_UNREACHABLE_CODE(goto done);
}
if (specific_worker->kick_state == KICKED) {
if (specific_worker->state == KICKED) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. specific worker already kicked");
}
goto done;
} else if (gpr_tls_get(&g_current_thread_worker) ==
(intptr_t)specific_worker) {
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. mark %p kicked", specific_worker);
}
@ -1102,6 +1124,7 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
goto done;
} else if (specific_worker ==
(grpc_pollset_worker *)gpr_atm_no_barrier_load(&g_active_poller)) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kick active poller");
}
@ -1109,6 +1132,7 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
goto done;
} else if (specific_worker->initialized_cv) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kick waiting worker");
}
@ -1116,6 +1140,7 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
gpr_cv_signal(&specific_worker->cv);
goto done;
} else {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kick non-waiting worker");
}
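
A summary of where the new counters fire in the kick paths above (mapping read off the hunks, illustrative rather than exhaustive):

/* POLLSET_KICK                  every pollset_kick / pollset_kick_all entry
   POLLSET_KICKED_WITHOUT_POLLER kick with no worker present; flag the pollset
   POLLSET_KICKED_AGAIN          target worker already in state KICKED
   POLLSET_KICK_WAKEUP_FD        designated poller woken via the wakeup fd
   POLLSET_KICK_WAKEUP_CV        waiting worker woken via condition variable
   POLLSET_KICK_OWN_THREAD       the kicker was the target thread itself */
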

@ -97,12 +97,12 @@ static void pg_join(grpc_exec_ctx *exec_ctx, polling_group *pg,
* pollable Declarations
*/
typedef struct pollable_t {
typedef struct pollable {
polling_obj po;
int epfd;
grpc_wakeup_fd wakeup;
grpc_pollset_worker *root_worker;
} pollable_t;
} pollable;
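
The epollex renames (pollable_t to pollable, members and locals named pollable to pollable_obj) read as two cleanups: the _t suffix is reserved by POSIX for type names, and once the typedef is spelled pollable, the old declarations would become `pollable pollable;`, which is legal C (members live in their own namespace) but confusing and hostile to C++ name lookup. Illustrative:

typedef struct pollable { int epfd; } pollable;
struct fd_before { pollable pollable; };     /* compiles in C, reads badly */
struct fd_after  { pollable pollable_obj; }; /* what the hunks switch to */
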
static const char *polling_obj_type_string(polling_obj_type t) {
switch (t) {
@ -122,7 +122,7 @@ static const char *polling_obj_type_string(polling_obj_type t) {
return "<invalid>";
}
static char *pollable_desc(pollable_t *p) {
static char *pollable_desc(pollable *p) {
char *out;
gpr_asprintf(&out, "type=%s group=%p epfd=%d wakeup=%d",
polling_obj_type_string(p->po.type), p->po.group, p->epfd,
@ -130,19 +130,19 @@ static char *pollable_desc(pollable_t *p) {
return out;
}
static pollable_t g_empty_pollable;
static pollable g_empty_pollable;
static void pollable_init(pollable_t *p, polling_obj_type type);
static void pollable_destroy(pollable_t *p);
static void pollable_init(pollable *p, polling_obj_type type);
static void pollable_destroy(pollable *p);
/* ensure that p->epfd, p->wakeup are initialized; p->po.mu must be held */
static grpc_error *pollable_materialize(pollable_t *p);
static grpc_error *pollable_materialize(pollable *p);
/*******************************************************************************
* Fd Declarations
*/
struct grpc_fd {
pollable_t pollable;
pollable pollable_obj;
int fd;
/* refst format:
bit 0 : 1=Active / 0=Orphaned
@ -193,15 +193,15 @@ struct grpc_pollset_worker {
pollset_worker_link links[POLLSET_WORKER_LINK_COUNT];
gpr_cv cv;
grpc_pollset *pollset;
pollable_t *pollable;
pollable *pollable_obj;
};
#define MAX_EPOLL_EVENTS 100
#define MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL 5
struct grpc_pollset {
pollable_t pollable;
pollable_t *current_pollable;
pollable pollable_obj;
pollable *current_pollable_obj;
int kick_alls_pending;
bool kicked_without_poller;
grpc_closure *shutdown_closure;
@ -282,7 +282,7 @@ static void fd_destroy(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
grpc_fd *fd = (grpc_fd *)arg;
/* Add the fd to the freelist */
grpc_iomgr_unregister_object(&fd->iomgr_object);
pollable_destroy(&fd->pollable);
pollable_destroy(&fd->pollable_obj);
gpr_mu_destroy(&fd->orphaned_mu);
gpr_mu_lock(&fd_freelist_mu);
fd->freelist_next = fd_freelist;
@ -343,7 +343,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd));
}
pollable_init(&new_fd->pollable, PO_FD);
pollable_init(&new_fd->pollable_obj, PO_FD);
gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
new_fd->fd = fd;
@ -385,7 +385,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
bool is_fd_closed = already_closed;
grpc_error *error = GRPC_ERROR_NONE;
gpr_mu_lock(&fd->pollable.po.mu);
gpr_mu_lock(&fd->pollable_obj.po.mu);
gpr_mu_lock(&fd->orphaned_mu);
fd->on_done_closure = on_done;
@ -411,7 +411,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
gpr_mu_unlock(&fd->orphaned_mu);
gpr_mu_unlock(&fd->pollable.po.mu);
gpr_mu_unlock(&fd->pollable_obj.po.mu);
UNREF_BY(exec_ctx, fd, 2, reason); /* Drop the reference */
GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error));
GRPC_ERROR_UNREF(error);
@ -451,13 +451,13 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
* Pollable Definitions
*/
static void pollable_init(pollable_t *p, polling_obj_type type) {
static void pollable_init(pollable *p, polling_obj_type type) {
po_init(&p->po, type);
p->root_worker = NULL;
p->epfd = -1;
}
static void pollable_destroy(pollable_t *p) {
static void pollable_destroy(pollable *p) {
po_destroy(&p->po);
if (p->epfd != -1) {
close(p->epfd);
@ -466,7 +466,7 @@ static void pollable_destroy(pollable_t *p) {
}
/* ensure that p->epfd, p->wakeup are initialized; p->po.mu must be held */
static grpc_error *pollable_materialize(pollable_t *p) {
static grpc_error *pollable_materialize(pollable *p) {
if (p->epfd == -1) {
int new_epfd = epoll_create1(EPOLL_CLOEXEC);
if (new_epfd < 0) {
@ -492,7 +492,7 @@ static grpc_error *pollable_materialize(pollable_t *p) {
}
/* pollable must be materialized */
static grpc_error *pollable_add_fd(pollable_t *p, grpc_fd *fd) {
static grpc_error *pollable_add_fd(pollable *p, grpc_fd *fd) {
grpc_error *error = GRPC_ERROR_NONE;
static const char *err_desc = "pollable_add_fd";
const int epfd = p->epfd;
@ -557,30 +557,34 @@ static void do_kick_all(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_unused) {
grpc_error *error = GRPC_ERROR_NONE;
grpc_pollset *pollset = (grpc_pollset *)arg;
gpr_mu_lock(&pollset->pollable.po.mu);
gpr_mu_lock(&pollset->pollable_obj.po.mu);
if (pollset->root_worker != NULL) {
grpc_pollset_worker *worker = pollset->root_worker;
do {
if (worker->pollable != &pollset->pollable) {
gpr_mu_lock(&worker->pollable->po.mu);
GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
if (worker->pollable_obj != &pollset->pollable_obj) {
gpr_mu_lock(&worker->pollable_obj->po.mu);
}
if (worker->initialized_cv && worker != pollset->root_worker) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p kickall_via_cv %p (pollable %p vs %p)",
pollset, worker, &pollset->pollable, worker->pollable);
pollset, worker, &pollset->pollable_obj,
worker->pollable_obj);
}
worker->kicked = true;
gpr_cv_signal(&worker->cv);
} else {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p kickall_via_wakeup %p (pollable %p vs %p)",
pollset, worker, &pollset->pollable, worker->pollable);
pollset, worker, &pollset->pollable_obj,
worker->pollable_obj);
}
append_error(&error, grpc_wakeup_fd_wakeup(&worker->pollable->wakeup),
append_error(&error,
grpc_wakeup_fd_wakeup(&worker->pollable_obj->wakeup),
"pollset_shutdown");
}
if (worker->pollable != &pollset->pollable) {
gpr_mu_unlock(&worker->pollable->po.mu);
if (worker->pollable_obj != &pollset->pollable_obj) {
gpr_mu_unlock(&worker->pollable_obj->po.mu);
}
worker = worker->links[PWL_POLLSET].next;
@ -588,7 +592,7 @@ static void do_kick_all(grpc_exec_ctx *exec_ctx, void *arg,
}
pollset->kick_alls_pending--;
pollset_maybe_finish_shutdown(exec_ctx, pollset);
gpr_mu_unlock(&pollset->pollable.po.mu);
gpr_mu_unlock(&pollset->pollable_obj.po.mu);
GRPC_LOG_IF_ERROR("kick_all", error);
}
@ -599,7 +603,7 @@ static void pollset_kick_all(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
GRPC_ERROR_NONE);
}
static grpc_error *pollset_kick_inner(grpc_pollset *pollset, pollable_t *p,
static grpc_error *pollset_kick_inner(grpc_pollset *pollset, pollable *p,
grpc_pollset_worker *specific_worker) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG,
@ -662,26 +666,27 @@ static grpc_error *pollset_kick_inner(grpc_pollset *pollset, pollable_t *p,
}
/* p->po.mu must be held before calling this function */
static grpc_error *pollset_kick(grpc_pollset *pollset,
static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) {
pollable_t *p = pollset->current_pollable;
if (p != &pollset->pollable) {
pollable *p = pollset->current_pollable_obj;
GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
if (p != &pollset->pollable_obj) {
gpr_mu_lock(&p->po.mu);
}
grpc_error *error = pollset_kick_inner(pollset, p, specific_worker);
if (p != &pollset->pollable) {
if (p != &pollset->pollable_obj) {
gpr_mu_unlock(&p->po.mu);
}
return error;
}
static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
pollable_init(&pollset->pollable, PO_POLLSET);
pollset->current_pollable = &g_empty_pollable;
pollable_init(&pollset->pollable_obj, PO_POLLSET);
pollset->current_pollable_obj = &g_empty_pollable;
pollset->kicked_without_poller = false;
pollset->shutdown_closure = NULL;
pollset->root_worker = NULL;
*mu = &pollset->pollable.po.mu;
*mu = &pollset->pollable_obj.po.mu;
}
/* Convert a timespec to milliseconds:
@ -729,8 +734,8 @@ static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
static grpc_error *fd_become_pollable_locked(grpc_fd *fd) {
grpc_error *error = GRPC_ERROR_NONE;
static const char *err_desc = "fd_become_pollable";
if (append_error(&error, pollable_materialize(&fd->pollable), err_desc)) {
append_error(&error, pollable_add_fd(&fd->pollable, fd), err_desc);
if (append_error(&error, pollable_materialize(&fd->pollable_obj), err_desc)) {
append_error(&error, pollable_add_fd(&fd->pollable_obj, fd), err_desc);
}
return error;
}
@ -744,8 +749,8 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollset_maybe_finish_shutdown(exec_ctx, pollset);
}
static bool pollset_is_pollable_fd(grpc_pollset *pollset, pollable_t *p) {
return p != &g_empty_pollable && p != &pollset->pollable;
static bool pollset_is_pollable_fd(grpc_pollset *pollset, pollable *p) {
return p != &g_empty_pollable && p != &pollset->pollable_obj;
}
static grpc_error *pollset_process_events(grpc_exec_ctx *exec_ctx,
@ -791,9 +796,9 @@ static grpc_error *pollset_process_events(grpc_exec_ctx *exec_ctx,
/* pollset_shutdown is guaranteed to be called before pollset_destroy. */
static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
pollable_destroy(&pollset->pollable);
if (pollset_is_pollable_fd(pollset, pollset->current_pollable)) {
UNREF_BY(exec_ctx, (grpc_fd *)pollset->current_pollable, 2,
pollable_destroy(&pollset->pollable_obj);
if (pollset_is_pollable_fd(pollset, pollset->current_pollable_obj)) {
UNREF_BY(exec_ctx, (grpc_fd *)pollset->current_pollable_obj, 2,
"pollset_pollable");
}
GRPC_LOG_IF_ERROR("pollset_process_events",
@ -801,7 +806,7 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
}
static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollable_t *p, gpr_timespec now,
pollable *p, gpr_timespec now,
gpr_timespec deadline) {
int timeout = poll_deadline_to_millis_timeout(deadline, now);
@ -883,68 +888,69 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
worker->initialized_cv = false;
worker->kicked = false;
worker->pollset = pollset;
worker->pollable = pollset->current_pollable;
worker->pollable_obj = pollset->current_pollable_obj;
if (pollset_is_pollable_fd(pollset, worker->pollable)) {
REF_BY((grpc_fd *)worker->pollable, 2, "one_poll");
if (pollset_is_pollable_fd(pollset, worker->pollable_obj)) {
REF_BY((grpc_fd *)worker->pollable_obj, 2, "one_poll");
}
worker_insert(&pollset->root_worker, PWL_POLLSET, worker);
if (!worker_insert(&worker->pollable->root_worker, PWL_POLLABLE, worker)) {
if (!worker_insert(&worker->pollable_obj->root_worker, PWL_POLLABLE,
worker)) {
worker->initialized_cv = true;
gpr_cv_init(&worker->cv);
if (worker->pollable != &pollset->pollable) {
gpr_mu_unlock(&pollset->pollable.po.mu);
if (worker->pollable_obj != &pollset->pollable_obj) {
gpr_mu_unlock(&pollset->pollable_obj.po.mu);
}
if (GRPC_TRACER_ON(grpc_polling_trace) &&
worker->pollable->root_worker != worker) {
worker->pollable_obj->root_worker != worker) {
gpr_log(GPR_DEBUG, "PS:%p wait %p w=%p for %dms", pollset,
worker->pollable, worker,
worker->pollable_obj, worker,
poll_deadline_to_millis_timeout(deadline, *now));
}
while (do_poll && worker->pollable->root_worker != worker) {
if (gpr_cv_wait(&worker->cv, &worker->pollable->po.mu, deadline)) {
while (do_poll && worker->pollable_obj->root_worker != worker) {
if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->po.mu, deadline)) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p timeout_wait %p w=%p", pollset,
worker->pollable, worker);
worker->pollable_obj, worker);
}
do_poll = false;
} else if (worker->kicked) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p wakeup %p w=%p", pollset, worker->pollable,
worker);
gpr_log(GPR_DEBUG, "PS:%p wakeup %p w=%p", pollset,
worker->pollable_obj, worker);
}
do_poll = false;
} else if (GRPC_TRACER_ON(grpc_polling_trace) &&
worker->pollable->root_worker != worker) {
worker->pollable_obj->root_worker != worker) {
gpr_log(GPR_DEBUG, "PS:%p spurious_wakeup %p w=%p", pollset,
worker->pollable, worker);
worker->pollable_obj, worker);
}
}
if (worker->pollable != &pollset->pollable) {
gpr_mu_unlock(&worker->pollable->po.mu);
gpr_mu_lock(&pollset->pollable.po.mu);
gpr_mu_lock(&worker->pollable->po.mu);
if (worker->pollable_obj != &pollset->pollable_obj) {
gpr_mu_unlock(&worker->pollable_obj->po.mu);
gpr_mu_lock(&pollset->pollable_obj.po.mu);
gpr_mu_lock(&worker->pollable_obj->po.mu);
}
*now = gpr_now(now->clock_type);
}
return do_poll && pollset->shutdown_closure == NULL &&
pollset->current_pollable == worker->pollable;
pollset->current_pollable_obj == worker->pollable_obj;
}
static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *worker,
grpc_pollset_worker **worker_hdl) {
if (NEW_ROOT ==
worker_remove(&worker->pollable->root_worker, PWL_POLLABLE, worker)) {
gpr_cv_signal(&worker->pollable->root_worker->cv);
worker_remove(&worker->pollable_obj->root_worker, PWL_POLLABLE, worker)) {
gpr_cv_signal(&worker->pollable_obj->root_worker->cv);
}
if (worker->initialized_cv) {
gpr_cv_destroy(&worker->cv);
}
if (pollset_is_pollable_fd(pollset, worker->pollable)) {
UNREF_BY(exec_ctx, (grpc_fd *)worker->pollable, 2, "one_poll");
if (pollset_is_pollable_fd(pollset, worker->pollable_obj)) {
UNREF_BY(exec_ctx, (grpc_fd *)worker->pollable_obj, 2, "one_poll");
}
if (EMPTIED == worker_remove(&pollset->root_worker, PWL_POLLSET, worker)) {
pollset_maybe_finish_shutdown(exec_ctx, pollset);
@ -972,41 +978,41 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollset->kicked_without_poller = false;
return GRPC_ERROR_NONE;
}
if (pollset->current_pollable != &pollset->pollable) {
gpr_mu_lock(&pollset->current_pollable->po.mu);
if (pollset->current_pollable_obj != &pollset->pollable_obj) {
gpr_mu_lock(&pollset->current_pollable_obj->po.mu);
}
if (begin_worker(pollset, &worker, worker_hdl, &now, deadline)) {
gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
GPR_ASSERT(!pollset->shutdown_closure);
append_error(&error, pollable_materialize(worker.pollable), err_desc);
if (worker.pollable != &pollset->pollable) {
gpr_mu_unlock(&worker.pollable->po.mu);
append_error(&error, pollable_materialize(worker.pollable_obj), err_desc);
if (worker.pollable_obj != &pollset->pollable_obj) {
gpr_mu_unlock(&worker.pollable_obj->po.mu);
}
gpr_mu_unlock(&pollset->pollable.po.mu);
gpr_mu_unlock(&pollset->pollable_obj.po.mu);
if (pollset->event_cursor == pollset->event_count) {
append_error(&error, pollset_epoll(exec_ctx, pollset, worker.pollable,
append_error(&error, pollset_epoll(exec_ctx, pollset, worker.pollable_obj,
now, deadline),
err_desc);
}
append_error(&error, pollset_process_events(exec_ctx, pollset, false),
err_desc);
gpr_mu_lock(&pollset->pollable.po.mu);
if (worker.pollable != &pollset->pollable) {
gpr_mu_lock(&worker.pollable->po.mu);
gpr_mu_lock(&pollset->pollable_obj.po.mu);
if (worker.pollable_obj != &pollset->pollable_obj) {
gpr_mu_lock(&worker.pollable_obj->po.mu);
}
gpr_tls_set(&g_current_thread_pollset, 0);
gpr_tls_set(&g_current_thread_worker, 0);
pollset_maybe_finish_shutdown(exec_ctx, pollset);
}
end_worker(exec_ctx, pollset, &worker, worker_hdl);
if (worker.pollable != &pollset->pollable) {
gpr_mu_unlock(&worker.pollable->po.mu);
if (worker.pollable_obj != &pollset->pollable_obj) {
gpr_mu_unlock(&worker.pollable_obj->po.mu);
}
if (grpc_exec_ctx_has_work(exec_ctx)) {
gpr_mu_unlock(&pollset->pollable.po.mu);
gpr_mu_unlock(&pollset->pollable_obj.po.mu);
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->pollable.po.mu);
gpr_mu_lock(&pollset->pollable_obj.po.mu);
}
return error;
}
@ -1023,27 +1029,27 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
bool fd_locked) {
static const char *err_desc = "pollset_add_fd";
grpc_error *error = GRPC_ERROR_NONE;
if (pollset->current_pollable == &g_empty_pollable) {
if (pollset->current_pollable_obj == &g_empty_pollable) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG,
"PS:%p add fd %p; transition pollable from empty to fd", pollset,
fd);
}
/* empty pollable --> single fd pollable_t */
/* empty pollable --> single fd pollable */
pollset_kick_all(exec_ctx, pollset);
pollset->current_pollable = &fd->pollable;
if (!fd_locked) gpr_mu_lock(&fd->pollable.po.mu);
pollset->current_pollable_obj = &fd->pollable_obj;
if (!fd_locked) gpr_mu_lock(&fd->pollable_obj.po.mu);
append_error(&error, fd_become_pollable_locked(fd), err_desc);
if (!fd_locked) gpr_mu_unlock(&fd->pollable.po.mu);
if (!fd_locked) gpr_mu_unlock(&fd->pollable_obj.po.mu);
REF_BY(fd, 2, "pollset_pollable");
} else if (pollset->current_pollable == &pollset->pollable) {
} else if (pollset->current_pollable_obj == &pollset->pollable_obj) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p add fd %p; already multipolling", pollset, fd);
}
append_error(&error, pollable_add_fd(pollset->current_pollable, fd),
append_error(&error, pollable_add_fd(pollset->current_pollable_obj, fd),
err_desc);
} else if (pollset->current_pollable != &fd->pollable) {
grpc_fd *had_fd = (grpc_fd *)pollset->current_pollable;
} else if (pollset->current_pollable_obj != &fd->pollable_obj) {
grpc_fd *had_fd = (grpc_fd *)pollset->current_pollable_obj;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG,
"PS:%p add fd %p; transition pollable from fd %p to multipoller",
@ -1055,11 +1061,11 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
grpc_lfev_set_ready(exec_ctx, &had_fd->read_closure, "read");
grpc_lfev_set_ready(exec_ctx, &had_fd->write_closure, "write");
pollset_kick_all(exec_ctx, pollset);
pollset->current_pollable = &pollset->pollable;
if (append_error(&error, pollable_materialize(&pollset->pollable),
pollset->current_pollable_obj = &pollset->pollable_obj;
if (append_error(&error, pollable_materialize(&pollset->pollable_obj),
err_desc)) {
pollable_add_fd(&pollset->pollable, had_fd);
pollable_add_fd(&pollset->pollable, fd);
pollable_add_fd(&pollset->pollable_obj, had_fd);
pollable_add_fd(&pollset->pollable_obj, fd);
}
GRPC_CLOSURE_SCHED(exec_ctx,
GRPC_CLOSURE_CREATE(unref_fd_no_longer_poller, had_fd,
@ -1071,9 +1077,9 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_fd *fd) {
gpr_mu_lock(&pollset->pollable.po.mu);
gpr_mu_lock(&pollset->pollable_obj.po.mu);
grpc_error *error = pollset_add_fd_locked(exec_ctx, pollset, fd, false);
gpr_mu_unlock(&pollset->pollable.po.mu);
gpr_mu_unlock(&pollset->pollable_obj.po.mu);
GRPC_LOG_IF_ERROR("pollset_add_fd", error);
}
@ -1095,7 +1101,7 @@ static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
grpc_fd *fd) {
po_join(exec_ctx, &pss->po, &fd->pollable.po);
po_join(exec_ctx, &pss->po, &fd->pollable_obj.po);
}
static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
@ -1103,7 +1109,7 @@ static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *pss, grpc_pollset *ps) {
po_join(exec_ctx, &pss->po, &ps->pollable.po);
po_join(exec_ctx, &pss->po, &ps->pollable_obj.po);
}
static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
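The pollable_t -> pollable and pollable -> pollable_obj renames above all dodge the same trap: a struct member that shares its type's name. A minimal sketch of the hazard, assuming (the diff itself does not say) that the motivation is keeping the core compilable as C++; the types below are illustrative:

    /* C accepts both forms; a C++ compiler rejects the commented-out one
       ("declaration of 'pollable grpc_fd::pollable' changes meaning of
       'pollable'") because the member name hides the type name. */
    typedef struct pollable {
      int epfd;
    } pollable;

    struct grpc_fd {
      pollable pollable_obj; /* distinct member name: fine in C and C++ */
      /* pollable pollable;     ill-formed in C++ */
      int fd;
    };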

@ -1021,10 +1021,11 @@ static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
}
/* p->mu must be held before calling this function */
static grpc_error *pollset_kick(grpc_pollset *p,
static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
grpc_pollset_worker *specific_worker) {
GPR_TIMER_BEGIN("pollset_kick", 0);
grpc_error *error = GRPC_ERROR_NONE;
GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
const char *err_desc = "Kick Failure";
grpc_pollset_worker *worker = specific_worker;
if (worker != NULL) {
@ -1158,7 +1159,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
GPR_ASSERT(!pollset->shutting_down);
pollset->shutting_down = true;
pollset->shutdown_done = closure;
pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST);
/* If the pollset has any workers, we cannot call finish_shutdown_locked()
because it would release the underlying polling island. In such a case, we

@ -209,7 +209,7 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
#define GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP 2
/* As per pollset_kick, with an extended set of flags (defined above)
-- mostly for fd_posix's use. */
static grpc_error *pollset_kick_ext(grpc_pollset *p,
static grpc_error *pollset_kick_ext(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
grpc_pollset_worker *specific_worker,
uint32_t flags) GRPC_MUST_USE_RESULT;
@ -365,36 +365,39 @@ static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
return notifier;
}
static grpc_error *pollset_kick_locked(grpc_fd_watcher *watcher) {
static grpc_error *pollset_kick_locked(grpc_exec_ctx *exec_ctx,
grpc_fd_watcher *watcher) {
gpr_mu_lock(&watcher->pollset->mu);
GPR_ASSERT(watcher->worker);
grpc_error *err = pollset_kick_ext(watcher->pollset, watcher->worker,
GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
grpc_error *err =
pollset_kick_ext(exec_ctx, watcher->pollset, watcher->worker,
GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
gpr_mu_unlock(&watcher->pollset->mu);
return err;
}
static void maybe_wake_one_watcher_locked(grpc_fd *fd) {
static void maybe_wake_one_watcher_locked(grpc_exec_ctx *exec_ctx,
grpc_fd *fd) {
if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) {
pollset_kick_locked(fd->inactive_watcher_root.next);
pollset_kick_locked(exec_ctx, fd->inactive_watcher_root.next);
} else if (fd->read_watcher) {
pollset_kick_locked(fd->read_watcher);
pollset_kick_locked(exec_ctx, fd->read_watcher);
} else if (fd->write_watcher) {
pollset_kick_locked(fd->write_watcher);
pollset_kick_locked(exec_ctx, fd->write_watcher);
}
}
static void wake_all_watchers_locked(grpc_fd *fd) {
static void wake_all_watchers_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
grpc_fd_watcher *watcher;
for (watcher = fd->inactive_watcher_root.next;
watcher != &fd->inactive_watcher_root; watcher = watcher->next) {
pollset_kick_locked(watcher);
pollset_kick_locked(exec_ctx, watcher);
}
if (fd->read_watcher) {
pollset_kick_locked(fd->read_watcher);
pollset_kick_locked(exec_ctx, fd->read_watcher);
}
if (fd->write_watcher && fd->write_watcher != fd->read_watcher) {
pollset_kick_locked(fd->write_watcher);
pollset_kick_locked(exec_ctx, fd->write_watcher);
}
}
@ -435,7 +438,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
if (!has_watchers(fd)) {
close_fd_locked(exec_ctx, fd);
} else {
wake_all_watchers_locked(fd);
wake_all_watchers_locked(exec_ctx, fd);
}
gpr_mu_unlock(&fd->mu);
UNREF_BY(fd, 2, reason); /* drop the reference */
@ -479,7 +482,7 @@ static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
/* already ready ==> queue the closure to run immediately */
*st = CLOSURE_NOT_READY;
GRPC_CLOSURE_SCHED(exec_ctx, closure, fd_shutdown_error(fd));
maybe_wake_one_watcher_locked(fd);
maybe_wake_one_watcher_locked(exec_ctx, fd);
} else {
/* upcallptr was set to a different closure. This is an error! */
gpr_log(GPR_ERROR,
@ -648,7 +651,7 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
}
}
if (kick) {
maybe_wake_one_watcher_locked(fd);
maybe_wake_one_watcher_locked(exec_ctx, fd);
}
if (fd_is_orphaned(fd) && !has_watchers(fd) && !fd->closed) {
close_fd_locked(exec_ctx, fd);
@ -712,11 +715,12 @@ static void kick_append_error(grpc_error **composite, grpc_error *error) {
*composite = grpc_error_add_child(*composite, error);
}
static grpc_error *pollset_kick_ext(grpc_pollset *p,
static grpc_error *pollset_kick_ext(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
grpc_pollset_worker *specific_worker,
uint32_t flags) {
GPR_TIMER_BEGIN("pollset_kick_ext", 0);
grpc_error *error = GRPC_ERROR_NONE;
GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
/* pollset->mu already held */
if (specific_worker != NULL) {
@ -782,9 +786,9 @@ static grpc_error *pollset_kick_ext(grpc_pollset *p,
return error;
}
static grpc_error *pollset_kick(grpc_pollset *p,
static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
grpc_pollset_worker *specific_worker) {
return pollset_kick_ext(p, specific_worker, 0);
return pollset_kick_ext(exec_ctx, p, specific_worker, 0);
}
/* global state management */
@ -847,7 +851,7 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
pollset->fds[pollset->fd_count++] = fd;
GRPC_FD_REF(fd, "multipoller");
pollset_kick(pollset, NULL);
pollset_kick(exec_ctx, pollset, NULL);
exit:
gpr_mu_unlock(&pollset->mu);
}
@ -1083,7 +1087,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
/* check shutdown conditions */
if (pollset->shutting_down) {
if (pollset_has_workers(pollset)) {
pollset_kick(pollset, NULL);
pollset_kick(exec_ctx, pollset, NULL);
} else if (!pollset->called_shutdown && !pollset_has_observers(pollset)) {
pollset->called_shutdown = 1;
gpr_mu_unlock(&pollset->mu);
@ -1112,7 +1116,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
GPR_ASSERT(!pollset->shutting_down);
pollset->shutting_down = 1;
pollset->shutdown_done = closure;
pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!pollset_has_workers(pollset)) {
GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pollset->idle_jobs);
}

@ -210,9 +210,9 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
return g_event_engine->pollset_work(exec_ctx, pollset, worker, now, deadline);
}
grpc_error *grpc_pollset_kick(grpc_pollset *pollset,
grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) {
return g_event_engine->pollset_kick(pollset, specific_worker);
return g_event_engine->pollset_kick(exec_ctx, pollset, specific_worker);
}
void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,

@ -54,7 +54,7 @@ typedef struct grpc_event_engine_vtable {
grpc_error *(*pollset_work)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker, gpr_timespec now,
gpr_timespec deadline);
grpc_error *(*pollset_kick)(grpc_pollset *pollset,
grpc_error *(*pollset_kick)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker);
void (*pollset_add_fd)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
struct grpc_fd *fd);

@ -76,7 +76,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
/* Break one polling thread out of polling work for this pollset.
If specific_worker is non-NULL, then kick that worker. */
grpc_error *grpc_pollset_kick(grpc_pollset *pollset,
grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker)
GRPC_MUST_USE_RESULT;
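With an exec_ctx threaded through every kick path, GRPC_STATS_INC_POLLSET_KICK can be recorded without any thread-local lookup. A minimal call-site sketch, assuming a pollset whose mutex mu was handed out at pollset init time, as elsewhere in this diff:

    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    gpr_mu_lock(mu); /* pollset_kick requires the pollset mutex to be held */
    /* the result is GRPC_MUST_USE_RESULT, so hand it to the logging helper */
    GRPC_LOG_IF_ERROR("kick", grpc_pollset_kick(&exec_ctx, pollset, NULL));
    gpr_mu_unlock(mu);
    grpc_exec_ctx_finish(&exec_ctx); /* flush any closures the kick scheduled */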

@ -98,7 +98,7 @@ void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure) {
pollset->shutting_down = 1;
grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
grpc_pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!pollset->is_iocp_worker) {
GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
} else {
@ -181,7 +181,7 @@ done:
return GRPC_ERROR_NONE;
}
grpc_error *grpc_pollset_kick(grpc_pollset *p,
grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
grpc_pollset_worker *specific_worker) {
if (specific_worker != NULL) {
if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
@ -209,7 +209,7 @@ grpc_error *grpc_pollset_kick(grpc_pollset *p,
specific_worker =
pop_front_worker(&p->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET);
if (specific_worker != NULL) {
grpc_pollset_kick(p, specific_worker);
grpc_pollset_kick(exec_ctx, p, specific_worker);
} else if (p->is_iocp_worker) {
grpc_iocp_kick();
} else {

@ -85,7 +85,7 @@ static grpc_error *blocking_resolve_address_impl(
if (s != 0) {
/* Retry if well-known service name is recognized */
char *svc[][2] = {{"http", "80"}, {"https", "443"}};
const char *svc[][2] = {{"http", "80"}, {"https", "443"}};
for (i = 0; i < GPR_ARRAY_SIZE(svc); i++) {
if (strcmp(port, svc[i][0]) == 0) {
GRPC_SCHEDULING_START_BLOCKING_REGION;
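This char* -> const char* change, like the ones in log.c, the string utilities, and call.c further down, exists because string literals are immutable; routing them through const char* lets the compiler enforce that. A sketch of what the stricter type now catches:

    const char *svc[][2] = {{"http", "80"}, {"https", "443"}};
    svc[0][1] = "8080";    /* still fine: the pointers themselves are mutable */
    /* svc[0][1][0] = '4';    now a compile error: the chars are const */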

@ -79,7 +79,8 @@ static void on_compute_engine_detection_http_response(grpc_exec_ctx *exec_ctx,
detector->is_done = 1;
GRPC_LOG_IF_ERROR(
"Pollset kick",
grpc_pollset_kick(grpc_polling_entity_pollset(&detector->pollent), NULL));
grpc_pollset_kick(exec_ctx,
grpc_polling_entity_pollset(&detector->pollent), NULL));
gpr_mu_unlock(g_polling_mu);
}

@ -455,14 +455,14 @@ grpc_server_security_connector *grpc_fake_server_security_connector_create(
typedef struct {
grpc_channel_security_connector base;
tsi_ssl_client_handshaker_factory *handshaker_factory;
tsi_ssl_client_handshaker_factory *client_handshaker_factory;
char *target_name;
char *overridden_target_name;
} grpc_ssl_channel_security_connector;
typedef struct {
grpc_server_security_connector base;
tsi_ssl_server_handshaker_factory *handshaker_factory;
tsi_ssl_server_handshaker_factory *server_handshaker_factory;
} grpc_ssl_server_security_connector;
static void ssl_channel_destroy(grpc_exec_ctx *exec_ctx,
@ -470,9 +470,8 @@ static void ssl_channel_destroy(grpc_exec_ctx *exec_ctx,
grpc_ssl_channel_security_connector *c =
(grpc_ssl_channel_security_connector *)sc;
grpc_call_credentials_unref(exec_ctx, c->base.request_metadata_creds);
if (c->handshaker_factory != NULL) {
tsi_ssl_client_handshaker_factory_destroy(c->handshaker_factory);
}
tsi_ssl_client_handshaker_factory_unref(c->client_handshaker_factory);
c->client_handshaker_factory = NULL;
if (c->target_name != NULL) gpr_free(c->target_name);
if (c->overridden_target_name != NULL) gpr_free(c->overridden_target_name);
gpr_free(sc);
@ -482,9 +481,8 @@ static void ssl_server_destroy(grpc_exec_ctx *exec_ctx,
grpc_security_connector *sc) {
grpc_ssl_server_security_connector *c =
(grpc_ssl_server_security_connector *)sc;
if (c->handshaker_factory != NULL) {
tsi_ssl_server_handshaker_factory_destroy(c->handshaker_factory);
}
tsi_ssl_server_handshaker_factory_unref(c->server_handshaker_factory);
c->server_handshaker_factory = NULL;
gpr_free(sc);
}
@ -496,7 +494,7 @@ static void ssl_channel_add_handshakers(grpc_exec_ctx *exec_ctx,
// Instantiate TSI handshaker.
tsi_handshaker *tsi_hs = NULL;
tsi_result result = tsi_ssl_client_handshaker_factory_create_handshaker(
c->handshaker_factory,
c->client_handshaker_factory,
c->overridden_target_name != NULL ? c->overridden_target_name
: c->target_name,
&tsi_hs);
@ -521,7 +519,7 @@ static void ssl_server_add_handshakers(grpc_exec_ctx *exec_ctx,
// Instantiate TSI handshaker.
tsi_handshaker *tsi_hs = NULL;
tsi_result result = tsi_ssl_server_handshaker_factory_create_handshaker(
c->handshaker_factory, &tsi_hs);
c->server_handshaker_factory, &tsi_hs);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.",
tsi_result_to_string(result));
@ -852,7 +850,7 @@ grpc_security_status grpc_ssl_channel_security_connector_create(
result = tsi_create_ssl_client_handshaker_factory(
has_key_cert_pair ? &config->pem_key_cert_pair : NULL, pem_root_certs,
ssl_cipher_suites(), alpn_protocol_strings, (uint16_t)num_alpn_protocols,
&c->handshaker_factory);
&c->client_handshaker_factory);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
tsi_result_to_string(result));
@ -897,7 +895,7 @@ grpc_security_status grpc_ssl_server_security_connector_create(
config->pem_root_certs, get_tsi_client_certificate_request_type(
config->client_certificate_request),
ssl_cipher_suites(), alpn_protocol_strings, (uint16_t)num_alpn_protocols,
&c->handshaker_factory);
&c->server_handshaker_factory);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
tsi_result_to_string(result));

@ -137,7 +137,7 @@ static void on_peer_checked_inner(grpc_exec_ctx *exec_ctx,
// Create zero-copy frame protector, if implemented.
tsi_zero_copy_grpc_protector *zero_copy_protector = NULL;
tsi_result result = tsi_handshaker_result_create_zero_copy_grpc_protector(
h->handshaker_result, NULL, &zero_copy_protector);
exec_ctx, h->handshaker_result, NULL, &zero_copy_protector);
if (result != TSI_OK && result != TSI_UNIMPLEMENTED) {
error = grpc_set_tsi_error_result(
GRPC_ERROR_CREATE_FROM_STATIC_STRING(

@ -57,7 +57,7 @@ void gpr_log(const char *file, int line, gpr_log_severity severity,
}
void gpr_default_log(gpr_log_func_args *args) {
char *final_slash;
const char *final_slash;
char *prefix;
const char *display_file;
char time_buffer[64];

@ -276,7 +276,7 @@ static void add_string_to_split(const char *beg, const char *end, char ***strs,
void gpr_string_split(const char *input, const char *sep, char ***strs,
size_t *nstrs) {
char *next;
const char *next;
*strs = NULL;
*nstrs = 0;
size_t capstrs = 0;

@ -135,7 +135,7 @@ typedef struct batch_control {
typedef struct {
gpr_mu child_list_mu;
grpc_call *first_child;
} parent_call_t;
} parent_call;
typedef struct {
grpc_call *parent;
@ -144,7 +144,7 @@ typedef struct {
parent->mu */
grpc_call *sibling_next;
grpc_call *sibling_prev;
} child_call_t;
} child_call;
#define RECV_NONE ((gpr_atm)0)
#define RECV_INITIAL_METADATA_FIRST ((gpr_atm)1)
@ -157,8 +157,8 @@ struct grpc_call {
grpc_polling_entity pollent;
grpc_channel *channel;
gpr_timespec start_time;
/* parent_call_t* */ gpr_atm parent_call_atm;
child_call_t *child_call;
/* parent_call* */ gpr_atm parent_call_atm;
child_call *child;
/* client or server call */
bool is_client;
@ -304,21 +304,21 @@ void *grpc_call_arena_alloc(grpc_call *call, size_t size) {
return gpr_arena_alloc(call->arena, size);
}
static parent_call_t *get_or_create_parent_call(grpc_call *call) {
parent_call_t *p = (parent_call_t *)gpr_atm_acq_load(&call->parent_call_atm);
static parent_call *get_or_create_parent_call(grpc_call *call) {
parent_call *p = (parent_call *)gpr_atm_acq_load(&call->parent_call_atm);
if (p == NULL) {
p = (parent_call_t *)gpr_arena_alloc(call->arena, sizeof(*p));
p = (parent_call *)gpr_arena_alloc(call->arena, sizeof(*p));
gpr_mu_init(&p->child_list_mu);
if (!gpr_atm_rel_cas(&call->parent_call_atm, (gpr_atm)NULL, (gpr_atm)p)) {
gpr_mu_destroy(&p->child_list_mu);
p = (parent_call_t *)gpr_atm_acq_load(&call->parent_call_atm);
p = (parent_call *)gpr_atm_acq_load(&call->parent_call_atm);
}
}
return p;
}
static parent_call_t *get_parent_call(grpc_call *call) {
return (parent_call_t *)gpr_atm_acq_load(&call->parent_call_atm);
static parent_call *get_parent_call(grpc_call *call) {
return (parent_call *)gpr_atm_acq_load(&call->parent_call_atm);
}
grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
@ -330,8 +330,9 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
grpc_channel_get_channel_stack(args->channel);
grpc_call *call;
GPR_TIMER_BEGIN("grpc_call_create", 0);
gpr_arena *arena =
gpr_arena_create(grpc_channel_get_call_size_estimate(args->channel));
size_t initial_size = grpc_channel_get_call_size_estimate(args->channel);
GRPC_STATS_INC_CALL_INITIAL_SIZE(exec_ctx, initial_size);
gpr_arena *arena = gpr_arena_create(initial_size);
call = (grpc_call *)gpr_arena_alloc(
arena, sizeof(grpc_call) + channel_stack->call_stack_size);
gpr_ref_init(&call->ext_ref, 1);
@ -377,24 +378,24 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
bool immediately_cancel = false;
if (args->parent_call != NULL) {
child_call_t *cc = call->child_call =
(child_call_t *)gpr_arena_alloc(arena, sizeof(child_call_t));
call->child_call->parent = args->parent_call;
if (args->parent != NULL) {
child_call *cc = call->child =
(child_call *)gpr_arena_alloc(arena, sizeof(child_call));
call->child->parent = args->parent;
GRPC_CALL_INTERNAL_REF(args->parent_call, "child");
GRPC_CALL_INTERNAL_REF(args->parent, "child");
GPR_ASSERT(call->is_client);
GPR_ASSERT(!args->parent_call->is_client);
GPR_ASSERT(!args->parent->is_client);
parent_call_t *pc = get_or_create_parent_call(args->parent_call);
parent_call *pc = get_or_create_parent_call(args->parent);
gpr_mu_lock(&pc->child_list_mu);
if (args->propagation_mask & GRPC_PROPAGATE_DEADLINE) {
send_deadline = gpr_time_min(
gpr_convert_clock_type(send_deadline,
args->parent_call->send_deadline.clock_type),
args->parent_call->send_deadline);
args->parent->send_deadline.clock_type),
args->parent->send_deadline);
}
/* for now GRPC_PROPAGATE_TRACING_CONTEXT *MUST* be passed with
* GRPC_PROPAGATE_STATS_CONTEXT */
@ -406,9 +407,9 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
"Census tracing propagation requested "
"without Census context propagation"));
}
grpc_call_context_set(
call, GRPC_CONTEXT_TRACING,
args->parent_call->context[GRPC_CONTEXT_TRACING].value, NULL);
grpc_call_context_set(call, GRPC_CONTEXT_TRACING,
args->parent->context[GRPC_CONTEXT_TRACING].value,
NULL);
} else if (args->propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT) {
add_init_error(&error, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Census context propagation requested "
@ -416,7 +417,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
}
if (args->propagation_mask & GRPC_PROPAGATE_CANCELLATION) {
call->cancellation_is_inherited = 1;
if (gpr_atm_acq_load(&args->parent_call->received_final_op_atm)) {
if (gpr_atm_acq_load(&args->parent->received_final_op_atm)) {
immediately_cancel = true;
}
}
@ -426,9 +427,9 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
cc->sibling_next = cc->sibling_prev = call;
} else {
cc->sibling_next = pc->first_child;
cc->sibling_prev = pc->first_child->child_call->sibling_prev;
cc->sibling_next->child_call->sibling_prev =
cc->sibling_prev->child_call->sibling_next = call;
cc->sibling_prev = pc->first_child->child->sibling_prev;
cc->sibling_next->child->sibling_prev =
cc->sibling_prev->child->sibling_next = call;
}
gpr_mu_unlock(&pc->child_list_mu);
@ -533,7 +534,7 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
if (c->receiving_stream != NULL) {
grpc_byte_stream_destroy(exec_ctx, c->receiving_stream);
}
parent_call_t *pc = get_parent_call(c);
parent_call *pc = get_parent_call(c);
if (pc != NULL) {
gpr_mu_destroy(&pc->child_list_mu);
}
@ -570,14 +571,14 @@ void grpc_call_ref(grpc_call *c) { gpr_ref(&c->ext_ref); }
void grpc_call_unref(grpc_call *c) {
if (!gpr_unref(&c->ext_ref)) return;
child_call_t *cc = c->child_call;
child_call *cc = c->child;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_call_unref", 0);
GRPC_API_TRACE("grpc_call_unref(c=%p)", 1, (c));
if (cc) {
parent_call_t *pc = get_parent_call(cc->parent);
parent_call *pc = get_parent_call(cc->parent);
gpr_mu_lock(&pc->child_list_mu);
if (c == pc->first_child) {
pc->first_child = cc->sibling_next;
@ -585,8 +586,8 @@ void grpc_call_unref(grpc_call *c) {
pc->first_child = NULL;
}
}
cc->sibling_prev->child_call->sibling_next = cc->sibling_next;
cc->sibling_next->child_call->sibling_prev = cc->sibling_prev;
cc->sibling_prev->child->sibling_next = cc->sibling_next;
cc->sibling_next->child->sibling_prev = cc->sibling_prev;
gpr_mu_unlock(&pc->child_list_mu);
GRPC_CALL_INTERNAL_UNREF(&exec_ctx, cc->parent, "child");
}
@ -1309,14 +1310,14 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
/* propagate cancellation to any interested children */
gpr_atm_rel_store(&call->received_final_op_atm, 1);
parent_call_t *pc = get_parent_call(call);
parent_call *pc = get_parent_call(call);
if (pc != NULL) {
grpc_call *child;
gpr_mu_lock(&pc->child_list_mu);
child = pc->first_child;
if (child != NULL) {
do {
next_child_call = child->child_call->sibling_next;
next_child_call = child->child->sibling_next;
if (child->cancellation_is_inherited) {
GRPC_CALL_INTERNAL_REF(child, "propagate_cancel");
cancel_with_error(exec_ctx, child, STATUS_FROM_API_OVERRIDE,
@ -1511,7 +1512,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
} else if (grpc_compression_options_is_stream_compression_algorithm_enabled(
&compression_options, algo) == 0) {
/* check if algorithm is supported by current channel config */
char *algo_name = NULL;
const char *algo_name = NULL;
grpc_stream_compression_algorithm_name(algo, &algo_name);
gpr_asprintf(&error_msg, "Stream compression algorithm '%s' is disabled.",
algo_name);
@ -1525,7 +1526,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
if (!GPR_BITGET(call->stream_encodings_accepted_by_peer,
call->incoming_stream_compression_algorithm)) {
if (GRPC_TRACER_ON(grpc_compression_trace)) {
char *algo_name = NULL;
const char *algo_name = NULL;
grpc_stream_compression_algorithm_name(
call->incoming_stream_compression_algorithm, &algo_name);
gpr_log(
@ -1552,7 +1553,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
} else if (grpc_compression_options_is_algorithm_enabled(
&compression_options, algo) == 0) {
/* check if algorithm is supported by current channel config */
char *algo_name = NULL;
const char *algo_name = NULL;
grpc_compression_algorithm_name(algo, &algo_name);
gpr_asprintf(&error_msg, "Compression algorithm '%s' is disabled.",
algo_name);
@ -1568,7 +1569,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
if (!GPR_BITGET(call->encodings_accepted_by_peer,
call->incoming_compression_algorithm)) {
if (GRPC_TRACER_ON(grpc_compression_trace)) {
char *algo_name = NULL;
const char *algo_name = NULL;
grpc_compression_algorithm_name(call->incoming_compression_algorithm,
&algo_name);
gpr_log(GPR_ERROR,
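grpc_compression_algorithm_name and its stream-compression counterpart now return the name through a const char*, which (as the absence of any free in these hunks suggests) points at storage owned by the library, so callers treat it as read-only. A hedged call-site sketch:

    const char *name = NULL;
    if (grpc_compression_algorithm_name(GRPC_COMPRESS_GZIP, &name)) {
      gpr_log(GPR_DEBUG, "algorithm: %s", name); /* borrowed, do not free */
    }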

@ -37,7 +37,7 @@ typedef void (*grpc_ioreq_completion_func)(grpc_exec_ctx *exec_ctx,
typedef struct grpc_call_create_args {
grpc_channel *channel;
grpc_call *parent_call;
grpc_call *parent;
uint32_t propagation_mask;
grpc_completion_queue *cq;

@ -282,7 +282,7 @@ static grpc_call *grpc_channel_create_call_internal(
grpc_call_create_args args;
memset(&args, 0, sizeof(args));
args.channel = channel;
args.parent_call = parent_call;
args.parent = parent_call;
args.propagation_mask = propagation_mask;
args.cq = cq;
args.pollset_set_alternative = pollset_set_alternative;

@ -55,7 +55,7 @@ typedef struct {
bool can_listen;
size_t (*size)(void);
void (*init)(grpc_pollset *pollset, gpr_mu **mu);
grpc_error *(*kick)(grpc_pollset *pollset,
grpc_error *(*kick)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker);
grpc_error *(*work)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker, gpr_timespec now,
@ -131,7 +131,8 @@ static grpc_error *non_polling_poller_work(grpc_exec_ctx *exec_ctx,
}
static grpc_error *non_polling_poller_kick(
grpc_pollset *pollset, grpc_pollset_worker *specific_worker) {
grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) {
non_polling_poller *p = (non_polling_poller *)pollset;
if (specific_worker == NULL) specific_worker = (grpc_pollset_worker *)p->root;
if (specific_worker != NULL) {
@ -565,13 +566,13 @@ static void cq_check_tag(grpc_completion_queue *cq, void *tag, bool lock_cq) {}
* true if the increment was successful; false if the counter is zero */
static bool atm_inc_if_nonzero(gpr_atm *counter) {
while (true) {
gpr_atm count = gpr_atm_no_barrier_load(counter);
gpr_atm count = gpr_atm_acq_load(counter);
/* If zero, we are done. If not, we must do a CAS (instead of an atomic
* increment) to maintain the contract: do not increment the counter if it
* is zero. */
if (count == 0) {
return false;
} else if (gpr_atm_no_barrier_cas(counter, count, count + 1)) {
} else if (gpr_atm_full_cas(counter, count, count + 1)) {
break;
}
}
@ -643,15 +644,19 @@ static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx,
/* Add the completion to the queue */
bool is_first = cq_event_queue_push(&cqd->queue, storage);
gpr_atm_no_barrier_fetch_add(&cqd->things_queued_ever, 1);
bool will_definitely_shutdown =
gpr_atm_no_barrier_load(&cqd->pending_events) == 1;
/* Since we do not hold the cq lock here, it is important to do an 'acquire'
load (instead of a 'no_barrier' load) to match the release store
(done via gpr_atm_full_fetch_add(pending_events, -1)) in cq_shutdown_next
*/
bool will_definitely_shutdown = gpr_atm_acq_load(&cqd->pending_events) == 1;
if (!will_definitely_shutdown) {
/* Only kick if this is the first item queued */
if (is_first) {
gpr_mu_lock(cq->mu);
grpc_error *kick_error =
cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), NULL);
cq->poller_vtable->kick(exec_ctx, POLLSET_FROM_CQ(cq), NULL);
gpr_mu_unlock(cq->mu);
if (kick_error != GRPC_ERROR_NONE) {
@ -737,7 +742,7 @@ static void cq_end_op_for_pluck(grpc_exec_ctx *exec_ctx,
}
grpc_error *kick_error =
cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), pluck_worker);
cq->poller_vtable->kick(exec_ctx, POLLSET_FROM_CQ(cq), pluck_worker);
gpr_mu_unlock(cq->mu);
@ -888,7 +893,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
}
}
if (gpr_atm_no_barrier_load(&cqd->pending_events) == 0) {
if (gpr_atm_acq_load(&cqd->pending_events) == 0) {
/* Before returning, check if the queue has any items left over (since
gpr_mpscq_pop() can sometimes return NULL even if the queue is not
empty). If so, keep retrying but do not return GRPC_QUEUE_SHUTDOWN */
@ -934,9 +939,9 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
}
if (cq_event_queue_num_items(&cqd->queue) > 0 &&
gpr_atm_no_barrier_load(&cqd->pending_events) > 0) {
gpr_atm_acq_load(&cqd->pending_events) > 0) {
gpr_mu_lock(cq->mu);
cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), NULL);
cq->poller_vtable->kick(&exec_ctx, POLLSET_FROM_CQ(cq), NULL);
gpr_mu_unlock(cq->mu);
}
@ -985,6 +990,9 @@ static void cq_shutdown_next(grpc_exec_ctx *exec_ctx,
return;
}
cqd->shutdown_called = true;
/* Doing a full_fetch_add (i.e. acq/release) here to match the
* cq_begin_op_for_next and cq_end_op_for_next functions, which read/write
* this counter without necessarily holding a lock on cq */
if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
cq_finish_shutdown_next(exec_ctx, cq);
}
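Read together, the completion-queue hunks turn pending_events into an acquire/release protocol: the full_fetch_add decrement is the release, and each lock-free reader pairs an acquire load with it. In sketch form:

    /* writer (shutdown path): the full barrier releases prior writes */
    if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
      cq_finish_shutdown_next(exec_ctx, cq);
    }

    /* reader (next path): acquire pairs with that release, so every queue
       push made before the final decrement is visible at this point */
    if (gpr_atm_acq_load(&cqd->pending_events) == 0) {
      /* shutdown may be concluded once the event queue drains */
    }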

@ -197,7 +197,7 @@ char *grpc_transport_op_string(grpc_transport_op *op) {
return out;
}
void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
void grpc_call_log_op(const char *file, int line, gpr_log_severity severity,
grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
char *str = grpc_transport_stream_op_batch_string(op);

@ -493,7 +493,8 @@ static tsi_result fake_handshaker_result_extract_peer(
}
static tsi_result fake_handshaker_result_create_zero_copy_grpc_protector(
const tsi_handshaker_result *self, size_t *max_output_protected_frame_size,
void *exec_ctx, const tsi_handshaker_result *self,
size_t *max_output_protected_frame_size,
tsi_zero_copy_grpc_protector **protector) {
*protector =
tsi_create_fake_zero_copy_grpc_protector(max_output_protected_frame_size);

@ -67,7 +67,13 @@
/* --- Structure definitions. ---*/
struct tsi_ssl_handshaker_factory {
const tsi_ssl_handshaker_factory_vtable *vtable;
gpr_refcount refcount;
};
struct tsi_ssl_client_handshaker_factory {
tsi_ssl_handshaker_factory base;
SSL_CTX *ssl_context;
unsigned char *alpn_protocol_list;
size_t alpn_protocol_list_length;
@ -77,6 +83,7 @@ struct tsi_ssl_server_handshaker_factory {
/* Several contexts to support SNI.
The tsi_peer array contains the subject names of the server certificates
associated with the contexts at the same index. */
tsi_ssl_handshaker_factory base;
SSL_CTX **ssl_contexts;
tsi_peer *ssl_context_x509_subject_names;
size_t ssl_context_count;
@ -90,6 +97,7 @@ typedef struct {
BIO *into_ssl;
BIO *from_ssl;
tsi_result result;
tsi_ssl_handshaker_factory *factory_ref;
} tsi_ssl_handshaker;
typedef struct {
@ -846,6 +854,47 @@ static const tsi_frame_protector_vtable frame_protector_vtable = {
ssl_protector_destroy,
};
/* --- tsi_server_handshaker_factory methods implementation. --- */
static void tsi_ssl_handshaker_factory_destroy(
tsi_ssl_handshaker_factory *self) {
if (self == NULL) return;
if (self->vtable != NULL && self->vtable->destroy != NULL) {
self->vtable->destroy(self);
}
/* Note: we don't free(self) here because this object is always directly
* embedded in another object. If tsi_ssl_handshaker_factory_init allocates
* any memory, it should be freed here. */
}
static tsi_ssl_handshaker_factory *tsi_ssl_handshaker_factory_ref(
tsi_ssl_handshaker_factory *self) {
if (self == NULL) return NULL;
gpr_refn(&self->refcount, 1);
return self;
}
static void tsi_ssl_handshaker_factory_unref(tsi_ssl_handshaker_factory *self) {
if (self == NULL) return;
if (gpr_unref(&self->refcount)) {
tsi_ssl_handshaker_factory_destroy(self);
}
}
static tsi_ssl_handshaker_factory_vtable handshaker_factory_vtable = {NULL};
/* Initializes a tsi_ssl_handshaker_factory object. Caller is responsible for
* allocating memory for the factory. */
static void tsi_ssl_handshaker_factory_init(
tsi_ssl_handshaker_factory *factory) {
GPR_ASSERT(factory != NULL);
factory->vtable = &handshaker_factory_vtable;
gpr_ref_init(&factory->refcount, 1);
}
/* --- tsi_handshaker methods implementation. ---*/
static tsi_result ssl_handshaker_get_bytes_to_send_to_peer(tsi_handshaker *self,
@ -1013,6 +1062,7 @@ static tsi_result ssl_handshaker_create_frame_protector(
static void ssl_handshaker_destroy(tsi_handshaker *self) {
tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self;
SSL_free(impl->ssl); /* The BIO objects are owned by ssl */
tsi_ssl_handshaker_factory_unref(impl->factory_ref);
gpr_free(impl);
}
@ -1030,6 +1080,7 @@ static const tsi_handshaker_vtable handshaker_vtable = {
static tsi_result create_tsi_ssl_handshaker(SSL_CTX *ctx, int is_client,
const char *server_name_indication,
tsi_ssl_handshaker_factory *factory,
tsi_handshaker **handshaker) {
SSL *ssl = SSL_new(ctx);
BIO *into_ssl = NULL;
@ -1085,6 +1136,8 @@ static tsi_result create_tsi_ssl_handshaker(SSL_CTX *ctx, int is_client,
impl->from_ssl = from_ssl;
impl->result = TSI_HANDSHAKE_IN_PROGRESS;
impl->base.vtable = &handshaker_vtable;
impl->factory_ref = tsi_ssl_handshaker_factory_ref(factory);
*handshaker = &impl->base;
return TSI_OK;
}
@ -1121,11 +1174,20 @@ tsi_result tsi_ssl_client_handshaker_factory_create_handshaker(
tsi_ssl_client_handshaker_factory *self, const char *server_name_indication,
tsi_handshaker **handshaker) {
return create_tsi_ssl_handshaker(self->ssl_context, 1, server_name_indication,
handshaker);
&self->base, handshaker);
}
void tsi_ssl_client_handshaker_factory_destroy(
void tsi_ssl_client_handshaker_factory_unref(
tsi_ssl_client_handshaker_factory *self) {
if (self == NULL) return;
tsi_ssl_handshaker_factory_unref(&self->base);
}
static void tsi_ssl_client_handshaker_factory_destroy(
tsi_ssl_handshaker_factory *factory) {
if (factory == NULL) return;
tsi_ssl_client_handshaker_factory *self =
(tsi_ssl_client_handshaker_factory *)factory;
if (self->ssl_context != NULL) SSL_CTX_free(self->ssl_context);
if (self->alpn_protocol_list != NULL) gpr_free(self->alpn_protocol_list);
gpr_free(self);
@ -1150,11 +1212,21 @@ tsi_result tsi_ssl_server_handshaker_factory_create_handshaker(
if (self->ssl_context_count == 0) return TSI_INVALID_ARGUMENT;
/* Create the handshaker with the first context. We will switch if needed
because of SNI in ssl_server_handshaker_factory_servername_callback. */
return create_tsi_ssl_handshaker(self->ssl_contexts[0], 0, NULL, handshaker);
return create_tsi_ssl_handshaker(self->ssl_contexts[0], 0, NULL, &self->base,
handshaker);
}
void tsi_ssl_server_handshaker_factory_destroy(
void tsi_ssl_server_handshaker_factory_unref(
tsi_ssl_server_handshaker_factory *self) {
if (self == NULL) return;
tsi_ssl_handshaker_factory_unref(&self->base);
}
static void tsi_ssl_server_handshaker_factory_destroy(
tsi_ssl_handshaker_factory *factory) {
if (factory == NULL) return;
tsi_ssl_server_handshaker_factory *self =
(tsi_ssl_server_handshaker_factory *)factory;
size_t i;
for (i = 0; i < self->ssl_context_count; i++) {
if (self->ssl_contexts[i] != NULL) {
@ -1263,6 +1335,9 @@ static int server_handshaker_factory_npn_advertised_callback(
/* --- tsi_ssl_handshaker_factory constructors. --- */
static tsi_ssl_handshaker_factory_vtable client_handshaker_factory_vtable = {
tsi_ssl_client_handshaker_factory_destroy};
tsi_result tsi_create_ssl_client_handshaker_factory(
const tsi_ssl_pem_key_cert_pair *pem_key_cert_pair,
const char *pem_root_certs, const char *cipher_suites,
@ -1285,6 +1360,9 @@ tsi_result tsi_create_ssl_client_handshaker_factory(
}
impl = gpr_zalloc(sizeof(*impl));
tsi_ssl_handshaker_factory_init(&impl->base);
impl->base.vtable = &client_handshaker_factory_vtable;
impl->ssl_context = ssl_context;
do {
@ -1322,7 +1400,7 @@ tsi_result tsi_create_ssl_client_handshaker_factory(
}
} while (0);
if (result != TSI_OK) {
tsi_ssl_client_handshaker_factory_destroy(impl);
tsi_ssl_handshaker_factory_unref(&impl->base);
return result;
}
SSL_CTX_set_verify(ssl_context, SSL_VERIFY_PEER, NULL);
@ -1332,6 +1410,9 @@ tsi_result tsi_create_ssl_client_handshaker_factory(
return TSI_OK;
}
static tsi_ssl_handshaker_factory_vtable server_handshaker_factory_vtable = {
tsi_ssl_server_handshaker_factory_destroy};
tsi_result tsi_create_ssl_server_handshaker_factory(
const tsi_ssl_pem_key_cert_pair *pem_key_cert_pairs,
size_t num_key_cert_pairs, const char *pem_client_root_certs,
@ -1364,12 +1445,15 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex(
}
impl = gpr_zalloc(sizeof(*impl));
tsi_ssl_handshaker_factory_init(&impl->base);
impl->base.vtable = &server_handshaker_factory_vtable;
impl->ssl_contexts = gpr_zalloc(num_key_cert_pairs * sizeof(SSL_CTX *));
impl->ssl_context_x509_subject_names =
gpr_zalloc(num_key_cert_pairs * sizeof(tsi_peer));
if (impl->ssl_contexts == NULL ||
impl->ssl_context_x509_subject_names == NULL) {
tsi_ssl_server_handshaker_factory_destroy(impl);
tsi_ssl_handshaker_factory_unref(&impl->base);
return TSI_OUT_OF_RESOURCES;
}
impl->ssl_context_count = num_key_cert_pairs;
@ -1379,7 +1463,7 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex(
&impl->alpn_protocol_list,
&impl->alpn_protocol_list_length);
if (result != TSI_OK) {
tsi_ssl_server_handshaker_factory_destroy(impl);
tsi_ssl_handshaker_factory_unref(&impl->base);
return result;
}
}
@ -1451,10 +1535,11 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex(
} while (0);
if (result != TSI_OK) {
tsi_ssl_server_handshaker_factory_destroy(impl);
tsi_ssl_handshaker_factory_unref(&impl->base);
return result;
}
}
*factory = impl;
return TSI_OK;
}
@ -1501,3 +1586,15 @@ int tsi_ssl_peer_matches_name(const tsi_peer *peer, const char *name) {
return 0; /* Not found. */
}
/* --- Testing support. --- */
const tsi_ssl_handshaker_factory_vtable *tsi_ssl_handshaker_factory_swap_vtable(
tsi_ssl_handshaker_factory *factory,
tsi_ssl_handshaker_factory_vtable *new_vtable) {
GPR_ASSERT(factory != NULL);
GPR_ASSERT(factory->vtable != NULL);
const tsi_ssl_handshaker_factory_vtable *orig_vtable = factory->vtable;
factory->vtable = new_vtable;
return orig_vtable;
}
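The net effect of the factory changes: each handshaker now pins its factory (impl->factory_ref), so the factory may be released while handshakers are still live. A hedged lifetime sketch; pem_roots is a placeholder for real PEM data and the SNI name is illustrative:

    const char *pem_roots = "...placeholder PEM...";
    tsi_ssl_client_handshaker_factory *factory = NULL;
    tsi_handshaker *hs = NULL;
    if (tsi_create_ssl_client_handshaker_factory(NULL /* no client cert */,
                                                 pem_roots, NULL, NULL, 0,
                                                 &factory) == TSI_OK &&
        tsi_ssl_client_handshaker_factory_create_handshaker(
            factory, "server.example", &hs) == TSI_OK) {
      tsi_ssl_client_handshaker_factory_unref(factory); /* drop caller's ref */
      /* hs still holds a ref, so the factory stays alive */
      tsi_handshaker_destroy(hs); /* releases the last ref; factory destroyed */
    }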

@ -96,10 +96,10 @@ tsi_result tsi_ssl_client_handshaker_factory_create_handshaker(
tsi_ssl_client_handshaker_factory *self, const char *server_name_indication,
tsi_handshaker **handshaker);
/* Destroys the handshaker factory. WARNING: it is unsafe to destroy a factory
while handshakers created with this factory are still in use. */
void tsi_ssl_client_handshaker_factory_destroy(
tsi_ssl_client_handshaker_factory *self);
/* Decrements reference count of the handshaker factory. Handshaker factory will
* be destroyed once no references exist. */
void tsi_ssl_client_handshaker_factory_unref(
tsi_ssl_client_handshaker_factory *factory);
/* --- tsi_ssl_server_handshaker_factory object ---
@ -158,9 +158,9 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex(
tsi_result tsi_ssl_server_handshaker_factory_create_handshaker(
tsi_ssl_server_handshaker_factory *self, tsi_handshaker **handshaker);
/* Destroys the handshaker factory. WARNING: it is unsafe to destroy a factory
while handshakers created with this factory are still in use. */
void tsi_ssl_server_handshaker_factory_destroy(
/* Decrements reference count of the handshaker factory. Handshaker factory will
* be destroyed once no references exist. */
void tsi_ssl_server_handshaker_factory_unref(
tsi_ssl_server_handshaker_factory *self);
/* Util that checks that an ssl peer matches a specific name.
@ -170,6 +170,29 @@ void tsi_ssl_server_handshaker_factory_destroy(
- handle public-suffix wildcards more strictly (e.g. *.co.uk) */
int tsi_ssl_peer_matches_name(const tsi_peer *peer, const char *name);
/* --- Testing support. ---
These functions and typedefs are not intended to be used outside of testing.
*/
/* Base type of client and server handshaker factories. */
typedef struct tsi_ssl_handshaker_factory tsi_ssl_handshaker_factory;
/* Function pointer to handshaker_factory destructor. */
typedef void (*tsi_ssl_handshaker_factory_destructor)(
tsi_ssl_handshaker_factory *factory);
/* Virtual table for tsi_ssl_handshaker_factory. */
typedef struct {
tsi_ssl_handshaker_factory_destructor destroy;
} tsi_ssl_handshaker_factory_vtable;
/* Swaps the factory's vtable for new_vtable and returns the previous
vtable. */
const tsi_ssl_handshaker_factory_vtable *tsi_ssl_handshaker_factory_swap_vtable(
tsi_ssl_handshaker_factory *factory,
tsi_ssl_handshaker_factory_vtable *new_vtable);
#ifdef __cplusplus
}
#endif
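A hedged sketch of what the testing hook enables: swap in a vtable whose destroy counts invocations, then chain to the original so the factory is still freed. This leans on base being the first member of the concrete factory structs, per the layout shown earlier:

    static int g_destroy_calls = 0;
    static const tsi_ssl_handshaker_factory_vtable *g_orig_vtable = NULL;

    static void counting_destroy(tsi_ssl_handshaker_factory *factory) {
      g_destroy_calls++;
      g_orig_vtable->destroy(factory); /* chain to the real destructor */
    }

    static tsi_ssl_handshaker_factory_vtable g_test_vtable = {counting_destroy};

    /* the client factory's first member is its tsi_ssl_handshaker_factory
       base, so this cast is well-defined */
    g_orig_vtable = tsi_ssl_handshaker_factory_swap_vtable(
        (tsi_ssl_handshaker_factory *)client_factory, &g_test_vtable);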

@ -84,11 +84,17 @@ struct tsi_handshaker {
};
/* Base for tsi_handshaker_result implementations.
See transport_security_interface.h for documentation. */
See transport_security_interface.h for documentation.
The exec_ctx parameter in create_zero_copy_grpc_protector is supposed to be
of type grpc_exec_ctx*, but we're using void* instead to avoid making the TSI
API depend on grpc. The create_zero_copy_grpc_protector() method is only used
in grpc, where we do need the exec_ctx passed through, but the API still
needs to compile in other applications, where grpc_exec_ctx is not defined.
*/
typedef struct {
tsi_result (*extract_peer)(const tsi_handshaker_result *self, tsi_peer *peer);
tsi_result (*create_zero_copy_grpc_protector)(
const tsi_handshaker_result *self,
void *exec_ctx, const tsi_handshaker_result *self,
size_t *max_output_protected_frame_size,
tsi_zero_copy_grpc_protector **protector);
tsi_result (*create_frame_protector)(const tsi_handshaker_result *self,

@ -20,16 +20,18 @@
/* This method creates a tsi_zero_copy_grpc_protector object. */
tsi_result tsi_handshaker_result_create_zero_copy_grpc_protector(
const tsi_handshaker_result *self, size_t *max_output_protected_frame_size,
grpc_exec_ctx *exec_ctx, const tsi_handshaker_result *self,
size_t *max_output_protected_frame_size,
tsi_zero_copy_grpc_protector **protector) {
if (self == NULL || self->vtable == NULL || protector == NULL) {
if (exec_ctx == NULL || self == NULL || self->vtable == NULL ||
protector == NULL) {
return TSI_INVALID_ARGUMENT;
}
if (self->vtable->create_zero_copy_grpc_protector == NULL) {
return TSI_UNIMPLEMENTED;
}
return self->vtable->create_zero_copy_grpc_protector(
self, max_output_protected_frame_size, protector);
exec_ctx, self, max_output_protected_frame_size, protector);
}
/* --- tsi_zero_copy_grpc_protector common implementation. ---

@ -30,7 +30,8 @@ extern "C" {
assuming there is no fatal error.
The caller is responsible for destroying the protector. */
tsi_result tsi_handshaker_result_create_zero_copy_grpc_protector(
const tsi_handshaker_result *self, size_t *max_output_protected_frame_size,
grpc_exec_ctx *exec_ctx, const tsi_handshaker_result *self,
size_t *max_output_protected_frame_size,
tsi_zero_copy_grpc_protector **protector);
/* -- tsi_zero_copy_grpc_protector object -- */
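For callers, the new leading parameter is mandatory; passing NULL now yields TSI_INVALID_ARGUMENT per the check above. A call-site sketch mirroring on_peer_checked_inner:

    tsi_zero_copy_grpc_protector *protector = NULL;
    tsi_result result = tsi_handshaker_result_create_zero_copy_grpc_protector(
        exec_ctx, handshaker_result, NULL /* default max frame size */,
        &protector);
    if (result != TSI_OK && result != TSI_UNIMPLEMENTED) {
      /* hard failure; TSI_UNIMPLEMENTED only means no zero-copy support,
         in which case the caller falls back to a regular frame protector */
    }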

@ -96,7 +96,7 @@ void ClientContext::set_call(grpc_call* call,
void ClientContext::set_compression_algorithm(
grpc_compression_algorithm algorithm) {
char* algorithm_name = nullptr;
const char* algorithm_name = nullptr;
if (!grpc_compression_algorithm_name(algorithm, &algorithm_name)) {
gpr_log(GPR_ERROR, "Name for compression algorithm '%d' unknown.",
algorithm);

@@ -22,14 +22,29 @@
namespace grpc {
+namespace {
+std::unique_ptr<GenericClientAsyncReaderWriter> CallInternal(
+    ChannelInterface* channel, ClientContext* context,
+    const grpc::string& method, CompletionQueue* cq, bool start, void* tag) {
+  return std::unique_ptr<GenericClientAsyncReaderWriter>(
+      GenericClientAsyncReaderWriter::Create(
+          channel, cq, RpcMethod(method.c_str(), RpcMethod::BIDI_STREAMING),
+          context, start, tag));
+}
+} // namespace
// begin a call to a named method
std::unique_ptr<GenericClientAsyncReaderWriter> GenericStub::Call(
    ClientContext* context, const grpc::string& method, CompletionQueue* cq,
    void* tag) {
-  return std::unique_ptr<GenericClientAsyncReaderWriter>(
-      GenericClientAsyncReaderWriter::Create(
-          channel_.get(), cq,
-          RpcMethod(method.c_str(), RpcMethod::BIDI_STREAMING), context, tag));
+  return CallInternal(channel_.get(), context, method, cq, true, tag);
}
+// setup a call to a named method
+std::unique_ptr<GenericClientAsyncReaderWriter> GenericStub::PrepareCall(
+    ClientContext* context, const grpc::string& method, CompletionQueue* cq) {
+  return CallInternal(channel_.get(), context, method, cq, false, nullptr);
+}
} // namespace grpc

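The refactor above routes both entry points through CallInternal, with the start flag deciding whether the call begins immediately. A hedged usage sketch; generic_stub, context, cq, and start_tag are assumed to exist already, and StartCall(tag) is the explicit-start entry point introduced alongside this change:

// With PrepareCall the RPC is set up but idle until StartCall is issued,
// letting the caller attach the start event to its own completion-queue tag.
std::unique_ptr<grpc::GenericClientAsyncReaderWriter> rw =
    generic_stub->PrepareCall(&context, "/pkg.Service/Method", &cq);
rw->StartCall(start_tag);  // Call() would have started the RPC already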
@@ -86,10 +86,6 @@
  SetInt(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM, algorithm);
}
-void ChannelArguments::SetGrpclbFallbackTimeout(int fallback_timeout) {
-  SetInt(GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS, fallback_timeout);
-}
-
void ChannelArguments::SetSocketMutator(grpc_socket_mutator* mutator) {
  if (!mutator) {
    return;

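With the SetGrpclbFallbackTimeout convenience setter removed above, equivalent behavior, if still needed, goes through the generic integer setter. A sketch, assuming GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS remains defined in the headers in use:

grpc::ChannelArguments args;
// Generic form of the removed helper; 10000 is an illustrative value in ms.
args.SetInt(GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS, 10000);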
@@ -190,7 +190,7 @@ bool ServerContext::IsCancelled() const {
void ServerContext::set_compression_algorithm(
    grpc_compression_algorithm algorithm) {
-  char* algorithm_name = NULL;
+  const char* algorithm_name = NULL;
  if (!grpc_compression_algorithm_name(algorithm, &algorithm_name)) {
    gpr_log(GPR_ERROR, "Name for compression algorithm '%d' unknown.",
            algorithm);

@@ -15,7 +15,6 @@
    <PackageTags>gRPC RPC Protocol HTTP/2 Auth OAuth2</PackageTags>
    <PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
    <PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
-    <NetStandardImplicitPackageVersion Condition=" '$(TargetFramework)' == 'netstandard1.5' ">1.6.0</NetStandardImplicitPackageVersion>
    <IncludeSymbols>true</IncludeSymbols>
    <IncludeSource>true</IncludeSource>
    <GenerateDocumentationFile>true</GenerateDocumentationFile>

@@ -15,7 +15,6 @@
    <PackageTags>gRPC test testing</PackageTags>
    <PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
    <PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
-    <NetStandardImplicitPackageVersion Condition=" '$(TargetFramework)' == 'netstandard1.5' ">1.6.0</NetStandardImplicitPackageVersion>
    <IncludeSymbols>true</IncludeSymbols>
    <IncludeSource>true</IncludeSource>
    <GenerateDocumentationFile>true</GenerateDocumentationFile>

@@ -8,8 +8,6 @@
    <AssemblyName>Grpc.Core.Tests</AssemblyName>
    <OutputType>Exe</OutputType>
    <PackageId>Grpc.Core.Tests</PackageId>
-    <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
-    <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
    <TreatWarningsAsErrors>true</TreatWarningsAsErrors>
  </PropertyGroup>
@@ -21,7 +19,6 @@
    <PackageReference Include="Newtonsoft.Json" Version="9.0.1" />
    <PackageReference Include="NUnit" Version="3.6.0" />
    <PackageReference Include="NUnitLite" Version="3.6.0" />
-    <PackageReference Include="NUnit.ConsoleRunner" Version="3.6.0" />
    <PackageReference Include="OpenCover" Version="4.6.519" />
    <PackageReference Include="ReportGenerator" Version="2.4.4.0" />
  </ItemGroup>

@@ -14,7 +14,6 @@
    <PackageTags>gRPC RPC Protocol HTTP/2</PackageTags>
    <PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
    <PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
-    <NetStandardImplicitPackageVersion Condition=" '$(TargetFramework)' == 'netstandard1.5' ">1.6.0</NetStandardImplicitPackageVersion>
    <IncludeSymbols>true</IncludeSymbols>
    <IncludeSource>true</IncludeSource>
    <GenerateDocumentationFile>true</GenerateDocumentationFile>
@@ -65,7 +64,7 @@
  <ItemGroup Condition=" '$(TargetFramework)' == 'netstandard1.5' ">
    <PackageReference Include="System.Runtime.Loader" Version="4.0.0" />
    <PackageReference Include="System.Threading.Thread" Version="4.0.0" />
-    <PackageReference Include="System.Threading.ThreadPool" Version="4.0.0" />
+    <PackageReference Include="System.Threading.ThreadPool" Version="4.0.10" />
  </ItemGroup>
  <Import Project="NativeDeps.csproj.include" />

@@ -8,7 +8,6 @@
    <AssemblyName>Grpc.Examples.MathClient</AssemblyName>
    <OutputType>Exe</OutputType>
    <PackageId>Grpc.Examples.MathClient</PackageId>
-    <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
    <TreatWarningsAsErrors>true</TreatWarningsAsErrors>
  </PropertyGroup>

@@ -8,7 +8,6 @@
    <AssemblyName>Grpc.Examples.MathServer</AssemblyName>
    <OutputType>Exe</OutputType>
    <PackageId>Grpc.Examples.MathServer</PackageId>
-    <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
    <TreatWarningsAsErrors>true</TreatWarningsAsErrors>
  </PropertyGroup>

@@ -8,8 +8,6 @@
    <AssemblyName>Grpc.Examples.Tests</AssemblyName>
    <OutputType>Exe</OutputType>
    <PackageId>Grpc.Examples.Tests</PackageId>
-    <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
-    <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
    <TreatWarningsAsErrors>true</TreatWarningsAsErrors>
  </PropertyGroup>

@@ -7,7 +7,6 @@
    <TargetFrameworks>net45;netcoreapp1.0</TargetFrameworks>
    <AssemblyName>Grpc.Examples</AssemblyName>
    <PackageId>Grpc.Examples</PackageId>
-    <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
    <TreatWarningsAsErrors>true</TreatWarningsAsErrors>
  </PropertyGroup>

@@ -8,8 +8,6 @@
    <AssemblyName>Grpc.HealthCheck.Tests</AssemblyName>
    <OutputType>Exe</OutputType>
    <PackageId>Grpc.HealthCheck.Tests</PackageId>
-    <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
-    <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
    <TreatWarningsAsErrors>true</TreatWarningsAsErrors>
  </PropertyGroup>

@@ -14,7 +14,6 @@
    <PackageTags>gRPC health check</PackageTags>
    <PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
    <PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
-    <NetStandardImplicitPackageVersion Condition=" '$(TargetFramework)' == 'netstandard1.5' ">1.6.0</NetStandardImplicitPackageVersion>
    <IncludeSymbols>true</IncludeSymbols>
    <IncludeSource>true</IncludeSource>
    <GenerateDocumentationFile>true</GenerateDocumentationFile>

@@ -8,8 +8,6 @@
    <AssemblyName>Grpc.IntegrationTesting.Client</AssemblyName>
    <OutputType>Exe</OutputType>
    <PackageId>Grpc.IntegrationTesting.Client</PackageId>
-    <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
-    <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
    <TreatWarningsAsErrors>true</TreatWarningsAsErrors>
  </PropertyGroup>

@@ -9,8 +9,6 @@
    <OutputType>Exe</OutputType>
    <PackageId>Grpc.IntegrationTesting.QpsWorker</PackageId>
    <ServerGarbageCollection>true</ServerGarbageCollection>
-    <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
-    <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
    <TreatWarningsAsErrors>true</TreatWarningsAsErrors>
  </PropertyGroup>

@@ -8,8 +8,6 @@
    <AssemblyName>Grpc.IntegrationTesting.Server</AssemblyName>
    <OutputType>Exe</OutputType>
    <PackageId>Grpc.IntegrationTesting.Server</PackageId>
-    <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
-    <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
    <TreatWarningsAsErrors>true</TreatWarningsAsErrors>
  </PropertyGroup>

@@ -8,8 +8,6 @@
    <AssemblyName>Grpc.IntegrationTesting.StressClient</AssemblyName>
    <OutputType>Exe</OutputType>
    <PackageId>Grpc.IntegrationTesting.StressClient</PackageId>
-    <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
-    <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
    <TreatWarningsAsErrors>true</TreatWarningsAsErrors>
  </PropertyGroup>

@@ -8,8 +8,6 @@
    <AssemblyName>Grpc.IntegrationTesting</AssemblyName>
    <OutputType>Exe</OutputType>
    <PackageId>Grpc.IntegrationTesting</PackageId>
-    <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
-    <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
    <TreatWarningsAsErrors>true</TreatWarningsAsErrors>
  </PropertyGroup>
@@ -31,10 +29,6 @@
    <Reference Include="Microsoft.CSharp" />
  </ItemGroup>
-  <ItemGroup Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">
-    <PackageReference Include="System.Linq.Expressions" Version="4.1.1" />
-  </ItemGroup>
-
  <ItemGroup>
    <Compile Include="..\Grpc.Core\Version.cs" />
  </ItemGroup>

@@ -8,8 +8,6 @@
    <AssemblyName>Grpc.Microbenchmarks</AssemblyName>
    <OutputType>Exe</OutputType>
    <PackageId>Grpc.Microbenchmarks</PackageId>
-    <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
-    <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
    <TreatWarningsAsErrors>true</TreatWarningsAsErrors>
  </PropertyGroup>

Some files were not shown because too many files have changed in this diff.