Merge remote-tracking branch 'upstream/master' into pick_first_subchannel_list

Branch: pull/12878/head
Author: Mark D. Roth
Commit: 0e8cad5bba
Files changed:
  1. BUILD (6)
  2. CMakeLists.txt (136)
  3. INSTALL.md (12)
  4. Makefile (161)
  5. binding.gyp (2)
  6. build.yaml (41)
  7. config.m4 (3)
  8. config.w32 (3)
  9. doc/core/moving-to-c++.md (60)
  10. doc/load-balancing.md (7)
  11. gRPC-Core.podspec (21)
  12. gRPC.podspec (3)
  13. grpc.gemspec (6)
  14. grpc.gyp (5)
  15. include/grpc++/server_builder.h (2)
  16. include/grpc/impl/codegen/atm_gcc_atomic.h (1)
  17. include/grpc/impl/codegen/atm_gcc_sync.h (1)
  18. include/grpc/impl/codegen/atm_windows.h (1)
  19. include/grpc/module.modulemap (32)
  20. package.xml (6)
  21. src/core/ext/census/base_resources.h (2)
  22. src/core/ext/census/census_interface.h (2)
  23. src/core/ext/census/census_log.h (2)
  24. src/core/ext/census/hash_table.h (2)
  25. src/core/ext/census/mlog.h (2)
  26. src/core/ext/census/resource.h (2)
  27. src/core/ext/census/trace_context.h (2)
  28. src/core/ext/census/trace_propagation.h (2)
  29. src/core/ext/census/tracing.h (2)
  30. src/core/ext/census/window_stats.h (2)
  31. src/core/ext/filters/client_channel/channel_connectivity.cc (4)
  32. src/core/ext/filters/client_channel/client_channel.cc (34)
  33. src/core/ext/filters/client_channel/client_channel.h (2)
  34. src/core/ext/filters/client_channel/client_channel_factory.h (2)
  35. src/core/ext/filters/client_channel/connector.h (4)
  36. src/core/ext/filters/client_channel/http_connect_handshaker.h (2)
  37. src/core/ext/filters/client_channel/http_proxy.h (2)
  38. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (132)
  39. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h (2)
  40. src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc (11)
  41. src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h (3)
  42. src/core/ext/filters/client_channel/lb_policy_factory.h (2)
  43. src/core/ext/filters/client_channel/lb_policy_registry.h (2)
  44. src/core/ext/filters/client_channel/parse_address.h (2)
  45. src/core/ext/filters/client_channel/proxy_mapper.h (2)
  46. src/core/ext/filters/client_channel/proxy_mapper_registry.h (2)
  47. src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc (30)
  48. src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc (34)
  49. src/core/ext/filters/client_channel/resolver_factory.h (2)
  50. src/core/ext/filters/client_channel/resolver_registry.h (2)
  51. src/core/ext/filters/client_channel/retry_throttle.h (2)
  52. src/core/ext/filters/client_channel/subchannel.cc (25)
  53. src/core/ext/filters/client_channel/subchannel.h (2)
  54. src/core/ext/filters/client_channel/subchannel_index.h (2)
  55. src/core/ext/filters/client_channel/uri_parser.h (2)
  56. src/core/ext/filters/deadline/deadline_filter.cc (17)
  57. src/core/ext/filters/deadline/deadline_filter.h (7)
  58. src/core/ext/filters/max_age/max_age_filter.cc (75)
  59. src/core/ext/filters/workarounds/workaround_utils.h (2)
  60. src/core/ext/transport/chttp2/alpn/alpn.h (2)
  61. src/core/ext/transport/chttp2/server/chttp2_server.cc (4)
  62. src/core/ext/transport/chttp2/server/chttp2_server.h (2)
  63. src/core/ext/transport/chttp2/transport/bin_decoder.h (2)
  64. src/core/ext/transport/chttp2/transport/bin_encoder.h (2)
  65. src/core/ext/transport/chttp2/transport/chttp2_transport.cc (259)
  66. src/core/ext/transport/chttp2/transport/flow_control.cc (89)
  67. src/core/ext/transport/chttp2/transport/frame_data.h (2)
  68. src/core/ext/transport/chttp2/transport/frame_goaway.h (2)
  69. src/core/ext/transport/chttp2/transport/frame_ping.cc (13)
  70. src/core/ext/transport/chttp2/transport/frame_ping.h (2)
  71. src/core/ext/transport/chttp2/transport/frame_rst_stream.h (2)
  72. src/core/ext/transport/chttp2/transport/frame_settings.h (2)
  73. src/core/ext/transport/chttp2/transport/frame_window_update.h (2)
  74. src/core/ext/transport/chttp2/transport/hpack_encoder.cc (10)
  75. src/core/ext/transport/chttp2/transport/hpack_encoder.h (2)
  76. src/core/ext/transport/chttp2/transport/hpack_parser.h (2)
  77. src/core/ext/transport/chttp2/transport/http2_settings.h (2)
  78. src/core/ext/transport/chttp2/transport/incoming_metadata.cc (4)
  79. src/core/ext/transport/chttp2/transport/incoming_metadata.h (4)
  80. src/core/ext/transport/chttp2/transport/internal.h (40)
  81. src/core/ext/transport/chttp2/transport/parsing.cc (26)
  82. src/core/ext/transport/chttp2/transport/stream_map.h (2)
  83. src/core/ext/transport/chttp2/transport/writing.cc (768)
  84. src/core/ext/transport/inproc/inproc_transport.cc (685)
  85. src/core/lib/backoff/backoff.cc (33)
  86. src/core/lib/backoff/backoff.h (34)
  87. src/core/lib/channel/channel_args.h (2)
  88. src/core/lib/channel/channel_stack.h (2)
  89. src/core/lib/channel/connected_channel.h (2)
  90. src/core/lib/channel/handshaker.cc (6)
  91. src/core/lib/channel/handshaker.h (4)
  92. src/core/lib/channel/handshaker_factory.h (2)
  93. src/core/lib/channel/handshaker_registry.h (2)
  94. src/core/lib/compression/algorithm_metadata.h (2)
  95. src/core/lib/compression/message_compress.h (2)
  96. src/core/lib/debug/stats_data.cc (2)
  97. src/core/lib/debug/stats_data.h (4)
  98. src/core/lib/debug/stats_data.yaml (3)
  99. src/core/lib/debug/stats_data_bq_schema.sql (1)
  100. src/core/lib/http/format_request.h (2)
Some files were not shown because too many files have changed in this diff.

@ -467,7 +467,6 @@ grpc_cc_library(
"src/core/lib/support/arena.cc",
"src/core/lib/support/atm.cc",
"src/core/lib/support/avl.cc",
"src/core/lib/support/backoff.cc",
"src/core/lib/support/cmdline.cc",
"src/core/lib/support/cpu_iphone.cc",
"src/core/lib/support/cpu_linux.cc",
@ -514,8 +513,6 @@ grpc_cc_library(
"src/core/lib/support/atomic.h",
"src/core/lib/support/atomic_with_atm.h",
"src/core/lib/support/atomic_with_std.h",
"src/core/lib/support/backoff.h",
"src/core/lib/support/block_annotate.h",
"src/core/lib/support/env.h",
"src/core/lib/support/memory.h",
"src/core/lib/support/mpscq.h",
@ -568,6 +565,7 @@ grpc_cc_library(
grpc_cc_library(
name = "grpc_base_c",
srcs = [
"src/core/lib/backoff/backoff.cc",
"src/core/lib/channel/channel_args.cc",
"src/core/lib/channel/channel_stack.cc",
"src/core/lib/channel/channel_stack_builder.cc",
@ -762,6 +760,7 @@ grpc_cc_library(
"src/core/lib/iomgr/socket_utils_posix.h",
"src/core/lib/iomgr/socket_windows.h",
"src/core/lib/iomgr/sys_epoll_wrapper.h",
"src/core/lib/iomgr/block_annotate.h",
"src/core/lib/iomgr/tcp_client.h",
"src/core/lib/iomgr/tcp_client_posix.h",
"src/core/lib/iomgr/tcp_posix.h",
@ -817,6 +816,7 @@ grpc_cc_library(
"src/core/lib/transport/timeout_encoding.h",
"src/core/lib/transport/transport.h",
"src/core/lib/transport/transport_impl.h",
"src/core/lib/backoff/backoff.h",
],
external_deps = [
"zlib",

@ -379,6 +379,7 @@ add_dependencies(buildtests_c algorithm_test)
add_dependencies(buildtests_c alloc_test)
add_dependencies(buildtests_c alpn_test)
add_dependencies(buildtests_c arena_test)
add_dependencies(buildtests_c backoff_test)
add_dependencies(buildtests_c bad_server_response_test)
add_dependencies(buildtests_c bdp_estimator_test)
add_dependencies(buildtests_c bin_decoder_test)
@ -428,7 +429,6 @@ if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_c goaway_server_test)
endif()
add_dependencies(buildtests_c gpr_avl_test)
add_dependencies(buildtests_c gpr_backoff_test)
add_dependencies(buildtests_c gpr_cmdline_test)
add_dependencies(buildtests_c gpr_cpu_test)
add_dependencies(buildtests_c gpr_env_test)
@ -592,7 +592,6 @@ add_dependencies(buildtests_c h2_sockpair_test)
add_dependencies(buildtests_c h2_sockpair+trace_test)
add_dependencies(buildtests_c h2_sockpair_1byte_test)
add_dependencies(buildtests_c h2_ssl_test)
add_dependencies(buildtests_c h2_ssl_cert_test)
add_dependencies(buildtests_c h2_ssl_proxy_test)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_c h2_uds_test)
@ -704,6 +703,7 @@ add_dependencies(buildtests_cxx grpc_tool_test)
add_dependencies(buildtests_cxx grpclb_api_test)
add_dependencies(buildtests_cxx grpclb_end2end_test)
add_dependencies(buildtests_cxx grpclb_test)
add_dependencies(buildtests_cxx h2_ssl_cert_test)
add_dependencies(buildtests_cxx health_service_end2end_test)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx http2_client)
@ -786,7 +786,6 @@ add_library(gpr
src/core/lib/support/arena.cc
src/core/lib/support/atm.cc
src/core/lib/support/avl.cc
src/core/lib/support/backoff.cc
src/core/lib/support/cmdline.cc
src/core/lib/support/cpu_iphone.cc
src/core/lib/support/cpu_linux.cc
@ -955,6 +954,7 @@ endif (gRPC_BUILD_TESTS)
add_library(grpc
src/core/lib/surface/init.cc
src/core/lib/backoff/backoff.cc
src/core/lib/channel/channel_args.cc
src/core/lib/channel/channel_stack.cc
src/core/lib/channel/channel_stack_builder.cc
@ -1307,6 +1307,7 @@ endif()
add_library(grpc_cronet
src/core/lib/surface/init.cc
src/core/lib/backoff/backoff.cc
src/core/lib/channel/channel_args.cc
src/core/lib/channel/channel_stack.cc
src/core/lib/channel/channel_stack_builder.cc
@ -1626,6 +1627,7 @@ add_library(grpc_test_util
test/core/util/port_server_client.c
test/core/util/slice_splitter.c
test/core/util/trickle_endpoint.c
src/core/lib/backoff/backoff.cc
src/core/lib/channel/channel_args.cc
src/core/lib/channel/channel_stack.cc
src/core/lib/channel/channel_stack_builder.cc
@ -1889,6 +1891,7 @@ add_library(grpc_test_util_unsecure
test/core/util/port_server_client.c
test/core/util/slice_splitter.c
test/core/util/trickle_endpoint.c
src/core/lib/backoff/backoff.cc
src/core/lib/channel/channel_args.cc
src/core/lib/channel/channel_stack.cc
src/core/lib/channel/channel_stack_builder.cc
@ -2138,6 +2141,7 @@ endif (gRPC_BUILD_TESTS)
add_library(grpc_unsecure
src/core/lib/surface/init.cc
src/core/lib/surface/init_unsecure.cc
src/core/lib/backoff/backoff.cc
src/core/lib/channel/channel_args.cc
src/core/lib/channel/channel_stack.cc
src/core/lib/channel/channel_stack_builder.cc
@ -2896,6 +2900,7 @@ add_library(grpc++_cronet
src/core/ext/transport/chttp2/transport/stream_map.cc
src/core/ext/transport/chttp2/transport/varint.cc
src/core/ext/transport/chttp2/transport/writing.cc
src/core/lib/backoff/backoff.cc
src/core/lib/channel/channel_args.cc
src/core/lib/channel/channel_stack.cc
src/core/lib/channel/channel_stack_builder.cc
@ -5197,6 +5202,35 @@ target_link_libraries(arena_test
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(backoff_test
test/core/backoff/backoff_test.c
)
target_include_directories(backoff_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CARES_INCLUDE_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
)
target_link_libraries(backoff_test
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_test_util
grpc
gpr_test_util
gpr
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(bad_server_response_test
test/core/end2end/bad_server_response_test.c
)
@ -6297,33 +6331,6 @@ target_link_libraries(gpr_avl_test
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(gpr_backoff_test
test/core/support/backoff_test.c
)
target_include_directories(gpr_backoff_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CARES_INCLUDE_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
)
target_link_libraries(gpr_backoff_test
${_gRPC_ALLTARGETS_LIBRARIES}
gpr_test_util
gpr
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(gpr_cmdline_test
test/core/support/cmdline_test.c
)
@ -11158,6 +11165,45 @@ target_link_libraries(grpclb_test
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(h2_ssl_cert_test
test/core/end2end/h2_ssl_cert_test.cc
third_party/googletest/googletest/src/gtest-all.cc
third_party/googletest/googlemock/src/gmock-all.cc
)
target_include_directories(h2_ssl_cert_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CARES_INCLUDE_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
PRIVATE third_party/googletest/googletest/include
PRIVATE third_party/googletest/googletest
PRIVATE third_party/googletest/googlemock/include
PRIVATE third_party/googletest/googlemock
PRIVATE ${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(h2_ssl_cert_test
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_test_util
grpc++
grpc
gpr_test_util
gpr
${_gRPC_GFLAGS_LIBRARIES}
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(health_service_end2end_test
test/cpp/end2end/health_service_end2end_test.cc
third_party/googletest/googletest/src/gtest-all.cc
@ -13580,36 +13626,6 @@ target_link_libraries(h2_ssl_test
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(h2_ssl_cert_test
test/core/end2end/fixtures/h2_ssl_cert.c
)
target_include_directories(h2_ssl_cert_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CARES_INCLUDE_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
)
target_link_libraries(h2_ssl_cert_test
${_gRPC_ALLTARGETS_LIBRARIES}
end2end_tests
grpc_test_util
grpc
gpr_test_util
gpr
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(h2_ssl_proxy_test
test/core/end2end/fixtures/h2_ssl_proxy.c
)

@ -94,6 +94,7 @@ on experience with the tools involved.
### Building using CMake (RECOMMENDED)
Builds gRPC C and C++ with boringssl.
- Install Visual Studio 2015 or 2017 (Visual C++ compiler will be used).
- Install [CMake](https://cmake.org/download/).
- Install [Active State Perl](https://www.activestate.com/activeperl/) (`choco install activeperl`)
- Install [Ninja](https://ninja-build.org/) (`choco install ninja`)
@ -101,7 +102,9 @@ Builds gRPC C and C++ with boringssl.
- Install [yasm](http://yasm.tortall.net/) and add it to `PATH` (`choco install yasm`)
- Run these commands in the repo root directory
Using Ninja (faster build, supports boringssl's assembly optimizations)
#### cmake: Using Ninja (faster build, supports boringssl's assembly optimizations).
Please note that when using Ninja, you'll still need Visual C++ (part of Visual Studio)
installed to be able to compile the C/C++ sources.
```
> md .build
> cd .build
@ -110,7 +113,12 @@ Using Ninja (faster build, supports boringssl's assembly optimizations)
> cmake --build .
```
Using Visual Studio 2015 (can only build with OPENSSL_NO_ASM)
#### cmake: Using Visual Studio 2015 (can only build with OPENSSL_NO_ASM).
When using the "Visual Studio" generator,
cmake will generate a solution (`grpc.sln`) that contains a VS project for
every target defined in `CMakeLists.txt` (plus a few extra convenience projects
added automatically by cmake). After opening the solution with Visual Studio
you will be able to browse and build the code as usual.
```
> md .build
> cd .build

@ -950,6 +950,7 @@ alloc_test: $(BINDIR)/$(CONFIG)/alloc_test
alpn_test: $(BINDIR)/$(CONFIG)/alpn_test
api_fuzzer: $(BINDIR)/$(CONFIG)/api_fuzzer
arena_test: $(BINDIR)/$(CONFIG)/arena_test
backoff_test: $(BINDIR)/$(CONFIG)/backoff_test
bad_server_response_test: $(BINDIR)/$(CONFIG)/bad_server_response_test
bdp_estimator_test: $(BINDIR)/$(CONFIG)/bdp_estimator_test
bin_decoder_test: $(BINDIR)/$(CONFIG)/bin_decoder_test
@ -988,7 +989,6 @@ gen_legal_metadata_characters: $(BINDIR)/$(CONFIG)/gen_legal_metadata_characters
gen_percent_encoding_tables: $(BINDIR)/$(CONFIG)/gen_percent_encoding_tables
goaway_server_test: $(BINDIR)/$(CONFIG)/goaway_server_test
gpr_avl_test: $(BINDIR)/$(CONFIG)/gpr_avl_test
gpr_backoff_test: $(BINDIR)/$(CONFIG)/gpr_backoff_test
gpr_cmdline_test: $(BINDIR)/$(CONFIG)/gpr_cmdline_test
gpr_cpu_test: $(BINDIR)/$(CONFIG)/gpr_cpu_test
gpr_env_test: $(BINDIR)/$(CONFIG)/gpr_env_test
@ -1145,6 +1145,7 @@ grpc_tool_test: $(BINDIR)/$(CONFIG)/grpc_tool_test
grpclb_api_test: $(BINDIR)/$(CONFIG)/grpclb_api_test
grpclb_end2end_test: $(BINDIR)/$(CONFIG)/grpclb_end2end_test
grpclb_test: $(BINDIR)/$(CONFIG)/grpclb_test
h2_ssl_cert_test: $(BINDIR)/$(CONFIG)/h2_ssl_cert_test
health_service_end2end_test: $(BINDIR)/$(CONFIG)/health_service_end2end_test
http2_client: $(BINDIR)/$(CONFIG)/http2_client
hybrid_end2end_test: $(BINDIR)/$(CONFIG)/hybrid_end2end_test
@ -1247,7 +1248,6 @@ h2_sockpair_test: $(BINDIR)/$(CONFIG)/h2_sockpair_test
h2_sockpair+trace_test: $(BINDIR)/$(CONFIG)/h2_sockpair+trace_test
h2_sockpair_1byte_test: $(BINDIR)/$(CONFIG)/h2_sockpair_1byte_test
h2_ssl_test: $(BINDIR)/$(CONFIG)/h2_ssl_test
h2_ssl_cert_test: $(BINDIR)/$(CONFIG)/h2_ssl_cert_test
h2_ssl_proxy_test: $(BINDIR)/$(CONFIG)/h2_ssl_proxy_test
h2_uds_test: $(BINDIR)/$(CONFIG)/h2_uds_test
inproc_test: $(BINDIR)/$(CONFIG)/inproc_test
@ -1350,6 +1350,7 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/alloc_test \
$(BINDIR)/$(CONFIG)/alpn_test \
$(BINDIR)/$(CONFIG)/arena_test \
$(BINDIR)/$(CONFIG)/backoff_test \
$(BINDIR)/$(CONFIG)/bad_server_response_test \
$(BINDIR)/$(CONFIG)/bdp_estimator_test \
$(BINDIR)/$(CONFIG)/bin_decoder_test \
@ -1383,7 +1384,6 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/fling_test \
$(BINDIR)/$(CONFIG)/goaway_server_test \
$(BINDIR)/$(CONFIG)/gpr_avl_test \
$(BINDIR)/$(CONFIG)/gpr_backoff_test \
$(BINDIR)/$(CONFIG)/gpr_cmdline_test \
$(BINDIR)/$(CONFIG)/gpr_cpu_test \
$(BINDIR)/$(CONFIG)/gpr_env_test \
@ -1507,7 +1507,6 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/h2_sockpair+trace_test \
$(BINDIR)/$(CONFIG)/h2_sockpair_1byte_test \
$(BINDIR)/$(CONFIG)/h2_ssl_test \
$(BINDIR)/$(CONFIG)/h2_ssl_cert_test \
$(BINDIR)/$(CONFIG)/h2_ssl_proxy_test \
$(BINDIR)/$(CONFIG)/h2_uds_test \
$(BINDIR)/$(CONFIG)/inproc_test \
@ -1583,6 +1582,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/grpclb_api_test \
$(BINDIR)/$(CONFIG)/grpclb_end2end_test \
$(BINDIR)/$(CONFIG)/grpclb_test \
$(BINDIR)/$(CONFIG)/h2_ssl_cert_test \
$(BINDIR)/$(CONFIG)/health_service_end2end_test \
$(BINDIR)/$(CONFIG)/http2_client \
$(BINDIR)/$(CONFIG)/hybrid_end2end_test \
@ -1703,6 +1703,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/grpclb_api_test \
$(BINDIR)/$(CONFIG)/grpclb_end2end_test \
$(BINDIR)/$(CONFIG)/grpclb_test \
$(BINDIR)/$(CONFIG)/h2_ssl_cert_test \
$(BINDIR)/$(CONFIG)/health_service_end2end_test \
$(BINDIR)/$(CONFIG)/http2_client \
$(BINDIR)/$(CONFIG)/hybrid_end2end_test \
@ -1761,6 +1762,8 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/alpn_test || ( echo test alpn_test failed ; exit 1 )
$(E) "[RUN] Testing arena_test"
$(Q) $(BINDIR)/$(CONFIG)/arena_test || ( echo test arena_test failed ; exit 1 )
$(E) "[RUN] Testing backoff_test"
$(Q) $(BINDIR)/$(CONFIG)/backoff_test || ( echo test backoff_test failed ; exit 1 )
$(E) "[RUN] Testing bad_server_response_test"
$(Q) $(BINDIR)/$(CONFIG)/bad_server_response_test || ( echo test bad_server_response_test failed ; exit 1 )
$(E) "[RUN] Testing bdp_estimator_test"
@ -1823,8 +1826,6 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/goaway_server_test || ( echo test goaway_server_test failed ; exit 1 )
$(E) "[RUN] Testing gpr_avl_test"
$(Q) $(BINDIR)/$(CONFIG)/gpr_avl_test || ( echo test gpr_avl_test failed ; exit 1 )
$(E) "[RUN] Testing gpr_backoff_test"
$(Q) $(BINDIR)/$(CONFIG)/gpr_backoff_test || ( echo test gpr_backoff_test failed ; exit 1 )
$(E) "[RUN] Testing gpr_cmdline_test"
$(Q) $(BINDIR)/$(CONFIG)/gpr_cmdline_test || ( echo test gpr_cmdline_test failed ; exit 1 )
$(E) "[RUN] Testing gpr_cpu_test"
@ -2109,6 +2110,8 @@ test_cxx: buildtests_cxx
$(Q) $(BINDIR)/$(CONFIG)/grpclb_end2end_test || ( echo test grpclb_end2end_test failed ; exit 1 )
$(E) "[RUN] Testing grpclb_test"
$(Q) $(BINDIR)/$(CONFIG)/grpclb_test || ( echo test grpclb_test failed ; exit 1 )
$(E) "[RUN] Testing h2_ssl_cert_test"
$(Q) $(BINDIR)/$(CONFIG)/h2_ssl_cert_test || ( echo test h2_ssl_cert_test failed ; exit 1 )
$(E) "[RUN] Testing health_service_end2end_test"
$(Q) $(BINDIR)/$(CONFIG)/health_service_end2end_test || ( echo test health_service_end2end_test failed ; exit 1 )
$(E) "[RUN] Testing interop_test"
@ -2800,7 +2803,6 @@ LIBGPR_SRC = \
src/core/lib/support/arena.cc \
src/core/lib/support/atm.cc \
src/core/lib/support/avl.cc \
src/core/lib/support/backoff.cc \
src/core/lib/support/cmdline.cc \
src/core/lib/support/cpu_iphone.cc \
src/core/lib/support/cpu_linux.cc \
@ -2946,6 +2948,7 @@ endif
LIBGRPC_SRC = \
src/core/lib/surface/init.cc \
src/core/lib/backoff/backoff.cc \
src/core/lib/channel/channel_args.cc \
src/core/lib/channel/channel_stack.cc \
src/core/lib/channel/channel_stack_builder.cc \
@ -3298,6 +3301,7 @@ endif
LIBGRPC_CRONET_SRC = \
src/core/lib/surface/init.cc \
src/core/lib/backoff/backoff.cc \
src/core/lib/channel/channel_args.cc \
src/core/lib/channel/channel_stack.cc \
src/core/lib/channel/channel_stack_builder.cc \
@ -3616,6 +3620,7 @@ LIBGRPC_TEST_UTIL_SRC = \
test/core/util/port_server_client.c \
test/core/util/slice_splitter.c \
test/core/util/trickle_endpoint.c \
src/core/lib/backoff/backoff.cc \
src/core/lib/channel/channel_args.cc \
src/core/lib/channel/channel_stack.cc \
src/core/lib/channel/channel_stack_builder.cc \
@ -3870,6 +3875,7 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \
test/core/util/port_server_client.c \
test/core/util/slice_splitter.c \
test/core/util/trickle_endpoint.c \
src/core/lib/backoff/backoff.cc \
src/core/lib/channel/channel_args.cc \
src/core/lib/channel/channel_stack.cc \
src/core/lib/channel/channel_stack_builder.cc \
@ -4097,6 +4103,7 @@ endif
LIBGRPC_UNSECURE_SRC = \
src/core/lib/surface/init.cc \
src/core/lib/surface/init_unsecure.cc \
src/core/lib/backoff/backoff.cc \
src/core/lib/channel/channel_args.cc \
src/core/lib/channel/channel_stack.cc \
src/core/lib/channel/channel_stack_builder.cc \
@ -4838,6 +4845,7 @@ LIBGRPC++_CRONET_SRC = \
src/core/ext/transport/chttp2/transport/stream_map.cc \
src/core/ext/transport/chttp2/transport/varint.cc \
src/core/ext/transport/chttp2/transport/writing.cc \
src/core/lib/backoff/backoff.cc \
src/core/lib/channel/channel_args.cc \
src/core/lib/channel/channel_stack.cc \
src/core/lib/channel/channel_stack_builder.cc \
@ -8894,6 +8902,38 @@ endif
endif
BACKOFF_TEST_SRC = \
test/core/backoff/backoff_test.c \
BACKOFF_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(BACKOFF_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/backoff_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/backoff_test: $(BACKOFF_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(BACKOFF_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/backoff_test
endif
$(OBJDIR)/$(CONFIG)/test/core/backoff/backoff_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_backoff_test: $(BACKOFF_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(BACKOFF_TEST_OBJS:.o=.dep)
endif
endif
BAD_SERVER_RESPONSE_TEST_SRC = \
test/core/end2end/bad_server_response_test.c \
@ -10113,38 +10153,6 @@ endif
endif
GPR_BACKOFF_TEST_SRC = \
test/core/support/backoff_test.c \
GPR_BACKOFF_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GPR_BACKOFF_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/gpr_backoff_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/gpr_backoff_test: $(GPR_BACKOFF_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(GPR_BACKOFF_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/gpr_backoff_test
endif
$(OBJDIR)/$(CONFIG)/test/core/support/backoff_test.o: $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_gpr_backoff_test: $(GPR_BACKOFF_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(GPR_BACKOFF_TEST_OBJS:.o=.dep)
endif
endif
GPR_CMDLINE_TEST_SRC = \
test/core/support/cmdline_test.c \
@ -15650,6 +15658,49 @@ endif
$(OBJDIR)/$(CONFIG)/test/cpp/grpclb/grpclb_test.o: $(GENDIR)/src/proto/grpc/lb/v1/load_balancer.pb.cc $(GENDIR)/src/proto/grpc/lb/v1/load_balancer.grpc.pb.cc
H2_SSL_CERT_TEST_SRC = \
test/core/end2end/h2_ssl_cert_test.cc \
H2_SSL_CERT_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(H2_SSL_CERT_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/h2_ssl_cert_test: openssl_dep_error
else
ifeq ($(NO_PROTOBUF),true)
# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
$(BINDIR)/$(CONFIG)/h2_ssl_cert_test: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/h2_ssl_cert_test: $(PROTOBUF_DEP) $(H2_SSL_CERT_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(H2_SSL_CERT_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/h2_ssl_cert_test
endif
endif
$(OBJDIR)/$(CONFIG)/test/core/end2end/h2_ssl_cert_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_h2_ssl_cert_test: $(H2_SSL_CERT_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(H2_SSL_CERT_TEST_OBJS:.o=.dep)
endif
endif
HEALTH_SERVICE_END2END_TEST_SRC = \
test/cpp/end2end/health_service_end2end_test.cc \
@ -19097,38 +19148,6 @@ endif
endif
H2_SSL_CERT_TEST_SRC = \
test/core/end2end/fixtures/h2_ssl_cert.c \
H2_SSL_CERT_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(H2_SSL_CERT_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/h2_ssl_cert_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/h2_ssl_cert_test: $(H2_SSL_CERT_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libend2end_tests.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(H2_SSL_CERT_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libend2end_tests.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/h2_ssl_cert_test
endif
$(OBJDIR)/$(CONFIG)/test/core/end2end/fixtures/h2_ssl_cert.o: $(LIBDIR)/$(CONFIG)/libend2end_tests.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_h2_ssl_cert_test: $(H2_SSL_CERT_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(H2_SSL_CERT_TEST_OBJS:.o=.dep)
endif
endif
H2_SSL_PROXY_TEST_SRC = \
test/core/end2end/fixtures/h2_ssl_proxy.c \

@ -600,7 +600,6 @@
'src/core/lib/support/arena.cc',
'src/core/lib/support/atm.cc',
'src/core/lib/support/avl.cc',
'src/core/lib/support/backoff.cc',
'src/core/lib/support/cmdline.cc',
'src/core/lib/support/cpu_iphone.cc',
'src/core/lib/support/cpu_linux.cc',
@ -658,6 +657,7 @@
],
'sources': [
'src/core/lib/surface/init.cc',
'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',

@ -66,7 +66,6 @@ filegroups:
- src/core/lib/support/arena.cc
- src/core/lib/support/atm.cc
- src/core/lib/support/avl.cc
- src/core/lib/support/backoff.cc
- src/core/lib/support/cmdline.cc
- src/core/lib/support/cpu_iphone.cc
- src/core/lib/support/cpu_linux.cc
@ -143,8 +142,6 @@ filegroups:
- src/core/lib/support/atomic.h
- src/core/lib/support/atomic_with_atm.h
- src/core/lib/support/atomic_with_std.h
- src/core/lib/support/backoff.h
- src/core/lib/support/block_annotate.h
- src/core/lib/support/env.h
- src/core/lib/support/memory.h
- src/core/lib/support/mpscq.h
@ -185,6 +182,7 @@ filegroups:
- grpc++_codegen_base
- name: grpc_base
src:
- src/core/lib/backoff/backoff.cc
- src/core/lib/channel/channel_args.cc
- src/core/lib/channel/channel_stack.cc
- src/core/lib/channel/channel_stack_builder.cc
@ -337,6 +335,7 @@ filegroups:
- include/grpc/status.h
- include/grpc/support/workaround_list.h
headers:
- src/core/lib/backoff/backoff.h
- src/core/lib/channel/channel_args.h
- src/core/lib/channel/channel_stack.h
- src/core/lib/channel/channel_stack_builder.h
@ -355,6 +354,7 @@ filegroups:
- src/core/lib/http/format_request.h
- src/core/lib/http/httpcli.h
- src/core/lib/http/parser.h
- src/core/lib/iomgr/block_annotate.h
- src/core/lib/iomgr/call_combiner.h
- src/core/lib/iomgr/closure.h
- src/core/lib/iomgr/combiner.h
@ -1785,6 +1785,16 @@ targets:
deps:
- gpr_test_util
- gpr
- name: backoff_test
build: test
language: c
src:
- test/core/backoff/backoff_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: bad_server_response_test
build: test
language: c
@ -2213,14 +2223,6 @@ targets:
deps:
- gpr_test_util
- gpr
- name: gpr_backoff_test
build: test
language: c
src:
- test/core/support/backoff_test.c
deps:
- gpr_test_util
- gpr
- name: gpr_cmdline_test
build: test
language: c
@ -2279,6 +2281,7 @@ targets:
deps:
- gpr_test_util
- gpr
uses_polling: false
- name: gpr_spinlock_test
cpu_cost: 3
build: test
@ -4166,6 +4169,22 @@ targets:
excluded_poll_engines:
- poll
- poll-cv
- name: h2_ssl_cert_test
gtest: true
build: test
language: c++
headers:
- test/core/end2end/end2end_tests.h
src:
- test/core/end2end/h2_ssl_cert_test.cc
deps:
- grpc_test_util
- grpc++
- grpc
- gpr_test_util
- gpr
uses:
- grpc++_test
- name: health_service_end2end_test
gtest: true
build: test

@ -45,7 +45,6 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/support/arena.cc \
src/core/lib/support/atm.cc \
src/core/lib/support/avl.cc \
src/core/lib/support/backoff.cc \
src/core/lib/support/cmdline.cc \
src/core/lib/support/cpu_iphone.cc \
src/core/lib/support/cpu_linux.cc \
@ -86,6 +85,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/support/tmpfile_windows.cc \
src/core/lib/support/wrap_memcpy.cc \
src/core/lib/surface/init.cc \
src/core/lib/backoff/backoff.cc \
src/core/lib/channel/channel_args.cc \
src/core/lib/channel/channel_stack.cc \
src/core/lib/channel/channel_stack_builder.cc \
@ -688,6 +688,7 @@ if test "$PHP_GRPC" != "no"; then
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/transport/chttp2/server/secure)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/transport/chttp2/transport)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/transport/inproc)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/backoff)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/channel)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/compression)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/debug)

@ -22,7 +22,6 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\support\\arena.cc " +
"src\\core\\lib\\support\\atm.cc " +
"src\\core\\lib\\support\\avl.cc " +
"src\\core\\lib\\support\\backoff.cc " +
"src\\core\\lib\\support\\cmdline.cc " +
"src\\core\\lib\\support\\cpu_iphone.cc " +
"src\\core\\lib\\support\\cpu_linux.cc " +
@ -63,6 +62,7 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\support\\tmpfile_windows.cc " +
"src\\core\\lib\\support\\wrap_memcpy.cc " +
"src\\core\\lib\\surface\\init.cc " +
"src\\core\\lib\\backoff\\backoff.cc " +
"src\\core\\lib\\channel\\channel_args.cc " +
"src\\core\\lib\\channel\\channel_stack.cc " +
"src\\core\\lib\\channel\\channel_stack_builder.cc " +
@ -700,6 +700,7 @@ if (PHP_GRPC != "no") {
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\transport\\chttp2\\transport");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\transport\\inproc");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\backoff");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\channel");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\compression");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\debug");

@ -0,0 +1,60 @@
# Moving gRPC core to C++
October 2017
ctiller, markdroth, vjpai
## Background and Goal
gRPC core was originally written in C89 for several reasons
(possibility of kernel integration, ease of wrapping, compiler
support, etc). Over time, this was changed to C99 as all relevant
compilers in active use came to support C99 effectively.
[Now, gRPC core is C++](https://github.com/grpc/proposal/blob/master/L6-allow-c%2B%2B-in-grpc-core.md)
(although the code is still idiomatically C code) with C linkage for
public functions. Throughout all of these transitions, the public
header files are committed to remaining in C89.
The goal now is to make the gRPC core implementation true idiomatic
C++ compatible with
[Google's C++ style guide](https://google.github.io/styleguide/cppguide.html).
## Constraints
- No use of standard library
- Standard library makes wrapping difficult/impossible and also reduces platform portability
- This takes precedence over using C++ style guide
- But lambdas are ok
- As are third-party libraries that meet our build requirements (such as many parts of abseil)
- There will be some C++ features that don't work
- `new` and `delete`
- Pure virtual functions are not allowed, because the handler that prints "Pure Virtual Function called" is part of the standard library
- Use a `#define GRPC_ABSTRACT {GPR_ASSERT(false);}` instead of `= 0;` (see the sketch after this list)
- The sanity check for making sure that we don't depend on libstdc++ is that at least some tests should explicitly not use it
- Most tests can migrate to use gtest
- There are a tremendous number of code paths that can now be exposed to unit tests because of the use of gtest and C++
- But at least some tests should not use gtest
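A minimal sketch of the `GRPC_ABSTRACT` idea above; only the macro itself comes from this doc, while the `Transport` interface is a hypothetical example for illustration:
```
#include <grpc/support/log.h>  // for GPR_ASSERT

// Instead of declaring a method `= 0;` (pure virtual), give it a body that
// aborts loudly; this avoids pulling the standard library's
// "Pure Virtual Function called" handler into core.
#define GRPC_ABSTRACT \
  { GPR_ASSERT(false); }

// Hypothetical core interface using the macro (not actual gRPC code).
class Transport {
 public:
  virtual ~Transport() {}
  // Subclasses are expected to override this; calling the base
  // implementation is a bug and crashes via GPR_ASSERT(false).
  virtual void PerformOp() GRPC_ABSTRACT;
};
```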
## Roadmap
- What should be the phases of getting code converted to idiomatic C++
- Opportunistically do leaf code that other parts don't depend on
- Spend a little time deciding how to do non-leaf stuff that isn't central or polymorphic (e.g., timer, call combiner)
- For big central or polymorphic interfaces, actually do an API review (for things like transport, filter API, endpoint, closure, exec_ctx, ...).
- Core internal changes don't need a gRFC, but core surface changes do
- But an API review should include at least a PR with the header change and tests to use it before it gets used more broadly
- Whether iomgr polling for POSIX is leaf or central code is a gray area
- What is the schedule?
- In Q4 2017, if some stuff happens opportunistically, great; otherwise ¯\\\_(ツ)\_/¯
- More updates as team time becomes available and committed to this project
## Implications for C++ API and wrapped languages
- For C++ structs, switch to `using` when possible (e.g., Slice,
ByteBuffer, ...); a small illustration follows this list
- The C++ API implementation might directly start using
`grpc_transport_stream_op_batch` rather than the core surface `grpc_op`.
- Can we get wrapped languages to a point where we can statically link C++? This will probably take a year, but it would allow the use of `std::`
- Are there other environments that don't support the standard library, like maybe Android NDK?
- Probably; that might push things out to 18 months
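For the `using` point above, a hedged illustration of the direction; the wrapper class and the alias target here are assumptions for illustration, not the actual grpc++ definitions:
```
#include <grpc/slice.h>

// Today a C++ "struct" is often a wrapper class that re-exposes a core type
// (illustrative only, not the real grpc::Slice):
class SliceWrapper {
 public:
  explicit SliceWrapper(grpc_slice s) : slice_(s) {}
  size_t size() const { return GRPC_SLICE_LENGTH(slice_); }

 private:
  grpc_slice slice_;
};

// The doc suggests moving toward a plain alias where possible, so the C++
// name is literally the core type:
using Slice = grpc_slice;
```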

@ -129,10 +129,9 @@ works:
by the resolver. It asks the balancer for the server addresses to
use for the server name originally requested by the client (i.e.,
the same one originally passed to the name resolver).
- Note: The `grpclb` policy currently ignores any non-balancer
addresses returned by the resolver. However, in the future, it
may be changed to use these addresses as a fallback in case no
balancers can be contacted.
- Note: In the `grpclb` policy, the non-balancer addresses returned
by the resolver are used as a fallback in case no balancers can be
contacted when the LB policy is started.
2. The gRPC servers to which the load balancer is directing the client
may report load to the load balancers, if that information is needed
by the load balancer's configuration.

@ -106,6 +106,8 @@ Pod::Spec.new do |s|
ss.source_files = 'include/grpc/support/alloc.h',
'include/grpc/support/atm.h',
'include/grpc/support/atm_gcc_atomic.h',
'include/grpc/support/atm_gcc_sync.h',
'include/grpc/support/atm_windows.h',
'include/grpc/support/avl.h',
'include/grpc/support/cmdline.h',
'include/grpc/support/cpu.h',
@ -120,13 +122,18 @@ Pod::Spec.new do |s|
'include/grpc/support/sync_custom.h',
'include/grpc/support/sync_generic.h',
'include/grpc/support/sync_posix.h',
'include/grpc/support/sync_windows.h',
'include/grpc/support/thd.h',
'include/grpc/support/time.h',
'include/grpc/support/tls.h',
'include/grpc/support/tls_gcc.h',
'include/grpc/support/tls_msvc.h',
'include/grpc/support/tls_pthread.h',
'include/grpc/support/useful.h',
'include/grpc/impl/codegen/atm.h',
'include/grpc/impl/codegen/atm_gcc_atomic.h',
'include/grpc/impl/codegen/atm_gcc_sync.h',
'include/grpc/impl/codegen/atm_windows.h',
'include/grpc/impl/codegen/gpr_slice.h',
'include/grpc/impl/codegen/gpr_types.h',
'include/grpc/impl/codegen/port_platform.h',
@ -134,6 +141,7 @@ Pod::Spec.new do |s|
'include/grpc/impl/codegen/sync_custom.h',
'include/grpc/impl/codegen/sync_generic.h',
'include/grpc/impl/codegen/sync_posix.h',
'include/grpc/impl/codegen/sync_windows.h',
'include/grpc/impl/codegen/byte_buffer.h',
'include/grpc/impl/codegen/byte_buffer_reader.h',
'include/grpc/impl/codegen/compression_types.h',
@ -145,6 +153,8 @@ Pod::Spec.new do |s|
'include/grpc/impl/codegen/status.h',
'include/grpc/impl/codegen/atm.h',
'include/grpc/impl/codegen/atm_gcc_atomic.h',
'include/grpc/impl/codegen/atm_gcc_sync.h',
'include/grpc/impl/codegen/atm_windows.h',
'include/grpc/impl/codegen/gpr_slice.h',
'include/grpc/impl/codegen/gpr_types.h',
'include/grpc/impl/codegen/port_platform.h',
@ -152,6 +162,7 @@ Pod::Spec.new do |s|
'include/grpc/impl/codegen/sync_custom.h',
'include/grpc/impl/codegen/sync_generic.h',
'include/grpc/impl/codegen/sync_posix.h',
'include/grpc/impl/codegen/sync_windows.h',
'include/grpc/grpc_security.h',
'include/grpc/byte_buffer.h',
'include/grpc/byte_buffer_reader.h',
@ -179,8 +190,6 @@ Pod::Spec.new do |s|
'src/core/lib/support/atomic.h',
'src/core/lib/support/atomic_with_atm.h',
'src/core/lib/support/atomic_with_std.h',
'src/core/lib/support/backoff.h',
'src/core/lib/support/block_annotate.h',
'src/core/lib/support/env.h',
'src/core/lib/support/memory.h',
'src/core/lib/support/mpscq.h',
@ -197,7 +206,6 @@ Pod::Spec.new do |s|
'src/core/lib/support/arena.cc',
'src/core/lib/support/atm.cc',
'src/core/lib/support/avl.cc',
'src/core/lib/support/backoff.cc',
'src/core/lib/support/cmdline.cc',
'src/core/lib/support/cpu_iphone.cc',
'src/core/lib/support/cpu_linux.cc',
@ -309,6 +317,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/deadline/deadline_filter.h',
'src/core/ext/transport/chttp2/client/chttp2_connector.h',
'src/core/ext/transport/inproc/inproc_transport.h',
'src/core/lib/backoff/backoff.h',
'src/core/lib/channel/channel_args.h',
'src/core/lib/channel/channel_stack.h',
'src/core/lib/channel/channel_stack_builder.h',
@ -327,6 +336,7 @@ Pod::Spec.new do |s|
'src/core/lib/http/format_request.h',
'src/core/lib/http/httpcli.h',
'src/core/lib/http/parser.h',
'src/core/lib/iomgr/block_annotate.h',
'src/core/lib/iomgr/call_combiner.h',
'src/core/lib/iomgr/closure.h',
'src/core/lib/iomgr/combiner.h',
@ -462,6 +472,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h',
'src/core/ext/filters/workarounds/workaround_utils.h',
'src/core/lib/surface/init.cc',
'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
@ -726,8 +737,6 @@ Pod::Spec.new do |s|
'src/core/lib/support/atomic.h',
'src/core/lib/support/atomic_with_atm.h',
'src/core/lib/support/atomic_with_std.h',
'src/core/lib/support/backoff.h',
'src/core/lib/support/block_annotate.h',
'src/core/lib/support/env.h',
'src/core/lib/support/memory.h',
'src/core/lib/support/mpscq.h',
@ -810,6 +819,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/deadline/deadline_filter.h',
'src/core/ext/transport/chttp2/client/chttp2_connector.h',
'src/core/ext/transport/inproc/inproc_transport.h',
'src/core/lib/backoff/backoff.h',
'src/core/lib/channel/channel_args.h',
'src/core/lib/channel/channel_stack.h',
'src/core/lib/channel/channel_stack_builder.h',
@ -828,6 +838,7 @@ Pod::Spec.new do |s|
'src/core/lib/http/format_request.h',
'src/core/lib/http/httpcli.h',
'src/core/lib/http/parser.h',
'src/core/lib/iomgr/block_annotate.h',
'src/core/lib/iomgr/call_combiner.h',
'src/core/lib/iomgr/closure.h',
'src/core/lib/iomgr/combiner.h',

@ -63,10 +63,13 @@ Pod::Spec.new do |s|
end
s.subspec 'GID' do |ss|
ss.ios.deployment_target = '7.0'
ss.header_mappings_dir = "#{src_dir}"
ss.source_files = "#{src_dir}/GRPCCall+GID.{h,m}"
ss.dependency "#{s.name}/Main", version
ss.dependency 'Google/SignIn'
end
end

@ -88,8 +88,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/support/atomic.h )
s.files += %w( src/core/lib/support/atomic_with_atm.h )
s.files += %w( src/core/lib/support/atomic_with_std.h )
s.files += %w( src/core/lib/support/backoff.h )
s.files += %w( src/core/lib/support/block_annotate.h )
s.files += %w( src/core/lib/support/env.h )
s.files += %w( src/core/lib/support/memory.h )
s.files += %w( src/core/lib/support/mpscq.h )
@ -106,7 +104,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/support/arena.cc )
s.files += %w( src/core/lib/support/atm.cc )
s.files += %w( src/core/lib/support/avl.cc )
s.files += %w( src/core/lib/support/backoff.cc )
s.files += %w( src/core/lib/support/cmdline.cc )
s.files += %w( src/core/lib/support/cpu_iphone.cc )
s.files += %w( src/core/lib/support/cpu_linux.cc )
@ -252,6 +249,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/filters/deadline/deadline_filter.h )
s.files += %w( src/core/ext/transport/chttp2/client/chttp2_connector.h )
s.files += %w( src/core/ext/transport/inproc/inproc_transport.h )
s.files += %w( src/core/lib/backoff/backoff.h )
s.files += %w( src/core/lib/channel/channel_args.h )
s.files += %w( src/core/lib/channel/channel_stack.h )
s.files += %w( src/core/lib/channel/channel_stack_builder.h )
@ -270,6 +268,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/http/format_request.h )
s.files += %w( src/core/lib/http/httpcli.h )
s.files += %w( src/core/lib/http/parser.h )
s.files += %w( src/core/lib/iomgr/block_annotate.h )
s.files += %w( src/core/lib/iomgr/call_combiner.h )
s.files += %w( src/core/lib/iomgr/closure.h )
s.files += %w( src/core/lib/iomgr/combiner.h )
@ -409,6 +408,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h )
s.files += %w( src/core/ext/filters/workarounds/workaround_utils.h )
s.files += %w( src/core/lib/surface/init.cc )
s.files += %w( src/core/lib/backoff/backoff.cc )
s.files += %w( src/core/lib/channel/channel_args.cc )
s.files += %w( src/core/lib/channel/channel_stack.cc )
s.files += %w( src/core/lib/channel/channel_stack_builder.cc )

@ -164,7 +164,6 @@
'src/core/lib/support/arena.cc',
'src/core/lib/support/atm.cc',
'src/core/lib/support/avl.cc',
'src/core/lib/support/backoff.cc',
'src/core/lib/support/cmdline.cc',
'src/core/lib/support/cpu_iphone.cc',
'src/core/lib/support/cpu_linux.cc',
@ -224,6 +223,7 @@
],
'sources': [
'src/core/lib/surface/init.cc',
'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
@ -526,6 +526,7 @@
'test/core/util/port_server_client.c',
'test/core/util/slice_splitter.c',
'test/core/util/trickle_endpoint.c',
'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
@ -732,6 +733,7 @@
'test/core/util/port_server_client.c',
'test/core/util/slice_splitter.c',
'test/core/util/trickle_endpoint.c',
'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
@ -923,6 +925,7 @@
'sources': [
'src/core/lib/surface/init.cc',
'src/core/lib/surface/init_unsecure.cc',
'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',

@ -140,7 +140,7 @@ class ServerBuilder {
/// please use IPv6 any, i.e., [::]:<port>, which also accepts IPv4
/// connections. Valid values include dns:///localhost:1234, /
/// 192.168.1.1:31416, dns:///[::1]:27182, etc.).
/// \params creds The credentials associated with the server.
/// \param creds The credentials associated with the server.
/// \param selected_port[out] If not `nullptr`, gets populated with the port
/// number bound to the \a grpc::Server for the corresponding endpoint after
/// it is successfully bound, 0 otherwise.
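For reference, a short usage sketch of the parameters documented above; `AddListeningPort` is the standard grpc++ `ServerBuilder` API, and the address here is a placeholder:
```
#include <memory>

#include <grpc++/security/server_credentials.h>
#include <grpc++/server.h>
#include <grpc++/server_builder.h>

void StartServerOnAnyPort() {
  grpc::ServerBuilder builder;
  int selected_port = 0;
  // `creds` is the credentials for this endpoint; `&selected_port` receives
  // the port actually bound (useful when asking the OS for port 0).
  builder.AddListeningPort("[::]:0", grpc::InsecureServerCredentials(),
                           &selected_port);
  std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
  // selected_port now holds the chosen port, or 0 if binding failed.
}
```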

@ -25,6 +25,7 @@
typedef intptr_t gpr_atm;
#define GPR_ATM_MAX INTPTR_MAX
#define GPR_ATM_MIN INTPTR_MIN
#ifdef GPR_LOW_LEVEL_COUNTERS
extern gpr_atm gpr_counter_atm_cas;

@ -25,6 +25,7 @@
typedef intptr_t gpr_atm;
#define GPR_ATM_MAX INTPTR_MAX
#define GPR_ATM_MIN INTPTR_MIN
#define GPR_ATM_COMPILE_BARRIER_() __asm__ __volatile__("" : : : "memory")

@ -24,6 +24,7 @@
typedef intptr_t gpr_atm;
#define GPR_ATM_MAX INTPTR_MAX
#define GPR_ATM_MIN INTPTR_MIN
#define gpr_atm_full_barrier MemoryBarrier

@ -4,7 +4,6 @@ framework module grpc {
header "support/alloc.h"
header "support/atm.h"
header "support/atm_gcc_atomic.h"
header "support/avl.h"
header "support/cmdline.h"
header "support/cpu.h"
@ -16,23 +15,17 @@ framework module grpc {
header "support/string_util.h"
header "support/subprocess.h"
header "support/sync.h"
header "support/sync_custom.h"
header "support/sync_generic.h"
header "support/sync_posix.h"
header "support/thd.h"
header "support/time.h"
header "support/tls.h"
header "support/tls_pthread.h"
header "support/useful.h"
header "impl/codegen/atm.h"
header "impl/codegen/atm_gcc_atomic.h"
header "impl/codegen/gpr_slice.h"
header "impl/codegen/gpr_types.h"
header "impl/codegen/port_platform.h"
header "impl/codegen/sync.h"
header "impl/codegen/sync_custom.h"
header "impl/codegen/sync_generic.h"
header "impl/codegen/sync_posix.h"
header "impl/codegen/byte_buffer.h"
header "impl/codegen/byte_buffer_reader.h"
header "impl/codegen/compression_types.h"
@ -43,14 +36,11 @@ framework module grpc {
header "impl/codegen/slice.h"
header "impl/codegen/status.h"
header "impl/codegen/atm.h"
header "impl/codegen/atm_gcc_atomic.h"
header "impl/codegen/gpr_slice.h"
header "impl/codegen/gpr_types.h"
header "impl/codegen/port_platform.h"
header "impl/codegen/sync.h"
header "impl/codegen/sync_custom.h"
header "impl/codegen/sync_generic.h"
header "impl/codegen/sync_posix.h"
header "grpc_security.h"
header "byte_buffer.h"
header "byte_buffer_reader.h"
@ -65,6 +55,28 @@ framework module grpc {
header "support/workaround_list.h"
header "census.h"
textual header "support/atm_gcc_atomic.h"
textual header "support/atm_gcc_sync.h"
textual header "support/atm_windows.h"
textual header "support/sync_custom.h"
textual header "support/sync_posix.h"
textual header "support/sync_windows.h"
textual header "support/tls_gcc.h"
textual header "support/tls_msvc.h"
textual header "support/tls_pthread.h"
textual header "impl/codegen/atm_gcc_atomic.h"
textual header "impl/codegen/atm_gcc_sync.h"
textual header "impl/codegen/atm_windows.h"
textual header "impl/codegen/sync_custom.h"
textual header "impl/codegen/sync_posix.h"
textual header "impl/codegen/sync_windows.h"
textual header "impl/codegen/atm_gcc_atomic.h"
textual header "impl/codegen/atm_gcc_sync.h"
textual header "impl/codegen/atm_windows.h"
textual header "impl/codegen/sync_custom.h"
textual header "impl/codegen/sync_posix.h"
textual header "impl/codegen/sync_windows.h"
export *
module * { export * }
}

@ -100,8 +100,6 @@
<file baseinstalldir="/" name="src/core/lib/support/atomic.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/atomic_with_atm.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/atomic_with_std.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/backoff.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/block_annotate.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/env.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/memory.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/mpscq.h" role="src" />
@ -118,7 +116,6 @@
<file baseinstalldir="/" name="src/core/lib/support/arena.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/atm.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/avl.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/backoff.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/cmdline.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/cpu_iphone.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/cpu_linux.cc" role="src" />
@ -264,6 +261,7 @@
<file baseinstalldir="/" name="src/core/ext/filters/deadline/deadline_filter.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/client/chttp2_connector.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/inproc/inproc_transport.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/backoff/backoff.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/channel/channel_args.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/channel/channel_stack.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/channel/channel_stack_builder.h" role="src" />
@ -282,6 +280,7 @@
<file baseinstalldir="/" name="src/core/lib/http/format_request.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/http/httpcli.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/http/parser.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/block_annotate.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/call_combiner.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/closure.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/combiner.h" role="src" />
@ -421,6 +420,7 @@
<file baseinstalldir="/" name="src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/workarounds/workaround_utils.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/init.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/backoff/backoff.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/channel/channel_args.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/channel/channel_stack.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/channel/channel_stack_builder.cc" role="src" />

@ -29,4 +29,4 @@ void define_base_resources();
}
#endif
#endif /* GRPC_CORE_EXT_CENSUS_BASE_RESOURCES_H */
#endif /* GRPC_CORE_EXT_CENSUS_BASE_RESOURCES_H */

@ -66,4 +66,4 @@ void census_tracing_end_op(census_op_id op_id);
}
#endif
#endif /* GRPC_CORE_EXT_CENSUS_CENSUS_INTERFACE_H */
#endif /* GRPC_CORE_EXT_CENSUS_CENSUS_INTERFACE_H */

@ -81,4 +81,4 @@ int census_log_out_of_space_count(void);
}
#endif
#endif /* GRPC_CORE_EXT_CENSUS_CENSUS_LOG_H */
#endif /* GRPC_CORE_EXT_CENSUS_CENSUS_LOG_H */

@ -121,4 +121,4 @@ uint64_t census_ht_for_all(const census_ht *ht, census_ht_itr_cb);
}
#endif
#endif /* GRPC_CORE_EXT_CENSUS_HASH_TABLE_H */
#endif /* GRPC_CORE_EXT_CENSUS_HASH_TABLE_H */

@ -85,4 +85,4 @@ int64_t census_log_out_of_space_count(void);
}
#endif
#endif /* GRPC_CORE_EXT_CENSUS_MLOG_H */
#endif /* GRPC_CORE_EXT_CENSUS_MLOG_H */

@ -53,4 +53,4 @@ int32_t define_resource(const resource *base);
}
#endif
#endif /* GRPC_CORE_EXT_CENSUS_RESOURCE_H */
#endif /* GRPC_CORE_EXT_CENSUS_RESOURCE_H */

@ -61,4 +61,4 @@ bool decode_trace_context(google_trace_TraceContext *ctxt, uint8_t *buffer,
}
#endif
#endif /* GRPC_CORE_EXT_CENSUS_TRACE_CONTEXT_H */
#endif /* GRPC_CORE_EXT_CENSUS_TRACE_CONTEXT_H */

@ -53,4 +53,4 @@ size_t http_format_to_trace_span_context(const char *buf, size_t buf_size,
}
#endif
#endif /* GRPC_CORE_EXT_CENSUS_TRACE_PROPAGATION_H */
#endif /* GRPC_CORE_EXT_CENSUS_TRACE_PROPAGATION_H */

@ -114,4 +114,4 @@ void trace_end_span(const trace_status *status, trace_span_context *span_ctxt);
}
#endif
#endif /* GRPC_CORE_EXT_CENSUS_TRACING_H */
#endif /* GRPC_CORE_EXT_CENSUS_TRACING_H */

@ -163,4 +163,4 @@ void census_window_stats_destroy(struct census_window_stats *wstats);
}
#endif
#endif /* GRPC_CORE_EXT_CENSUS_WINDOW_STATS_H */
#endif /* GRPC_CORE_EXT_CENSUS_WINDOW_STATS_H */

@ -188,8 +188,8 @@ static void watcher_timer_init(grpc_exec_ctx *exec_ctx, void *arg,
watcher_timer_init_arg *wa = (watcher_timer_init_arg *)arg;
grpc_timer_init(exec_ctx, &wa->w->alarm,
gpr_convert_clock_type(wa->deadline, GPR_CLOCK_MONOTONIC),
&wa->w->on_timeout, gpr_now(GPR_CLOCK_MONOTONIC));
grpc_timespec_to_millis_round_up(wa->deadline),
&wa->w->on_timeout);
gpr_free(wa);
}

@ -71,7 +71,7 @@ typedef enum {
typedef struct {
gpr_refcount refs;
gpr_timespec timeout;
grpc_millis timeout;
wait_for_ready_value wait_for_ready;
} method_parameters;
@ -101,17 +101,18 @@ static bool parse_wait_for_ready(grpc_json *field,
return true;
}
static bool parse_timeout(grpc_json *field, gpr_timespec *timeout) {
static bool parse_timeout(grpc_json *field, grpc_millis *timeout) {
if (field->type != GRPC_JSON_STRING) return false;
size_t len = strlen(field->value);
if (field->value[len - 1] != 's') return false;
char *buf = gpr_strdup(field->value);
buf[len - 1] = '\0'; // Remove trailing 's'.
char *decimal_point = strchr(buf, '.');
int nanos = 0;
if (decimal_point != NULL) {
*decimal_point = '\0';
timeout->tv_nsec = gpr_parse_nonnegative_int(decimal_point + 1);
if (timeout->tv_nsec == -1) {
nanos = gpr_parse_nonnegative_int(decimal_point + 1);
if (nanos == -1) {
gpr_free(buf);
return false;
}
@ -130,24 +131,25 @@ static bool parse_timeout(grpc_json *field, gpr_timespec *timeout) {
gpr_free(buf);
return false;
}
timeout->tv_nsec *= multiplier;
nanos *= multiplier;
}
timeout->tv_sec = gpr_parse_nonnegative_int(buf);
int seconds = gpr_parse_nonnegative_int(buf);
gpr_free(buf);
if (timeout->tv_sec == -1) return false;
if (seconds == -1) return false;
*timeout = seconds * GPR_MS_PER_SEC + nanos / GPR_NS_PER_MS;
return true;
}
static void *method_parameters_create_from_json(const grpc_json *json) {
wait_for_ready_value wait_for_ready = WAIT_FOR_READY_UNSET;
gpr_timespec timeout = {0, 0, GPR_TIMESPAN};
grpc_millis timeout = 0;
for (grpc_json *field = json->child; field != NULL; field = field->next) {
if (field->key == NULL) continue;
if (strcmp(field->key, "waitForReady") == 0) {
if (wait_for_ready != WAIT_FOR_READY_UNSET) return NULL; // Duplicate.
if (!parse_wait_for_ready(field, &wait_for_ready)) return NULL;
} else if (strcmp(field->key, "timeout") == 0) {
if (timeout.tv_sec > 0 || timeout.tv_nsec > 0) return NULL; // Duplicate.
if (timeout > 0) return NULL; // Duplicate.
if (!parse_timeout(field, &timeout)) return NULL;
}
}
@ -826,7 +828,7 @@ typedef struct client_channel_call_data {
grpc_slice path; // Request path.
gpr_timespec call_start_time;
gpr_timespec deadline;
grpc_millis deadline;
gpr_arena *arena;
grpc_call_stack *owning_call;
grpc_call_combiner *call_combiner;
@ -979,11 +981,11 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
// If the deadline from the service config is shorter than the one
// from the client API, reset the deadline timer.
if (chand->deadline_checking_enabled &&
gpr_time_cmp(calld->method_params->timeout,
gpr_time_0(GPR_TIMESPAN)) != 0) {
const gpr_timespec per_method_deadline =
gpr_time_add(calld->call_start_time, calld->method_params->timeout);
if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
calld->method_params->timeout != 0) {
const grpc_millis per_method_deadline =
grpc_timespec_to_millis_round_up(calld->call_start_time) +
calld->method_params->timeout;
if (per_method_deadline < calld->deadline) {
calld->deadline = per_method_deadline;
grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
}
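For example, a call whose start time rounds up to 10,000 ms with a 1,500 ms service-config timeout gets a per-method deadline of 11,500 ms, and the deadline timer is reset only if that value is earlier than the deadline supplied through the client API (the sample numbers are illustrative).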
@ -1422,7 +1424,7 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
// Initialize data members.
calld->path = grpc_slice_ref_internal(args->path);
calld->call_start_time = args->start_time;
calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
calld->deadline = args->deadline;
calld->arena = args->arena;
calld->owning_call = args->call_stack;
calld->call_combiner = args->call_combiner;

@ -60,4 +60,4 @@ grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_H */
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_H */

@ -82,4 +82,4 @@ grpc_arg grpc_client_channel_factory_create_channel_arg(
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_FACTORY_H */
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_FACTORY_H */

@ -38,7 +38,7 @@ typedef struct {
/** set of pollsets interested in this connection */
grpc_pollset_set *interested_parties;
/** deadline for connection */
gpr_timespec deadline;
grpc_millis deadline;
/** channel arguments (to be passed to transport) */
const grpc_channel_args *channel_args;
} grpc_connect_in_args;
@ -78,4 +78,4 @@ void grpc_connector_shutdown(grpc_exec_ctx *exec_ctx, grpc_connector *connector,
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CONNECTOR_H */
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CONNECTOR_H */

@ -39,4 +39,4 @@ void grpc_http_connect_register_handshaker_factory();
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_HTTP_CONNECT_HANDSHAKER_H */
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_HTTP_CONNECT_HANDSHAKER_H */

@ -29,4 +29,4 @@ void grpc_register_http_proxy_mapper();
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_HTTP_PROXY_H */
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_HTTP_PROXY_H */

@ -103,6 +103,7 @@
#include "src/core/ext/filters/client_channel/parse_address.h"
#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
#include "src/core/ext/filters/client_channel/subchannel_index.h"
#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/combiner.h"
@ -112,7 +113,6 @@
#include "src/core/lib/slice/slice_hash_table.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/backoff.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel.h"
@ -345,9 +345,6 @@ typedef struct glb_lb_policy {
/** are we currently updating lb_call? */
bool updating_lb_call;
/** are we currently updating lb_channel? */
bool updating_lb_channel;
/** are we already watching the LB channel's connectivity? */
bool watching_lb_channel;
@ -360,9 +357,6 @@ typedef struct glb_lb_policy {
/** called upon changes to the LB channel's connectivity. */
grpc_closure lb_channel_on_connectivity_changed;
/** args from the latest update received while already updating, or NULL */
grpc_lb_policy_args *pending_update_args;
/************************************************************/
/* client data associated with the LB server communication */
/************************************************************/
@ -397,7 +391,7 @@ typedef struct glb_lb_policy {
grpc_slice lb_call_status_details;
/** LB call retry backoff state */
gpr_backoff lb_call_backoff_state;
grpc_backoff lb_call_backoff_state;
/** LB call retry timer */
grpc_timer lb_call_retry_timer;
@ -411,7 +405,7 @@ typedef struct glb_lb_policy {
* recreated whenever lb_call is replaced. */
grpc_grpclb_client_stats *client_stats;
/* Interval and timer for next client load report. */
gpr_timespec client_stats_report_interval;
grpc_millis client_stats_report_interval;
grpc_timer client_load_report_timer;
bool client_load_report_timer_pending;
bool last_client_load_report_counters_were_zero;
@ -982,10 +976,6 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
grpc_subchannel_index_unref();
if (glb_policy->pending_update_args != NULL) {
grpc_channel_args_destroy(exec_ctx, glb_policy->pending_update_args->args);
gpr_free(glb_policy->pending_update_args);
}
gpr_free(glb_policy);
}
@ -1010,6 +1000,10 @@ static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
glb_policy->retry_timer_active = false;
}
if (glb_policy->fallback_timer_active) {
grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
glb_policy->fallback_timer_active = false;
}
pending_pick *pp = glb_policy->pending_picks;
glb_policy->pending_picks = NULL;
@ -1130,21 +1124,19 @@ static void start_picking_locked(grpc_exec_ctx *exec_ctx,
/* start a timer to fall back */
if (glb_policy->lb_fallback_timeout_ms > 0 &&
glb_policy->serverlist == NULL && !glb_policy->fallback_timer_active) {
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec deadline = gpr_time_add(
now,
gpr_time_from_millis(glb_policy->lb_fallback_timeout_ms, GPR_TIMESPAN));
grpc_millis deadline =
grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_fallback_timeout_ms;
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
glb_policy->fallback_timer_active = true;
grpc_timer_init(exec_ctx, &glb_policy->lb_fallback_timer, deadline,
&glb_policy->lb_on_fallback, now);
&glb_policy->lb_on_fallback);
}
glb_policy->started_picking = true;
gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
query_for_backends_locked(exec_ctx, glb_policy);
}
@ -1270,17 +1262,15 @@ static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx,
glb_policy->updating_lb_call = false;
} else if (!glb_policy->shutting_down) {
/* if we aren't shutting down, restart the LB client call after some time */
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec next_try =
gpr_backoff_step(&glb_policy->lb_call_backoff_state, now);
grpc_millis next_try =
grpc_backoff_step(exec_ctx, &glb_policy->lb_call_backoff_state);
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
(void *)glb_policy);
gpr_timespec timeout = gpr_time_sub(next_try, now);
if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
gpr_log(GPR_DEBUG,
"... retry_timer_active in %" PRId64 ".%09d seconds.",
timeout.tv_sec, timeout.tv_nsec);
grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
if (timeout > 0) {
gpr_log(GPR_DEBUG, "... retry_timer_active in %" PRIdPTR "ms.",
timeout);
} else {
gpr_log(GPR_DEBUG, "... retry_timer_active immediately.");
}
@ -1291,7 +1281,7 @@ static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx,
grpc_combiner_scheduler(glb_policy->base.combiner));
glb_policy->retry_timer_active = true;
grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
&glb_policy->lb_on_call_retry, now);
&glb_policy->lb_on_call_retry);
}
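The same retry shape recurs in the DNS resolvers and in subchannel.cc further down. A minimal sketch of the pattern, using only the signatures introduced in this change (grpc_backoff_step returns an absolute grpc_millis deadline, and grpc_timer_init no longer takes a separate now argument); the helper name and its parameters are hypothetical:

/* Sketch only: schedule a retry at the deadline produced by the backoff. */
static void schedule_retry(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff,
                           grpc_timer *retry_timer, grpc_closure *on_retry) {
  grpc_millis next_try = grpc_backoff_step(exec_ctx, backoff);
  grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
  if (timeout > 0) {
    gpr_log(GPR_DEBUG, "retrying in %" PRIdPTR " ms", timeout);
  } else {
    gpr_log(GPR_DEBUG, "retrying immediately");
  }
  /* Deadlines are already absolute grpc_millis values. */
  grpc_timer_init(exec_ctx, retry_timer, next_try, on_retry);
}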
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"lb_on_server_status_received_locked");
@ -1302,15 +1292,14 @@ static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
const gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
const gpr_timespec next_client_load_report_time =
gpr_time_add(now, glb_policy->client_stats_report_interval);
const grpc_millis next_client_load_report_time =
grpc_exec_ctx_now(exec_ctx) + glb_policy->client_stats_report_interval;
GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
send_client_load_report_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
grpc_timer_init(exec_ctx, &glb_policy->client_load_report_timer,
next_client_load_report_time,
&glb_policy->client_load_report_closure, now);
&glb_policy->client_load_report_closure);
}
static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
@ -1404,12 +1393,10 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
* glb_policy->base.interested_parties, which is comprised of the polling
* entities from \a client_channel. */
grpc_slice host = grpc_slice_from_copied_string(glb_policy->server_name);
gpr_timespec deadline =
grpc_millis deadline =
glb_policy->lb_call_timeout_ms == 0
? gpr_inf_future(GPR_CLOCK_MONOTONIC)
: gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
gpr_time_from_millis(glb_policy->lb_call_timeout_ms,
GPR_TIMESPAN));
? GRPC_MILLIS_INF_FUTURE
: grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_call_timeout_ms;
glb_policy->lb_call = grpc_channel_create_pollset_set_call(
exec_ctx, glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
glb_policy->base.interested_parties,
@ -1440,12 +1427,12 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
lb_on_response_received_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
gpr_backoff_init(&glb_policy->lb_call_backoff_state,
GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,
GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER,
GRPC_GRPCLB_RECONNECT_JITTER,
GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
grpc_backoff_init(&glb_policy->lb_call_backoff_state,
GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,
GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER,
GRPC_GRPCLB_RECONNECT_JITTER,
GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
glb_policy->seen_initial_response = false;
glb_policy->last_client_load_report_counters_were_zero = false;
@ -1553,7 +1540,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
memset(ops, 0, sizeof(ops));
grpc_op *op = ops;
if (glb_policy->lb_response_payload != NULL) {
gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
/* Received data from the LB server. Look inside
* glb_policy->lb_response_payload, for a serverlist. */
grpc_byte_buffer_reader bbr;
@ -1567,16 +1554,14 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
(response = grpc_grpclb_initial_response_parse(response_slice)) !=
NULL) {
if (response->has_client_stats_report_interval) {
glb_policy->client_stats_report_interval =
gpr_time_max(gpr_time_from_seconds(1, GPR_TIMESPAN),
grpc_grpclb_duration_to_timespec(
&response->client_stats_report_interval));
glb_policy->client_stats_report_interval = GPR_MAX(
GPR_MS_PER_SEC, grpc_grpclb_duration_to_millis(
&response->client_stats_report_interval));
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"received initial LB response message; "
"client load reporting interval = %" PRId64 ".%09d sec",
glb_policy->client_stats_report_interval.tv_sec,
glb_policy->client_stats_report_interval.tv_nsec);
"client load reporting interval = %" PRIdPTR " milliseconds",
glb_policy->client_stats_report_interval);
}
/* take a weak ref (won't prevent calling of \a glb_shutdown() if the
* strong ref count goes to zero) to be unref'd in
@ -1757,45 +1742,22 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
}
const grpc_lb_addresses *addresses =
(const grpc_lb_addresses *)arg->value.pointer.p;
// If a non-empty serverlist hasn't been received from the balancer,
// propagate the update to fallback_backend_addresses.
if (glb_policy->serverlist == NULL) {
// If a non-empty serverlist hasn't been received from the balancer,
// propagate the update to fallback_backend_addresses.
fallback_update_locked(exec_ctx, glb_policy, addresses);
} else if (glb_policy->updating_lb_channel) {
// If we have received a serverlist from the balancer, we need to defer the
// update when there is one already in progress.
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Update already in progress for grpclb %p. Deferring update.",
(void *)glb_policy);
}
if (glb_policy->pending_update_args != NULL) {
grpc_channel_args_destroy(exec_ctx,
glb_policy->pending_update_args->args);
gpr_free(glb_policy->pending_update_args);
}
glb_policy->pending_update_args = (grpc_lb_policy_args *)gpr_zalloc(
sizeof(*glb_policy->pending_update_args));
glb_policy->pending_update_args->client_channel_factory =
args->client_channel_factory;
glb_policy->pending_update_args->args = grpc_channel_args_copy(args->args);
glb_policy->pending_update_args->combiner = args->combiner;
return;
}
glb_policy->updating_lb_channel = true;
GPR_ASSERT(glb_policy->lb_channel != NULL);
// Propagate updates to the LB channel (pick_first) through the fake
// resolver.
grpc_channel_args *lb_channel_args = build_lb_channel_args(
exec_ctx, addresses, glb_policy->response_generator, args->args);
/* Propagate updates to the LB channel (pick first) through the fake resolver
*/
grpc_fake_resolver_response_generator_set_response(
exec_ctx, glb_policy->response_generator, lb_channel_args);
grpc_channel_args_destroy(exec_ctx, lb_channel_args);
// Start watching the LB channel connectivity for connection, if not
// already doing so.
if (!glb_policy->watching_lb_channel) {
// Watch the LB channel connectivity for connection.
glb_policy->lb_channel_connectivity = grpc_channel_check_connectivity_state(
glb_policy->lb_channel, true /* try to connect */);
grpc_channel_element *client_channel_elem = grpc_channel_stack_last_element(
@ -1847,18 +1809,10 @@ static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
/* fallthrough */
case GRPC_CHANNEL_READY:
if (glb_policy->lb_call != NULL) {
glb_policy->updating_lb_channel = false;
glb_policy->updating_lb_call = true;
grpc_call_cancel(glb_policy->lb_call, NULL);
// lb_on_server_status_received will pick up the cancel and reinit
// lb_on_server_status_received() will pick up the cancel and reinit
// lb_call.
if (glb_policy->pending_update_args != NULL) {
grpc_lb_policy_args *args = glb_policy->pending_update_args;
glb_policy->pending_update_args = NULL;
glb_update_locked(exec_ctx, &glb_policy->base, args);
grpc_channel_args_destroy(exec_ctx, args->args);
gpr_free(args);
}
} else if (glb_policy->started_picking && !glb_policy->shutting_down) {
if (glb_policy->retry_timer_active) {
grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);

@ -34,4 +34,4 @@ grpc_lb_policy_factory *grpc_glb_lb_factory_create();
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_H */
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_H */

@ -299,13 +299,10 @@ int grpc_grpclb_duration_compare(const grpc_grpclb_duration *lhs,
return 0;
}
gpr_timespec grpc_grpclb_duration_to_timespec(
grpc_grpclb_duration *duration_pb) {
gpr_timespec duration;
duration.tv_sec = duration_pb->has_seconds ? duration_pb->seconds : 0;
duration.tv_nsec = duration_pb->has_nanos ? duration_pb->nanos : 0;
duration.clock_type = GPR_TIMESPAN;
return duration;
grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration *duration_pb) {
return (grpc_millis)(
(duration_pb->has_seconds ? duration_pb->seconds : 0) * GPR_MS_PER_SEC +
(duration_pb->has_nanos ? duration_pb->nanos : 0) / GPR_NS_PER_MS);
}
void grpc_grpclb_initial_response_destroy(
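As a worked example of the new conversion: a Duration of { seconds: 1, nanos: 250000000 } becomes 1 * GPR_MS_PER_SEC + 250000000 / GPR_NS_PER_MS = 1250 ms (sample values chosen for illustration); in grpclb above, the resulting client load reporting interval is additionally floored at GPR_MS_PER_SEC, i.e. one second.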

@ -81,8 +81,7 @@ void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist);
int grpc_grpclb_duration_compare(const grpc_grpclb_duration *lhs,
const grpc_grpclb_duration *rhs);
gpr_timespec grpc_grpclb_duration_to_timespec(
grpc_grpclb_duration *duration_pb);
grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration *duration_pb);
/** Destroy \a initial_response */
void grpc_grpclb_initial_response_destroy(

@ -138,4 +138,4 @@ grpc_lb_policy *grpc_lb_policy_factory_create_lb_policy(
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_FACTORY_H */
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_FACTORY_H */

@ -45,4 +45,4 @@ grpc_lb_policy *grpc_lb_policy_create(grpc_exec_ctx *exec_ctx, const char *name,
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_REGISTRY_H */
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_REGISTRY_H */

@ -53,4 +53,4 @@ bool grpc_parse_ipv6_hostport(const char *hostport, grpc_resolved_address *addr,
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PARSE_ADDRESS_H */
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PARSE_ADDRESS_H */

@ -79,4 +79,4 @@ void grpc_proxy_mapper_destroy(grpc_proxy_mapper* mapper);
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PROXY_MAPPER_H */
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PROXY_MAPPER_H */

@ -49,4 +49,4 @@ bool grpc_proxy_mappers_map_address(grpc_exec_ctx* exec_ctx,
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PROXY_MAPPER_REGISTRY_H */
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PROXY_MAPPER_REGISTRY_H */

@ -32,13 +32,13 @@
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/gethostname.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/json/json.h"
#include "src/core/lib/support/backoff.h"
#include "src/core/lib/support/env.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/service_config.h"
@ -89,7 +89,7 @@ typedef struct {
bool have_retry_timer;
grpc_timer retry_timer;
/** retry backoff state */
gpr_backoff backoff_state;
grpc_backoff backoff_state;
/** currently resolving addresses */
grpc_lb_addresses *lb_addresses;
@ -137,7 +137,7 @@ static void dns_ares_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver) {
ares_dns_resolver *r = (ares_dns_resolver *)resolver;
if (!r->resolving) {
gpr_backoff_reset(&r->backoff_state);
grpc_backoff_reset(&r->backoff_state);
dns_ares_start_resolving_locked(exec_ctx, r);
}
}
@ -271,22 +271,20 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
} else {
const char *msg = grpc_error_string(error);
gpr_log(GPR_DEBUG, "dns resolution failed: %s", msg);
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec next_try = gpr_backoff_step(&r->backoff_state, now);
gpr_timespec timeout = gpr_time_sub(next_try, now);
grpc_millis next_try = grpc_backoff_step(exec_ctx, &r->backoff_state);
grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
gpr_log(GPR_INFO, "dns resolution failed (will retry): %s",
grpc_error_string(error));
GPR_ASSERT(!r->have_retry_timer);
r->have_retry_timer = true;
GRPC_RESOLVER_REF(&r->base, "retry-timer");
if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
gpr_log(GPR_DEBUG, "retrying in %" PRId64 ".%09d seconds", timeout.tv_sec,
timeout.tv_nsec);
if (timeout > 0) {
gpr_log(GPR_DEBUG, "retrying in %" PRIdPTR " milliseconds", timeout);
} else {
gpr_log(GPR_DEBUG, "retrying immediately");
}
grpc_timer_init(exec_ctx, &r->retry_timer, next_try,
&r->dns_ares_on_retry_timer_locked, now);
&r->dns_ares_on_retry_timer_locked);
}
if (r->resolved_result != NULL) {
grpc_channel_args_destroy(exec_ctx, r->resolved_result);
@ -307,7 +305,7 @@ static void dns_ares_next_locked(grpc_exec_ctx *exec_ctx,
r->next_completion = on_complete;
r->target_result = target_result;
if (r->resolved_version == 0 && !r->resolving) {
gpr_backoff_reset(&r->backoff_state);
grpc_backoff_reset(&r->backoff_state);
dns_ares_start_resolving_locked(exec_ctx, r);
} else {
dns_ares_maybe_finish_next_locked(exec_ctx, r);
@ -381,11 +379,11 @@ static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
grpc_pollset_set_add_pollset_set(exec_ctx, r->interested_parties,
args->pollset_set);
}
gpr_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS,
GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER,
GRPC_DNS_RECONNECT_JITTER,
GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
grpc_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS,
GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER,
GRPC_DNS_RECONNECT_JITTER,
GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
GRPC_CLOSURE_INIT(&r->dns_ares_on_retry_timer_locked,
dns_ares_on_retry_timer_locked, r,
grpc_combiner_scheduler(r->base.combiner));

@ -27,11 +27,11 @@
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/support/backoff.h"
#include "src/core/lib/support/env.h"
#include "src/core/lib/support/string.h"
@ -70,7 +70,7 @@ typedef struct {
grpc_timer retry_timer;
grpc_closure on_retry;
/** retry backoff state */
gpr_backoff backoff_state;
grpc_backoff backoff_state;
/** currently resolving addresses */
grpc_resolved_addresses *addresses;
@ -113,7 +113,7 @@ static void dns_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver) {
dns_resolver *r = (dns_resolver *)resolver;
if (!r->resolving) {
gpr_backoff_reset(&r->backoff_state);
grpc_backoff_reset(&r->backoff_state);
dns_start_resolving_locked(exec_ctx, r);
}
}
@ -126,7 +126,7 @@ static void dns_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
r->next_completion = on_complete;
r->target_result = target_result;
if (r->resolved_version == 0 && !r->resolving) {
gpr_backoff_reset(&r->backoff_state);
grpc_backoff_reset(&r->backoff_state);
dns_start_resolving_locked(exec_ctx, r);
} else {
dns_maybe_finish_next_locked(exec_ctx, r);
@ -153,6 +153,9 @@ static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_channel_args *result = NULL;
GPR_ASSERT(r->resolving);
r->resolving = false;
GRPC_ERROR_REF(error);
error = grpc_error_set_str(error, GRPC_ERROR_STR_TARGET_ADDRESS,
grpc_slice_from_copied_string(r->name_to_resolve));
if (r->addresses != NULL) {
grpc_lb_addresses *addresses = grpc_lb_addresses_create(
r->addresses->naddrs, NULL /* user_data_vtable */);
@ -167,23 +170,21 @@ static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_resolved_addresses_destroy(r->addresses);
grpc_lb_addresses_destroy(exec_ctx, addresses);
} else {
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec next_try = gpr_backoff_step(&r->backoff_state, now);
gpr_timespec timeout = gpr_time_sub(next_try, now);
grpc_millis next_try = grpc_backoff_step(exec_ctx, &r->backoff_state);
grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
gpr_log(GPR_INFO, "dns resolution failed (will retry): %s",
grpc_error_string(error));
GPR_ASSERT(!r->have_retry_timer);
r->have_retry_timer = true;
GRPC_RESOLVER_REF(&r->base, "retry-timer");
if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
gpr_log(GPR_DEBUG, "retrying in %" PRId64 ".%09d seconds", timeout.tv_sec,
timeout.tv_nsec);
if (timeout > 0) {
gpr_log(GPR_DEBUG, "retrying in %" PRIdPTR " milliseconds", timeout);
} else {
gpr_log(GPR_DEBUG, "retrying immediately");
}
GRPC_CLOSURE_INIT(&r->on_retry, dns_on_retry_timer_locked, r,
grpc_combiner_scheduler(r->base.combiner));
grpc_timer_init(exec_ctx, &r->retry_timer, next_try, &r->on_retry, now);
grpc_timer_init(exec_ctx, &r->retry_timer, next_try, &r->on_retry);
}
if (r->resolved_result != NULL) {
grpc_channel_args_destroy(exec_ctx, r->resolved_result);
@ -191,6 +192,7 @@ static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
r->resolved_result = result;
r->resolved_version++;
dns_maybe_finish_next_locked(exec_ctx, r);
GRPC_ERROR_UNREF(error);
GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "dns-resolving");
}
@ -254,11 +256,11 @@ static grpc_resolver *dns_create(grpc_exec_ctx *exec_ctx,
grpc_pollset_set_add_pollset_set(exec_ctx, r->interested_parties,
args->pollset_set);
}
gpr_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS,
GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER,
GRPC_DNS_RECONNECT_JITTER,
GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
grpc_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS,
GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER,
GRPC_DNS_RECONNECT_JITTER,
GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
return &r->base;
}

@ -75,4 +75,4 @@ char *grpc_resolver_factory_get_default_authority(
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FACTORY_H */
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FACTORY_H */

@ -74,4 +74,4 @@ char *grpc_resolver_factory_add_default_prefix_if_needed(
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_REGISTRY_H */
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_REGISTRY_H */

@ -55,4 +55,4 @@ grpc_server_retry_throttle_data* grpc_retry_throttle_map_get_data_for_server(
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RETRY_THROTTLE_H */
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RETRY_THROTTLE_H */

@ -31,6 +31,7 @@
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/filters/client_channel/subchannel_index.h"
#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/debug/stats.h"
@ -38,7 +39,6 @@
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/support/backoff.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/channel_init.h"
#include "src/core/lib/transport/connectivity_state.h"
@ -118,9 +118,9 @@ struct grpc_subchannel {
external_state_watcher root_external_state_watcher;
/** next connect attempt time */
gpr_timespec next_attempt;
grpc_millis next_attempt;
/** backoff state */
gpr_backoff backoff_state;
grpc_backoff backoff_state;
/** do we have an active alarm? */
bool have_alarm;
/** have we started the backoff loop */
@ -364,7 +364,7 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
}
}
}
gpr_backoff_init(
grpc_backoff_init(
&c->backoff_state, initial_backoff_ms,
fixed_reconnect_backoff ? 1.0
: GRPC_SUBCHANNEL_RECONNECT_BACKOFF_MULTIPLIER,
@ -428,8 +428,7 @@ static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
if (error == GRPC_ERROR_NONE) {
gpr_log(GPR_INFO, "Failed to connect to channel, retrying");
c->next_attempt =
gpr_backoff_step(&c->backoff_state, gpr_now(GPR_CLOCK_MONOTONIC));
c->next_attempt = grpc_backoff_step(exec_ctx, &c->backoff_state);
continue_connect_locked(exec_ctx, c);
gpr_mu_unlock(&c->mu);
} else {
@ -464,24 +463,22 @@ static void maybe_start_connecting_locked(grpc_exec_ctx *exec_ctx,
c->connecting = true;
GRPC_SUBCHANNEL_WEAK_REF(c, "connecting");
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
if (!c->backoff_begun) {
c->backoff_begun = true;
c->next_attempt = gpr_backoff_begin(&c->backoff_state, now);
c->next_attempt = grpc_backoff_begin(exec_ctx, &c->backoff_state);
continue_connect_locked(exec_ctx, c);
} else {
GPR_ASSERT(!c->have_alarm);
c->have_alarm = true;
gpr_timespec time_til_next = gpr_time_sub(c->next_attempt, now);
if (gpr_time_cmp(time_til_next, gpr_time_0(time_til_next.clock_type)) <=
0) {
const grpc_millis time_til_next =
c->next_attempt - grpc_exec_ctx_now(exec_ctx);
if (time_til_next <= 0) {
gpr_log(GPR_INFO, "Retry immediately");
} else {
gpr_log(GPR_INFO, "Retry in %" PRId64 ".%09d seconds",
time_til_next.tv_sec, time_til_next.tv_nsec);
gpr_log(GPR_INFO, "Retry in %" PRIdPTR " milliseconds", time_til_next);
}
GRPC_CLOSURE_INIT(&c->on_alarm, on_alarm, c, grpc_schedule_on_exec_ctx);
grpc_timer_init(exec_ctx, &c->alarm, c->next_attempt, &c->on_alarm, now);
grpc_timer_init(exec_ctx, &c->alarm, c->next_attempt, &c->on_alarm);
}
}

@ -107,7 +107,7 @@ typedef struct {
grpc_polling_entity *pollent;
grpc_slice path;
gpr_timespec start_time;
gpr_timespec deadline;
grpc_millis deadline;
gpr_arena *arena;
grpc_call_context_element *context;
grpc_call_combiner *call_combiner;

@ -86,4 +86,4 @@ void grpc_subchannel_index_test_only_set_force_creation(bool force_creation);
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_SUBCHANNEL_INDEX_H */
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_SUBCHANNEL_INDEX_H */

@ -55,4 +55,4 @@ void grpc_uri_destroy(grpc_uri *uri);
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_URI_PARSER_H */
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_URI_PARSER_H */

@ -86,9 +86,8 @@ static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
// synchronized.
static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
gpr_timespec deadline) {
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) == 0) {
grpc_millis deadline) {
if (deadline == GRPC_MILLIS_INF_FUTURE) {
return;
}
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
@ -114,8 +113,7 @@ static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
}
GPR_ASSERT(closure != NULL);
GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure,
gpr_now(GPR_CLOCK_MONOTONIC));
grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure);
}
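With deadlines expressed as absolute grpc_millis values, the filter no longer converts clocks or passes a now argument when arming the timer. A minimal sketch of that shape, using only symbols visible in this change (the helper name is hypothetical):

/* Sketch: arm a deadline timer only when the deadline is finite. */
static void arm_deadline_timer(grpc_exec_ctx* exec_ctx, grpc_timer* timer,
                               grpc_millis deadline, grpc_closure* on_deadline) {
  if (deadline == GRPC_MILLIS_INF_FUTURE) return; /* e.g. server-side calls */
  grpc_timer_init(exec_ctx, timer, deadline, on_deadline);
}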
// Cancels the deadline timer.
@ -155,7 +153,7 @@ static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
struct start_timer_after_init_state {
bool in_call_combiner;
grpc_call_element* elem;
gpr_timespec deadline;
grpc_millis deadline;
grpc_closure closure;
};
static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
@ -182,14 +180,13 @@ static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_call_stack* call_stack,
grpc_call_combiner* call_combiner,
gpr_timespec deadline) {
grpc_millis deadline) {
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
deadline_state->call_stack = call_stack;
deadline_state->call_combiner = call_combiner;
// Deadline will always be infinite on servers, so the timer will only be
// set on clients with a finite deadline.
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
if (deadline != GRPC_MILLIS_INF_FUTURE) {
// When the deadline passes, we indicate the failure by sending down
// an op with cancel_error set. However, we can't send down any ops
// until after the call stack is fully initialized. If we start the
@ -214,7 +211,7 @@ void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
}
void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
gpr_timespec new_deadline) {
grpc_millis new_deadline) {
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
cancel_timer_if_needed(exec_ctx, deadline_state);
start_timer_if_needed(exec_ctx, elem, new_deadline);

@ -56,7 +56,8 @@ typedef struct grpc_deadline_state {
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_call_stack* call_stack,
grpc_call_combiner* call_combiner,
gpr_timespec deadline);
grpc_millis deadline);
void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem);
@ -70,7 +71,7 @@ void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
//
// Note: Must be called while holding the call combiner.
void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
gpr_timespec new_deadline);
grpc_millis new_deadline);
// To be called from the client-side filter's start_transport_stream_op_batch()
// method. Ensures that the deadline timer is cancelled when the call
@ -97,4 +98,4 @@ extern const grpc_channel_filter grpc_server_deadline_filter;
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_DEADLINE_DEADLINE_FILTER_H */
#endif /* GRPC_CORE_EXT_FILTERS_DEADLINE_DEADLINE_FILTER_H */

@ -56,11 +56,11 @@ typedef struct channel_data {
max_connection_idle */
grpc_timer max_idle_timer;
/* Allowed max time a channel may have no outstanding rpcs */
gpr_timespec max_connection_idle;
grpc_millis max_connection_idle;
/* Allowed max time a channel may exist */
gpr_timespec max_connection_age;
grpc_millis max_connection_age;
/* Allowed grace period after the channel reaches its max age */
gpr_timespec max_connection_age_grace;
grpc_millis max_connection_age_grace;
/* Closure to run when the channel's idle duration reaches max_connection_idle
and should be closed gracefully */
grpc_closure close_max_idle_channel;
@ -99,10 +99,9 @@ static void increase_call_count(grpc_exec_ctx* exec_ctx, channel_data* chand) {
static void decrease_call_count(grpc_exec_ctx* exec_ctx, channel_data* chand) {
if (gpr_atm_full_fetch_add(&chand->call_count, -1) == 1) {
GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_idle_timer");
grpc_timer_init(
exec_ctx, &chand->max_idle_timer,
gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), chand->max_connection_idle),
&chand->close_max_idle_channel, gpr_now(GPR_CLOCK_MONOTONIC));
grpc_timer_init(exec_ctx, &chand->max_idle_timer,
grpc_exec_ctx_now(exec_ctx) + chand->max_connection_idle,
&chand->close_max_idle_channel);
}
}
@ -123,10 +122,9 @@ static void start_max_age_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
gpr_mu_lock(&chand->max_age_timer_mu);
chand->max_age_timer_pending = true;
GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_age_timer");
grpc_timer_init(
exec_ctx, &chand->max_age_timer,
gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), chand->max_connection_age),
&chand->close_max_age_channel, gpr_now(GPR_CLOCK_MONOTONIC));
grpc_timer_init(exec_ctx, &chand->max_age_timer,
grpc_exec_ctx_now(exec_ctx) + chand->max_connection_age,
&chand->close_max_age_channel);
gpr_mu_unlock(&chand->max_age_timer_mu);
grpc_transport_op* op = grpc_make_transport_op(NULL);
op->on_connectivity_state_change = &chand->channel_connectivity_changed,
@ -144,11 +142,12 @@ static void start_max_age_grace_timer_after_goaway_op(grpc_exec_ctx* exec_ctx,
gpr_mu_lock(&chand->max_age_timer_mu);
chand->max_age_grace_timer_pending = true;
GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_age_grace_timer");
grpc_timer_init(exec_ctx, &chand->max_age_grace_timer,
gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
chand->max_connection_age_grace),
&chand->force_close_max_age_channel,
gpr_now(GPR_CLOCK_MONOTONIC));
grpc_timer_init(
exec_ctx, &chand->max_age_grace_timer,
chand->max_connection_age_grace == GRPC_MILLIS_INF_FUTURE
? GRPC_MILLIS_INF_FUTURE
: grpc_exec_ctx_now(exec_ctx) + chand->max_connection_age_grace,
&chand->force_close_max_age_channel);
gpr_mu_unlock(&chand->max_age_timer_mu);
GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->channel_stack,
"max_age start_max_age_grace_timer_after_goaway_op");
@ -249,7 +248,8 @@ static void channel_connectivity_changed(grpc_exec_ctx* exec_ctx, void* arg,
connection storms. Note that the MAX_CONNECTION_AGE option without jitter
would not create connection storms by itself, but if there happened to be a
connection storm it could cause it to repeat at a fixed period. */
static int add_random_max_connection_age_jitter(int value) {
static grpc_millis
add_random_max_connection_age_jitter_and_convert_to_grpc_millis(int value) {
/* generate a random number between 1 - MAX_CONNECTION_AGE_JITTER and
1 + MAX_CONNECTION_AGE_JITTER */
double multiplier = rand() * MAX_CONNECTION_AGE_JITTER * 2.0 / RAND_MAX +
@ -257,7 +257,9 @@ static int add_random_max_connection_age_jitter(int value) {
double result = multiplier * value;
/* INT_MAX - 0.5 converts the value to float, so that result will not be
cast to int implicitly before the comparison. */
return result > INT_MAX - 0.5 ? INT_MAX : (int)result;
return result > ((double)GRPC_MILLIS_INF_FUTURE) - 0.5
? GRPC_MILLIS_INF_FUTURE
: (grpc_millis)result;
}
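To make the jitter concrete: assuming MAX_CONNECTION_AGE_JITTER is 0.1 (the constant is defined earlier in this file; 0.1 is an assumption here) and a configured max age of 1,800,000 ms, the multiplier is drawn uniformly from [0.9, 1.1], so the effective max age falls between 1,620,000 ms and 1,980,000 ms; any result at or beyond GRPC_MILLIS_INF_FUTURE is clamped to GRPC_MILLIS_INF_FUTURE.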
/* Constructor for call_data. */
@ -287,45 +289,36 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
chand->max_age_grace_timer_pending = false;
chand->channel_stack = args->channel_stack;
chand->max_connection_age =
DEFAULT_MAX_CONNECTION_AGE_MS == INT_MAX
? gpr_inf_future(GPR_TIMESPAN)
: gpr_time_from_millis(add_random_max_connection_age_jitter(
DEFAULT_MAX_CONNECTION_AGE_MS),
GPR_TIMESPAN);
add_random_max_connection_age_jitter_and_convert_to_grpc_millis(
DEFAULT_MAX_CONNECTION_AGE_MS);
chand->max_connection_age_grace =
DEFAULT_MAX_CONNECTION_AGE_GRACE_MS == INT_MAX
? gpr_inf_future(GPR_TIMESPAN)
: gpr_time_from_millis(DEFAULT_MAX_CONNECTION_AGE_GRACE_MS,
GPR_TIMESPAN);
chand->max_connection_idle =
DEFAULT_MAX_CONNECTION_IDLE_MS == INT_MAX
? gpr_inf_future(GPR_TIMESPAN)
: gpr_time_from_millis(DEFAULT_MAX_CONNECTION_IDLE_MS, GPR_TIMESPAN);
? GRPC_MILLIS_INF_FUTURE
: DEFAULT_MAX_CONNECTION_AGE_GRACE_MS;
chand->max_connection_idle = DEFAULT_MAX_CONNECTION_IDLE_MS == INT_MAX
? GRPC_MILLIS_INF_FUTURE
: DEFAULT_MAX_CONNECTION_IDLE_MS;
for (size_t i = 0; i < args->channel_args->num_args; ++i) {
if (0 == strcmp(args->channel_args->args[i].key,
GRPC_ARG_MAX_CONNECTION_AGE_MS)) {
const int value = grpc_channel_arg_get_integer(
&args->channel_args->args[i], MAX_CONNECTION_AGE_INTEGER_OPTIONS);
chand->max_connection_age =
value == INT_MAX
? gpr_inf_future(GPR_TIMESPAN)
: gpr_time_from_millis(
add_random_max_connection_age_jitter(value), GPR_TIMESPAN);
add_random_max_connection_age_jitter_and_convert_to_grpc_millis(
value);
} else if (0 == strcmp(args->channel_args->args[i].key,
GRPC_ARG_MAX_CONNECTION_AGE_GRACE_MS)) {
const int value = grpc_channel_arg_get_integer(
&args->channel_args->args[i],
{DEFAULT_MAX_CONNECTION_AGE_GRACE_MS, 0, INT_MAX});
chand->max_connection_age_grace =
value == INT_MAX ? gpr_inf_future(GPR_TIMESPAN)
: gpr_time_from_millis(value, GPR_TIMESPAN);
value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
} else if (0 == strcmp(args->channel_args->args[i].key,
GRPC_ARG_MAX_CONNECTION_IDLE_MS)) {
const int value = grpc_channel_arg_get_integer(
&args->channel_args->args[i], MAX_CONNECTION_IDLE_INTEGER_OPTIONS);
chand->max_connection_idle =
value == INT_MAX ? gpr_inf_future(GPR_TIMESPAN)
: gpr_time_from_millis(value, GPR_TIMESPAN);
value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
}
}
GRPC_CLOSURE_INIT(&chand->close_max_idle_channel, close_max_idle_channel,
@ -348,8 +341,7 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
channel_connectivity_changed, chand,
grpc_schedule_on_exec_ctx);
if (gpr_time_cmp(chand->max_connection_age, gpr_inf_future(GPR_TIMESPAN)) !=
0) {
if (chand->max_connection_age != GRPC_MILLIS_INF_FUTURE) {
/* When the channel reaches its max age, we send down an op with
goaway_error set. However, we can't send down any ops until after the
channel stack is fully initialized. If we start the timer here, we have
@ -366,8 +358,7 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
/* Initialize the number of calls as 1, so that the max_idle_timer will not
start until start_max_idle_timer_after_init is invoked. */
gpr_atm_rel_store(&chand->call_count, 1);
if (gpr_time_cmp(chand->max_connection_idle, gpr_inf_future(GPR_TIMESPAN)) !=
0) {
if (chand->max_connection_idle != GRPC_MILLIS_INF_FUTURE) {
GRPC_CHANNEL_STACK_REF(chand->channel_stack,
"max_age start_max_idle_timer_after_init");
GRPC_CLOSURE_SCHED(exec_ctx, &chand->start_max_idle_timer_after_init,

@ -42,4 +42,4 @@ void grpc_register_workaround(uint32_t id, user_agent_parser parser);
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_WORKAROUNDS_WORKAROUND_UTILS_H */
#endif /* GRPC_CORE_EXT_FILTERS_WORKAROUNDS_WORKAROUND_UTILS_H */

@ -39,4 +39,4 @@ const char *grpc_chttp2_get_alpn_version_index(size_t i);
}
#endif
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_ALPN_ALPN_H */
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_ALPN_ALPN_H */

@ -134,8 +134,8 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
connection_state->handshake_mgr);
// TODO(roth): We should really get this timeout value from channel
// args instead of hard-coding it.
const gpr_timespec deadline = gpr_time_add(
gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(120, GPR_TIMESPAN));
const grpc_millis deadline =
grpc_exec_ctx_now(exec_ctx) + 120 * GPR_MS_PER_SEC;
grpc_handshake_manager_do_handshake(exec_ctx, connection_state->handshake_mgr,
tcp, state->args, deadline, acceptor,
on_handshake_done, connection_state);

@ -37,4 +37,4 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
}
#endif
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_SERVER_CHTTP2_SERVER_H */
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_SERVER_CHTTP2_SERVER_H */

@ -57,4 +57,4 @@ grpc_slice grpc_chttp2_base64_decode_with_length(grpc_exec_ctx *exec_ctx,
}
#endif
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_DECODER_H */
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_DECODER_H */

@ -44,4 +44,4 @@ grpc_slice grpc_chttp2_base64_encode_and_huffman_compress(grpc_slice input);
}
#endif
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_ENCODER_H */
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_ENCODER_H */

@ -159,11 +159,9 @@ static void finish_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
static void cancel_pings(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_error *error);
static void send_ping_locked(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_ping_type ping_type, grpc_closure *on_initiate,
grpc_closure *on_complete,
grpc_chttp2_initiate_write_reason initiate_write_reason);
static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_closure *on_initiate,
grpc_closure *on_complete);
static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error);
@ -279,6 +277,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->is_client = is_client;
t->flow_control.remote_window = DEFAULT_WINDOW;
t->flow_control.announced_window = DEFAULT_WINDOW;
t->flow_control.target_initial_window_size = DEFAULT_WINDOW;
t->flow_control.t = t;
t->deframe_state = is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0;
t->is_first_frame = true;
@ -317,17 +316,6 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_combiner_scheduler(t->combiner));
grpc_bdp_estimator_init(&t->flow_control.bdp_estimator, t->peer_string);
t->flow_control.last_pid_update = gpr_now(GPR_CLOCK_MONOTONIC);
grpc_pid_controller_init(&t->flow_control.pid_controller,
{
4, /* gain_p */
8, /* gain_t */
0, /* gain_d */
log2(DEFAULT_WINDOW), /* initial_control_value */
-1, /* min_control_value */
25, /* max_control_value */
10 /* integral_range */
});
grpc_chttp2_goaway_parser_init(&t->goaway_parser);
grpc_chttp2_hpack_parser_init(exec_ctx, &t->hpack_parser);
@ -366,43 +354,33 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
queue_setting_update(exec_ctx, t,
GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0);
}
queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
DEFAULT_WINDOW);
queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
DEFAULT_MAX_HEADER_LIST_SIZE);
queue_setting_update(exec_ctx, t,
GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA, 1);
t->ping_policy.max_pings_without_data = g_default_max_pings_without_data;
t->ping_policy.min_sent_ping_interval_without_data = gpr_time_from_millis(
g_default_min_sent_ping_interval_without_data_ms, GPR_TIMESPAN);
t->ping_policy.min_sent_ping_interval_without_data =
g_default_min_sent_ping_interval_without_data_ms;
t->ping_policy.max_ping_strikes = g_default_max_ping_strikes;
t->ping_policy.min_recv_ping_interval_without_data = gpr_time_from_millis(
g_default_min_recv_ping_interval_without_data_ms, GPR_TIMESPAN);
t->ping_policy.min_recv_ping_interval_without_data =
g_default_min_recv_ping_interval_without_data_ms;
/* Keepalive setting */
if (t->is_client) {
t->keepalive_time =
g_default_client_keepalive_time_ms == INT_MAX
? gpr_inf_future(GPR_TIMESPAN)
: gpr_time_from_millis(g_default_client_keepalive_time_ms,
GPR_TIMESPAN);
t->keepalive_timeout =
g_default_client_keepalive_timeout_ms == INT_MAX
? gpr_inf_future(GPR_TIMESPAN)
: gpr_time_from_millis(g_default_client_keepalive_timeout_ms,
GPR_TIMESPAN);
t->keepalive_time = g_default_client_keepalive_time_ms == INT_MAX
? GRPC_MILLIS_INF_FUTURE
: g_default_client_keepalive_time_ms;
t->keepalive_timeout = g_default_client_keepalive_timeout_ms == INT_MAX
? GRPC_MILLIS_INF_FUTURE
: g_default_client_keepalive_timeout_ms;
} else {
t->keepalive_time =
g_default_server_keepalive_time_ms == INT_MAX
? gpr_inf_future(GPR_TIMESPAN)
: gpr_time_from_millis(g_default_server_keepalive_time_ms,
GPR_TIMESPAN);
t->keepalive_timeout =
g_default_server_keepalive_timeout_ms == INT_MAX
? gpr_inf_future(GPR_TIMESPAN)
: gpr_time_from_millis(g_default_server_keepalive_timeout_ms,
GPR_TIMESPAN);
t->keepalive_time = g_default_server_keepalive_time_ms == INT_MAX
? GRPC_MILLIS_INF_FUTURE
: g_default_server_keepalive_time_ms;
t->keepalive_timeout = g_default_server_keepalive_timeout_ms == INT_MAX
? GRPC_MILLIS_INF_FUTURE
: g_default_server_keepalive_timeout_ms;
}
t->keepalive_permit_without_calls = g_default_keepalive_permit_without_calls;
@ -447,23 +425,21 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
channel_args->args[i].key,
GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS)) {
t->ping_policy.min_sent_ping_interval_without_data =
gpr_time_from_millis(
grpc_channel_arg_get_integer(
&channel_args->args[i],
{g_default_min_sent_ping_interval_without_data_ms, 0,
INT_MAX}),
GPR_TIMESPAN);
grpc_channel_arg_get_integer(
&channel_args->args[i],
grpc_integer_options{
g_default_min_sent_ping_interval_without_data_ms, 0,
INT_MAX});
} else if (0 ==
strcmp(
channel_args->args[i].key,
GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS)) {
t->ping_policy.min_recv_ping_interval_without_data =
gpr_time_from_millis(
grpc_channel_arg_get_integer(
&channel_args->args[i],
{g_default_min_recv_ping_interval_without_data_ms, 0,
INT_MAX}),
GPR_TIMESPAN);
grpc_channel_arg_get_integer(
&channel_args->args[i],
grpc_integer_options{
g_default_min_recv_ping_interval_without_data_ms, 0,
INT_MAX});
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_WRITE_BUFFER_SIZE)) {
t->write_buffer_size = (uint32_t)grpc_channel_arg_get_integer(
@ -476,22 +452,21 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GRPC_ARG_KEEPALIVE_TIME_MS)) {
const int value = grpc_channel_arg_get_integer(
&channel_args->args[i],
{t->is_client ? g_default_client_keepalive_time_ms
: g_default_server_keepalive_time_ms,
1, INT_MAX});
t->keepalive_time = value == INT_MAX
? gpr_inf_future(GPR_TIMESPAN)
: gpr_time_from_millis(value, GPR_TIMESPAN);
grpc_integer_options{t->is_client
? g_default_client_keepalive_time_ms
: g_default_server_keepalive_time_ms,
1, INT_MAX});
t->keepalive_time = value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_KEEPALIVE_TIMEOUT_MS)) {
const int value = grpc_channel_arg_get_integer(
&channel_args->args[i],
{t->is_client ? g_default_client_keepalive_timeout_ms
: g_default_server_keepalive_timeout_ms,
0, INT_MAX});
t->keepalive_timeout = value == INT_MAX
? gpr_inf_future(GPR_TIMESPAN)
: gpr_time_from_millis(value, GPR_TIMESPAN);
grpc_integer_options{t->is_client
? g_default_client_keepalive_timeout_ms
: g_default_server_keepalive_timeout_ms,
0, INT_MAX});
t->keepalive_timeout =
value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS)) {
t->keepalive_permit_without_calls =
@ -571,23 +546,27 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->ping_state.pings_before_data_required = 0;
t->ping_state.is_delayed_ping_timer_set = false;
t->ping_recv_state.last_ping_recv_time = gpr_inf_past(GPR_CLOCK_MONOTONIC);
t->ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST;
t->ping_recv_state.ping_strikes = 0;
/* Start keepalive pings */
if (gpr_time_cmp(t->keepalive_time, gpr_inf_future(GPR_TIMESPAN)) != 0) {
if (t->keepalive_time != GRPC_MILLIS_INF_FUTURE) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
grpc_timer_init(
exec_ctx, &t->keepalive_ping_timer,
gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_time),
&t->init_keepalive_ping_locked, gpr_now(GPR_CLOCK_MONOTONIC));
grpc_timer_init(exec_ctx, &t->keepalive_ping_timer,
grpc_exec_ctx_now(exec_ctx) + t->keepalive_time,
&t->init_keepalive_ping_locked);
} else {
/* Use GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED to indicate there are no
inflight keepalive timers */
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED;
}
grpc_chttp2_act_on_flowctl_action(
exec_ctx,
grpc_chttp2_flowctl_get_action(exec_ctx, &t->flow_control, NULL), t,
NULL);
grpc_chttp2_initiate_write(exec_ctx, t,
GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE);
post_benign_reclaimer(exec_ctx, t);
@ -698,7 +677,7 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[1], arena);
grpc_chttp2_data_parser_init(&s->data_parser);
grpc_slice_buffer_init(&s->flow_controlled_buffer);
s->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
s->deadline = GRPC_MILLIS_INF_FUTURE;
GRPC_CLOSURE_INIT(&s->complete_fetch_locked, complete_fetch_locked, s,
grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&s->unprocessed_incoming_frames_buffer);
@ -902,9 +881,6 @@ static void inc_initiate_write_reason(
case GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS(exec_ctx);
break;
case GRPC_CHTTP2_INITIATE_WRITE_BDP_ESTIMATOR_PING:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING(exec_ctx);
break;
case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING(
exec_ctx);
@ -1042,6 +1018,7 @@ static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt,
write_action, t, scheduler),
GRPC_ERROR_NONE);
} else {
GRPC_STATS_INC_HTTP2_SPURIOUS_WRITES_BEGUN(exec_ctx);
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
"begin writing nothing");
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing");
@ -1140,14 +1117,12 @@ void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_ERROR,
"Received a GOAWAY with error code ENHANCE_YOUR_CALM and debug "
"data equal to \"too_many_pings\"");
double current_keepalive_time_ms =
gpr_timespec_to_micros(t->keepalive_time) / 1000;
double current_keepalive_time_ms = (double)t->keepalive_time;
t->keepalive_time =
current_keepalive_time_ms > INT_MAX / KEEPALIVE_TIME_BACKOFF_MULTIPLIER
? gpr_inf_future(GPR_TIMESPAN)
: gpr_time_from_millis((int64_t)(current_keepalive_time_ms *
KEEPALIVE_TIME_BACKOFF_MULTIPLIER),
GPR_TIMESPAN);
? GRPC_MILLIS_INF_FUTURE
: (grpc_millis)(current_keepalive_time_ms *
KEEPALIVE_TIME_BACKOFF_MULTIPLIER);
}
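In other words, each too_many_pings GOAWAY stretches the keepalive interval multiplicatively: assuming KEEPALIVE_TIME_BACKOFF_MULTIPLIER is 2 (the constant is defined elsewhere in this file; 2 is an assumption here), a keepalive_time of 20,000 ms becomes 40,000 ms, and once the product would exceed INT_MAX the value saturates at GRPC_MILLIS_INF_FUTURE, effectively disabling further keepalive pings.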
/* lie: use transient failure from the transport to indicate goaway has been
@ -1461,8 +1436,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
t->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE];
if (t->is_client) {
s->deadline =
gpr_time_min(s->deadline, s->send_initial_metadata->deadline);
s->deadline = GPR_MIN(s->deadline, s->send_initial_metadata->deadline);
}
if (metadata_size > metadata_peer_limit) {
grpc_chttp2_cancel_stream(
@ -1646,8 +1620,8 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
&t->flow_control, &s->flow_control, GRPC_HEADER_SIZE_IN_BYTES,
already_received);
grpc_chttp2_act_on_flowctl_action(
exec_ctx,
grpc_chttp2_flowctl_get_action(&t->flow_control, &s->flow_control),
exec_ctx, grpc_chttp2_flowctl_get_action(exec_ctx, &t->flow_control,
&s->flow_control),
t, s);
}
}
@ -1680,16 +1654,14 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
if (!t->is_client) {
if (op->send_initial_metadata) {
gpr_timespec deadline =
grpc_millis deadline =
op->payload->send_initial_metadata.send_initial_metadata->deadline;
GPR_ASSERT(0 ==
gpr_time_cmp(gpr_inf_future(deadline.clock_type), deadline));
GPR_ASSERT(deadline == GRPC_MILLIS_INF_FUTURE);
}
if (op->send_trailing_metadata) {
gpr_timespec deadline =
grpc_millis deadline =
op->payload->send_trailing_metadata.send_trailing_metadata->deadline;
GPR_ASSERT(0 ==
gpr_time_cmp(gpr_inf_future(deadline.clock_type), deadline));
GPR_ASSERT(deadline == GRPC_MILLIS_INF_FUTURE);
}
}
@ -1713,28 +1685,21 @@ static void cancel_pings(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_error *error) {
/* call back remaining pings: they're not allowed to call into the transport,
and maybe they hold resources that need to be freed */
for (size_t i = 0; i < GRPC_CHTTP2_PING_TYPE_COUNT; i++) {
grpc_chttp2_ping_queue *pq = &t->ping_queues[i];
for (size_t j = 0; j < GRPC_CHTTP2_PCL_COUNT; j++) {
grpc_closure_list_fail_all(&pq->lists[j], GRPC_ERROR_REF(error));
GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[j]);
}
grpc_chttp2_ping_queue *pq = &t->ping_queue;
for (size_t j = 0; j < GRPC_CHTTP2_PCL_COUNT; j++) {
grpc_closure_list_fail_all(&pq->lists[j], GRPC_ERROR_REF(error));
GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[j]);
}
GRPC_ERROR_UNREF(error);
}
static void send_ping_locked(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_ping_type ping_type, grpc_closure *on_initiate,
grpc_closure *on_ack,
grpc_chttp2_initiate_write_reason initiate_write_reason) {
grpc_chttp2_ping_queue *pq = &t->ping_queues[ping_type];
static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_closure *on_initiate, grpc_closure *on_ack) {
grpc_chttp2_ping_queue *pq = &t->ping_queue;
grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_INITIATE], on_initiate,
GRPC_ERROR_NONE);
if (grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_NEXT], on_ack,
GRPC_ERROR_NONE)) {
grpc_chttp2_initiate_write(exec_ctx, t, initiate_write_reason);
}
grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_NEXT], on_ack,
GRPC_ERROR_NONE);
}
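Note the simplification here: there is now a single t->ping_queue instead of one queue per ping type, and send_ping_locked no longer decides when to write; callers queue the ping and then call grpc_chttp2_initiate_write themselves, as the application-ping path in perform_transport_op_locked below does.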
static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
@ -1749,8 +1714,7 @@ static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
uint64_t id) {
grpc_chttp2_ping_queue *pq =
&t->ping_queues[id % GRPC_CHTTP2_PING_TYPE_COUNT];
grpc_chttp2_ping_queue *pq = &t->ping_queue;
if (pq->inflight_id != id) {
char *from = grpc_endpoint_get_peer(t->ep);
gpr_log(GPR_DEBUG, "Unknown ping response from %s: %" PRIx64, from, id);
@ -1769,8 +1733,8 @@ static void send_goaway(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->sent_goaway_state = GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED;
grpc_http2_error_code http_error;
grpc_slice slice;
grpc_error_get_status(error, gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL,
&slice, &http_error);
grpc_error_get_status(exec_ctx, error, GRPC_MILLIS_INF_FUTURE, NULL, &slice,
&http_error);
grpc_chttp2_goaway_append(t->last_new_stream_id, (uint32_t)http_error,
grpc_slice_ref_internal(slice), &t->qbuf);
grpc_chttp2_initiate_write(exec_ctx, t,
@ -1780,7 +1744,7 @@ static void send_goaway(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
void grpc_chttp2_add_ping_strike(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t) {
gpr_log(GPR_DEBUG, "PING strike");
t->ping_recv_state.ping_strikes++;
if (++t->ping_recv_state.ping_strikes > t->ping_policy.max_ping_strikes &&
t->ping_policy.max_ping_strikes != 0) {
send_goaway(exec_ctx, t,
@ -1820,9 +1784,9 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
}
if (op->send_ping) {
send_ping_locked(exec_ctx, t, GRPC_CHTTP2_PING_ON_NEXT_WRITE, NULL,
op->send_ping,
GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING);
send_ping_locked(exec_ctx, t, NULL, op->send_ping);
grpc_chttp2_initiate_write(exec_ctx, t,
GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING);
}
if (op->on_connectivity_state_change != NULL) {
@ -2069,7 +2033,8 @@ void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
if (!s->read_closed || !s->write_closed) {
if (s->id != 0) {
grpc_http2_error_code http_error;
grpc_error_get_status(due_to_error, s->deadline, NULL, NULL, &http_error);
grpc_error_get_status(exec_ctx, due_to_error, s->deadline, NULL, NULL,
&http_error);
grpc_slice_buffer_add(
&t->qbuf, grpc_chttp2_rst_stream_create(s->id, (uint32_t)http_error,
&s->stats.outgoing));
@ -2087,7 +2052,7 @@ void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, grpc_error *error) {
grpc_status_code status;
grpc_slice slice;
grpc_error_get_status(error, s->deadline, &status, &slice, NULL);
grpc_error_get_status(exec_ctx, error, s->deadline, &status, &slice, NULL);
if (status != GRPC_STATUS_OK) {
s->seen_error = true;
@ -2252,7 +2217,8 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
uint32_t len = 0;
grpc_status_code grpc_status;
grpc_slice slice;
grpc_error_get_status(error, s->deadline, &grpc_status, &slice, NULL);
grpc_error_get_status(exec_ctx, error, s->deadline, &grpc_status, &slice,
NULL);
GPR_ASSERT(grpc_status >= 0 && (int)grpc_status < 100);
@ -2469,10 +2435,8 @@ void grpc_chttp2_act_on_flowctl_action(grpc_exec_ctx *exec_ctx,
if (action.need_ping) {
GRPC_CHTTP2_REF_TRANSPORT(t, "bdp_ping");
grpc_bdp_estimator_schedule_ping(&t->flow_control.bdp_estimator);
send_ping_locked(exec_ctx, t,
GRPC_CHTTP2_PING_BEFORE_TRANSPORT_WINDOW_UPDATE,
&t->start_bdp_ping_locked, &t->finish_bdp_ping_locked,
GRPC_CHTTP2_INITIATE_WRITE_BDP_ESTIMATOR_PING);
send_ping_locked(exec_ctx, t, &t->start_bdp_ping_locked,
&t->finish_bdp_ping_locked);
}
}
@ -2580,7 +2544,8 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer,
&t->read_action_locked);
grpc_chttp2_act_on_flowctl_action(
exec_ctx, grpc_chttp2_flowctl_get_bdp_action(&t->flow_control), t,
exec_ctx,
grpc_chttp2_flowctl_get_action(exec_ctx, &t->flow_control, NULL), t,
NULL);
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keep_reading");
} else {
@ -2613,7 +2578,7 @@ static void finish_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
if (GRPC_TRACER_ON(grpc_http_trace)) {
gpr_log(GPR_DEBUG, "%s: Complete BDP ping", t->peer_string);
}
grpc_bdp_estimator_complete_ping(&t->flow_control.bdp_estimator);
grpc_bdp_estimator_complete_ping(exec_ctx, &t->flow_control.bdp_estimator);
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "bdp_ping");
}
@ -2687,24 +2652,22 @@ static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_chttp2_stream_map_size(&t->stream_map) > 0) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_PINGING;
GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive ping end");
send_ping_locked(exec_ctx, t, GRPC_CHTTP2_PING_ON_NEXT_WRITE,
&t->start_keepalive_ping_locked,
&t->finish_keepalive_ping_locked,
GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING);
send_ping_locked(exec_ctx, t, &t->start_keepalive_ping_locked,
&t->finish_keepalive_ping_locked);
grpc_chttp2_initiate_write(exec_ctx, t,
GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING);
} else {
GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
grpc_timer_init(
exec_ctx, &t->keepalive_ping_timer,
gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_time),
&t->init_keepalive_ping_locked, gpr_now(GPR_CLOCK_MONOTONIC));
grpc_timer_init(exec_ctx, &t->keepalive_ping_timer,
grpc_exec_ctx_now(exec_ctx) + t->keepalive_time,
&t->init_keepalive_ping_locked);
}
} else if (error == GRPC_ERROR_CANCELLED) {
/* The keepalive ping timer may be cancelled by bdp */
GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
grpc_timer_init(
exec_ctx, &t->keepalive_ping_timer,
gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_time),
&t->init_keepalive_ping_locked, gpr_now(GPR_CLOCK_MONOTONIC));
grpc_timer_init(exec_ctx, &t->keepalive_ping_timer,
grpc_exec_ctx_now(exec_ctx) + t->keepalive_time,
&t->init_keepalive_ping_locked);
}
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "init keepalive ping");
}
@ -2713,10 +2676,9 @@ static void start_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive watchdog");
grpc_timer_init(
exec_ctx, &t->keepalive_watchdog_timer,
gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_timeout),
&t->keepalive_watchdog_fired_locked, gpr_now(GPR_CLOCK_MONOTONIC));
grpc_timer_init(exec_ctx, &t->keepalive_watchdog_timer,
grpc_exec_ctx_now(exec_ctx) + t->keepalive_timeout,
&t->keepalive_watchdog_fired_locked);
}
static void finish_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
@ -2727,10 +2689,9 @@ static void finish_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
grpc_timer_cancel(exec_ctx, &t->keepalive_watchdog_timer);
GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
grpc_timer_init(
exec_ctx, &t->keepalive_ping_timer,
gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_time),
&t->init_keepalive_ping_locked, gpr_now(GPR_CLOCK_MONOTONIC));
grpc_timer_init(exec_ctx, &t->keepalive_ping_timer,
grpc_exec_ctx_now(exec_ctx) + t->keepalive_time,
&t->init_keepalive_ping_locked);
}
}
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keepalive ping end");
@ -2830,9 +2791,9 @@ static void incoming_byte_stream_next_locked(grpc_exec_ctx *exec_ctx,
bs->next_action.max_size_hint,
cur_length);
grpc_chttp2_act_on_flowctl_action(
exec_ctx,
grpc_chttp2_flowctl_get_action(&t->flow_control, &s->flow_control), t,
s);
exec_ctx, grpc_chttp2_flowctl_get_action(exec_ctx, &t->flow_control,
&s->flow_control),
t, s);
}
GPR_ASSERT(s->unprocessed_incoming_frames_buffer.length == 0);
if (s->frame_storage.length > 0) {
@ -3180,8 +3141,6 @@ const char *grpc_chttp2_initiate_write_reason_string(
return "TRANSPORT_FLOW_CONTROL";
case GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS:
return "SEND_SETTINGS";
case GRPC_CHTTP2_INITIATE_WRITE_BDP_ESTIMATOR_PING:
return "BDP_ESTIMATOR_PING";
case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING:
return "FLOW_CONTROL_UNSTALLED_BY_SETTING";
case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE:

@ -176,11 +176,9 @@ static void trace_action(grpc_chttp2_transport_flowctl* tfc,
/* How many bytes of incoming flow control would we like to advertise */
static uint32_t grpc_chttp2_target_announced_window(
const grpc_chttp2_transport_flowctl* tfc) {
return (uint32_t)GPR_MIN(
(int64_t)((1u << 31) - 1),
tfc->announced_stream_total_over_incoming_window +
tfc->t->settings[GRPC_SENT_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]);
return (uint32_t)GPR_MIN((int64_t)((1u << 31) - 1),
tfc->announced_stream_total_over_incoming_window +
tfc->target_initial_window_size);
}
// we have sent data on the wire, we must track this in our bookkeeping for the
@ -282,13 +280,14 @@ grpc_error* grpc_chttp2_flowctl_recv_data(grpc_chttp2_transport_flowctl* tfc,
// Returns a non-zero announce integer if we should send a transport window
// update
uint32_t grpc_chttp2_flowctl_maybe_send_transport_update(
grpc_chttp2_transport_flowctl* tfc) {
grpc_chttp2_transport_flowctl* tfc, bool writing_anyway) {
PRETRACE(tfc, NULL);
uint32_t target_announced_window = grpc_chttp2_target_announced_window(tfc);
uint32_t threshold_to_send_transport_window_update =
tfc->t->outbuf.count > 0 ? 3 * target_announced_window / 4
: target_announced_window / 2;
if (tfc->announced_window <= threshold_to_send_transport_window_update &&
if ((writing_anyway ||
tfc->announced_window <= threshold_to_send_transport_window_update) &&
tfc->announced_window != target_announced_window) {
uint32_t announce = (uint32_t)GPR_CLAMP(
target_announced_window - tfc->announced_window, 0, UINT32_MAX);
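/* Illustrative stand-alone version of the threshold rule above (invented
   helper name; the real code reads these values out of tfc). With
   target = 65535 and announced = 40000: the idle threshold is 32767, so no
   update is sent; if a write is already going out anyway, the WINDOW_UPDATE
   is piggy-backed and announces 65535 - 40000 = 25535. */
#include <stdint.h>
uint32_t sketch_maybe_transport_update(int64_t announced, int64_t target,
                                       int outbuf_nonempty,
                                       int writing_anyway) {
  int64_t threshold = outbuf_nonempty ? (3 * target) / 4 : target / 2;
  if ((writing_anyway || announced <= threshold) && announced != target) {
    int64_t announce = target - announced;
    return announce > 0 ? (uint32_t)announce : 0;
  }
  return 0; /* nothing worth sending */
}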
@ -393,15 +392,27 @@ static grpc_chttp2_flowctl_urgency delta_is_significant(
// Takes in a target and uses the pid controller to return a stabilized
// guess at the new bdp.
static double get_pid_controller_guess(grpc_chttp2_transport_flowctl* tfc,
static double get_pid_controller_guess(grpc_exec_ctx* exec_ctx,
grpc_chttp2_transport_flowctl* tfc,
double target) {
double bdp_error = target - grpc_pid_controller_last(&tfc->pid_controller);
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec dt_timespec = gpr_time_sub(now, tfc->last_pid_update);
double dt = (double)dt_timespec.tv_sec + dt_timespec.tv_nsec * 1e-9;
if (dt > 0.1) {
dt = 0.1;
grpc_millis now = grpc_exec_ctx_now(exec_ctx);
if (!tfc->pid_controller_initialized) {
tfc->last_pid_update = now;
tfc->pid_controller_initialized = true;
grpc_pid_controller_args args;
memset(&args, 0, sizeof(args));
args.gain_p = 4;
args.gain_i = 8;
args.gain_d = 0;
args.initial_control_value = target;
args.min_control_value = -1;
args.max_control_value = 25;
args.integral_range = 10;
grpc_pid_controller_init(&tfc->pid_controller, args);
return pow(2, target);
}
double bdp_error = target - grpc_pid_controller_last(&tfc->pid_controller);
double dt = (double)(now - tfc->last_pid_update) * 1e-3;
double log2_bdp_guess =
grpc_pid_controller_update(&tfc->pid_controller, bdp_error, dt);
tfc->last_pid_update = now;
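/* Toy, self-contained version of the stabilization step above (hypothetical
   helper, not grpc_pid_controller): the control variable is log2(bdp), dt is
   derived from grpc_millis as (now - last_update) * 1e-3 seconds, and the
   output is converted back to a byte count. Controller gains are omitted. */
#include <math.h>
typedef struct { double last_log2_bdp; } toy_controller;
double toy_pid_guess(toy_controller *c, double target_log2_bdp,
                     double dt_seconds) {
  double error = target_log2_bdp - c->last_log2_bdp;
  c->last_log2_bdp += error * dt_seconds; /* proportional-only nudge */
  return pow(2, c->last_log2_bdp);        /* back to bytes */
}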
@ -414,20 +425,25 @@ static double get_target_under_memory_pressure(
// do not increase window under heavy memory pressure.
double memory_pressure = grpc_resource_quota_get_memory_pressure(
grpc_resource_user_quota(grpc_endpoint_get_resource_user(tfc->t->ep)));
if (memory_pressure > 0.8) {
target *= 1 - GPR_MIN(1, (memory_pressure - 0.8) / 0.1);
static const double kLowMemPressure = 0.1;
static const double kZeroTarget = 22;
static const double kHighMemPressure = 0.8;
static const double kMaxMemPressure = 0.9;
if (memory_pressure < kLowMemPressure && target < kZeroTarget) {
target = (target - kZeroTarget) * memory_pressure / kLowMemPressure +
kZeroTarget;
} else if (memory_pressure > kHighMemPressure) {
target *= 1 - GPR_MIN(1, (memory_pressure - kHighMemPressure) /
(kMaxMemPressure - kHighMemPressure));
}
return target;
}
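/* Stand-alone sketch of the piecewise rule above (constants copied from the
   hunk; target is log2 of the flow-control window, so kZeroTarget = 22
   corresponds to a 4MB window). */
double sketch_scale_target(double target, double memory_pressure) {
  const double kLowMemPressure = 0.1, kZeroTarget = 22;
  const double kHighMemPressure = 0.8, kMaxMemPressure = 0.9;
  if (memory_pressure < kLowMemPressure && target < kZeroTarget) {
    /* at very low pressure, pull small targets up toward 2^22 */
    return (target - kZeroTarget) * memory_pressure / kLowMemPressure +
           kZeroTarget;
  } else if (memory_pressure > kHighMemPressure) {
    /* above 0.8, shrink linearly; at 0.9 and beyond the target reaches 0 */
    double f = (memory_pressure - kHighMemPressure) /
               (kMaxMemPressure - kHighMemPressure);
    return target * (1 - (f > 1 ? 1 : f));
  }
  return target;
}
/* e.g. sketch_scale_target(18, 0.05) == 20, sketch_scale_target(20, 0.95) == 0 */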
grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_action(
grpc_chttp2_transport_flowctl* tfc, grpc_chttp2_stream_flowctl* sfc) {
grpc_exec_ctx* exec_ctx, grpc_chttp2_transport_flowctl* tfc,
grpc_chttp2_stream_flowctl* sfc) {
grpc_chttp2_flowctl_action action;
memset(&action, 0, sizeof(action));
uint32_t target_announced_window = grpc_chttp2_target_announced_window(tfc);
if (tfc->announced_window < target_announced_window / 2) {
action.send_transport_update = GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY;
}
// TODO(ncteisen): tune this
if (sfc != NULL && !sfc->s->read_closed) {
uint32_t sent_init_window =
@ -442,20 +458,12 @@ grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_action(
action.send_stream_update = GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE;
}
}
TRACEACTION(tfc, action);
return action;
}
grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_bdp_action(
grpc_chttp2_transport_flowctl* tfc) {
grpc_chttp2_flowctl_action action;
memset(&action, 0, sizeof(action));
if (tfc->enable_bdp_probe) {
action.need_ping = grpc_bdp_estimator_need_ping(&tfc->bdp_estimator);
action.need_ping =
grpc_bdp_estimator_need_ping(exec_ctx, &tfc->bdp_estimator);
// get bdp estimate and update initial_window accordingly.
int64_t estimate = -1;
int32_t bdp = -1;
if (grpc_bdp_estimator_get_estimate(&tfc->bdp_estimator, &estimate)) {
double target = 1 + log2((double)estimate);
@ -466,17 +474,18 @@ grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_bdp_action(
// run our target through the pid controller to stabilize change.
// TODO(ncteisen): experiment with other controllers here.
double bdp_guess = get_pid_controller_guess(tfc, target);
double bdp_guess = get_pid_controller_guess(exec_ctx, tfc, target);
// Though initial window 'could' drop to 0, we keep the floor at 128
bdp = GPR_MAX((int32_t)bdp_guess, 128);
tfc->target_initial_window_size =
(int32_t)GPR_CLAMP(bdp_guess, 128, INT32_MAX);
grpc_chttp2_flowctl_urgency init_window_update_urgency =
delta_is_significant(tfc, bdp,
delta_is_significant(tfc, tfc->target_initial_window_size,
GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE);
if (init_window_update_urgency != GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED) {
action.send_setting_update = init_window_update_urgency;
action.initial_window_size = (uint32_t)bdp;
action.initial_window_size = (uint32_t)tfc->target_initial_window_size;
}
}
@ -485,8 +494,9 @@ grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_bdp_action(
if (grpc_bdp_estimator_get_bw(&tfc->bdp_estimator, &bw_dbl)) {
// we target the max of BDP or roughly one millisecond's worth of the estimated bandwidth.
int32_t frame_size = (int32_t)GPR_CLAMP(
GPR_MAX((int32_t)GPR_CLAMP(bw_dbl, 0, INT_MAX) / 1000, bdp), 16384,
16777215);
GPR_MAX((int32_t)GPR_CLAMP(bw_dbl, 0, INT_MAX) / 1000,
tfc->target_initial_window_size),
16384, 16777215);
grpc_chttp2_flowctl_urgency frame_size_urgency = delta_is_significant(
tfc, frame_size, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE);
if (frame_size_urgency != GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED) {
@ -497,7 +507,10 @@ grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_bdp_action(
}
}
}
uint32_t target_announced_window = grpc_chttp2_target_announced_window(tfc);
if (tfc->announced_window < target_announced_window / 2) {
action.send_transport_update = GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY;
}
TRACEACTION(tfc, action);
return action;
}
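/* Illustrative worked example (numbers invented) of one pass through the
   action computation above, on the first write after the pid controller is
   initialized (it then simply echoes the target):
     estimate = 80000 bytes        -> target = 1 + log2(80000) ~= 17.3
                                      (possibly reduced under memory pressure)
     bdp_guess = 2^17.3            ~= 160k
     target_initial_window_size    = clamp(160k, 128, INT32_MAX)     = 160k
     bw = 10e6 bytes/s             -> bw / 1000                      = 10000
     frame_size = clamp(max(10000, 160k), 16384, 16777215)           = 160k
   Each setting is only queued if delta_is_significant() reports that the
   change is large enough to justify a SETTINGS frame. */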

@ -88,4 +88,4 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames(
}
#endif
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_DATA_H */
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_DATA_H */

@ -68,4 +68,4 @@ void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
}
#endif
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_GOAWAY_H */
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_GOAWAY_H */

@ -89,10 +89,10 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_ack_ping(exec_ctx, t, p->opaque_8bytes);
} else {
if (!t->is_client) {
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec next_allowed_ping =
gpr_time_add(t->ping_recv_state.last_ping_recv_time,
t->ping_policy.min_recv_ping_interval_without_data);
grpc_millis now = grpc_exec_ctx_now(exec_ctx);
grpc_millis next_allowed_ping =
t->ping_recv_state.last_ping_recv_time +
t->ping_policy.min_recv_ping_interval_without_data;
if (t->keepalive_permit_without_calls == 0 &&
grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
@ -100,11 +100,10 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
no less than two hours. When there are no outstanding streams, we
restrict the number of PINGs to a rate equivalent to TCP Keep-Alive. */
next_allowed_ping =
gpr_time_add(t->ping_recv_state.last_ping_recv_time,
gpr_time_from_seconds(7200, GPR_TIMESPAN));
t->ping_recv_state.last_ping_recv_time + 7200 * GPR_MS_PER_SEC;
}
if (gpr_time_cmp(next_allowed_ping, now) > 0) {
if (next_allowed_ping > now) {
grpc_chttp2_add_ping_strike(exec_ctx, t);
}
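/* Worked example of the server-side rate limit above (all values in
   grpc_millis, GPR_MS_PER_SEC == 1000): with last_ping_recv_time = 10000,
   no active streams and keepalive_permit_without_calls == 0,
     next_allowed_ping = 10000 + 7200 * GPR_MS_PER_SEC = 7210000.
   A ping arriving at now = 20000 is early (20000 < 7210000), so it counts as
   a ping strike; once strikes exceed max_ping_strikes the transport sends a
   GOAWAY (see grpc_chttp2_add_ping_strike earlier in this patch). */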

@ -49,4 +49,4 @@ void grpc_set_disable_ping_ack(bool disable_ping_ack);
}
#endif
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_PING_H */
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_PING_H */

@ -48,4 +48,4 @@ grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
}
#endif
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_RST_STREAM_H */
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_RST_STREAM_H */

@ -66,4 +66,4 @@ grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx,
}
#endif
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_SETTINGS_H */
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_SETTINGS_H */

@ -47,4 +47,4 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
}
#endif
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_WINDOW_UPDATE_H */
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_WINDOW_UPDATE_H */

@ -535,12 +535,12 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
#define TIMEOUT_KEY "grpc-timeout"
static void deadline_enc(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_compressor *c, gpr_timespec deadline,
grpc_chttp2_hpack_compressor *c, grpc_millis deadline,
framer_state *st) {
char timeout_str[GRPC_HTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE];
grpc_mdelem mdelem;
grpc_http2_encode_timeout(
gpr_time_sub(deadline, gpr_now(deadline.clock_type)), timeout_str);
grpc_http2_encode_timeout(deadline - grpc_exec_ctx_now(exec_ctx),
timeout_str);
mdelem = grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_GRPC_TIMEOUT,
grpc_slice_from_copied_string(timeout_str));
hpack_enc(exec_ctx, c, mdelem, st);
@ -660,8 +660,8 @@ void grpc_chttp2_encode_header(grpc_exec_ctx *exec_ctx,
for (grpc_linked_mdelem *l = metadata->list.head; l; l = l->next) {
hpack_enc(exec_ctx, c, l->md, &st);
}
gpr_timespec deadline = metadata->deadline;
if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) != 0) {
grpc_millis deadline = metadata->deadline;
if (deadline != GRPC_MILLIS_INF_FUTURE) {
deadline_enc(exec_ctx, c, deadline, &st);
}
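/* Stand-alone sketch of turning a relative timeout in milliseconds into a
   grpc-timeout header value (hypothetical helper, not
   grpc_http2_encode_timeout; the real encoder also picks coarser units to
   keep the digit count down). */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
void sketch_encode_timeout(int64_t millis, char *buf, size_t len) {
  if (millis <= 0) {
    snprintf(buf, len, "1n"); /* already expired: smallest positive timeout */
  } else if (millis % 1000 == 0) {
    snprintf(buf, len, "%" PRId64 "S", millis / 1000); /* whole seconds */
  } else {
    snprintf(buf, len, "%" PRId64 "m", millis); /* milliseconds */
  }
}
/* sketch_encode_timeout(30000, ...) -> "30S"; sketch_encode_timeout(2500, ...) -> "2500m" */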

@ -99,4 +99,4 @@ void grpc_chttp2_encode_header(grpc_exec_ctx *exec_ctx,
}
#endif
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_ENCODER_H */
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_ENCODER_H */

@ -119,4 +119,4 @@ grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
}
#endif
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_PARSER_H */
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_PARSER_H */

@ -64,4 +64,4 @@ extern const grpc_chttp2_setting_parameters
}
#endif
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_SETTINGS_H */
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_SETTINGS_H */

@ -29,7 +29,7 @@ void grpc_chttp2_incoming_metadata_buffer_init(
grpc_chttp2_incoming_metadata_buffer *buffer, gpr_arena *arena) {
buffer->arena = arena;
grpc_metadata_batch_init(&buffer->batch);
buffer->batch.deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
buffer->batch.deadline = GRPC_MILLIS_INF_FUTURE;
}
void grpc_chttp2_incoming_metadata_buffer_destroy(
@ -62,7 +62,7 @@ grpc_error *grpc_chttp2_incoming_metadata_buffer_replace_or_add(
}
void grpc_chttp2_incoming_metadata_buffer_set_deadline(
grpc_chttp2_incoming_metadata_buffer *buffer, gpr_timespec deadline) {
grpc_chttp2_incoming_metadata_buffer *buffer, grpc_millis deadline) {
buffer->batch.deadline = deadline;
}

@ -47,10 +47,10 @@ grpc_error *grpc_chttp2_incoming_metadata_buffer_replace_or_add(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
grpc_mdelem elem) GRPC_MUST_USE_RESULT;
void grpc_chttp2_incoming_metadata_buffer_set_deadline(
grpc_chttp2_incoming_metadata_buffer *buffer, gpr_timespec deadline);
grpc_chttp2_incoming_metadata_buffer *buffer, grpc_millis deadline);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INCOMING_METADATA_H */
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INCOMING_METADATA_H */

@ -65,12 +65,6 @@ typedef enum {
GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
} grpc_chttp2_write_state;
typedef enum {
GRPC_CHTTP2_PING_ON_NEXT_WRITE = 0,
GRPC_CHTTP2_PING_BEFORE_TRANSPORT_WINDOW_UPDATE,
GRPC_CHTTP2_PING_TYPE_COUNT /* must be last */
} grpc_chttp2_ping_type;
typedef enum {
GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY,
GRPC_CHTTP2_OPTIMIZE_FOR_THROUGHPUT,
@ -97,7 +91,6 @@ typedef enum {
GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL,
GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL,
GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS,
GRPC_CHTTP2_INITIATE_WRITE_BDP_ESTIMATOR_PING,
GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING,
GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE,
GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING,
@ -118,19 +111,19 @@ typedef struct {
typedef struct {
int max_pings_without_data;
int max_ping_strikes;
gpr_timespec min_sent_ping_interval_without_data;
gpr_timespec min_recv_ping_interval_without_data;
grpc_millis min_sent_ping_interval_without_data;
grpc_millis min_recv_ping_interval_without_data;
} grpc_chttp2_repeated_ping_policy;
typedef struct {
gpr_timespec last_ping_sent_time;
grpc_millis last_ping_sent_time;
int pings_before_data_required;
grpc_timer delayed_ping_timer;
bool is_delayed_ping_timer_set;
} grpc_chttp2_repeated_ping_state;
typedef struct {
gpr_timespec last_ping_recv_time;
grpc_millis last_ping_recv_time;
int ping_strikes;
} grpc_chttp2_server_ping_recv_state;
@ -269,6 +262,8 @@ typedef struct {
* to send WINDOW_UPDATE frames. */
int64_t announced_window;
int32_t target_initial_window_size;
/** should we probe bdp? */
bool enable_bdp_probe;
@ -276,8 +271,9 @@ typedef struct {
grpc_bdp_estimator bdp_estimator;
/* pid controller */
bool pid_controller_initialized;
grpc_pid_controller pid_controller;
gpr_timespec last_pid_update;
grpc_millis last_pid_update;
// pointer back to transport for tracing
const grpc_chttp2_transport *t;
@ -374,7 +370,7 @@ struct grpc_chttp2_transport {
uint32_t last_new_stream_id;
/** ping queues for various ping insertion points */
grpc_chttp2_ping_queue ping_queues[GRPC_CHTTP2_PING_TYPE_COUNT];
grpc_chttp2_ping_queue ping_queue;
grpc_chttp2_repeated_ping_policy ping_policy;
grpc_chttp2_repeated_ping_state ping_state;
uint64_t ping_ctr; /* unique id for pings */
@ -459,9 +455,9 @@ struct grpc_chttp2_transport {
/** watchdog to kill the transport when waiting for the keepalive ping */
grpc_timer keepalive_watchdog_timer;
/** time duration in between pings */
gpr_timespec keepalive_time;
grpc_millis keepalive_time;
/** grace period for a ping to complete before watchdog kicks in */
gpr_timespec keepalive_timeout;
grpc_millis keepalive_timeout;
/** if keepalive pings are allowed when there's no outstanding streams */
bool keepalive_permit_without_calls;
/** keep-alive state machine state */
@ -570,7 +566,7 @@ struct grpc_chttp2_stream {
grpc_error *byte_stream_error; /* protected by t combiner */
bool received_last_frame; /* protected by t combiner */
gpr_timespec deadline;
grpc_millis deadline;
/** saw some stream level error */
grpc_error *forced_close_error;
@ -661,8 +657,8 @@ bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport *t,
returns non-zero if there was a stream available */
bool grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s);
bool grpc_chttp2_list_remove_writable_stream(
grpc_chttp2_transport *t, grpc_chttp2_stream *s) GRPC_MUST_USE_RESULT;
bool grpc_chttp2_list_remove_writable_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
bool grpc_chttp2_list_add_writing_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
@ -711,7 +707,7 @@ grpc_error *grpc_chttp2_flowctl_recv_data(grpc_chttp2_transport_flowctl *tfc,
// returns an announce if we should send a transport update to our peer,
// else returns zero
uint32_t grpc_chttp2_flowctl_maybe_send_transport_update(
grpc_chttp2_transport_flowctl *tfc);
grpc_chttp2_transport_flowctl *tfc, bool writing_anyway);
// returns an announce if we should send a stream update to our peer, else
// returns zero
@ -758,10 +754,8 @@ typedef struct {
// Reads the flow control data and returns an actionable struct that will tell
// chttp2 exactly what it needs to do
grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_action(
grpc_chttp2_transport_flowctl *tfc, grpc_chttp2_stream_flowctl *sfc);
grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_bdp_action(
grpc_chttp2_transport_flowctl *tfc);
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_flowctl *tfc,
grpc_chttp2_stream_flowctl *sfc);
// Takes in a flow control action and performs all the needed operations.
void grpc_chttp2_act_on_flowctl_action(grpc_exec_ctx *exec_ctx,

@ -359,8 +359,9 @@ static grpc_error *init_data_frame_parser(grpc_exec_ctx *exec_ctx,
s == NULL ? NULL : &s->flow_control,
t->incoming_frame_size);
grpc_chttp2_act_on_flowctl_action(
exec_ctx, grpc_chttp2_flowctl_get_action(
&t->flow_control, s == NULL ? NULL : &s->flow_control),
exec_ctx,
grpc_chttp2_flowctl_get_action(exec_ctx, &t->flow_control,
s == NULL ? NULL : &s->flow_control),
t, s);
if (err != GRPC_ERROR_NONE) {
goto error_handler;
@ -385,7 +386,7 @@ error_handler:
t->parser_data = &s->data_parser;
t->ping_state.pings_before_data_required =
t->ping_policy.max_pings_without_data;
t->ping_state.last_ping_sent_time = gpr_inf_past(GPR_CLOCK_MONOTONIC);
t->ping_state.last_ping_sent_time = GRPC_MILLIS_INF_PAST;
return GRPC_ERROR_NONE;
} else if (grpc_error_get_int(err, GRPC_ERROR_INT_STREAM_ID, NULL)) {
/* handle stream errors by closing the stream */
@ -430,26 +431,27 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
}
if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_GRPC_TIMEOUT)) {
gpr_timespec *cached_timeout =
(gpr_timespec *)grpc_mdelem_get_user_data(md, free_timeout);
gpr_timespec timeout;
grpc_millis *cached_timeout =
static_cast<grpc_millis *>(grpc_mdelem_get_user_data(md, free_timeout));
grpc_millis timeout;
if (cached_timeout == NULL) {
/* not already parsed: parse it now, and store the result away */
cached_timeout = (gpr_timespec *)gpr_malloc(sizeof(gpr_timespec));
cached_timeout = (grpc_millis *)gpr_malloc(sizeof(grpc_millis));
if (!grpc_http2_decode_timeout(GRPC_MDVALUE(md), cached_timeout)) {
char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
gpr_log(GPR_ERROR, "Ignoring bad timeout value '%s'", val);
gpr_free(val);
*cached_timeout = gpr_inf_future(GPR_TIMESPAN);
*cached_timeout = GRPC_MILLIS_INF_FUTURE;
}
timeout = *cached_timeout;
grpc_mdelem_set_user_data(md, free_timeout, cached_timeout);
} else {
timeout = *cached_timeout;
}
grpc_chttp2_incoming_metadata_buffer_set_deadline(
&s->metadata_buffer[0],
gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), timeout));
if (timeout != GRPC_MILLIS_INF_FUTURE) {
grpc_chttp2_incoming_metadata_buffer_set_deadline(
&s->metadata_buffer[0], grpc_exec_ctx_now(exec_ctx) + timeout);
}
GRPC_MDELEM_UNREF(exec_ctx, md);
} else {
const size_t new_size = s->metadata_buffer[0].size + GRPC_MDELEM_LENGTH(md);
@ -564,7 +566,7 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
t->ping_state.pings_before_data_required =
t->ping_policy.max_pings_without_data;
t->ping_state.last_ping_sent_time = gpr_inf_past(GPR_CLOCK_MONOTONIC);
t->ping_state.last_ping_sent_time = GRPC_MILLIS_INF_PAST;
/* could be a new grpc_chttp2_stream or an existing grpc_chttp2_stream */
s = grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);

@ -73,4 +73,4 @@ void grpc_chttp2_stream_map_for_each(grpc_chttp2_stream_map *map,
}
#endif
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_STREAM_MAP_H */
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_STREAM_MAP_H */

@ -42,18 +42,9 @@ static void finish_write_cb(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->write_cb_pool = cb;
}
static void collapse_pings_from_into(grpc_chttp2_transport *t,
grpc_chttp2_ping_type ping_type,
grpc_chttp2_ping_queue *pq) {
for (size_t i = 0; i < GRPC_CHTTP2_PCL_COUNT; i++) {
grpc_closure_list_move(&t->ping_queues[ping_type].lists[i], &pq->lists[i]);
}
}
static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_ping_type ping_type) {
grpc_chttp2_ping_queue *pq = &t->ping_queues[ping_type];
grpc_chttp2_transport *t) {
grpc_chttp2_ping_queue *pq = &t->ping_queue;
if (grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_NEXT])) {
/* no ping needed: wait */
return;
@ -62,7 +53,8 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
/* ping already in-flight: wait */
if (GRPC_TRACER_ON(grpc_http_trace) ||
GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
gpr_log(GPR_DEBUG, "Ping delayed [%p]: already pinging", t->peer_string);
gpr_log(GPR_DEBUG, "%s: Ping delayed [%p]: already pinging",
t->is_client ? "CLIENT" : "SERVER", t->peer_string);
}
return;
}
@ -71,51 +63,38 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
/* need to receive something of substance before sending a ping again */
if (GRPC_TRACER_ON(grpc_http_trace) ||
GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
gpr_log(GPR_DEBUG, "Ping delayed [%p]: too many recent pings: %d/%d",
t->peer_string, t->ping_state.pings_before_data_required,
gpr_log(GPR_DEBUG, "%s: Ping delayed [%p]: too many recent pings: %d/%d",
t->is_client ? "CLIENT" : "SERVER", t->peer_string,
t->ping_state.pings_before_data_required,
t->ping_policy.max_pings_without_data);
}
return;
}
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec next_allowed_ping =
gpr_time_add(t->ping_state.last_ping_sent_time,
t->ping_policy.min_sent_ping_interval_without_data);
grpc_millis now = grpc_exec_ctx_now(exec_ctx);
grpc_millis next_allowed_ping =
t->ping_state.last_ping_sent_time +
t->ping_policy.min_sent_ping_interval_without_data;
if (t->keepalive_permit_without_calls == 0 &&
grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
next_allowed_ping = gpr_time_add(t->ping_recv_state.last_ping_recv_time,
gpr_time_from_seconds(7200, GPR_TIMESPAN));
next_allowed_ping =
t->ping_recv_state.last_ping_recv_time + 7200 * GPR_MS_PER_SEC;
}
if (gpr_time_cmp(next_allowed_ping, now) > 0) {
if (next_allowed_ping > now) {
/* not enough elapsed time between successive pings */
if (GRPC_TRACER_ON(grpc_http_trace) ||
GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
gpr_log(GPR_DEBUG,
"Ping delayed [%p]: not enough time elapsed since last ping",
t->peer_string);
"%s: Ping delayed [%p]: not enough time elapsed since last ping",
t->is_client ? "CLIENT" : "SERVER", t->peer_string);
}
if (!t->ping_state.is_delayed_ping_timer_set) {
t->ping_state.is_delayed_ping_timer_set = true;
grpc_timer_init(exec_ctx, &t->ping_state.delayed_ping_timer,
next_allowed_ping, &t->retry_initiate_ping_locked,
gpr_now(GPR_CLOCK_MONOTONIC));
next_allowed_ping, &t->retry_initiate_ping_locked);
}
return;
}
/* coalesce equivalent pings into this one */
switch (ping_type) {
case GRPC_CHTTP2_PING_BEFORE_TRANSPORT_WINDOW_UPDATE:
collapse_pings_from_into(t, GRPC_CHTTP2_PING_ON_NEXT_WRITE, pq);
break;
case GRPC_CHTTP2_PING_ON_NEXT_WRITE:
break;
case GRPC_CHTTP2_PING_TYPE_COUNT:
GPR_UNREACHABLE_CODE(break);
}
pq->inflight_id = t->ping_ctr * GRPC_CHTTP2_PING_TYPE_COUNT + ping_type;
pq->inflight_id = t->ping_ctr;
t->ping_ctr++;
GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[GRPC_CHTTP2_PCL_INITIATE]);
grpc_closure_list_move(&pq->lists[GRPC_CHTTP2_PCL_NEXT],
@ -126,7 +105,8 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
t->ping_state.last_ping_sent_time = now;
if (GRPC_TRACER_ON(grpc_http_trace) ||
GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
gpr_log(GPR_DEBUG, "Ping sent [%p]: %d/%d", t->peer_string,
gpr_log(GPR_DEBUG, "%s: Ping sent [%p]: %d/%d",
t->is_client ? "CLIENT" : "SERVER", t->peer_string,
t->ping_state.pings_before_data_required,
t->ping_policy.max_pings_without_data);
}
@ -156,6 +136,25 @@ static bool update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
return sched_any;
}
static void report_stall(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
const char *staller) {
gpr_log(
GPR_DEBUG,
"%s:%p stream %d stalled by %s [fc:pending=%" PRIdPTR ":flowed=%" PRId64
":peer_initwin=%d:t_win=%" PRId64 ":s_win=%d:s_delta=%" PRId64 "]",
t->peer_string, t, s->id, staller, s->flow_controlled_buffer.length,
s->flow_controlled_bytes_flowed,
t->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
t->flow_control.remote_window,
(uint32_t)GPR_MAX(
0,
s->flow_control.remote_window_delta +
(int64_t)t->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]),
s->flow_control.remote_window_delta);
}
static bool stream_ref_if_not_destroyed(gpr_refcount *r) {
gpr_atm count;
do {
@ -175,342 +174,451 @@ static bool is_default_initial_metadata(grpc_metadata_batch *initial_metadata) {
return initial_metadata->list.default_count == initial_metadata->list.count;
}
grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
grpc_chttp2_stream *s;
namespace {
class StreamWriteContext;
/* stats histogram counters: we increment these throughout this function,
and at the end publish to the central stats histograms */
int flow_control_writes = 0;
int initial_metadata_writes = 0;
int trailing_metadata_writes = 0;
int message_writes = 0;
class WriteContext {
public:
WriteContext(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) : t_(t) {
GRPC_STATS_INC_HTTP2_WRITES_BEGUN(exec_ctx);
GPR_TIMER_BEGIN("grpc_chttp2_begin_write", 0);
}
GRPC_STATS_INC_HTTP2_WRITES_BEGUN(exec_ctx);
// TODO(ctiller): make this the destructor
void FlushStats(grpc_exec_ctx *exec_ctx) {
GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(
exec_ctx, initial_metadata_writes_);
GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(exec_ctx, message_writes_);
GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE(
exec_ctx, trailing_metadata_writes_);
GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(exec_ctx, flow_control_writes_);
}
void FlushSettings(grpc_exec_ctx *exec_ctx) {
if (t_->dirtied_local_settings && !t_->sent_local_settings) {
grpc_slice_buffer_add(
&t_->outbuf, grpc_chttp2_settings_create(
t_->settings[GRPC_SENT_SETTINGS],
t_->settings[GRPC_LOCAL_SETTINGS],
t_->force_send_settings, GRPC_CHTTP2_NUM_SETTINGS));
t_->force_send_settings = false;
t_->dirtied_local_settings = false;
t_->sent_local_settings = true;
GRPC_STATS_INC_HTTP2_SETTINGS_WRITES(exec_ctx);
}
}
GPR_TIMER_BEGIN("grpc_chttp2_begin_write", 0);
void FlushQueuedBuffers(grpc_exec_ctx *exec_ctx) {
/* simple writes are queued to qbuf, and flushed here */
grpc_slice_buffer_move_into(&t_->qbuf, &t_->outbuf);
GPR_ASSERT(t_->qbuf.count == 0);
}
if (t->dirtied_local_settings && !t->sent_local_settings) {
grpc_slice_buffer_add(
&t->outbuf,
grpc_chttp2_settings_create(
t->settings[GRPC_SENT_SETTINGS], t->settings[GRPC_LOCAL_SETTINGS],
t->force_send_settings, GRPC_CHTTP2_NUM_SETTINGS));
t->force_send_settings = 0;
t->dirtied_local_settings = 0;
t->sent_local_settings = 1;
GRPC_STATS_INC_HTTP2_SETTINGS_WRITES(exec_ctx);
void FlushWindowUpdates(grpc_exec_ctx *exec_ctx) {
uint32_t transport_announce =
grpc_chttp2_flowctl_maybe_send_transport_update(&t_->flow_control,
t_->outbuf.count > 0);
if (transport_announce) {
grpc_transport_one_way_stats throwaway_stats;
grpc_slice_buffer_add(
&t_->outbuf, grpc_chttp2_window_update_create(0, transport_announce,
&throwaway_stats));
ResetPingRecvClock();
}
}
/* simple writes are queued to qbuf, and flushed here */
grpc_slice_buffer_move_into(&t->qbuf, &t->outbuf);
GPR_ASSERT(t->qbuf.count == 0);
void FlushPingAcks() {
for (size_t i = 0; i < t_->ping_ack_count; i++) {
grpc_slice_buffer_add(&t_->outbuf,
grpc_chttp2_ping_create(true, t_->ping_acks[i]));
}
t_->ping_ack_count = 0;
}
grpc_chttp2_hpack_compressor_set_max_table_size(
&t->hpack_compressor,
t->settings[GRPC_PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
void EnactHpackSettings(grpc_exec_ctx *exec_ctx) {
grpc_chttp2_hpack_compressor_set_max_table_size(
&t_->hpack_compressor,
t_->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
}
if (t->flow_control.remote_window > 0) {
while (grpc_chttp2_list_pop_stalled_by_transport(t, &s)) {
if (!t->closed && grpc_chttp2_list_add_writable_stream(t, s)) {
stream_ref_if_not_destroyed(&s->refcount->refs);
void UpdateStreamsNoLongerStalled() {
grpc_chttp2_stream *s;
while (grpc_chttp2_list_pop_stalled_by_transport(t_, &s)) {
if (!t_->closed && grpc_chttp2_list_add_writable_stream(t_, s)) {
if (!stream_ref_if_not_destroyed(&s->refcount->refs)) {
grpc_chttp2_list_remove_writable_stream(t_, s);
}
}
}
}
grpc_chttp2_begin_write_result result = {false, false, false};
grpc_chttp2_stream *NextStream() {
if (t_->outbuf.length > target_write_size(t_)) {
result_.partial = true;
return nullptr;
}
/* for each grpc_chttp2_stream that's become writable, frame its data
(according to available window sizes) and add to the output buffer */
while (true) {
if (t->outbuf.length > target_write_size(t)) {
result.partial = true;
break;
grpc_chttp2_stream *s;
if (!grpc_chttp2_list_pop_writable_stream(t_, &s)) {
return nullptr;
}
if (!grpc_chttp2_list_pop_writable_stream(t, &s)) {
break;
return s;
}
void ResetPingRecvClock() {
if (!t_->is_client) {
t_->ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST;
t_->ping_recv_state.ping_strikes = 0;
}
}
void IncInitialMetadataWrites() { ++initial_metadata_writes_; }
void IncWindowUpdateWrites() { ++flow_control_writes_; }
void IncMessageWrites() { ++message_writes_; }
void IncTrailingMetadataWrites() { ++trailing_metadata_writes_; }
void NoteScheduledResults() { result_.early_results_scheduled = true; }
grpc_chttp2_transport *transport() const { return t_; }
bool sent_initial_metadata = s->sent_initial_metadata;
bool now_writing = false;
grpc_chttp2_begin_write_result Result() {
result_.writing = t_->outbuf.count > 0;
return result_;
}
private:
grpc_chttp2_transport *const t_;
/* stats histogram counters: we increment these throughout the write and
publish them to the central stats histograms when FlushStats() is called */
int flow_control_writes_ = 0;
int initial_metadata_writes_ = 0;
int trailing_metadata_writes_ = 0;
int message_writes_ = 0;
grpc_chttp2_begin_write_result result_ = {false, false, false};
};
class DataSendContext {
public:
DataSendContext(WriteContext *write_context, grpc_chttp2_transport *t,
grpc_chttp2_stream *s)
: write_context_(write_context),
t_(t),
s_(s),
sending_bytes_before_(s_->sending_bytes) {}
uint32_t stream_remote_window() const {
return (uint32_t)GPR_MAX(
0, s_->flow_control.remote_window_delta +
(int64_t)t_->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]);
}
uint32_t max_outgoing() const {
return (uint32_t)GPR_MIN(
t_->settings[GRPC_PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
GPR_MIN(stream_remote_window(), t_->flow_control.remote_window));
}
bool AnyOutgoing() const { return max_outgoing() != 0; }
void FlushCompressedBytes() {
uint32_t send_bytes =
(uint32_t)GPR_MIN(max_outgoing(), s_->compressed_data_buffer.length);
bool is_last_data_frame =
(send_bytes == s_->compressed_data_buffer.length &&
s_->flow_controlled_buffer.length == 0 &&
s_->fetching_send_message == NULL);
if (is_last_data_frame && s_->send_trailing_metadata != NULL &&
s_->stream_compression_ctx != NULL) {
if (!grpc_stream_compress(s_->stream_compression_ctx,
&s_->flow_controlled_buffer,
&s_->compressed_data_buffer, NULL, MAX_SIZE_T,
GRPC_STREAM_COMPRESSION_FLUSH_FINISH)) {
gpr_log(GPR_ERROR, "Stream compression failed.");
}
grpc_stream_compression_context_destroy(s_->stream_compression_ctx);
s_->stream_compression_ctx = NULL;
/* After finish, bytes in s->compressed_data_buffer may be
* more than max_outgoing. Return so that the caller's while loop
* runs another round and send_bytes and is_last_data_frame are
* recalculated. */
return;
}
is_last_frame_ = is_last_data_frame && s_->send_trailing_metadata != NULL &&
grpc_metadata_batch_is_empty(s_->send_trailing_metadata);
grpc_chttp2_encode_data(s_->id, &s_->compressed_data_buffer, send_bytes,
is_last_frame_, &s_->stats.outgoing, &t_->outbuf);
grpc_chttp2_flowctl_sent_data(&t_->flow_control, &s_->flow_control,
send_bytes);
if (s_->compressed_data_buffer.length == 0) {
s_->sending_bytes += s_->uncompressed_data_size;
}
}
void CompressMoreBytes() {
if (s_->stream_compression_ctx == NULL) {
s_->stream_compression_ctx =
grpc_stream_compression_context_create(s_->stream_compression_method);
}
s_->uncompressed_data_size = s_->flow_controlled_buffer.length;
if (!grpc_stream_compress(s_->stream_compression_ctx,
&s_->flow_controlled_buffer,
&s_->compressed_data_buffer, NULL, MAX_SIZE_T,
GRPC_STREAM_COMPRESSION_FLUSH_SYNC)) {
gpr_log(GPR_ERROR, "Stream compression failed.");
}
}
bool is_last_frame() const { return is_last_frame_; }
void CallCallbacks(grpc_exec_ctx *exec_ctx) {
if (update_list(exec_ctx, t_, s_,
(int64_t)(s_->sending_bytes - sending_bytes_before_),
&s_->on_flow_controlled_cbs,
&s_->flow_controlled_bytes_flowed, GRPC_ERROR_NONE)) {
write_context_->NoteScheduledResults();
}
}
private:
WriteContext *write_context_;
grpc_chttp2_transport *t_;
grpc_chttp2_stream *s_;
const size_t sending_bytes_before_;
bool is_last_frame_ = false;
};
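/* Worked example (invented numbers) of the window math in DataSendContext
   above:
     peer SETTINGS_INITIAL_WINDOW_SIZE = 65535, remote_window_delta = -20000
       -> stream_remote_window = max(0, -20000 + 65535)             = 45535
     transport remote_window = 30000, peer SETTINGS_MAX_FRAME_SIZE = 16384
       -> max_outgoing = min(16384, min(45535, 30000))              = 16384
   so at most 16384 bytes of this stream's data are framed into the current
   write; the rest waits for the next write or for more window. */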
class StreamWriteContext {
public:
StreamWriteContext(WriteContext *write_context, grpc_chttp2_stream *s)
: write_context_(write_context), t_(write_context->transport()), s_(s) {
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_DEBUG, "W:%p %s[%d] im-(sent,send)=(%d,%d) announce=%d", t,
t->is_client ? "CLIENT" : "SERVER", s->id,
sent_initial_metadata, s->send_initial_metadata != NULL,
gpr_log(GPR_DEBUG, "W:%p %s[%d] im-(sent,send)=(%d,%d) announce=%d", t_,
t_->is_client ? "CLIENT" : "SERVER", s->id,
s->sent_initial_metadata, s->send_initial_metadata != NULL,
(int)(s->flow_control.local_window_delta -
s->flow_control.announced_window_delta)));
}
grpc_mdelem *extra_headers_for_trailing_metadata[2];
size_t num_extra_headers_for_trailing_metadata = 0;
void FlushInitialMetadata(grpc_exec_ctx *exec_ctx) {
/* send initial metadata if it's available */
if (!sent_initial_metadata && s->send_initial_metadata != NULL) {
// We skip this on the server side if there is no custom initial
// metadata, there are no messages to send, and we are also sending
// trailing metadata. This results in a Trailers-Only response,
// which is required for retries, as per:
// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#when-retries-are-valid
if (t->is_client || s->fetching_send_message != NULL ||
s->flow_controlled_buffer.length != 0 ||
s->send_trailing_metadata == NULL ||
!is_default_initial_metadata(s->send_initial_metadata)) {
grpc_encode_header_options hopt = {
s->id, // stream_id
false, // is_eof
t->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA] !=
0, // use_true_binary_metadata
t->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE], // max_frame_size
&s->stats.outgoing // stats
};
grpc_chttp2_encode_header(exec_ctx, &t->hpack_compressor, NULL, 0,
s->send_initial_metadata, &hopt, &t->outbuf);
now_writing = true;
if (!t->is_client) {
t->ping_recv_state.last_ping_recv_time =
gpr_inf_past(GPR_CLOCK_MONOTONIC);
t->ping_recv_state.ping_strikes = 0;
}
initial_metadata_writes++;
} else {
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_INFO, "not sending initial_metadata (Trailers-Only)"));
// When sending Trailers-Only, we need to move the :status and
// content-type headers to the trailers.
if (s->send_initial_metadata->idx.named.status != NULL) {
extra_headers_for_trailing_metadata
[num_extra_headers_for_trailing_metadata++] =
&s->send_initial_metadata->idx.named.status->md;
}
if (s->send_initial_metadata->idx.named.content_type != NULL) {
extra_headers_for_trailing_metadata
[num_extra_headers_for_trailing_metadata++] =
&s->send_initial_metadata->idx.named.content_type->md;
}
trailing_metadata_writes++;
}
s->send_initial_metadata = NULL;
s->sent_initial_metadata = true;
sent_initial_metadata = true;
result.early_results_scheduled = true;
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->send_initial_metadata_finished, GRPC_ERROR_NONE,
"send_initial_metadata_finished");
if (s_->sent_initial_metadata) return;
if (s_->send_initial_metadata == nullptr) return;
// We skip this on the server side if there is no custom initial
// metadata, there are no messages to send, and we are also sending
// trailing metadata. This results in a Trailers-Only response,
// which is required for retries, as per:
// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#when-retries-are-valid
if (!t_->is_client && s_->fetching_send_message == nullptr &&
s_->flow_controlled_buffer.length == 0 &&
s_->compressed_data_buffer.length == 0 &&
s_->send_trailing_metadata != nullptr &&
is_default_initial_metadata(s_->send_initial_metadata)) {
ConvertInitialMetadataToTrailingMetadata();
} else {
grpc_encode_header_options hopt = {
s_->id, // stream_id
false, // is_eof
t_->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA] !=
0, // use_true_binary_metadata
t_->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE], // max_frame_size
&s_->stats.outgoing // stats
};
grpc_chttp2_encode_header(exec_ctx, &t_->hpack_compressor, NULL, 0,
s_->send_initial_metadata, &hopt, &t_->outbuf);
write_context_->ResetPingRecvClock();
write_context_->IncInitialMetadataWrites();
}
s_->send_initial_metadata = NULL;
s_->sent_initial_metadata = true;
write_context_->NoteScheduledResults();
grpc_chttp2_complete_closure_step(
exec_ctx, t_, s_, &s_->send_initial_metadata_finished, GRPC_ERROR_NONE,
"send_initial_metadata_finished");
}
void FlushWindowUpdates(grpc_exec_ctx *exec_ctx) {
/* send any window updates */
uint32_t stream_announce = grpc_chttp2_flowctl_maybe_send_stream_update(
&t->flow_control, &s->flow_control);
if (stream_announce > 0) {
grpc_slice_buffer_add(
&t->outbuf, grpc_chttp2_window_update_create(s->id, stream_announce,
&s->stats.outgoing));
if (!t->is_client) {
t->ping_recv_state.last_ping_recv_time =
gpr_inf_past(GPR_CLOCK_MONOTONIC);
t->ping_recv_state.ping_strikes = 0;
&t_->flow_control, &s_->flow_control);
if (stream_announce == 0) return;
grpc_slice_buffer_add(
&t_->outbuf, grpc_chttp2_window_update_create(s_->id, stream_announce,
&s_->stats.outgoing));
write_context_->ResetPingRecvClock();
write_context_->IncWindowUpdateWrites();
}
void FlushData(grpc_exec_ctx *exec_ctx) {
if (!s_->sent_initial_metadata) return;
if (s_->flow_controlled_buffer.length == 0 &&
s_->compressed_data_buffer.length == 0) {
return; // early out: nothing to do
}
DataSendContext data_send_context(write_context_, t_, s_);
if (!data_send_context.AnyOutgoing()) {
if (t_->flow_control.remote_window == 0) {
report_stall(t_, s_, "transport");
grpc_chttp2_list_add_stalled_by_transport(t_, s_);
} else if (data_send_context.stream_remote_window() == 0) {
report_stall(t_, s_, "stream");
grpc_chttp2_list_add_stalled_by_stream(t_, s_);
}
flow_control_writes++;
return; // early out: nothing to do
}
if (sent_initial_metadata) {
/* send any body bytes, if allowed by flow control */
if (s->flow_controlled_buffer.length > 0 ||
s->compressed_data_buffer.length > 0) {
uint32_t stream_remote_window = (uint32_t)GPR_MAX(
0,
s->flow_control.remote_window_delta +
(int64_t)t->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]);
uint32_t max_outgoing = (uint32_t)GPR_MIN(
t->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
GPR_MIN(stream_remote_window, t->flow_control.remote_window));
if (max_outgoing > 0) {
bool is_last_data_frame = false;
bool is_last_frame = false;
size_t sending_bytes_before = s->sending_bytes;
while ((s->flow_controlled_buffer.length > 0 ||
s->compressed_data_buffer.length > 0) &&
max_outgoing > 0) {
if (s->compressed_data_buffer.length > 0) {
uint32_t send_bytes = (uint32_t)GPR_MIN(
max_outgoing, s->compressed_data_buffer.length);
is_last_data_frame =
(send_bytes == s->compressed_data_buffer.length &&
s->flow_controlled_buffer.length == 0 &&
s->fetching_send_message == NULL);
if (is_last_data_frame && s->send_trailing_metadata != NULL &&
s->stream_compression_ctx != NULL) {
if (!grpc_stream_compress(
s->stream_compression_ctx, &s->flow_controlled_buffer,
&s->compressed_data_buffer, NULL, MAX_SIZE_T,
GRPC_STREAM_COMPRESSION_FLUSH_FINISH)) {
gpr_log(GPR_ERROR, "Stream compression failed.");
}
grpc_stream_compression_context_destroy(
s->stream_compression_ctx);
s->stream_compression_ctx = NULL;
/* After finish, bytes in s->compressed_data_buffer may be
* more than max_outgoing. Start another round of the current
* while loop so that send_bytes and is_last_data_frame are
* recalculated. */
continue;
}
is_last_frame =
is_last_data_frame && s->send_trailing_metadata != NULL &&
grpc_metadata_batch_is_empty(s->send_trailing_metadata);
grpc_chttp2_encode_data(s->id, &s->compressed_data_buffer,
send_bytes, is_last_frame,
&s->stats.outgoing, &t->outbuf);
grpc_chttp2_flowctl_sent_data(&t->flow_control, &s->flow_control,
send_bytes);
max_outgoing -= send_bytes;
if (s->compressed_data_buffer.length == 0) {
s->sending_bytes += s->uncompressed_data_size;
}
} else {
if (s->stream_compression_ctx == NULL) {
s->stream_compression_ctx =
grpc_stream_compression_context_create(
s->stream_compression_method);
}
s->uncompressed_data_size = s->flow_controlled_buffer.length;
if (!grpc_stream_compress(
s->stream_compression_ctx, &s->flow_controlled_buffer,
&s->compressed_data_buffer, NULL, MAX_SIZE_T,
GRPC_STREAM_COMPRESSION_FLUSH_SYNC)) {
gpr_log(GPR_ERROR, "Stream compression failed.");
}
}
}
if (!t->is_client) {
t->ping_recv_state.last_ping_recv_time =
gpr_inf_past(GPR_CLOCK_MONOTONIC);
t->ping_recv_state.ping_strikes = 0;
}
if (is_last_frame) {
s->send_trailing_metadata = NULL;
s->sent_trailing_metadata = true;
if (!t->is_client && !s->read_closed) {
grpc_slice_buffer_add(&t->outbuf, grpc_chttp2_rst_stream_create(
s->id, GRPC_HTTP2_NO_ERROR,
&s->stats.outgoing));
}
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, !t->is_client, 1,
GRPC_ERROR_NONE);
}
result.early_results_scheduled |=
update_list(exec_ctx, t, s,
(int64_t)(s->sending_bytes - sending_bytes_before),
&s->on_flow_controlled_cbs,
&s->flow_controlled_bytes_flowed, GRPC_ERROR_NONE);
now_writing = true;
if (s->flow_controlled_buffer.length > 0 ||
s->compressed_data_buffer.length > 0) {
GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:fork");
grpc_chttp2_list_add_writable_stream(t, s);
}
message_writes++;
} else if (t->flow_control.remote_window == 0) {
grpc_chttp2_list_add_stalled_by_transport(t, s);
now_writing = true;
} else if (stream_remote_window == 0) {
grpc_chttp2_list_add_stalled_by_stream(t, s);
now_writing = true;
}
while ((s_->flow_controlled_buffer.length > 0 ||
s_->compressed_data_buffer.length > 0) &&
data_send_context.max_outgoing() > 0) {
if (s_->compressed_data_buffer.length > 0) {
data_send_context.FlushCompressedBytes();
} else {
data_send_context.CompressMoreBytes();
}
if (s->send_trailing_metadata != NULL &&
s->fetching_send_message == NULL &&
s->flow_controlled_buffer.length == 0 &&
s->compressed_data_buffer.length == 0) {
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "sending trailing_metadata"));
if (grpc_metadata_batch_is_empty(s->send_trailing_metadata)) {
grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer, 0, true,
&s->stats.outgoing, &t->outbuf);
} else {
grpc_encode_header_options hopt = {
s->id, true,
t->settings
[GRPC_PEER_SETTINGS]
}
write_context_->ResetPingRecvClock();
if (data_send_context.is_last_frame()) {
SentLastFrame(exec_ctx);
}
data_send_context.CallCallbacks(exec_ctx);
stream_became_writable_ = true;
if (s_->flow_controlled_buffer.length > 0 ||
s_->compressed_data_buffer.length > 0) {
GRPC_CHTTP2_STREAM_REF(s_, "chttp2_writing:fork");
grpc_chttp2_list_add_writable_stream(t_, s_);
}
write_context_->IncMessageWrites();
}
void FlushTrailingMetadata(grpc_exec_ctx *exec_ctx) {
if (!s_->sent_initial_metadata) return;
if (s_->send_trailing_metadata == NULL) return;
if (s_->fetching_send_message != NULL) return;
if (s_->flow_controlled_buffer.length != 0) return;
if (s_->compressed_data_buffer.length != 0) return;
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "sending trailing_metadata"));
if (grpc_metadata_batch_is_empty(s_->send_trailing_metadata)) {
grpc_chttp2_encode_data(s_->id, &s_->flow_controlled_buffer, 0, true,
&s_->stats.outgoing, &t_->outbuf);
} else {
grpc_encode_header_options hopt = {
s_->id, true,
t_->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA] !=
0,
t->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
&s->stats.outgoing};
grpc_chttp2_encode_header(exec_ctx, &t->hpack_compressor,
extra_headers_for_trailing_metadata,
num_extra_headers_for_trailing_metadata,
s->send_trailing_metadata, &hopt,
&t->outbuf);
trailing_metadata_writes++;
}
s->send_trailing_metadata = NULL;
s->sent_trailing_metadata = true;
if (!t->is_client && !s->read_closed) {
grpc_slice_buffer_add(
&t->outbuf, grpc_chttp2_rst_stream_create(
s->id, GRPC_HTTP2_NO_ERROR, &s->stats.outgoing));
}
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, !t->is_client, 1,
GRPC_ERROR_NONE);
now_writing = true;
result.early_results_scheduled = true;
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->send_trailing_metadata_finished,
GRPC_ERROR_NONE, "send_trailing_metadata_finished");
}
0,
t_->settings[GRPC_PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
&s_->stats.outgoing};
grpc_chttp2_encode_header(exec_ctx, &t_->hpack_compressor,
extra_headers_for_trailing_metadata_,
num_extra_headers_for_trailing_metadata_,
s_->send_trailing_metadata, &hopt, &t_->outbuf);
}
write_context_->IncTrailingMetadataWrites();
write_context_->ResetPingRecvClock();
SentLastFrame(exec_ctx);
write_context_->NoteScheduledResults();
grpc_chttp2_complete_closure_step(
exec_ctx, t_, s_, &s_->send_trailing_metadata_finished, GRPC_ERROR_NONE,
"send_trailing_metadata_finished");
}
bool stream_became_writable() { return stream_became_writable_; }
private:
void ConvertInitialMetadataToTrailingMetadata() {
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_INFO, "not sending initial_metadata (Trailers-Only)"));
// When sending Trailers-Only, we need to move the :status and
// content-type headers to the trailers.
if (s_->send_initial_metadata->idx.named.status != NULL) {
extra_headers_for_trailing_metadata_
[num_extra_headers_for_trailing_metadata_++] =
&s_->send_initial_metadata->idx.named.status->md;
}
if (s_->send_initial_metadata->idx.named.content_type != NULL) {
extra_headers_for_trailing_metadata_
[num_extra_headers_for_trailing_metadata_++] =
&s_->send_initial_metadata->idx.named.content_type->md;
}
}
void SentLastFrame(grpc_exec_ctx *exec_ctx) {
s_->send_trailing_metadata = NULL;
s_->sent_trailing_metadata = true;
if (!t_->is_client && !s_->read_closed) {
grpc_slice_buffer_add(
&t_->outbuf, grpc_chttp2_rst_stream_create(
s_->id, GRPC_HTTP2_NO_ERROR, &s_->stats.outgoing));
}
grpc_chttp2_mark_stream_closed(exec_ctx, t_, s_, !t_->is_client, true,
GRPC_ERROR_NONE);
}
if (now_writing) {
GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(
exec_ctx, initial_metadata_writes);
GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(exec_ctx, message_writes);
GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE(
exec_ctx, trailing_metadata_writes);
GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(exec_ctx,
flow_control_writes);
WriteContext *const write_context_;
grpc_chttp2_transport *const t_;
grpc_chttp2_stream *const s_;
bool stream_became_writable_ = false;
grpc_mdelem *extra_headers_for_trailing_metadata_[2];
size_t num_extra_headers_for_trailing_metadata_ = 0;
};
} // namespace
grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
WriteContext ctx(exec_ctx, t);
ctx.FlushSettings(exec_ctx);
ctx.FlushPingAcks();
ctx.FlushQueuedBuffers(exec_ctx);
ctx.EnactHpackSettings(exec_ctx);
if (t->flow_control.remote_window > 0) {
ctx.UpdateStreamsNoLongerStalled();
}
/* for each grpc_chttp2_stream that's become writable, frame its data
(according to available window sizes) and add to the output buffer */
while (grpc_chttp2_stream *s = ctx.NextStream()) {
StreamWriteContext stream_ctx(&ctx, s);
stream_ctx.FlushInitialMetadata(exec_ctx);
stream_ctx.FlushWindowUpdates(exec_ctx);
stream_ctx.FlushData(exec_ctx);
stream_ctx.FlushTrailingMetadata(exec_ctx);
if (stream_ctx.stream_became_writable()) {
if (!grpc_chttp2_list_add_writing_stream(t, s)) {
/* already in writing list: drop ref */
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:already_writing");
} else {
/* ref will be dropped at end of write */
}
} else {
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:no_write");
}
}
uint32_t transport_announce =
grpc_chttp2_flowctl_maybe_send_transport_update(&t->flow_control);
if (transport_announce) {
maybe_initiate_ping(exec_ctx, t,
GRPC_CHTTP2_PING_BEFORE_TRANSPORT_WINDOW_UPDATE);
grpc_transport_one_way_stats throwaway_stats;
grpc_slice_buffer_add(
&t->outbuf, grpc_chttp2_window_update_create(0, transport_announce,
&throwaway_stats));
if (!t->is_client) {
t->ping_recv_state.last_ping_recv_time =
gpr_inf_past(GPR_CLOCK_MONOTONIC);
t->ping_recv_state.ping_strikes = 0;
}
}
for (size_t i = 0; i < t->ping_ack_count; i++) {
grpc_slice_buffer_add(&t->outbuf,
grpc_chttp2_ping_create(1, t->ping_acks[i]));
}
t->ping_ack_count = 0;
ctx.FlushWindowUpdates(exec_ctx);
maybe_initiate_ping(exec_ctx, t, GRPC_CHTTP2_PING_ON_NEXT_WRITE);
maybe_initiate_ping(exec_ctx, t);
GPR_TIMER_END("grpc_chttp2_begin_write", 0);
result.writing = t->outbuf.count > 0;
return result;
return ctx.Result();
}
void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,

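For orientation, the loop in grpc_chttp2_begin_write above follows a simple pattern: pop each stream that became writable, let a per-stream context flush whatever the current window allows into the shared output buffer, and keep a stream on the writing list only if it actually contributed bytes. A stand-alone sketch of that shape with toy types (not the chttp2 types, and with a fixed buffer cap standing in for flow control):
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
typedef struct stream {
  const char *name;
  const char *pending;   /* data still waiting to be framed */
  struct stream *next;   /* intrusive "became writable" list */
} stream;
/* Flush as much of one stream's pending data as fits into the shared
   output buffer; report whether the stream wrote anything. */
static bool flush_stream(stream *s, char *outbuf, size_t *outlen, size_t cap) {
  size_t n = strlen(s->pending);
  if (n > cap - *outlen) n = cap - *outlen;  /* respect remaining space */
  memcpy(outbuf + *outlen, s->pending, n);
  *outlen += n;
  s->pending += n;                           /* remainder stays queued */
  return n > 0;
}
int main(void) {
  stream b = {"stream-b", "world", NULL};
  stream a = {"stream-a", "hello ", &b};
  stream *writable = &a;
  char outbuf[64];
  size_t outlen = 0;
  /* for each stream that's become writable, frame its data and add it to
     the output buffer */
  for (stream *s = writable; s != NULL; s = s->next) {
    if (flush_stream(s, outbuf, &outlen, sizeof(outbuf))) {
      printf("%s contributed to this write\n", s->name);
    } else {
      printf("%s had nothing to write\n", s->name);
    }
  }
  printf("write payload: %.*s\n", (int)outlen, outbuf);
  return 0;
}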
@ -62,96 +62,22 @@ typedef struct inproc_transport {
struct inproc_stream *stream_list;
} inproc_transport;
typedef struct sb_list_entry {
grpc_slice_buffer sb;
struct sb_list_entry *next;
} sb_list_entry;
// Specialize grpc_byte_stream for our use case
typedef struct {
grpc_byte_stream base;
sb_list_entry *le;
grpc_error *shutdown_error;
} inproc_slice_byte_stream;
typedef struct {
// TODO (vjpai): Add some inlined elements to avoid alloc in simple cases
sb_list_entry *head;
sb_list_entry *tail;
} slice_buffer_list;
static void slice_buffer_list_init(slice_buffer_list *l) {
l->head = NULL;
l->tail = NULL;
}
static void sb_list_entry_destroy(grpc_exec_ctx *exec_ctx, sb_list_entry *le) {
grpc_slice_buffer_destroy_internal(exec_ctx, &le->sb);
gpr_free(le);
}
static void slice_buffer_list_destroy(grpc_exec_ctx *exec_ctx,
slice_buffer_list *l) {
sb_list_entry *curr = l->head;
while (curr != NULL) {
sb_list_entry *le = curr;
curr = curr->next;
sb_list_entry_destroy(exec_ctx, le);
}
l->head = NULL;
l->tail = NULL;
}
static bool slice_buffer_list_empty(slice_buffer_list *l) {
return l->head == NULL;
}
static void slice_buffer_list_append_entry(slice_buffer_list *l,
sb_list_entry *next) {
next->next = NULL;
if (l->tail) {
l->tail->next = next;
l->tail = next;
} else {
l->head = next;
l->tail = next;
}
}
static grpc_slice_buffer *slice_buffer_list_append(slice_buffer_list *l) {
sb_list_entry *next = (sb_list_entry *)gpr_malloc(sizeof(*next));
grpc_slice_buffer_init(&next->sb);
slice_buffer_list_append_entry(l, next);
return &next->sb;
}
static sb_list_entry *slice_buffer_list_pophead(slice_buffer_list *l) {
sb_list_entry *ret = l->head;
l->head = l->head->next;
if (l->head == NULL) {
l->tail = NULL;
}
return ret;
}
typedef struct inproc_stream {
inproc_transport *t;
grpc_metadata_batch to_read_initial_md;
uint32_t to_read_initial_md_flags;
bool to_read_initial_md_filled;
slice_buffer_list to_read_message;
grpc_metadata_batch to_read_trailing_md;
bool to_read_trailing_md_filled;
bool reads_needed;
bool read_closure_scheduled;
grpc_closure read_closure;
bool ops_needed;
bool op_closure_scheduled;
grpc_closure op_closure;
// Write buffer used only during gap at init time when client-side
// stream is set up but server side stream is not yet set up
grpc_metadata_batch write_buffer_initial_md;
bool write_buffer_initial_md_filled;
uint32_t write_buffer_initial_md_flags;
gpr_timespec write_buffer_deadline;
slice_buffer_list write_buffer_message;
grpc_millis write_buffer_deadline;
grpc_metadata_batch write_buffer_trailing_md;
bool write_buffer_trailing_md_filled;
grpc_error *write_buffer_cancel_error;
@ -164,11 +90,15 @@ typedef struct inproc_stream {
gpr_arena *arena;
grpc_transport_stream_op_batch *send_message_op;
grpc_transport_stream_op_batch *send_trailing_md_op;
grpc_transport_stream_op_batch *recv_initial_md_op;
grpc_transport_stream_op_batch *recv_message_op;
grpc_transport_stream_op_batch *recv_trailing_md_op;
inproc_slice_byte_stream recv_message_stream;
grpc_slice_buffer recv_message;
grpc_slice_buffer_stream recv_stream;
bool recv_inited;
bool initial_md_sent;
bool trailing_md_sent;
@ -180,61 +110,18 @@ typedef struct inproc_stream {
grpc_error *cancel_self_error;
grpc_error *cancel_other_error;
gpr_timespec deadline;
grpc_millis deadline;
bool listed;
struct inproc_stream *stream_list_prev;
struct inproc_stream *stream_list_next;
} inproc_stream;
static bool inproc_slice_byte_stream_next(grpc_exec_ctx *exec_ctx,
grpc_byte_stream *bs, size_t max,
grpc_closure *on_complete) {
// Because inproc transport always provides the entire message atomically,
// the byte stream always has data available when this function is called.
// Thus, this function always returns true (unlike other transports) and
// there is never any need to schedule a closure
return true;
}
static grpc_error *inproc_slice_byte_stream_pull(grpc_exec_ctx *exec_ctx,
grpc_byte_stream *bs,
grpc_slice *slice) {
inproc_slice_byte_stream *stream = (inproc_slice_byte_stream *)bs;
if (stream->shutdown_error != GRPC_ERROR_NONE) {
return GRPC_ERROR_REF(stream->shutdown_error);
}
*slice = grpc_slice_buffer_take_first(&stream->le->sb);
return GRPC_ERROR_NONE;
}
static void inproc_slice_byte_stream_shutdown(grpc_exec_ctx *exec_ctx,
grpc_byte_stream *bs,
grpc_error *error) {
inproc_slice_byte_stream *stream = (inproc_slice_byte_stream *)bs;
GRPC_ERROR_UNREF(stream->shutdown_error);
stream->shutdown_error = error;
}
static void inproc_slice_byte_stream_destroy(grpc_exec_ctx *exec_ctx,
grpc_byte_stream *bs) {
inproc_slice_byte_stream *stream = (inproc_slice_byte_stream *)bs;
sb_list_entry_destroy(exec_ctx, stream->le);
GRPC_ERROR_UNREF(stream->shutdown_error);
}
static const grpc_byte_stream_vtable inproc_slice_byte_stream_vtable = {
inproc_slice_byte_stream_next, inproc_slice_byte_stream_pull,
inproc_slice_byte_stream_shutdown, inproc_slice_byte_stream_destroy};
void inproc_slice_byte_stream_init(inproc_slice_byte_stream *s,
sb_list_entry *le) {
s->base.length = (uint32_t)le->sb.length;
s->base.flags = 0;
s->base.vtable = &inproc_slice_byte_stream_vtable;
s->le = le;
s->shutdown_error = GRPC_ERROR_NONE;
}
static grpc_closure do_nothing_closure;
static bool cancel_stream_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
grpc_error *error);
static void op_state_machine(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
static void ref_transport(inproc_transport *t) {
INPROC_LOG(GPR_DEBUG, "ref_transport %p", t);
@ -280,12 +167,14 @@ static void unref_stream(grpc_exec_ctx *exec_ctx, inproc_stream *s,
static void really_destroy_stream(grpc_exec_ctx *exec_ctx, inproc_stream *s) {
INPROC_LOG(GPR_DEBUG, "really_destroy_stream %p", s);
slice_buffer_list_destroy(exec_ctx, &s->to_read_message);
slice_buffer_list_destroy(exec_ctx, &s->write_buffer_message);
GRPC_ERROR_UNREF(s->write_buffer_cancel_error);
GRPC_ERROR_UNREF(s->cancel_self_error);
GRPC_ERROR_UNREF(s->cancel_other_error);
if (s->recv_inited) {
grpc_slice_buffer_destroy_internal(exec_ctx, &s->recv_message);
}
unref_transport(exec_ctx, s->t);
if (s->closure_at_destroy) {
@ -293,9 +182,6 @@ static void really_destroy_stream(grpc_exec_ctx *exec_ctx, inproc_stream *s) {
}
}
static void read_state_machine(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
static void log_metadata(const grpc_metadata_batch *md_batch, bool is_client,
bool is_initial) {
for (grpc_linked_mdelem *md = md_batch->list.head; md != NULL;
@ -359,11 +245,9 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
s->write_buffer_initial_md_filled = false;
grpc_metadata_batch_init(&s->write_buffer_trailing_md);
s->write_buffer_trailing_md_filled = false;
slice_buffer_list_init(&s->to_read_message);
slice_buffer_list_init(&s->write_buffer_message);
s->reads_needed = false;
s->read_closure_scheduled = false;
GRPC_CLOSURE_INIT(&s->read_closure, read_state_machine, s,
s->ops_needed = false;
s->op_closure_scheduled = false;
GRPC_CLOSURE_INIT(&s->op_closure, op_state_machine, s,
grpc_schedule_on_exec_ctx);
s->t = t;
s->closure_at_destroy = NULL;
@ -377,8 +261,8 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
s->cancel_self_error = GRPC_ERROR_NONE;
s->cancel_other_error = GRPC_ERROR_NONE;
s->write_buffer_cancel_error = GRPC_ERROR_NONE;
s->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
s->write_buffer_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
s->deadline = GRPC_MILLIS_INF_FUTURE;
s->write_buffer_deadline = GRPC_MILLIS_INF_FUTURE;
s->stream_list_prev = NULL;
gpr_mu_lock(&t->mu->mu);
@ -421,15 +305,10 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
cs->write_buffer_initial_md_flags,
&s->to_read_initial_md, &s->to_read_initial_md_flags,
&s->to_read_initial_md_filled);
s->deadline = gpr_time_min(s->deadline, cs->write_buffer_deadline);
s->deadline = GPR_MIN(s->deadline, cs->write_buffer_deadline);
grpc_metadata_batch_clear(exec_ctx, &cs->write_buffer_initial_md);
cs->write_buffer_initial_md_filled = false;
}
while (!slice_buffer_list_empty(&cs->write_buffer_message)) {
slice_buffer_list_append_entry(
&s->to_read_message,
slice_buffer_list_pophead(&cs->write_buffer_message));
}
if (cs->write_buffer_trailing_md_filled) {
fill_in_metadata(exec_ctx, s, &cs->write_buffer_trailing_md, 0,
&s->to_read_trailing_md, NULL,
@ -488,9 +367,39 @@ static void close_other_side_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
}
}
// Call the on_complete closure associated with this stream_op_batch only if
// the op being retired is the last pending operation on this stream that
// still refers to the batch. This is called when one of the pending
// operations for the stream is done and about to be NULLed out
static void complete_if_batch_end_locked(grpc_exec_ctx *exec_ctx,
inproc_stream *s, grpc_error *error,
grpc_transport_stream_op_batch *op,
const char *msg) {
int is_sm = (int)(op == s->send_message_op);
int is_stm = (int)(op == s->send_trailing_md_op);
int is_rim = (int)(op == s->recv_initial_md_op);
int is_rm = (int)(op == s->recv_message_op);
int is_rtm = (int)(op == s->recv_trailing_md_op);
if ((is_sm + is_stm + is_rim + is_rm + is_rtm) == 1) {
INPROC_LOG(GPR_DEBUG, "%s %p %p %p", msg, s, op, error);
GRPC_CLOSURE_SCHED(exec_ctx, op->on_complete, GRPC_ERROR_REF(error));
}
}
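To make the rule above concrete: a single stream_op_batch can occupy several of the pending-op slots on a stream, and its on_complete must fire exactly once, when the last slot that refers to it is retired. A self-contained sketch of that counting, using stand-in types rather than grpc_transport_stream_op_batch:
#include <stdio.h>
typedef struct { const char *name; } batch;  /* stand-in op batch */
typedef struct {
  batch *send_message_op;
  batch *send_trailing_md_op;
  batch *recv_initial_md_op;
  batch *recv_message_op;
  batch *recv_trailing_md_op;
} stream;
/* Fire on_complete only when exactly one pending slot still refers to the
   batch, i.e. when the slot about to be cleared is the last one. */
static void complete_if_batch_end(stream *s, batch *op, const char *msg) {
  int refs = (op == s->send_message_op) + (op == s->send_trailing_md_op) +
             (op == s->recv_initial_md_op) + (op == s->recv_message_op) +
             (op == s->recv_trailing_md_op);
  if (refs == 1) printf("%s: scheduling on_complete for %s\n", msg, op->name);
}
int main(void) {
  batch b = {"unary-call-batch"};
  stream s = {&b, NULL, &b, &b, NULL};  /* batch pending in three slots */
  complete_if_batch_end(&s, &b, "retiring recv_initial_md");  /* stays quiet */
  s.recv_initial_md_op = NULL;
  s.send_message_op = NULL;
  complete_if_batch_end(&s, &b, "retiring recv_message");     /* fires once */
  s.recv_message_op = NULL;
  return 0;
}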
static void maybe_schedule_op_closure_locked(grpc_exec_ctx *exec_ctx,
inproc_stream *s,
grpc_error *error) {
if (s && s->ops_needed && !s->op_closure_scheduled) {
GRPC_CLOSURE_SCHED(exec_ctx, &s->op_closure, GRPC_ERROR_REF(error));
s->op_closure_scheduled = true;
s->ops_needed = false;
}
}
static void fail_helper_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
grpc_error *error) {
INPROC_LOG(GPR_DEBUG, "read_state_machine %p fail_helper", s);
INPROC_LOG(GPR_DEBUG, "op_state_machine %p fail_helper", s);
// If we're failing this side, we need to make sure that
// we also send or have already sent trailing metadata
if (!s->trailing_md_sent) {
@ -512,14 +421,7 @@ static void fail_helper_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
if (other->cancel_other_error == GRPC_ERROR_NONE) {
other->cancel_other_error = GRPC_ERROR_REF(error);
}
if (other->reads_needed) {
if (!other->read_closure_scheduled) {
GRPC_CLOSURE_SCHED(exec_ctx, &other->read_closure,
GRPC_ERROR_REF(error));
other->read_closure_scheduled = true;
}
other->reads_needed = false;
}
maybe_schedule_op_closure_locked(exec_ctx, other, error);
} else if (s->write_buffer_cancel_error == GRPC_ERROR_NONE) {
s->write_buffer_cancel_error = GRPC_ERROR_REF(error);
}
@ -564,14 +466,9 @@ static void fail_helper_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
err);
// Last use of err so no need to REF and then UNREF it
if ((s->recv_initial_md_op != s->recv_message_op) &&
(s->recv_initial_md_op != s->recv_trailing_md_op)) {
INPROC_LOG(GPR_DEBUG,
"fail_helper %p scheduling initial-metadata-on-complete %p",
error, s);
GRPC_CLOSURE_SCHED(exec_ctx, s->recv_initial_md_op->on_complete,
GRPC_ERROR_REF(error));
}
complete_if_batch_end_locked(
exec_ctx, s, error, s->recv_initial_md_op,
"fail_helper scheduling recv-initial-metadata-on-complete");
s->recv_initial_md_op = NULL;
}
if (s->recv_message_op) {
@ -580,20 +477,30 @@ static void fail_helper_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
GRPC_CLOSURE_SCHED(
exec_ctx, s->recv_message_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_REF(error));
if (s->recv_message_op != s->recv_trailing_md_op) {
INPROC_LOG(GPR_DEBUG, "fail_helper %p scheduling message-on-complete %p",
s, error);
GRPC_CLOSURE_SCHED(exec_ctx, s->recv_message_op->on_complete,
GRPC_ERROR_REF(error));
}
complete_if_batch_end_locked(
exec_ctx, s, error, s->recv_message_op,
"fail_helper scheduling recv-message-on-complete");
s->recv_message_op = NULL;
}
if (s->send_message_op) {
complete_if_batch_end_locked(
exec_ctx, s, error, s->send_message_op,
"fail_helper scheduling send-message-on-complete");
s->send_message_op = NULL;
}
if (s->send_trailing_md_op) {
complete_if_batch_end_locked(
exec_ctx, s, error, s->send_trailing_md_op,
"fail_helper scheduling send-trailng-md-on-complete");
s->send_trailing_md_op = NULL;
}
if (s->recv_trailing_md_op) {
INPROC_LOG(GPR_DEBUG,
"fail_helper %p scheduling trailing-md-on-complete %p", s,
error);
GRPC_CLOSURE_SCHED(exec_ctx, s->recv_trailing_md_op->on_complete,
GRPC_ERROR_REF(error));
complete_if_batch_end_locked(
exec_ctx, s, error, s->recv_trailing_md_op,
"fail_helper scheduling recv-trailing-metadata-on-complete");
s->recv_trailing_md_op = NULL;
}
close_other_side_locked(exec_ctx, s, "fail_helper:other_side");
@ -602,12 +509,61 @@ static void fail_helper_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
GRPC_ERROR_UNREF(error);
}
static void read_state_machine(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
static void message_transfer_locked(grpc_exec_ctx *exec_ctx,
inproc_stream *sender,
inproc_stream *receiver) {
size_t remaining =
sender->send_message_op->payload->send_message.send_message->length;
if (receiver->recv_inited) {
grpc_slice_buffer_destroy_internal(exec_ctx, &receiver->recv_message);
}
grpc_slice_buffer_init(&receiver->recv_message);
receiver->recv_inited = true;
do {
grpc_slice message_slice;
grpc_closure unused;
GPR_ASSERT(grpc_byte_stream_next(
exec_ctx, sender->send_message_op->payload->send_message.send_message,
SIZE_MAX, &unused));
grpc_error *error = grpc_byte_stream_pull(
exec_ctx, sender->send_message_op->payload->send_message.send_message,
&message_slice);
if (error != GRPC_ERROR_NONE) {
cancel_stream_locked(exec_ctx, sender, GRPC_ERROR_REF(error));
break;
}
GPR_ASSERT(error == GRPC_ERROR_NONE);
remaining -= GRPC_SLICE_LENGTH(message_slice);
grpc_slice_buffer_add(&receiver->recv_message, message_slice);
} while (remaining > 0);
grpc_slice_buffer_stream_init(&receiver->recv_stream, &receiver->recv_message,
0);
*receiver->recv_message_op->payload->recv_message.recv_message =
&receiver->recv_stream.base;
INPROC_LOG(GPR_DEBUG, "message_transfer_locked %p scheduling message-ready",
receiver);
GRPC_CLOSURE_SCHED(
exec_ctx,
receiver->recv_message_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
complete_if_batch_end_locked(
exec_ctx, sender, GRPC_ERROR_NONE, sender->send_message_op,
"message_transfer scheduling sender on_complete");
complete_if_batch_end_locked(
exec_ctx, receiver, GRPC_ERROR_NONE, receiver->recv_message_op,
"message_transfer scheduling receiver on_complete");
receiver->recv_message_op = NULL;
sender->send_message_op = NULL;
}
static void op_state_machine(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
// This function gets called when we have contents in the unprocessed reads
// Get what we want based on our ops wanted
// Schedule our appropriate closures
// and then return to reads_needed state if still needed
// and then return to ops_needed state if still needed
// Since this is a closure directly invoked by the combiner, it should not
// unref the error parameter explicitly; the combiner will do that implicitly
@ -615,12 +571,14 @@ static void read_state_machine(grpc_exec_ctx *exec_ctx, void *arg,
bool needs_close = false;
INPROC_LOG(GPR_DEBUG, "read_state_machine %p", arg);
INPROC_LOG(GPR_DEBUG, "op_state_machine %p", arg);
inproc_stream *s = (inproc_stream *)arg;
gpr_mu *mu = &s->t->mu->mu; // keep aside in case s gets closed
gpr_mu_lock(mu);
s->read_closure_scheduled = false;
s->op_closure_scheduled = false;
// cancellation takes precedence
inproc_stream *other = s->other_side;
if (s->cancel_self_error != GRPC_ERROR_NONE) {
fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(s->cancel_self_error));
goto done;
@ -632,89 +590,116 @@ static void read_state_machine(grpc_exec_ctx *exec_ctx, void *arg,
goto done;
}
if (s->recv_initial_md_op) {
if (!s->to_read_initial_md_filled) {
// We entered the state machine on some other kind of read even though
// we still haven't satisfied initial md. That's an error.
new_err =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unexpected frame sequencing");
INPROC_LOG(GPR_DEBUG,
"read_state_machine %p scheduling on_complete errors for no "
"initial md %p",
s, new_err);
if (s->send_message_op && other) {
if (other->recv_message_op) {
message_transfer_locked(exec_ctx, s, other);
maybe_schedule_op_closure_locked(exec_ctx, other, GRPC_ERROR_NONE);
} else if (!s->t->is_client &&
(s->trailing_md_sent || other->recv_trailing_md_op)) {
// A server send will never be matched if the client is waiting
// for trailing metadata already
complete_if_batch_end_locked(
exec_ctx, s, GRPC_ERROR_NONE, s->send_message_op,
"op_state_machine scheduling send-message-on-complete");
s->send_message_op = NULL;
}
}
// Pause a send trailing metadata if there is still an outstanding
// send message unless we know that the send message will never get
// matched to a receive. This happens on the client if the server has
// already sent status.
if (s->send_trailing_md_op &&
(!s->send_message_op ||
(s->t->is_client &&
(s->trailing_md_recvd || s->to_read_trailing_md_filled)))) {
grpc_metadata_batch *dest = (other == NULL) ? &s->write_buffer_trailing_md
: &other->to_read_trailing_md;
bool *destfilled = (other == NULL) ? &s->write_buffer_trailing_md_filled
: &other->to_read_trailing_md_filled;
if (*destfilled || s->trailing_md_sent) {
// The buffer is already in use; that's an error!
INPROC_LOG(GPR_DEBUG, "Extra trailing metadata %p", s);
new_err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Extra trailing metadata");
fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err));
goto done;
} else if (s->initial_md_recvd) {
} else {
if (other && !other->closed) {
fill_in_metadata(exec_ctx, s,
s->send_trailing_md_op->payload->send_trailing_metadata
.send_trailing_metadata,
0, dest, NULL, destfilled);
}
s->trailing_md_sent = true;
if (!s->t->is_client && s->trailing_md_recvd && s->recv_trailing_md_op) {
INPROC_LOG(GPR_DEBUG,
"op_state_machine %p scheduling trailing-md-on-complete", s);
GRPC_CLOSURE_SCHED(exec_ctx, s->recv_trailing_md_op->on_complete,
GRPC_ERROR_NONE);
s->recv_trailing_md_op = NULL;
needs_close = true;
}
}
maybe_schedule_op_closure_locked(exec_ctx, other, GRPC_ERROR_NONE);
complete_if_batch_end_locked(
exec_ctx, s, GRPC_ERROR_NONE, s->send_trailing_md_op,
"op_state_machine scheduling send-trailing-metadata-on-complete");
s->send_trailing_md_op = NULL;
}
if (s->recv_initial_md_op) {
if (s->initial_md_recvd) {
new_err =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already recvd initial md");
INPROC_LOG(
GPR_DEBUG,
"read_state_machine %p scheduling on_complete errors for already "
"op_state_machine %p scheduling on_complete errors for already "
"recvd initial md %p",
s, new_err);
fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err));
goto done;
}
s->initial_md_recvd = true;
new_err = fill_in_metadata(
exec_ctx, s, &s->to_read_initial_md, s->to_read_initial_md_flags,
s->recv_initial_md_op->payload->recv_initial_metadata
.recv_initial_metadata,
s->recv_initial_md_op->payload->recv_initial_metadata.recv_flags, NULL);
s->recv_initial_md_op->payload->recv_initial_metadata.recv_initial_metadata
->deadline = s->deadline;
grpc_metadata_batch_clear(exec_ctx, &s->to_read_initial_md);
s->to_read_initial_md_filled = false;
INPROC_LOG(GPR_DEBUG,
"read_state_machine %p scheduling initial-metadata-ready %p", s,
new_err);
GRPC_CLOSURE_SCHED(exec_ctx,
s->recv_initial_md_op->payload->recv_initial_metadata
.recv_initial_metadata_ready,
GRPC_ERROR_REF(new_err));
if ((s->recv_initial_md_op != s->recv_message_op) &&
(s->recv_initial_md_op != s->recv_trailing_md_op)) {
INPROC_LOG(
GPR_DEBUG,
"read_state_machine %p scheduling initial-metadata-on-complete %p", s,
new_err);
GRPC_CLOSURE_SCHED(exec_ctx, s->recv_initial_md_op->on_complete,
GRPC_ERROR_REF(new_err));
}
s->recv_initial_md_op = NULL;
if (new_err != GRPC_ERROR_NONE) {
if (s->to_read_initial_md_filled) {
s->initial_md_recvd = true;
new_err = fill_in_metadata(
exec_ctx, s, &s->to_read_initial_md, s->to_read_initial_md_flags,
s->recv_initial_md_op->payload->recv_initial_metadata
.recv_initial_metadata,
s->recv_initial_md_op->payload->recv_initial_metadata.recv_flags,
NULL);
s->recv_initial_md_op->payload->recv_initial_metadata
.recv_initial_metadata->deadline = s->deadline;
grpc_metadata_batch_clear(exec_ctx, &s->to_read_initial_md);
s->to_read_initial_md_filled = false;
INPROC_LOG(GPR_DEBUG,
"read_state_machine %p scheduling on_complete errors2 %p", s,
"op_state_machine %p scheduling initial-metadata-ready %p", s,
new_err);
fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err));
goto done;
GRPC_CLOSURE_SCHED(exec_ctx,
s->recv_initial_md_op->payload->recv_initial_metadata
.recv_initial_metadata_ready,
GRPC_ERROR_REF(new_err));
complete_if_batch_end_locked(
exec_ctx, s, new_err, s->recv_initial_md_op,
"op_state_machine scheduling recv-initial-metadata-on-complete");
s->recv_initial_md_op = NULL;
if (new_err != GRPC_ERROR_NONE) {
INPROC_LOG(GPR_DEBUG,
"op_state_machine %p scheduling on_complete errors2 %p", s,
new_err);
fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err));
goto done;
}
}
}
if (s->to_read_initial_md_filled) {
new_err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unexpected recv frame");
fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err));
goto done;
}
if (!slice_buffer_list_empty(&s->to_read_message) && s->recv_message_op) {
inproc_slice_byte_stream_init(
&s->recv_message_stream,
slice_buffer_list_pophead(&s->to_read_message));
*s->recv_message_op->payload->recv_message.recv_message =
&s->recv_message_stream.base;
INPROC_LOG(GPR_DEBUG, "read_state_machine %p scheduling message-ready", s);
GRPC_CLOSURE_SCHED(
exec_ctx, s->recv_message_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
if (s->recv_message_op != s->recv_trailing_md_op) {
INPROC_LOG(GPR_DEBUG,
"read_state_machine %p scheduling message-on-complete %p", s,
new_err);
GRPC_CLOSURE_SCHED(exec_ctx, s->recv_message_op->on_complete,
GRPC_ERROR_REF(new_err));
if (s->recv_message_op) {
if (other && other->send_message_op) {
message_transfer_locked(exec_ctx, other, s);
maybe_schedule_op_closure_locked(exec_ctx, other, GRPC_ERROR_NONE);
}
s->recv_message_op = NULL;
}
if (s->recv_trailing_md_op && s->t->is_client && other &&
other->send_message_op) {
maybe_schedule_op_closure_locked(exec_ctx, other, GRPC_ERROR_NONE);
}
if (s->to_read_trailing_md_filled) {
if (s->trailing_md_recvd) {
@ -722,7 +707,7 @@ static void read_state_machine(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already recvd trailing md");
INPROC_LOG(
GPR_DEBUG,
"read_state_machine %p scheduling on_complete errors for already "
"op_state_machine %p scheduling on_complete errors for already "
"recvd trailing md %p",
s, new_err);
fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err));
@ -731,21 +716,24 @@ static void read_state_machine(grpc_exec_ctx *exec_ctx, void *arg,
if (s->recv_message_op != NULL) {
// This message needs to be wrapped up because it will never be
// satisfied
INPROC_LOG(GPR_DEBUG, "read_state_machine %p scheduling message-ready",
s);
INPROC_LOG(GPR_DEBUG, "op_state_machine %p scheduling message-ready", s);
GRPC_CLOSURE_SCHED(
exec_ctx,
s->recv_message_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
if (s->recv_message_op != s->recv_trailing_md_op) {
INPROC_LOG(GPR_DEBUG,
"read_state_machine %p scheduling message-on-complete %p", s,
new_err);
GRPC_CLOSURE_SCHED(exec_ctx, s->recv_message_op->on_complete,
GRPC_ERROR_REF(new_err));
}
complete_if_batch_end_locked(
exec_ctx, s, new_err, s->recv_message_op,
"op_state_machine scheduling recv-message-on-complete");
s->recv_message_op = NULL;
}
if ((s->trailing_md_sent || s->t->is_client) && s->send_message_op) {
// Nothing further will try to receive from this stream, so finish off
// any outstanding send_message op
complete_if_batch_end_locked(
exec_ctx, s, new_err, s->send_message_op,
"op_state_machine scheduling send-message-on-complete");
s->send_message_op = NULL;
}
if (s->recv_trailing_md_op != NULL) {
// We wanted trailing metadata and we got it
s->trailing_md_recvd = true;
@ -763,61 +751,65 @@ static void read_state_machine(grpc_exec_ctx *exec_ctx, void *arg,
// (If the server hasn't already sent its trailing md, it doesn't have
// a final status, so don't mark this op complete)
if (s->t->is_client || s->trailing_md_sent) {
INPROC_LOG(
GPR_DEBUG,
"read_state_machine %p scheduling trailing-md-on-complete %p", s,
new_err);
INPROC_LOG(GPR_DEBUG,
"op_state_machine %p scheduling trailing-md-on-complete %p",
s, new_err);
GRPC_CLOSURE_SCHED(exec_ctx, s->recv_trailing_md_op->on_complete,
GRPC_ERROR_REF(new_err));
s->recv_trailing_md_op = NULL;
needs_close = true;
} else {
INPROC_LOG(GPR_DEBUG,
"read_state_machine %p server needs to delay handling "
"op_state_machine %p server needs to delay handling "
"trailing-md-on-complete %p",
s, new_err);
}
} else {
INPROC_LOG(
GPR_DEBUG,
"read_state_machine %p has trailing md but not yet waiting for it",
s);
"op_state_machine %p has trailing md but not yet waiting for it", s);
}
}
if (s->trailing_md_recvd && s->recv_message_op) {
// No further message will come on this stream, so finish off the
// recv_message_op
INPROC_LOG(GPR_DEBUG, "read_state_machine %p scheduling message-ready", s);
INPROC_LOG(GPR_DEBUG, "op_state_machine %p scheduling message-ready", s);
GRPC_CLOSURE_SCHED(
exec_ctx, s->recv_message_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
if (s->recv_message_op != s->recv_trailing_md_op) {
INPROC_LOG(GPR_DEBUG,
"read_state_machine %p scheduling message-on-complete %p", s,
new_err);
GRPC_CLOSURE_SCHED(exec_ctx, s->recv_message_op->on_complete,
GRPC_ERROR_REF(new_err));
}
complete_if_batch_end_locked(
exec_ctx, s, new_err, s->recv_message_op,
"op_state_machine scheduling recv-message-on-complete");
s->recv_message_op = NULL;
}
if (s->recv_message_op || s->recv_trailing_md_op) {
if (s->trailing_md_recvd && (s->trailing_md_sent || s->t->is_client) &&
s->send_message_op) {
// Nothing further will try to receive from this stream, so finish off
// any outstanding send_message op
complete_if_batch_end_locked(
exec_ctx, s, new_err, s->send_message_op,
"op_state_machine scheduling send-message-on-complete");
s->send_message_op = NULL;
}
if (s->send_message_op || s->send_trailing_md_op || s->recv_initial_md_op ||
s->recv_message_op || s->recv_trailing_md_op) {
// Didn't get the item we wanted so we still need to get
// rescheduled
INPROC_LOG(GPR_DEBUG, "read_state_machine %p still needs closure %p %p", s,
s->recv_message_op, s->recv_trailing_md_op);
s->reads_needed = true;
INPROC_LOG(
GPR_DEBUG, "op_state_machine %p still needs closure %p %p %p %p %p", s,
s->send_message_op, s->send_trailing_md_op, s->recv_initial_md_op,
s->recv_message_op, s->recv_trailing_md_op);
s->ops_needed = true;
}
done:
if (needs_close) {
close_other_side_locked(exec_ctx, s, "read_state_machine");
close_other_side_locked(exec_ctx, s, "op_state_machine");
close_stream_locked(exec_ctx, s);
}
gpr_mu_unlock(mu);
GRPC_ERROR_UNREF(new_err);
}
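The overall shape of the state machine above is a rendezvous: a message only moves when one side has a parked send_message op and the other has a parked recv_message op, and anything left unmatched sets ops_needed so the closure runs again once the peer posts its op. A toy stand-alone model of that behavior (deliberately not the inproc types):
#include <stdbool.h>
#include <stdio.h>
typedef struct {
  const char *pending_send;  /* parked send_message payload, if any */
  bool wants_recv;           /* parked recv_message op, if any      */
  bool ops_needed;           /* must re-run once the peer shows up  */
} side;
static void op_state_machine(side *self, side *other) {
  self->ops_needed = false;
  if (self->pending_send != NULL && other->wants_recv) {
    printf("delivered: %s\n", self->pending_send);  /* the transfer step */
    self->pending_send = NULL;
    other->wants_recv = false;
  }
  if (self->pending_send != NULL || self->wants_recv) {
    self->ops_needed = true;  /* didn't get what we wanted; reschedule later */
  }
}
int main(void) {
  side client = {NULL, false, false};
  side server = {NULL, false, false};
  client.pending_send = "request";     /* client sends before server reads */
  op_state_machine(&client, &server);  /* unmatched: stays parked */
  printf("client ops_needed=%d\n", client.ops_needed);
  server.wants_recv = true;            /* server posts its recv_message op */
  op_state_machine(&client, &server);  /* now the transfer happens */
  printf("client ops_needed=%d\n", client.ops_needed);
  return 0;
}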
static grpc_closure do_nothing_closure;
static bool cancel_stream_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
grpc_error *error) {
bool ret = false; // was the cancel accepted
@ -826,14 +818,7 @@ static bool cancel_stream_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
if (s->cancel_self_error == GRPC_ERROR_NONE) {
ret = true;
s->cancel_self_error = GRPC_ERROR_REF(error);
if (s->reads_needed) {
if (!s->read_closure_scheduled) {
GRPC_CLOSURE_SCHED(exec_ctx, &s->read_closure,
GRPC_ERROR_REF(s->cancel_self_error));
s->read_closure_scheduled = true;
}
s->reads_needed = false;
}
maybe_schedule_op_closure_locked(exec_ctx, s, s->cancel_self_error);
// Send trailing md to the other side indicating cancellation, even if we
// have already sent it
s->trailing_md_sent = true;
@ -853,14 +838,8 @@ static bool cancel_stream_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
if (other->cancel_other_error == GRPC_ERROR_NONE) {
other->cancel_other_error = GRPC_ERROR_REF(s->cancel_self_error);
}
if (other->reads_needed) {
if (!other->read_closure_scheduled) {
GRPC_CLOSURE_SCHED(exec_ctx, &other->read_closure,
GRPC_ERROR_REF(other->cancel_other_error));
other->read_closure_scheduled = true;
}
other->reads_needed = false;
}
maybe_schedule_op_closure_locked(exec_ctx, other,
other->cancel_other_error);
} else if (s->write_buffer_cancel_error == GRPC_ERROR_NONE) {
s->write_buffer_cancel_error = GRPC_ERROR_REF(s->cancel_self_error);
}
@ -869,11 +848,9 @@ static bool cancel_stream_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
// couldn't complete that because we hadn't yet sent out trailing
// md, now's the chance
if (!s->t->is_client && s->trailing_md_recvd && s->recv_trailing_md_op) {
INPROC_LOG(GPR_DEBUG,
"cancel_stream %p scheduling trailing-md-on-complete %p", s,
s->cancel_self_error);
GRPC_CLOSURE_SCHED(exec_ctx, s->recv_trailing_md_op->on_complete,
GRPC_ERROR_REF(s->cancel_self_error));
complete_if_batch_end_locked(
exec_ctx, s, s->cancel_self_error, s->recv_trailing_md_op,
"cancel_stream scheduling trailing-md-on-complete");
s->recv_trailing_md_op = NULL;
}
}
@ -918,7 +895,8 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
// already self-canceled so still give it an error
error = GRPC_ERROR_REF(s->cancel_self_error);
} else {
INPROC_LOG(GPR_DEBUG, "perform_stream_op %p%s%s%s%s%s%s", s,
INPROC_LOG(GPR_DEBUG, "perform_stream_op %p %s%s%s%s%s%s%s", s,
s->t->is_client ? "client" : "server",
op->send_initial_metadata ? " send_initial_metadata" : "",
op->send_message ? " send_message" : "",
op->send_trailing_metadata ? " send_trailing_metadata" : "",
@ -929,10 +907,9 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
bool needs_close = false;
inproc_stream *other = s->other_side;
if (error == GRPC_ERROR_NONE &&
(op->send_initial_metadata || op->send_message ||
op->send_trailing_metadata)) {
inproc_stream *other = s->other_side;
(op->send_initial_metadata || op->send_trailing_metadata)) {
if (s->t->is_closed) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Endpoint already shutdown");
}
@ -956,79 +933,28 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
dest, destflags, destfilled);
}
if (s->t->is_client) {
gpr_timespec *dl =
grpc_millis *dl =
(other == NULL) ? &s->write_buffer_deadline : &other->deadline;
*dl = gpr_time_min(*dl, op->payload->send_initial_metadata
.send_initial_metadata->deadline);
*dl = GPR_MIN(*dl, op->payload->send_initial_metadata
.send_initial_metadata->deadline);
s->initial_md_sent = true;
}
}
}
if (error == GRPC_ERROR_NONE && op->send_message) {
size_t remaining = op->payload->send_message.send_message->length;
grpc_slice_buffer *dest = slice_buffer_list_append(
(other == NULL) ? &s->write_buffer_message : &other->to_read_message);
do {
grpc_slice message_slice;
grpc_closure unused;
GPR_ASSERT(grpc_byte_stream_next(exec_ctx,
op->payload->send_message.send_message,
SIZE_MAX, &unused));
error = grpc_byte_stream_pull(
exec_ctx, op->payload->send_message.send_message, &message_slice);
if (error != GRPC_ERROR_NONE) {
cancel_stream_locked(exec_ctx, s, GRPC_ERROR_REF(error));
break;
}
GPR_ASSERT(error == GRPC_ERROR_NONE);
remaining -= GRPC_SLICE_LENGTH(message_slice);
grpc_slice_buffer_add(dest, message_slice);
} while (remaining != 0);
grpc_byte_stream_destroy(exec_ctx,
op->payload->send_message.send_message);
}
if (error == GRPC_ERROR_NONE && op->send_trailing_metadata) {
grpc_metadata_batch *dest = (other == NULL) ? &s->write_buffer_trailing_md
: &other->to_read_trailing_md;
bool *destfilled = (other == NULL) ? &s->write_buffer_trailing_md_filled
: &other->to_read_trailing_md_filled;
if (*destfilled || s->trailing_md_sent) {
// The buffer is already in use; that's an error!
INPROC_LOG(GPR_DEBUG, "Extra trailing metadata %p", s);
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Extra trailing metadata");
} else {
if (!other->closed) {
fill_in_metadata(
exec_ctx, s,
op->payload->send_trailing_metadata.send_trailing_metadata, 0,
dest, NULL, destfilled);
}
s->trailing_md_sent = true;
if (!s->t->is_client && s->trailing_md_recvd &&
s->recv_trailing_md_op) {
INPROC_LOG(GPR_DEBUG,
"perform_stream_op %p scheduling trailing-md-on-complete",
s);
GRPC_CLOSURE_SCHED(exec_ctx, s->recv_trailing_md_op->on_complete,
GRPC_ERROR_NONE);
s->recv_trailing_md_op = NULL;
needs_close = true;
}
}
}
if (other != NULL && other->reads_needed) {
if (!other->read_closure_scheduled) {
GRPC_CLOSURE_SCHED(exec_ctx, &other->read_closure, error);
other->read_closure_scheduled = true;
}
other->reads_needed = false;
maybe_schedule_op_closure_locked(exec_ctx, other, error);
}
}
if (error == GRPC_ERROR_NONE &&
(op->recv_initial_metadata || op->recv_message ||
(op->send_message || op->send_trailing_metadata ||
op->recv_initial_metadata || op->recv_message ||
op->recv_trailing_metadata)) {
// If there are any reads, mark it so that the read closure will react to
// them
// Mark ops that need to be processed by the closure
if (op->send_message) {
s->send_message_op = op;
}
if (op->send_trailing_metadata) {
s->send_trailing_md_op = op;
}
if (op->recv_initial_metadata) {
s->recv_initial_md_op = op;
}
@ -1040,25 +966,28 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
}
// We want to initiate the closure if:
// 1. There is initial metadata and something ready to take that
// 2. There is a message and something ready to take it
// 3. There is trailing metadata, even if nothing specifically wants
// that because that can shut down the message as well
if ((s->to_read_initial_md_filled && op->recv_initial_metadata) ||
((!slice_buffer_list_empty(&s->to_read_message) ||
s->trailing_md_recvd) &&
op->recv_message) ||
(s->to_read_trailing_md_filled)) {
if (!s->read_closure_scheduled) {
GRPC_CLOSURE_SCHED(exec_ctx, &s->read_closure, GRPC_ERROR_NONE);
s->read_closure_scheduled = true;
// 1. We want to send a message and the other side wants to receive or end
// 2. We want to send trailing metadata and there isn't an unmatched send
// 3. We want initial metadata and the other side has sent it
// 4. We want to receive a message and there is a message ready
// 5. There is trailing metadata, even if nothing specifically wants
// that because that can shut down the receive message as well
if ((op->send_message && other && ((other->recv_message_op != NULL) ||
(other->recv_trailing_md_op != NULL))) ||
(op->send_trailing_metadata && !op->send_message) ||
(op->recv_initial_metadata && s->to_read_initial_md_filled) ||
(op->recv_message && (other && other->send_message_op != NULL)) ||
(s->to_read_trailing_md_filled || s->trailing_md_recvd)) {
if (!s->op_closure_scheduled) {
GRPC_CLOSURE_SCHED(exec_ctx, &s->op_closure, GRPC_ERROR_NONE);
s->op_closure_scheduled = true;
}
} else {
s->reads_needed = true;
s->ops_needed = true;
}
} else {
if (error != GRPC_ERROR_NONE) {
// Schedule op's read closures that we didn't push to read state machine
// Schedule op's closures that we didn't push to op state machine
if (op->recv_initial_metadata) {
INPROC_LOG(
GPR_DEBUG,

@ -16,13 +16,14 @@
*
*/
#include "src/core/lib/support/backoff.h"
#include "src/core/lib/backoff/backoff.h"
#include <grpc/support/useful.h>
void gpr_backoff_init(gpr_backoff *backoff, int64_t initial_connect_timeout,
double multiplier, double jitter,
int64_t min_timeout_millis, int64_t max_timeout_millis) {
void grpc_backoff_init(grpc_backoff *backoff,
grpc_millis initial_connect_timeout, double multiplier,
double jitter, grpc_millis min_timeout_millis,
grpc_millis max_timeout_millis) {
backoff->initial_connect_timeout = initial_connect_timeout;
backoff->multiplier = multiplier;
backoff->jitter = jitter;
@ -31,11 +32,11 @@ void gpr_backoff_init(gpr_backoff *backoff, int64_t initial_connect_timeout,
backoff->rng_state = (uint32_t)gpr_now(GPR_CLOCK_REALTIME).tv_nsec;
}
gpr_timespec gpr_backoff_begin(gpr_backoff *backoff, gpr_timespec now) {
grpc_millis grpc_backoff_begin(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff) {
backoff->current_timeout_millis = backoff->initial_connect_timeout;
const int64_t first_timeout =
const grpc_millis first_timeout =
GPR_MAX(backoff->current_timeout_millis, backoff->min_timeout_millis);
return gpr_time_add(now, gpr_time_from_millis(first_timeout, GPR_TIMESPAN));
return grpc_exec_ctx_now(exec_ctx) + first_timeout;
}
/* Generate a random number between 0 and 1. */
@ -44,11 +45,11 @@ static double generate_uniform_random_number(uint32_t *rng_state) {
return *rng_state / (double)((uint32_t)1 << 31);
}
gpr_timespec gpr_backoff_step(gpr_backoff *backoff, gpr_timespec now) {
grpc_millis grpc_backoff_step(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff) {
const double new_timeout_millis =
backoff->multiplier * (double)backoff->current_timeout_millis;
backoff->current_timeout_millis =
GPR_MIN((int64_t)new_timeout_millis, backoff->max_timeout_millis);
GPR_MIN((grpc_millis)new_timeout_millis, backoff->max_timeout_millis);
const double jitter_range_width = backoff->jitter * new_timeout_millis;
const double jitter =
@ -56,17 +57,17 @@ gpr_timespec gpr_backoff_step(gpr_backoff *backoff, gpr_timespec now) {
jitter_range_width;
backoff->current_timeout_millis =
(int64_t)((double)(backoff->current_timeout_millis) + jitter);
(grpc_millis)((double)(backoff->current_timeout_millis) + jitter);
const gpr_timespec current_deadline = gpr_time_add(
now, gpr_time_from_millis(backoff->current_timeout_millis, GPR_TIMESPAN));
const grpc_millis current_deadline =
grpc_exec_ctx_now(exec_ctx) + backoff->current_timeout_millis;
const gpr_timespec min_deadline = gpr_time_add(
now, gpr_time_from_millis(backoff->min_timeout_millis, GPR_TIMESPAN));
const grpc_millis min_deadline =
grpc_exec_ctx_now(exec_ctx) + backoff->min_timeout_millis;
return gpr_time_max(current_deadline, min_deadline);
return GPR_MAX(current_deadline, min_deadline);
}
void gpr_backoff_reset(gpr_backoff *backoff) {
void grpc_backoff_reset(grpc_backoff *backoff) {
backoff->current_timeout_millis = backoff->initial_connect_timeout;
}
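For reference, the arithmetic above (grow the timeout by the multiplier, clamp it to the maximum, perturb it by a uniform jitter, and never retry sooner than the minimum) can be exercised with a stand-alone sketch; the constants below are illustrative, not gRPC's defaults:
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#define MULTIPLIER 1.6
#define JITTER 0.2
#define INITIAL_MS 100
#define MIN_MS 100
#define MAX_MS 20000
static double min_d(double a, double b) { return a < b ? a : b; }
static int64_t max_i(int64_t a, int64_t b) { return a > b ? a : b; }
int main(void) {
  double current_ms = INITIAL_MS;  /* where the begin step starts */
  int64_t now_ms = 0;              /* stand-in for grpc_exec_ctx_now() */
  for (int attempt = 1; attempt <= 5; attempt++) {
    /* step: grow the timeout and clamp it to the maximum... */
    current_ms = min_d(current_ms * MULTIPLIER, (double)MAX_MS);
    /* ...then jitter it uniformly within +/- JITTER * timeout... */
    double r = (double)rand() / (double)RAND_MAX; /* 0..1 */
    int64_t timeout_ms =
        (int64_t)(current_ms + (2 * r - 1) * JITTER * current_ms);
    /* ...and never schedule the retry sooner than MIN_MS from now. */
    int64_t deadline = now_ms + max_i(timeout_ms, MIN_MS);
    printf("attempt %d: next retry at t=%lld ms\n", attempt,
           (long long)deadline);
    now_ms = deadline;  /* pretend the attempt failed right at the deadline */
  }
  return 0;
}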

@ -16,10 +16,10 @@
*
*/
#ifndef GRPC_CORE_LIB_SUPPORT_BACKOFF_H
#define GRPC_CORE_LIB_SUPPORT_BACKOFF_H
#ifndef GRPC_CORE_LIB_BACKOFF_BACKOFF_H
#define GRPC_CORE_LIB_BACKOFF_BACKOFF_H
#include <grpc/support/time.h>
#include "src/core/lib/iomgr/exec_ctx.h"
#ifdef __cplusplus
extern "C" {
@ -27,38 +27,40 @@ extern "C" {
typedef struct {
/// const: how long to wait after the first failure before retrying
int64_t initial_connect_timeout;
grpc_millis initial_connect_timeout;
/// const: factor with which to multiply backoff after a failed retry
double multiplier;
/// const: amount to randomize backoffs
double jitter;
/// const: minimum time between retries in milliseconds
int64_t min_timeout_millis;
grpc_millis min_timeout_millis;
/// const: maximum time between retries in milliseconds
int64_t max_timeout_millis;
grpc_millis max_timeout_millis;
/// random number generator
uint32_t rng_state;
/// current retry timeout in milliseconds
int64_t current_timeout_millis;
} gpr_backoff;
grpc_millis current_timeout_millis;
} grpc_backoff;
/// Initialize backoff machinery - does not need to be destroyed
void gpr_backoff_init(gpr_backoff *backoff, int64_t initial_connect_timeout,
double multiplier, double jitter,
int64_t min_timeout_millis, int64_t max_timeout_millis);
void grpc_backoff_init(grpc_backoff *backoff,
grpc_millis initial_connect_timeout, double multiplier,
double jitter, grpc_millis min_timeout_millis,
grpc_millis max_timeout_millis);
/// Begin retry loop: returns a timespec for the NEXT retry
gpr_timespec gpr_backoff_begin(gpr_backoff *backoff, gpr_timespec now);
grpc_millis grpc_backoff_begin(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff);
/// Step a retry loop: returns a timespec for the NEXT retry
gpr_timespec gpr_backoff_step(gpr_backoff *backoff, gpr_timespec now);
/// Reset the backoff, so the next gpr_backoff_step will be a gpr_backoff_begin
grpc_millis grpc_backoff_step(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff);
/// Reset the backoff, so the next grpc_backoff_step will be a
/// grpc_backoff_begin instead
void gpr_backoff_reset(gpr_backoff *backoff);
void grpc_backoff_reset(grpc_backoff *backoff);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_LIB_SUPPORT_BACKOFF_H */
#endif /* GRPC_CORE_LIB_BACKOFF_BACKOFF_H */

@ -157,4 +157,4 @@ grpc_arg grpc_channel_arg_pointer_create(char *name, void *value,
}
#endif
#endif /* GRPC_CORE_LIB_CHANNEL_CHANNEL_ARGS_H */
#endif /* GRPC_CORE_LIB_CHANNEL_CHANNEL_ARGS_H */

@ -70,7 +70,7 @@ typedef struct {
grpc_call_context_element *context;
grpc_slice path;
gpr_timespec start_time;
gpr_timespec deadline;
grpc_millis deadline;
gpr_arena *arena;
grpc_call_combiner *call_combiner;
} grpc_call_element_args;

@ -38,4 +38,4 @@ grpc_stream *grpc_connected_channel_get_stream(grpc_call_element *elem);
}
#endif
#endif /* GRPC_CORE_LIB_CHANNEL_CONNECTED_CHANNEL_H */
#endif /* GRPC_CORE_LIB_CHANNEL_CONNECTED_CHANNEL_H */

@ -232,7 +232,7 @@ static void on_timeout(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
void grpc_handshake_manager_do_handshake(
grpc_exec_ctx* exec_ctx, grpc_handshake_manager* mgr,
grpc_endpoint* endpoint, const grpc_channel_args* channel_args,
gpr_timespec deadline, grpc_tcp_server_acceptor* acceptor,
grpc_millis deadline, grpc_tcp_server_acceptor* acceptor,
grpc_iomgr_cb_func on_handshake_done, void* user_data) {
gpr_mu_lock(&mgr->mu);
GPR_ASSERT(mgr->index == 0);
@ -255,9 +255,7 @@ void grpc_handshake_manager_do_handshake(
gpr_ref(&mgr->refs);
GRPC_CLOSURE_INIT(&mgr->on_timeout, on_timeout, mgr,
grpc_schedule_on_exec_ctx);
grpc_timer_init(exec_ctx, &mgr->deadline_timer,
gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
&mgr->on_timeout, gpr_now(GPR_CLOCK_MONOTONIC));
grpc_timer_init(exec_ctx, &mgr->deadline_timer, deadline, &mgr->on_timeout);
// Start first handshaker, which also owns a ref.
gpr_ref(&mgr->refs);
bool done = call_next_handshaker_locked(exec_ctx, mgr, GRPC_ERROR_NONE);

@ -149,7 +149,7 @@ void grpc_handshake_manager_shutdown(grpc_exec_ctx* exec_ctx,
void grpc_handshake_manager_do_handshake(
grpc_exec_ctx* exec_ctx, grpc_handshake_manager* mgr,
grpc_endpoint* endpoint, const grpc_channel_args* channel_args,
gpr_timespec deadline, grpc_tcp_server_acceptor* acceptor,
grpc_millis deadline, grpc_tcp_server_acceptor* acceptor,
grpc_iomgr_cb_func on_handshake_done, void* user_data);
/// Add \a mgr to the server side list of all pending handshake managers, the
@ -172,4 +172,4 @@ void grpc_handshake_manager_pending_list_shutdown_all(
}
#endif
#endif /* GRPC_CORE_LIB_CHANNEL_HANDSHAKER_H */
#endif /* GRPC_CORE_LIB_CHANNEL_HANDSHAKER_H */

@ -56,4 +56,4 @@ void grpc_handshaker_factory_destroy(
}
#endif
#endif /* GRPC_CORE_LIB_CHANNEL_HANDSHAKER_FACTORY_H */
#endif /* GRPC_CORE_LIB_CHANNEL_HANDSHAKER_FACTORY_H */

@ -53,4 +53,4 @@ void grpc_handshakers_add(grpc_exec_ctx* exec_ctx,
}
#endif
#endif /* GRPC_CORE_LIB_CHANNEL_HANDSHAKER_REGISTRY_H */
#endif /* GRPC_CORE_LIB_CHANNEL_HANDSHAKER_REGISTRY_H */

@ -57,4 +57,4 @@ grpc_stream_compression_algorithm grpc_stream_compression_algorithm_from_slice(
}
#endif
#endif /* GRPC_CORE_LIB_COMPRESSION_ALGORITHM_METADATA_H */
#endif /* GRPC_CORE_LIB_COMPRESSION_ALGORITHM_METADATA_H */

@ -44,4 +44,4 @@ int grpc_msg_decompress(grpc_exec_ctx* exec_ctx,
}
#endif
#endif /* GRPC_CORE_LIB_COMPRESSION_MESSAGE_COMPRESS_H */
#endif /* GRPC_CORE_LIB_COMPRESSION_MESSAGE_COMPRESS_H */

@ -77,6 +77,7 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
"http2_initiate_write_due_to_transport_flow_control_unstalled",
"http2_initiate_write_due_to_ping_response",
"http2_initiate_write_due_to_force_rst_stream",
"http2_spurious_writes_begun",
"hpack_recv_indexed",
"hpack_recv_lithdr_incidx",
"hpack_recv_lithdr_incidx_v",
@ -177,6 +178,7 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"'transport_flow_control_unstalled'",
"Number of HTTP2 writes initiated due to 'ping_response'",
"Number of HTTP2 writes initiated due to 'force_rst_stream'",
"Number of HTTP2 writes initiated with nothing to write",
"Number of HPACK indexed fields received",
"Number of HPACK literal headers received with incremental indexing",
"Number of HPACK literal headers received with incremental indexing and "

@ -83,6 +83,7 @@ typedef enum {
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM,
GRPC_STATS_COUNTER_HTTP2_SPURIOUS_WRITES_BEGUN,
GRPC_STATS_COUNTER_HPACK_RECV_INDEXED,
GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX,
GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX_V,
@ -330,6 +331,9 @@ typedef enum {
GRPC_STATS_INC_COUNTER( \
(exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM)
#define GRPC_STATS_INC_HTTP2_SPURIOUS_WRITES_BEGUN(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_SPURIOUS_WRITES_BEGUN)
#define GRPC_STATS_INC_HPACK_RECV_INDEXED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_INDEXED)
#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX(exec_ctx) \

@ -189,6 +189,8 @@
doc: Number of HTTP2 writes initiated due to 'ping_response'
- counter: http2_initiate_write_due_to_force_rst_stream
doc: Number of HTTP2 writes initiated due to 'force_rst_stream'
- counter: http2_spurious_writes_begun
doc: Number of HTTP2 writes initiated with nothing to write
- counter: hpack_recv_indexed
doc: Number of HPACK indexed fields received
- counter: hpack_recv_lithdr_incidx
@ -270,3 +272,4 @@
- counter: server_slowpath_requests_queued
doc: How many times was the server slow path taken (indicates too few
outstanding requests)

@ -52,6 +52,7 @@ http2_initiate_write_due_to_keepalive_ping_per_iteration:FLOAT,
http2_initiate_write_due_to_transport_flow_control_unstalled_per_iteration:FLOAT,
http2_initiate_write_due_to_ping_response_per_iteration:FLOAT,
http2_initiate_write_due_to_force_rst_stream_per_iteration:FLOAT,
http2_spurious_writes_begun_per_iteration:FLOAT,
hpack_recv_indexed_per_iteration:FLOAT,
hpack_recv_lithdr_incidx_per_iteration:FLOAT,
hpack_recv_lithdr_incidx_v_per_iteration:FLOAT,

@ -37,4 +37,4 @@ grpc_slice grpc_httpcli_format_connect_request(
}
#endif
#endif /* GRPC_CORE_LIB_HTTP_FORMAT_REQUEST_H */
#endif /* GRPC_CORE_LIB_HTTP_FORMAT_REQUEST_H */
