grpc_millis -> Timestamp/Duration (#28119)

* wip

* Automated change: Fix sanity tests

* fixes

* progress

* progress

* grpc compiles

* Automated change: Fix sanity tests

* fixing tests

* x

* progress

* better code

* Automated change: Fix sanity tests

* progress

* progress

* windows fix

* Make Duration metadata trivial

* better message

* fix

* Automated change: Fix sanity tests

* fix

* fix

* fix

* fix

* Automated change: Fix sanity tests

* Automated change: Fix sanity tests

* fix

* progress

* fixes

* fix

* fix

* spam

* un-disable errantly disabled tests

* gain insight

* Automated change: Fix sanity tests

* fixes

* fixes

* fix

* debug

* tweak

* fix

* fix timeout

* fix comment

* fixes

* x

* better test

* tests

* Automated change: Fix sanity tests

* missed file

* fix

* x

* fix

* fix

* fix

* fix

* Automated change: Fix sanity tests

* fix

* merge

* Automated change: Fix sanity tests

* fix

Co-authored-by: ctiller <ctiller@users.noreply.github.com>
Craig Tiller committed 3 years ago (via GitHub)
commit 5fc3ff8203, parent 0966536dc1
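In short: the untyped `grpc_millis` integer is replaced throughout core with `grpc_core::Timestamp` (a point in time) and `grpc_core::Duration` (a span of time), both defined in the new `src/core/lib/gprpp/time.h`. A minimal sketch of the migration idiom, using only APIs that appear in the diff below (the helper function itself is hypothetical):

    // Before: grpc_millis deadline = ExecCtx::Get()->Now() + timeout_ms;
    // After: Timestamp + Duration yields a Timestamp, and sentinel values are
    // spelled out as Duration::Zero() / Timestamp::InfFuture().
    #include "src/core/lib/gprpp/time.h"
    #include "src/core/lib/iomgr/exec_ctx.h"

    grpc_core::Timestamp ComputeDeadline(grpc_core::Duration timeout) {
      if (timeout == grpc_core::Duration::Zero()) {
        return grpc_core::Timestamp::InfFuture();  // zero means "no deadline"
      }
      return grpc_core::ExecCtx::Get()->Now() + timeout;
    }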
Files changed (changed-line counts in parentheses):
  1. BUILD (17)
  2. CMakeLists.txt (47)
  3. Makefile (2)
  4. build_autogenerated.yaml (32)
  5. config.m4 (1)
  6. config.w32 (1)
  7. doc/core/grpc-polling-engines.md (2)
  8. gRPC-C++.podspec (2)
  9. gRPC-Core.podspec (3)
  10. grpc.gemspec (2)
  11. grpc.gyp (2)
  12. package.xml (2)
  13. src/core/ext/filters/client_channel/backup_poller.cc (20)
  14. src/core/ext/filters/client_channel/channel_connectivity.cc (10)
  15. src/core/ext/filters/client_channel/client_channel.cc (16)
  16. src/core/ext/filters/client_channel/client_channel.h (2)
  17. src/core/ext/filters/client_channel/connector.h (2)
  18. src/core/ext/filters/client_channel/dynamic_filters.h (2)
  19. src/core/ext/filters/client_channel/health/health_check_client.cc (20)
  20. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (68)
  21. src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc (11)
  22. src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h (2)
  23. src/core/ext/filters/client_channel/lb_policy/priority/priority.cc (30)
  24. src/core/ext/filters/client_channel/lb_policy/rls/rls.cc (106)
  25. src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc (4)
  26. src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc (3)
  27. src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc (46)
  28. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc (29)
  29. src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc (47)
  30. src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc (6)
  31. src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc (21)
  32. src/core/ext/filters/client_channel/resolver_result_parsing.cc (2)
  33. src/core/ext/filters/client_channel/resolver_result_parsing.h (8)
  34. src/core/ext/filters/client_channel/retry_filter.cc (64)
  35. src/core/ext/filters/client_channel/retry_service_config.cc (20)
  36. src/core/ext/filters/client_channel/retry_service_config.h (20)
  37. src/core/ext/filters/client_channel/subchannel.cc (54)
  38. src/core/ext/filters/client_channel/subchannel.h (9)
  39. src/core/ext/filters/client_idle/client_idle_filter.cc (12)
  40. src/core/ext/filters/deadline/deadline_filter.cc (19)
  41. src/core/ext/filters/deadline/deadline_filter.h (5)
  42. src/core/ext/filters/fault_injection/fault_injection_filter.cc (9)
  43. src/core/ext/filters/fault_injection/service_config_parser.h (2)
  44. src/core/ext/filters/max_age/max_age_filter.cc (66)
  45. src/core/ext/transport/chttp2/server/chttp2_server.cc (12)
  46. src/core/ext/transport/chttp2/transport/chttp2_transport.cc (66)
  47. src/core/ext/transport/chttp2/transport/flow_control.cc (4)
  48. src/core/ext/transport/chttp2/transport/flow_control.h (2)
  49. src/core/ext/transport/chttp2/transport/frame_ping.cc (8)
  50. src/core/ext/transport/chttp2/transport/hpack_encoder.cc (3)
  51. src/core/ext/transport/chttp2/transport/hpack_encoder.h (2)
  52. src/core/ext/transport/chttp2/transport/internal.h (12)
  53. src/core/ext/transport/chttp2/transport/parsing.cc (4)
  54. src/core/ext/transport/chttp2/transport/writing.cc (23)
  55. src/core/ext/transport/inproc/inproc_transport.cc (11)
  56. src/core/ext/xds/file_watcher_certificate_provider_factory.cc (8)
  57. src/core/ext/xds/file_watcher_certificate_provider_factory.h (4)
  58. src/core/ext/xds/google_mesh_ca_certificate_provider_factory.cc (6)
  59. src/core/ext/xds/google_mesh_ca_certificate_provider_factory.h (12)
  60. src/core/ext/xds/xds_api.cc (25)
  61. src/core/ext/xds/xds_api.h (8)
  62. src/core/ext/xds/xds_client.cc (58)
  63. src/core/ext/xds/xds_client.h (4)
  64. src/core/ext/xds/xds_common_types.h (25)
  65. src/core/ext/xds/xds_listener.cc (3)
  66. src/core/ext/xds/xds_route_config.cc (22)
  67. src/core/lib/backoff/backoff.cc (17)
  68. src/core/lib/backoff/backoff.h (16)
  69. src/core/lib/channel/channel_stack.h (2)
  70. src/core/lib/channel/channel_trace.cc (9)
  71. src/core/lib/channel/handshaker.cc (2)
  72. src/core/lib/channel/handshaker.h (2)
  73. src/core/lib/channel/promise_based_filter.h (4)
  74. src/core/lib/gpr/time.cc (3)
  75. src/core/lib/gpr/useful.h (14)
  76. src/core/lib/gprpp/time.cc (186)
  77. src/core/lib/gprpp/time.h (292)
  78. src/core/lib/http/httpcli.cc (6)
  79. src/core/lib/http/httpcli.h (12)
  80. src/core/lib/iomgr/buffer_list.h (10)
  81. src/core/lib/iomgr/ev_apple.cc (9)
  82. src/core/lib/iomgr/ev_epoll1_linux.cc (15)
  83. src/core/lib/iomgr/ev_epollex_linux.cc (21)
  84. src/core/lib/iomgr/ev_poll_posix.cc (14)
  85. src/core/lib/iomgr/ev_posix.cc (6)
  86. src/core/lib/iomgr/ev_posix.h (2)
  87. src/core/lib/iomgr/event_engine/pollset.cc (5)
  88. src/core/lib/iomgr/event_engine/tcp.cc (7)
  89. src/core/lib/iomgr/event_engine/timer.cc (7)
  90. src/core/lib/iomgr/exec_ctx.cc (91)
  91. src/core/lib/iomgr/exec_ctx.h (30)
  92. src/core/lib/iomgr/iocp_windows.cc (18)
  93. src/core/lib/iomgr/iocp_windows.h (2)
  94. src/core/lib/iomgr/pollset.cc (2)
  95. src/core/lib/iomgr/pollset.h (8)
  96. src/core/lib/iomgr/pollset_windows.cc (4)
  97. src/core/lib/iomgr/tcp_client.cc (2)
  98. src/core/lib/iomgr/tcp_client.h (5)
  99. src/core/lib/iomgr/tcp_client_cfstream.cc (2)
  100. src/core/lib/iomgr/tcp_client_posix.cc (4)
Some files were not shown because too many files have changed in this diff.

BUILD (17)

@@ -1664,6 +1664,20 @@ grpc_cc_library(
     ],
 )

+grpc_cc_library(
+    name = "time",
+    srcs = [
+        "src/core/lib/gprpp/time.cc",
+    ],
+    hdrs = [
+        "src/core/lib/gprpp/time.h",
+    ],
+    deps = [
+        "gpr",
+        "gpr_codegen",
+    ],
+)
+
 grpc_cc_library(
     name = "exec_ctx",
     srcs = [

@@ -1683,6 +1697,7 @@ grpc_cc_library(
         "error",
         "gpr_base",
         "gpr_tls",
+        "time",
         "useful",
     ],
 )

@@ -2161,6 +2176,7 @@ grpc_cc_library(
        "slice_refcount",
        "sockaddr_utils",
        "table",
+       "time",
        "uri_parser",
        "useful",
    ],

@@ -2466,6 +2482,7 @@ grpc_cc_library(
        "server_address",
        "slice",
        "sockaddr_utils",
+       "time",
        "uri_parser",
        "useful",
        "xds_orca_upb",

CMakeLists.txt (47, generated)

@@ -998,6 +998,7 @@ if(gRPC_BUILD_TESTS)
   add_dependencies(buildtests_cxx streams_not_seen_test)
   add_dependencies(buildtests_cxx string_ref_test)
   add_dependencies(buildtests_cxx table_test)
+  add_dependencies(buildtests_cxx test_core_gprpp_time_test)
   add_dependencies(buildtests_cxx test_core_security_credentials_test)
   add_dependencies(buildtests_cxx test_core_slice_slice_test)
   add_dependencies(buildtests_cxx test_cpp_client_credentials_test)

@@ -2001,6 +2002,7 @@ add_library(grpc
   src/core/lib/event_engine/memory_allocator.cc
   src/core/lib/event_engine/resolved_address.cc
   src/core/lib/event_engine/sockaddr.cc
+  src/core/lib/gprpp/time.cc
   src/core/lib/http/format_request.cc
   src/core/lib/http/httpcli.cc
   src/core/lib/http/httpcli_security_connector.cc

@@ -2642,6 +2644,7 @@ add_library(grpc_unsecure
   src/core/lib/event_engine/memory_allocator.cc
   src/core/lib/event_engine/resolved_address.cc
   src/core/lib/event_engine/sockaddr.cc
+  src/core/lib/gprpp/time.cc
   src/core/lib/http/format_request.cc
   src/core/lib/http/httpcli.cc
   src/core/lib/http/parser.cc

@@ -6077,6 +6080,7 @@ if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_POSIX)
   add_executable(memory_quota_stress_test
     src/core/lib/debug/trace.cc
     src/core/lib/event_engine/memory_allocator.cc
+    src/core/lib/gprpp/time.cc
     src/core/lib/iomgr/combiner.cc
     src/core/lib/iomgr/error.cc
     src/core/lib/iomgr/exec_ctx.cc

@@ -7725,6 +7729,7 @@ if(gRPC_BUILD_TESTS)
   add_executable(arena_promise_test
     src/core/lib/debug/trace.cc
     src/core/lib/event_engine/memory_allocator.cc
+    src/core/lib/gprpp/time.cc
     src/core/lib/iomgr/combiner.cc
     src/core/lib/iomgr/error.cc
     src/core/lib/iomgr/exec_ctx.cc

@@ -8973,6 +8978,7 @@ if(gRPC_BUILD_TESTS)
   add_executable(chunked_vector_test
     src/core/lib/debug/trace.cc
     src/core/lib/event_engine/memory_allocator.cc
+    src/core/lib/gprpp/time.cc
     src/core/lib/iomgr/combiner.cc
     src/core/lib/iomgr/error.cc
     src/core/lib/iomgr/exec_ctx.cc

@@ -10356,6 +10362,7 @@ if(gRPC_BUILD_TESTS)
   add_executable(exec_ctx_wakeup_scheduler_test
     src/core/lib/debug/trace.cc
+    src/core/lib/gprpp/time.cc
     src/core/lib/iomgr/combiner.cc
     src/core/lib/iomgr/error.cc
     src/core/lib/iomgr/exec_ctx.cc

@@ -10665,6 +10672,7 @@ if(gRPC_BUILD_TESTS)
   add_executable(for_each_test
     src/core/lib/debug/trace.cc
     src/core/lib/event_engine/memory_allocator.cc
+    src/core/lib/gprpp/time.cc
     src/core/lib/iomgr/combiner.cc
     src/core/lib/iomgr/error.cc
     src/core/lib/iomgr/exec_ctx.cc

@@ -12845,6 +12853,7 @@ if(gRPC_BUILD_TESTS)
   add_executable(memory_quota_test
     src/core/lib/debug/trace.cc
     src/core/lib/event_engine/memory_allocator.cc
+    src/core/lib/gprpp/time.cc
     src/core/lib/iomgr/combiner.cc
     src/core/lib/iomgr/error.cc
     src/core/lib/iomgr/exec_ctx.cc

@@ -13431,6 +13440,7 @@ if(gRPC_BUILD_TESTS)
   add_executable(pipe_test
     src/core/lib/debug/trace.cc
     src/core/lib/event_engine/memory_allocator.cc
+    src/core/lib/gprpp/time.cc
     src/core/lib/iomgr/combiner.cc
     src/core/lib/iomgr/error.cc
     src/core/lib/iomgr/exec_ctx.cc

@@ -14263,6 +14273,7 @@ if(gRPC_BUILD_TESTS)
   add_executable(resource_quota_test
     src/core/lib/debug/trace.cc
     src/core/lib/event_engine/memory_allocator.cc
+    src/core/lib/gprpp/time.cc
     src/core/lib/iomgr/combiner.cc
     src/core/lib/iomgr/error.cc
     src/core/lib/iomgr/exec_ctx.cc

@@ -15639,6 +15650,42 @@ target_link_libraries(table_test
 )
+endif()
+if(gRPC_BUILD_TESTS)
+add_executable(test_core_gprpp_time_test
+  src/core/lib/gprpp/time.cc
+  test/core/gprpp/time_test.cc
+  third_party/googletest/googletest/src/gtest-all.cc
+  third_party/googletest/googlemock/src/gmock-all.cc
+)
+target_include_directories(test_core_gprpp_time_test
+  PRIVATE
+  ${CMAKE_CURRENT_SOURCE_DIR}
+  ${CMAKE_CURRENT_SOURCE_DIR}/include
+  ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
+  ${_gRPC_RE2_INCLUDE_DIR}
+  ${_gRPC_SSL_INCLUDE_DIR}
+  ${_gRPC_UPB_GENERATED_DIR}
+  ${_gRPC_UPB_GRPC_GENERATED_DIR}
+  ${_gRPC_UPB_INCLUDE_DIR}
+  ${_gRPC_XXHASH_INCLUDE_DIR}
+  ${_gRPC_ZLIB_INCLUDE_DIR}
+  third_party/googletest/googletest/include
+  third_party/googletest/googletest
+  third_party/googletest/googlemock/include
+  third_party/googletest/googlemock
+  ${_gRPC_PROTO_GENS_DIR}
+)
+target_link_libraries(test_core_gprpp_time_test
+  ${_gRPC_PROTOBUF_LIBRARIES}
+  ${_gRPC_ALLTARGETS_LIBRARIES}
+  gpr
+)
 endif()
 if(gRPC_BUILD_TESTS)
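The new `test_core_gprpp_time_test` target builds `test/core/gprpp/time_test.cc`, which is not shown in this diff. Purely as a sketch of what such a test can assert using only the factory functions that appear elsewhere in this change (these test cases are illustrative, not the actual file):

    #include "src/core/lib/gprpp/time.h"

    #include <gtest/gtest.h>

    namespace grpc_core {

    TEST(DurationTest, FactoriesAgree) {
      // All factories normalize to the same representation.
      EXPECT_EQ(Duration::Seconds(2), Duration::Milliseconds(2000));
      EXPECT_EQ(Duration::Minutes(15), Duration::Seconds(900));
      // Seconds and nanoseconds combine exactly here (1.5s == 1500ms).
      EXPECT_EQ(Duration::FromSecondsAndNanoseconds(1, 500000000).millis(),
                1500);
      EXPECT_LT(Duration::Zero(), Duration::Milliseconds(1));
    }

    }  // namespace grpc_core

    int main(int argc, char** argv) {
      ::testing::InitGoogleTest(&argc, argv);
      return RUN_ALL_TESTS();
    }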

Makefile (2, generated)

@@ -1443,6 +1443,7 @@ LIBGRPC_SRC = \
     src/core/lib/event_engine/memory_allocator.cc \
     src/core/lib/event_engine/resolved_address.cc \
     src/core/lib/event_engine/sockaddr.cc \
+    src/core/lib/gprpp/time.cc \
     src/core/lib/http/format_request.cc \
     src/core/lib/http/httpcli.cc \
     src/core/lib/http/httpcli_security_connector.cc \

@@ -1933,6 +1934,7 @@ LIBGRPC_UNSECURE_SRC = \
     src/core/lib/event_engine/memory_allocator.cc \
     src/core/lib/event_engine/resolved_address.cc \
     src/core/lib/event_engine/sockaddr.cc \
+    src/core/lib/gprpp/time.cc \
     src/core/lib/http/format_request.cc \
     src/core/lib/http/httpcli.cc \
     src/core/lib/http/parser.cc \

build_autogenerated.yaml (32, generated)

@@ -850,6 +850,7 @@ libs:
   - src/core/lib/gprpp/ref_counted.h
   - src/core/lib/gprpp/ref_counted_ptr.h
   - src/core/lib/gprpp/table.h
+  - src/core/lib/gprpp/time.h
   - src/core/lib/http/format_request.h
   - src/core/lib/http/httpcli.h
   - src/core/lib/http/httpcli_ssl_credentials.h

@@ -1500,6 +1501,7 @@ libs:
   - src/core/lib/event_engine/memory_allocator.cc
   - src/core/lib/event_engine/resolved_address.cc
   - src/core/lib/event_engine/sockaddr.cc
+  - src/core/lib/gprpp/time.cc
   - src/core/lib/http/format_request.cc
   - src/core/lib/http/httpcli.cc
   - src/core/lib/http/httpcli_security_connector.cc

@@ -2015,6 +2017,7 @@ libs:
   - src/core/lib/gprpp/ref_counted.h
   - src/core/lib/gprpp/ref_counted_ptr.h
   - src/core/lib/gprpp/table.h
+  - src/core/lib/gprpp/time.h
   - src/core/lib/http/format_request.h
   - src/core/lib/http/httpcli.h
   - src/core/lib/http/parser.h

@@ -2319,6 +2322,7 @@ libs:
   - src/core/lib/event_engine/memory_allocator.cc
   - src/core/lib/event_engine/resolved_address.cc
   - src/core/lib/event_engine/sockaddr.cc
+  - src/core/lib/gprpp/time.cc
   - src/core/lib/http/format_request.cc
   - src/core/lib/http/httpcli.cc
   - src/core/lib/http/parser.cc

@@ -3890,6 +3894,7 @@ targets:
   - src/core/lib/gprpp/orphanable.h
   - src/core/lib/gprpp/ref_counted.h
   - src/core/lib/gprpp/ref_counted_ptr.h
+  - src/core/lib/gprpp/time.h
   - src/core/lib/iomgr/closure.h
   - src/core/lib/iomgr/combiner.h
   - src/core/lib/iomgr/error.h

@@ -3920,6 +3925,7 @@ targets:
   src:
   - src/core/lib/debug/trace.cc
   - src/core/lib/event_engine/memory_allocator.cc
+  - src/core/lib/gprpp/time.cc
   - src/core/lib/iomgr/combiner.cc
   - src/core/lib/iomgr/error.cc
   - src/core/lib/iomgr/exec_ctx.cc

@@ -4627,6 +4633,7 @@ targets:
   - src/core/lib/gprpp/orphanable.h
   - src/core/lib/gprpp/ref_counted.h
   - src/core/lib/gprpp/ref_counted_ptr.h
+  - src/core/lib/gprpp/time.h
   - src/core/lib/iomgr/closure.h
   - src/core/lib/iomgr/combiner.h
   - src/core/lib/iomgr/error.h

@@ -4661,6 +4668,7 @@ targets:
   src:
   - src/core/lib/debug/trace.cc
   - src/core/lib/event_engine/memory_allocator.cc
+  - src/core/lib/gprpp/time.cc
   - src/core/lib/iomgr/combiner.cc
   - src/core/lib/iomgr/error.cc
   - src/core/lib/iomgr/exec_ctx.cc

@@ -5129,6 +5137,7 @@ targets:
   - src/core/lib/gprpp/orphanable.h
   - src/core/lib/gprpp/ref_counted.h
   - src/core/lib/gprpp/ref_counted_ptr.h
+  - src/core/lib/gprpp/time.h
   - src/core/lib/iomgr/closure.h
   - src/core/lib/iomgr/combiner.h
   - src/core/lib/iomgr/error.h

@@ -5162,6 +5171,7 @@ targets:
   src:
   - src/core/lib/debug/trace.cc
   - src/core/lib/event_engine/memory_allocator.cc
+  - src/core/lib/gprpp/time.cc
   - src/core/lib/iomgr/combiner.cc
   - src/core/lib/iomgr/error.cc
   - src/core/lib/iomgr/exec_ctx.cc

@@ -5682,6 +5692,7 @@ targets:
   - src/core/lib/gprpp/atomic_utils.h
   - src/core/lib/gprpp/ref_counted.h
   - src/core/lib/gprpp/ref_counted_ptr.h
+  - src/core/lib/gprpp/time.h
   - src/core/lib/iomgr/closure.h
   - src/core/lib/iomgr/combiner.h
   - src/core/lib/iomgr/error.h

@@ -5703,6 +5714,7 @@ targets:
   - src/core/lib/slice/slice_string_helpers.h
   src:
   - src/core/lib/debug/trace.cc
+  - src/core/lib/gprpp/time.cc
   - src/core/lib/iomgr/combiner.cc
   - src/core/lib/iomgr/error.cc
   - src/core/lib/iomgr/exec_ctx.cc

@@ -5873,6 +5885,7 @@ targets:
   - src/core/lib/gprpp/orphanable.h
   - src/core/lib/gprpp/ref_counted.h
   - src/core/lib/gprpp/ref_counted_ptr.h
+  - src/core/lib/gprpp/time.h
   - src/core/lib/iomgr/closure.h
   - src/core/lib/iomgr/combiner.h
   - src/core/lib/iomgr/error.h

@@ -5914,6 +5927,7 @@ targets:
   src:
   - src/core/lib/debug/trace.cc
   - src/core/lib/event_engine/memory_allocator.cc
+  - src/core/lib/gprpp/time.cc
   - src/core/lib/iomgr/combiner.cc
   - src/core/lib/iomgr/error.cc
   - src/core/lib/iomgr/exec_ctx.cc

@@ -6711,6 +6725,7 @@ targets:
   - src/core/lib/gprpp/orphanable.h
   - src/core/lib/gprpp/ref_counted.h
   - src/core/lib/gprpp/ref_counted_ptr.h
+  - src/core/lib/gprpp/time.h
   - src/core/lib/iomgr/closure.h
   - src/core/lib/iomgr/combiner.h
   - src/core/lib/iomgr/error.h

@@ -6742,6 +6757,7 @@ targets:
   src:
   - src/core/lib/debug/trace.cc
   - src/core/lib/event_engine/memory_allocator.cc
+  - src/core/lib/gprpp/time.cc
   - src/core/lib/iomgr/combiner.cc
   - src/core/lib/iomgr/error.cc
   - src/core/lib/iomgr/exec_ctx.cc

@@ -7008,6 +7024,7 @@ targets:
   - src/core/lib/gprpp/orphanable.h
   - src/core/lib/gprpp/ref_counted.h
   - src/core/lib/gprpp/ref_counted_ptr.h
+  - src/core/lib/gprpp/time.h
   - src/core/lib/iomgr/closure.h
   - src/core/lib/iomgr/combiner.h
   - src/core/lib/iomgr/error.h

@@ -7047,6 +7064,7 @@ targets:
   src:
   - src/core/lib/debug/trace.cc
   - src/core/lib/event_engine/memory_allocator.cc
+  - src/core/lib/gprpp/time.cc
   - src/core/lib/iomgr/combiner.cc
   - src/core/lib/iomgr/error.cc
   - src/core/lib/iomgr/exec_ctx.cc

@@ -7366,6 +7384,7 @@ targets:
   - src/core/lib/gprpp/orphanable.h
   - src/core/lib/gprpp/ref_counted.h
   - src/core/lib/gprpp/ref_counted_ptr.h
+  - src/core/lib/gprpp/time.h
   - src/core/lib/iomgr/closure.h
   - src/core/lib/iomgr/combiner.h
   - src/core/lib/iomgr/error.h

@@ -7398,6 +7417,7 @@ targets:
   src:
   - src/core/lib/debug/trace.cc
   - src/core/lib/event_engine/memory_allocator.cc
+  - src/core/lib/gprpp/time.cc
   - src/core/lib/iomgr/combiner.cc
   - src/core/lib/iomgr/error.cc
   - src/core/lib/iomgr/exec_ctx.cc

@@ -7829,6 +7849,18 @@ targets:
   - absl/types:optional
   - absl/utility:utility
   uses_polling: false
+- name: test_core_gprpp_time_test
+  gtest: true
+  build: test
+  language: c++
+  headers:
+  - src/core/lib/gprpp/time.h
+  src:
+  - src/core/lib/gprpp/time.cc
+  - test/core/gprpp/time_test.cc
+  deps:
+  - gpr
+  uses_polling: false
 - name: test_core_security_credentials_test
   gtest: true
   build: test

config.m4 (1, generated)

@@ -502,6 +502,7 @@ if test "$PHP_GRPC" != "no"; then
     src/core/lib/gprpp/status_helper.cc \
     src/core/lib/gprpp/thd_posix.cc \
     src/core/lib/gprpp/thd_windows.cc \
+    src/core/lib/gprpp/time.cc \
     src/core/lib/gprpp/time_util.cc \
     src/core/lib/http/format_request.cc \
     src/core/lib/http/httpcli.cc \

config.w32 (1, generated)

@@ -468,6 +468,7 @@ if (PHP_GRPC != "no") {
     "src\\core\\lib\\gprpp\\status_helper.cc " +
     "src\\core\\lib\\gprpp\\thd_posix.cc " +
     "src\\core\\lib\\gprpp\\thd_windows.cc " +
+    "src\\core\\lib\\gprpp\\time.cc " +
     "src\\core\\lib\\gprpp\\time_util.cc " +
     "src\\core\\lib\\http\\format_request.cc " +
     "src\\core\\lib\\http\\httpcli.cc " +

doc/core/grpc-polling-engines.md (2)

@@ -64,7 +64,7 @@ The following are the **Opaque** structures exposed by Polling Engine interface
   > **NOTE**: There is no `grpc_pollset_remove_fd`. This is because calling `grpc_fd_orphan()` will effectively remove the fd from all the pollsets it’s a part of
 - **grpc_pollset_work**
-  - Signature: `grpc_pollset_work(grpc_pollset* ps, grpc_pollset_worker** worker, grpc_millis deadline)`
+  - Signature: `grpc_pollset_work(grpc_pollset* ps, grpc_pollset_worker** worker, grpc_core::Timestamp deadline)`
   > **NOTE**: `grpc_pollset_work()` requires the pollset mutex to be locked before calling it. Shortly after calling `grpc_pollset_work()`, the function populates the `*worker` pointer (among other things) and releases the mutex. Once `grpc_pollset_work()` returns, the `*worker` pointer is **invalid** and should not be used anymore. See the code in `completion_queue.cc` to see how this is used.
   - Poll the fds in the pollset for events AND return when ANY of the following is true:
     - Deadline expired
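The signature change above is the whole edit to this doc: the deadline parameter is now a `grpc_core::Timestamp` rather than a raw `grpc_millis`. A hypothetical caller following the locking rules in the note (the pollset `ps` and its mutex `mu` are assumed to exist; this is not gRPC's real call site):

    grpc_core::ExecCtx exec_ctx;
    gpr_mu_lock(mu);  // pollset mutex must be held when calling pollset_work
    grpc_pollset_worker* worker = nullptr;
    grpc_core::Timestamp deadline =
        grpc_core::ExecCtx::Get()->Now() + grpc_core::Duration::Seconds(1);
    GRPC_LOG_IF_ERROR("pollset_work",
                      grpc_pollset_work(ps, &worker, deadline));
    // Per the note above, `worker` is invalid once pollset_work returns.
    gpr_mu_unlock(mu);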

gRPC-C++.podspec (2, generated)

@@ -694,6 +694,7 @@ Pod::Spec.new do |s|
     'src/core/lib/gprpp/sync.h',
     'src/core/lib/gprpp/table.h',
     'src/core/lib/gprpp/thd.h',
+    'src/core/lib/gprpp/time.h',
     'src/core/lib/gprpp/time_util.h',
     'src/core/lib/http/format_request.h',
     'src/core/lib/http/httpcli.h',

@@ -1491,6 +1492,7 @@ Pod::Spec.new do |s|
     'src/core/lib/gprpp/sync.h',
     'src/core/lib/gprpp/table.h',
     'src/core/lib/gprpp/thd.h',
+    'src/core/lib/gprpp/time.h',
     'src/core/lib/gprpp/time_util.h',
     'src/core/lib/http/format_request.h',
     'src/core/lib/http/httpcli.h',

gRPC-Core.podspec (3, generated)

@@ -1095,6 +1095,8 @@ Pod::Spec.new do |s|
     'src/core/lib/gprpp/thd.h',
     'src/core/lib/gprpp/thd_posix.cc',
     'src/core/lib/gprpp/thd_windows.cc',
+    'src/core/lib/gprpp/time.cc',
+    'src/core/lib/gprpp/time.h',
     'src/core/lib/gprpp/time_util.cc',
     'src/core/lib/gprpp/time_util.h',
     'src/core/lib/http/format_request.cc',

@@ -2085,6 +2087,7 @@ Pod::Spec.new do |s|
     'src/core/lib/gprpp/sync.h',
     'src/core/lib/gprpp/table.h',
     'src/core/lib/gprpp/thd.h',
+    'src/core/lib/gprpp/time.h',
     'src/core/lib/gprpp/time_util.h',
     'src/core/lib/http/format_request.h',
     'src/core/lib/http/httpcli.h',

grpc.gemspec (2, generated)

@@ -1014,6 +1014,8 @@ Gem::Specification.new do |s|
   s.files += %w( src/core/lib/gprpp/thd.h )
   s.files += %w( src/core/lib/gprpp/thd_posix.cc )
   s.files += %w( src/core/lib/gprpp/thd_windows.cc )
+  s.files += %w( src/core/lib/gprpp/time.cc )
+  s.files += %w( src/core/lib/gprpp/time.h )
   s.files += %w( src/core/lib/gprpp/time_util.cc )
   s.files += %w( src/core/lib/gprpp/time_util.h )
   s.files += %w( src/core/lib/http/format_request.cc )

grpc.gyp (2, generated)

@@ -905,6 +905,7 @@
     'src/core/lib/event_engine/memory_allocator.cc',
     'src/core/lib/event_engine/resolved_address.cc',
     'src/core/lib/event_engine/sockaddr.cc',
+    'src/core/lib/gprpp/time.cc',
     'src/core/lib/http/format_request.cc',
     'src/core/lib/http/httpcli.cc',
     'src/core/lib/http/httpcli_security_connector.cc',

@@ -1366,6 +1367,7 @@
     'src/core/lib/event_engine/memory_allocator.cc',
     'src/core/lib/event_engine/resolved_address.cc',
     'src/core/lib/event_engine/sockaddr.cc',
+    'src/core/lib/gprpp/time.cc',
     'src/core/lib/http/format_request.cc',
     'src/core/lib/http/httpcli.cc',
     'src/core/lib/http/parser.cc',

package.xml (2, generated)

@@ -994,6 +994,8 @@
     <file baseinstalldir="/" name="src/core/lib/gprpp/thd.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/gprpp/thd_posix.cc" role="src" />
     <file baseinstalldir="/" name="src/core/lib/gprpp/thd_windows.cc" role="src" />
+    <file baseinstalldir="/" name="src/core/lib/gprpp/time.cc" role="src" />
+    <file baseinstalldir="/" name="src/core/lib/gprpp/time.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/gprpp/time_util.cc" role="src" />
     <file baseinstalldir="/" name="src/core/lib/gprpp/time_util.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/http/format_request.cc" role="src" />

src/core/ext/filters/client_channel/backup_poller.cc (20)

@@ -29,6 +29,7 @@
 #include "src/core/lib/gpr/string.h"
 #include "src/core/lib/gprpp/global_config.h"
 #include "src/core/lib/gprpp/memory.h"
+#include "src/core/lib/gprpp/time.h"
 #include "src/core/lib/iomgr/error.h"
 #include "src/core/lib/iomgr/iomgr.h"
 #include "src/core/lib/iomgr/pollset.h"

@@ -57,7 +58,8 @@ static backup_poller* g_poller = nullptr;  // guarded by g_poller_mu
 // g_poll_interval_ms is set only once at the first time
 // grpc_client_channel_start_backup_polling() is called, after that it is
 // treated as const.
-static int g_poll_interval_ms = DEFAULT_POLL_INTERVAL_MS;
+static grpc_core::Duration g_poll_interval =
+    grpc_core::Duration::Milliseconds(DEFAULT_POLL_INTERVAL_MS);

 GPR_GLOBAL_CONFIG_DEFINE_INT32(
     grpc_client_channel_backup_poll_interval_ms, DEFAULT_POLL_INTERVAL_MS,

@@ -75,10 +77,10 @@ void grpc_client_channel_global_init_backup_polling() {
   if (poll_interval_ms < 0) {
     gpr_log(GPR_ERROR,
             "Invalid GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS: %d, "
-            "default value %d will be used.",
-            poll_interval_ms, g_poll_interval_ms);
+            "default value %" PRId64 " will be used.",
+            poll_interval_ms, g_poll_interval.millis());
   } else {
-    g_poll_interval_ms = poll_interval_ms;
+    g_poll_interval = grpc_core::Duration::Milliseconds(poll_interval_ms);
   }
 }

@@ -133,7 +135,7 @@ static void run_poller(void* arg, grpc_error_handle error) {
   gpr_mu_unlock(p->pollset_mu);
   GRPC_LOG_IF_ERROR("Run client channel backup poller", err);
   grpc_timer_init(&p->polling_timer,
-                  grpc_core::ExecCtx::Get()->Now() + g_poll_interval_ms,
+                  grpc_core::ExecCtx::Get()->Now() + g_poll_interval,
                   &p->run_poller_closure);
 }

@@ -150,14 +152,15 @@ static void g_poller_init_locked() {
     GRPC_CLOSURE_INIT(&g_poller->run_poller_closure, run_poller, g_poller,
                       grpc_schedule_on_exec_ctx);
     grpc_timer_init(&g_poller->polling_timer,
-                    grpc_core::ExecCtx::Get()->Now() + g_poll_interval_ms,
+                    grpc_core::ExecCtx::Get()->Now() + g_poll_interval,
                     &g_poller->run_poller_closure);
   }
 }

 void grpc_client_channel_start_backup_polling(
     grpc_pollset_set* interested_parties) {
-  if (g_poll_interval_ms == 0 || grpc_iomgr_run_in_background()) {
+  if (g_poll_interval == grpc_core::Duration::Zero() ||
+      grpc_iomgr_run_in_background()) {
     return;
   }
   gpr_mu_lock(&g_poller_mu);

@@ -175,7 +178,8 @@ void grpc_client_channel_start_backup_polling(
 void grpc_client_channel_stop_backup_polling(
     grpc_pollset_set* interested_parties) {
-  if (g_poll_interval_ms == 0 || grpc_iomgr_run_in_background()) {
+  if (g_poll_interval == grpc_core::Duration::Zero() ||
+      grpc_iomgr_run_in_background()) {
     return;
   }
   grpc_pollset_set_del_pollset(interested_parties, g_poller->pollset);
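A recurring detail in this file and below: `Duration` values are compared against `Duration::Zero()` instead of `0`, and logged via `millis()`, which the switch from `%d` to `PRId64` format specifiers implies returns an `int64_t`. A small hypothetical snippet of that idiom:

    #include <inttypes.h>

    grpc_core::Duration interval = grpc_core::Duration::Milliseconds(5000);
    if (interval != grpc_core::Duration::Zero()) {
      gpr_log(GPR_INFO, "backup poll interval: %" PRId64 " ms",
              interval.millis());
    }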

src/core/ext/filters/client_channel/channel_connectivity.cc (10)

@@ -95,7 +95,7 @@ class StateWatcher : public DualRefCounted<StateWatcher> {
     // watch, but we are hiding that fact from the application.
     if (IsLameChannel(channel)) {
       // Ref from object creation is held by timer callback.
-      StartTimer(grpc_timespec_to_millis_round_up(deadline));
+      StartTimer(Timestamp::FromTimespecRoundUp(deadline));
       return;
     }
     gpr_log(GPR_ERROR,

@@ -108,7 +108,7 @@ class StateWatcher : public DualRefCounted<StateWatcher> {
     // the other by the watcher callback.
     Ref().release();
     auto* watcher_timer_init_state = new WatcherTimerInitState(
-        this, grpc_timespec_to_millis_round_up(deadline));
+        this, Timestamp::FromTimespecRoundUp(deadline));
     client_channel->AddExternalConnectivityWatcher(
         grpc_polling_entity_create_from_pollset(grpc_cq_pollset(cq)), &state_,
         &on_complete_, watcher_timer_init_state->closure());

@@ -123,7 +123,7 @@ class StateWatcher : public DualRefCounted<StateWatcher> {
   // ClientChannel actually starts the watch.
   class WatcherTimerInitState {
    public:
-    WatcherTimerInitState(StateWatcher* state_watcher, grpc_millis deadline)
+    WatcherTimerInitState(StateWatcher* state_watcher, Timestamp deadline)
         : state_watcher_(state_watcher), deadline_(deadline) {
       GRPC_CLOSURE_INIT(&closure_, WatcherTimerInit, this, nullptr);
     }

@@ -138,11 +138,11 @@ class StateWatcher : public DualRefCounted<StateWatcher> {
     }

     StateWatcher* state_watcher_;
-    grpc_millis deadline_;
+    Timestamp deadline_;
     grpc_closure closure_;
   };

-  void StartTimer(grpc_millis deadline) {
+  void StartTimer(Timestamp deadline) {
     grpc_timer_init(&timer_, deadline, &on_timeout_);
   }
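`Timestamp::FromTimespecRoundUp()` replaces `grpc_timespec_to_millis_round_up()` as the bridge from the C surface API's `gpr_timespec` deadlines into core's clock. A hedged sketch of that conversion at an API boundary (the five-second deadline is illustrative):

    // gpr_timespec is what applications pass in; core converts it once at the
    // boundary and works in Timestamp from then on.
    gpr_timespec api_deadline = gpr_time_add(
        gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(5, GPR_TIMESPAN));
    grpc_core::Timestamp deadline =
        grpc_core::Timestamp::FromTimespecRoundUp(api_deadline);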

src/core/ext/filters/client_channel/client_channel.cc (16)

@@ -190,7 +190,7 @@ class ClientChannel::CallData {
   grpc_slice path_;  // Request path.
   gpr_cycle_counter call_start_time_;
-  grpc_millis deadline_;
+  Timestamp deadline_;
   Arena* arena_;
   grpc_call_stack* owning_call_;
   CallCombiner* call_combiner_;

@@ -373,7 +373,7 @@ class DynamicTerminationFilter::CallData {
   ~CallData() { grpc_slice_unref_internal(path_); }

   grpc_slice path_;  // Request path.
-  grpc_millis deadline_;
+  Timestamp deadline_;
   Arena* arena_;
   grpc_call_stack* owning_call_;
   CallCombiner* call_combiner_;

@@ -1862,7 +1862,7 @@ ClientChannel::CallData::CallData(grpc_call_element* elem,
     : deadline_state_(elem, args,
                       GPR_LIKELY(chand.deadline_checking_enabled_)
                           ? args.deadline
-                          : GRPC_MILLIS_INF_FUTURE),
+                          : Timestamp::InfFuture()),
       path_(grpc_slice_ref_internal(args.path)),
       call_start_time_(args.start_time),
       deadline_(args.deadline),

@@ -2227,9 +2227,10 @@ grpc_error_handle ClientChannel::CallData::ApplyServiceConfigToCallLocked(
   if (method_params != nullptr) {
     // If the deadline from the service config is shorter than the one
     // from the client API, reset the deadline timer.
-    if (chand->deadline_checking_enabled_ && method_params->timeout() != 0) {
-      const grpc_millis per_method_deadline =
-          grpc_cycle_counter_to_millis_round_up(call_start_time_) +
+    if (chand->deadline_checking_enabled_ &&
+        method_params->timeout() != Duration::Zero()) {
+      const Timestamp per_method_deadline =
+          Timestamp::FromCycleCounterRoundUp(call_start_time_) +
           method_params->timeout();
       if (per_method_deadline < deadline_) {
         deadline_ = per_method_deadline;

@@ -2471,7 +2472,8 @@ class ClientChannel::LoadBalancedCall::Metadata
                 std::string(value_slice.as_string_view()));
   }

-  void Encode(GrpcTimeoutMetadata, grpc_millis) {}
+  void Encode(GrpcTimeoutMetadata,
+              const typename GrpcTimeoutMetadata::ValueType&) {}
   void Encode(HttpPathMetadata, const Slice&) {}
   void Encode(HttpMethodMetadata,
               const typename HttpMethodMetadata::ValueType&) {}

src/core/ext/filters/client_channel/client_channel.h (2)

@@ -449,7 +449,7 @@ class ClientChannel::LoadBalancedCall
   // that uses any one of them, we should store them in the call
   // context.  This will save per-call memory overhead.
   Slice path_;  // Request path.
-  grpc_millis deadline_;
+  Timestamp deadline_;
   Arena* arena_;
   grpc_call_stack* owning_call_;
   CallCombiner* call_combiner_;

src/core/ext/filters/client_channel/connector.h (2)

@@ -38,7 +38,7 @@ class SubchannelConnector : public InternallyRefCounted<SubchannelConnector> {
     // Set of pollsets interested in this connection.
     grpc_pollset_set* interested_parties;
     // Deadline for connection.
-    grpc_millis deadline;
+    Timestamp deadline;
     // Channel args to be passed to handshakers and transport.
     const grpc_channel_args* channel_args;
   };

src/core/ext/filters/client_channel/dynamic_filters.h (2)

@@ -40,7 +40,7 @@ class DynamicFilters : public RefCounted<DynamicFilters> {
     grpc_polling_entity* pollent;
     grpc_slice path;
     gpr_cycle_counter start_time;
-    grpc_millis deadline;
+    Timestamp deadline;
     Arena* arena;
     grpc_call_context_element* context;
     CallCombiner* call_combiner;

src/core/ext/filters/client_channel/health/health_check_client.cc (20)

@@ -68,12 +68,12 @@ HealthCheckClient::HealthCheckClient(
       watcher_(std::move(watcher)),
       retry_backoff_(
           BackOff::Options()
-              .set_initial_backoff(
-                  HEALTH_CHECK_INITIAL_CONNECT_BACKOFF_SECONDS * 1000)
+              .set_initial_backoff(Duration::Seconds(
+                  HEALTH_CHECK_INITIAL_CONNECT_BACKOFF_SECONDS))
               .set_multiplier(HEALTH_CHECK_RECONNECT_BACKOFF_MULTIPLIER)
               .set_jitter(HEALTH_CHECK_RECONNECT_JITTER)
-              .set_max_backoff(HEALTH_CHECK_RECONNECT_MAX_BACKOFF_SECONDS *
-                               1000)) {
+              .set_max_backoff(Duration::Seconds(
+                  HEALTH_CHECK_RECONNECT_MAX_BACKOFF_SECONDS))) {
   if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
     gpr_log(GPR_INFO, "created HealthCheckClient %p", this);
   }

@@ -144,14 +144,14 @@ void HealthCheckClient::StartCallLocked() {
 void HealthCheckClient::StartRetryTimerLocked() {
   SetHealthStatusLocked(GRPC_CHANNEL_TRANSIENT_FAILURE,
                         "health check call failed; will retry after backoff");
-  grpc_millis next_try = retry_backoff_.NextAttemptTime();
+  Timestamp next_try = retry_backoff_.NextAttemptTime();
   if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
     gpr_log(GPR_INFO, "HealthCheckClient %p: health check call lost...", this);
-    grpc_millis timeout = next_try - ExecCtx::Get()->Now();
-    if (timeout > 0) {
+    Duration timeout = next_try - ExecCtx::Get()->Now();
+    if (timeout > Duration::Zero()) {
       gpr_log(GPR_INFO,
               "HealthCheckClient %p: ... will retry in %" PRId64 "ms.", this,
-              timeout);
+              timeout.millis());
     } else {
       gpr_log(GPR_INFO, "HealthCheckClient %p: ... retrying immediately.",
               this);

@@ -295,7 +295,7 @@ void HealthCheckClient::CallState::StartCall() {
       &pollent_,
       Slice::FromStaticString("/grpc.health.v1.Health/Watch"),
       gpr_get_cycle_counter(),  // start_time
-      GRPC_MILLIS_INF_FUTURE,   // deadline
+      Timestamp::InfFuture(),   // deadline
       arena_.get(),
       context_,
       &call_combiner_,

@@ -560,7 +560,7 @@ void HealthCheckClient::CallState::RecvTrailingMetadataReady(
       self->recv_trailing_metadata_.get(GrpcStatusMetadata())
           .value_or(GRPC_STATUS_UNKNOWN);
   if (error != GRPC_ERROR_NONE) {
-    grpc_error_get_status(error, GRPC_MILLIS_INF_FUTURE, &status,
+    grpc_error_get_status(error, Timestamp::InfFuture(), &status,
                           nullptr /* slice */, nullptr /* http_error */,
                           nullptr /* error_string */);
   }
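As the retry path above shows, `BackOff::Options` now takes `Duration` arguments directly and `NextAttemptTime()` returns a `Timestamp`. A sketch of the whole idiom in one place (the constants are illustrative, not gRPC's defaults):

    #include "src/core/lib/backoff/backoff.h"
    #include "src/core/lib/gprpp/time.h"
    #include "src/core/lib/iomgr/exec_ctx.h"

    grpc_core::BackOff backoff(
        grpc_core::BackOff::Options()
            .set_initial_backoff(grpc_core::Duration::Seconds(1))
            .set_multiplier(1.6)
            .set_jitter(0.2)
            .set_max_backoff(grpc_core::Duration::Seconds(120)));

    // Timestamp - Timestamp yields a Duration, so "time left to wait" falls
    // out of the type system instead of raw int64 subtraction.
    grpc_core::Timestamp next_try = backoff.NextAttemptTime();
    grpc_core::Duration wait = next_try - grpc_core::ExecCtx::Get()->Now();
    if (wait > grpc_core::Duration::Zero()) {
      // e.g. schedule a timer to fire at next_try.
    }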

src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (68)

@@ -214,7 +214,7 @@ class GrpcLb : public LoadBalancingPolicy {
     // The stats for client-side load reporting associated with this LB call.
     // Created after the first serverlist is received.
     RefCountedPtr<GrpcLbClientStats> client_stats_;
-    grpc_millis client_stats_report_interval_ = 0;
+    Duration client_stats_report_interval_;
     grpc_timer client_load_report_timer_;
     bool client_load_report_timer_callback_pending_ = false;
     bool last_client_load_report_counters_were_zero_ = false;

@@ -452,8 +452,8 @@ class GrpcLb : public LoadBalancingPolicy {
   // is shutting down, or the LB call has ended). A non-NULL lb_calld_ always
   // contains a non-NULL lb_call_.
   OrphanablePtr<BalancerCallState> lb_calld_;
-  // Timeout in milliseconds for the LB call. 0 means no deadline.
-  const int lb_call_timeout_ms_ = 0;
+  // Timeout for the LB call. 0 means no deadline.
+  const Duration lb_call_timeout_;
   // Balancer call retry state.
   BackOff lb_call_backoff_;
   bool retry_timer_callback_pending_ = false;

@@ -474,7 +474,7 @@ class GrpcLb : public LoadBalancingPolicy {
   // State for fallback-at-startup checks.
   // Timeout after startup after which we will go into fallback mode if
   // we have not received a serverlist from the balancer.
-  const int fallback_at_startup_timeout_ = 0;
+  const Duration fallback_at_startup_timeout_;
   bool fallback_at_startup_checks_pending_ = false;
   grpc_timer lb_fallback_timer_;
   grpc_closure lb_on_fallback_;

@@ -485,8 +485,8 @@ class GrpcLb : public LoadBalancingPolicy {
   bool child_policy_ready_ = false;

   // Deleted subchannel caching.
-  const grpc_millis subchannel_cache_interval_ms_;
-  std::map<grpc_millis /*deletion time*/,
+  const Duration subchannel_cache_interval_;
+  std::map<Timestamp /*deletion time*/,
            std::vector<RefCountedPtr<SubchannelInterface>>>
       cached_subchannels_;
   grpc_timer subchannel_cache_timer_;

@@ -784,10 +784,10 @@ GrpcLb::BalancerCallState::BalancerCallState(
                     this, grpc_schedule_on_exec_ctx);
   GRPC_CLOSURE_INIT(&client_load_report_closure_, MaybeSendClientLoadReport,
                     this, grpc_schedule_on_exec_ctx);
-  const grpc_millis deadline =
-      grpclb_policy()->lb_call_timeout_ms_ == 0
-          ? GRPC_MILLIS_INF_FUTURE
-          : ExecCtx::Get()->Now() + grpclb_policy()->lb_call_timeout_ms_;
+  const Timestamp deadline =
+      grpclb_policy()->lb_call_timeout_ == Duration::Zero()
+          ? Timestamp::InfFuture()
+          : ExecCtx::Get()->Now() + grpclb_policy()->lb_call_timeout_;
   lb_call_ = grpc_channel_create_pollset_set_call(
       grpclb_policy()->lb_channel_, nullptr, GRPC_PROPAGATE_DEFAULTS,
       grpclb_policy_->interested_parties(),

@@ -914,7 +914,7 @@ void GrpcLb::BalancerCallState::ScheduleNextClientLoadReportLocked() {
   // in a loop while draining the currently-held WorkSerializer.
   // Also see https://github.com/grpc/grpc/issues/26079.
   ExecCtx::Get()->InvalidateNow();
-  const grpc_millis next_client_load_report_time =
+  const Timestamp next_client_load_report_time =
       ExecCtx::Get()->Now() + client_stats_report_interval_;
   GRPC_CLOSURE_INIT(&client_load_report_closure_, MaybeSendClientLoadReport,
                     this, grpc_schedule_on_exec_ctx);

@@ -1077,15 +1077,16 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked() {
   } else {
     switch (response.type) {
       case response.INITIAL: {
-        if (response.client_stats_report_interval != 0) {
+        if (response.client_stats_report_interval != Duration::Zero()) {
           client_stats_report_interval_ = std::max(
-              int64_t(GPR_MS_PER_SEC), response.client_stats_report_interval);
+              Duration::Seconds(1), response.client_stats_report_interval);
           if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
             gpr_log(GPR_INFO,
                     "[grpclb %p] lb_calld=%p: Received initial LB response "
                     "message; client load reporting interval = %" PRId64
                     " milliseconds",
-                    grpclb_policy(), this, client_stats_report_interval_);
+                    grpclb_policy(), this,
+                    client_stats_report_interval_.millis());
           }
         } else if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
           gpr_log(GPR_INFO,

@@ -1111,7 +1112,8 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked() {
         seen_serverlist_ = true;
         // Start sending client load report only after we start using the
         // serverlist returned from the current LB call.
-        if (client_stats_report_interval_ > 0 && client_stats_ == nullptr) {
+        if (client_stats_report_interval_ > Duration::Zero() &&
+            client_stats_ == nullptr) {
           client_stats_ = MakeRefCounted<GrpcLbClientStats>();
           // Ref held by callback.
           Ref(DEBUG_LOCATION, "client_load_report").release();

@@ -1364,22 +1366,25 @@ GrpcLb::GrpcLb(Args args)
     : LoadBalancingPolicy(std::move(args)),
       server_name_(GetServerNameFromChannelArgs(args.args)),
       response_generator_(MakeRefCounted<FakeResolverResponseGenerator>()),
-      lb_call_timeout_ms_(grpc_channel_args_find_integer(
-          args.args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS, {0, 0, INT_MAX})),
+      lb_call_timeout_(Duration::Milliseconds(grpc_channel_args_find_integer(
+          args.args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS, {0, 0, INT_MAX}))),
       lb_call_backoff_(
           BackOff::Options()
-              .set_initial_backoff(GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS *
-                                   1000)
+              .set_initial_backoff(Duration::Seconds(
+                  GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS))
              .set_multiplier(GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER)
              .set_jitter(GRPC_GRPCLB_RECONNECT_JITTER)
-              .set_max_backoff(GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS *
-                               1000)),
+              .set_max_backoff(Duration::Seconds(
+                  GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS))),
-      fallback_at_startup_timeout_(grpc_channel_args_find_integer(
-          args.args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS,
-          {GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0, INT_MAX})),
+      fallback_at_startup_timeout_(
+          Duration::Milliseconds(grpc_channel_args_find_integer(
+              args.args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS,
+              {GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0, INT_MAX}))),
-      subchannel_cache_interval_ms_(grpc_channel_args_find_integer(
-          args.args, GRPC_ARG_GRPCLB_SUBCHANNEL_CACHE_INTERVAL_MS,
-          {GRPC_GRPCLB_DEFAULT_SUBCHANNEL_DELETION_DELAY_MS, 0, INT_MAX})) {
+      subchannel_cache_interval_(
+          Duration::Milliseconds(grpc_channel_args_find_integer(
+              args.args, GRPC_ARG_GRPCLB_SUBCHANNEL_CACHE_INTERVAL_MS,
+              {GRPC_GRPCLB_DEFAULT_SUBCHANNEL_DELETION_DELAY_MS, 0,
+               INT_MAX}))) {
   if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
     gpr_log(GPR_INFO,
             "[grpclb %p] Will use '%s' as the server name for LB request.",

@@ -1470,7 +1475,7 @@ void GrpcLb::UpdateLocked(UpdateArgs args) {
   if (is_initial_update) {
     fallback_at_startup_checks_pending_ = true;
     // Start timer.
-    grpc_millis deadline = ExecCtx::Get()->Now() + fallback_at_startup_timeout_;
+    Timestamp deadline = ExecCtx::Get()->Now() + fallback_at_startup_timeout_;
     Ref(DEBUG_LOCATION, "on_fallback_timer").release();  // Ref for callback
     grpc_timer_init(&lb_fallback_timer_, deadline, &lb_on_fallback_);
     // Start watching the channel's connectivity state.  If the channel

@@ -1561,13 +1566,13 @@ void GrpcLb::StartBalancerCallLocked() {
 }

 void GrpcLb::StartBalancerCallRetryTimerLocked() {
-  grpc_millis next_try = lb_call_backoff_.NextAttemptTime();
+  Timestamp next_try = lb_call_backoff_.NextAttemptTime();
   if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
     gpr_log(GPR_INFO, "[grpclb %p] Connection to LB server lost...", this);
-    grpc_millis timeout = next_try - ExecCtx::Get()->Now();
-    if (timeout > 0) {
+    Duration timeout = next_try - ExecCtx::Get()->Now();
+    if (timeout > Duration::Zero()) {
       gpr_log(GPR_INFO, "[grpclb %p] ... retry_timer_active in %" PRId64 "ms.",
-              this, timeout);
+              this, timeout.millis());
     } else {
       gpr_log(GPR_INFO, "[grpclb %p] ... retry_timer_active immediately.",
               this);

@@ -1735,8 +1740,7 @@ void GrpcLb::CreateOrUpdateChildPolicyLocked() {
 void GrpcLb::CacheDeletedSubchannelLocked(
     RefCountedPtr<SubchannelInterface> subchannel) {
-  grpc_millis deletion_time =
-      ExecCtx::Get()->Now() + subchannel_cache_interval_ms_;
+  Timestamp deletion_time = ExecCtx::Get()->Now() + subchannel_cache_interval_;
   cached_subchannels_[deletion_time].push_back(std::move(subchannel));
   if (!subchannel_cache_timer_pending_) {
     Ref(DEBUG_LOCATION, "OnSubchannelCacheTimer").release();

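The recurring move in the grpclb hunks above is to wrap `grpc_channel_args_find_integer()` in `Duration::Milliseconds()` or `Duration::Seconds()`, so a raw integer becomes a typed duration exactly once, at the config boundary. A minimal standalone sketch of that boundary conversion, using `std::chrono` in place of the `grpc_core` types (the `FindIntegerArg` helper, its parameters, and the default values are hypothetical):

```cpp
#include <chrono>
#include <cstdio>

// Hypothetical stand-in for grpc_channel_args_find_integer(): return the
// configured value clamped to [min, max], or the default when unset (-1).
static int FindIntegerArg(int configured_ms, int default_ms, int min, int max) {
  int value = configured_ms >= 0 ? configured_ms : default_ms;
  if (value < min) value = min;
  if (value > max) value = max;
  return value;
}

int main() {
  // The integer is interpreted as milliseconds exactly once, right here;
  // everything downstream works with the typed duration.
  std::chrono::milliseconds lb_call_timeout{
      FindIntegerArg(/*configured_ms=*/-1, /*default_ms=*/10000, 0, 1 << 30)};
  // Deadline arithmetic is now unit-checked by the compiler, which is what
  // removes the hand-written "* 1000" conversions on the left of the diff.
  auto deadline = std::chrono::steady_clock::now() + lb_call_timeout;
  (void)deadline;
  std::printf("timeout = %lld ms\n",
              static_cast<long long>(lb_call_timeout.count()));
  return 0;
}
```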
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc

@@ -146,11 +146,10 @@ bool ParseServerList(const grpc_lb_v1_LoadBalanceResponse& response,
   return true;
 }

-grpc_millis grpc_grpclb_duration_to_millis(
-    const google_protobuf_Duration* duration_pb) {
-  return static_cast<grpc_millis>(
-      (google_protobuf_Duration_seconds(duration_pb) * GPR_MS_PER_SEC) +
-      (google_protobuf_Duration_nanos(duration_pb) / GPR_NS_PER_MS));
+Duration ParseDuration(const google_protobuf_Duration* duration_pb) {
+  return Duration::FromSecondsAndNanoseconds(
+      google_protobuf_Duration_seconds(duration_pb),
+      google_protobuf_Duration_nanos(duration_pb));
 }

 }  // namespace

@@ -177,7 +176,7 @@ bool GrpcLbResponseParse(const grpc_slice& serialized_response,
                          initial_response);
   if (client_stats_report_interval != nullptr) {
     result->client_stats_report_interval =
-        grpc_grpclb_duration_to_millis(client_stats_report_interval);
+        ParseDuration(client_stats_report_interval);
   }
   return true;
 }

src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h

@@ -49,7 +49,7 @@ struct GrpcLbServer {

 struct GrpcLbResponse {
   enum { INITIAL, SERVERLIST, FALLBACK } type;
-  grpc_millis client_stats_report_interval = 0;
+  Duration client_stats_report_interval;
   std::vector<GrpcLbServer> serverlist;
 };

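`ParseDuration()` above retires the hand-rolled millisecond math in favor of `Duration::FromSecondsAndNanoseconds()`, folding both protobuf fields into one typed value. A standalone `std::chrono` sketch of the same fold (`ProtoDuration` mocks the two `google.protobuf.Duration` accessors used in the hunk):

```cpp
#include <chrono>
#include <cstdint>
#include <cstdio>

// Mock of the seconds/nanos pair carried by google.protobuf.Duration.
struct ProtoDuration {
  int64_t seconds;
  int32_t nanos;
};

// Combine both fields into a single typed duration; any truncation to a
// coarser unit is deferred until a caller explicitly asks for it.
static std::chrono::nanoseconds FromSecondsAndNanoseconds(
    const ProtoDuration& d) {
  return std::chrono::seconds(d.seconds) + std::chrono::nanoseconds(d.nanos);
}

int main() {
  ProtoDuration report_interval{3, 500000000};  // 3.5 s
  auto interval = FromSecondsAndNanoseconds(report_interval);
  auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(interval);
  std::printf("client_stats_report_interval = %lld ms\n",
              static_cast<long long>(ms.count()));  // prints 3500
  return 0;
}
```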
src/core/ext/filters/client_channel/lb_policy/priority/priority.cc

@@ -47,11 +47,11 @@ constexpr char kPriority[] = "priority_experimental";

 // How long we keep a child around for after it is no longer being used
 // (either because it has been removed from the config or because we
 // have switched to a higher-priority child).
-constexpr int kChildRetentionIntervalMs = 15 * 60 * 1000;
+constexpr Duration kChildRetentionInterval = Duration::Minutes(15);

 // Default for how long we wait for a newly created child to get connected
 // before starting to attempt the next priority.  Overridable via channel arg.
-constexpr int kDefaultChildFailoverTimeoutMs = 10000;
+constexpr Duration kDefaultChildFailoverTimeout = Duration::Seconds(10);

 // Config for priority LB policy.
 class PriorityLbConfig : public LoadBalancingPolicy::Config {

@@ -219,7 +219,7 @@ class PriorityLb : public LoadBalancingPolicy {
   void TryNextPriorityLocked(bool report_connecting);
   void SelectPriorityLocked(uint32_t priority);

-  const int child_failover_timeout_ms_;
+  const Duration child_failover_timeout_;

   // Current channel args and config from the resolver.
   const grpc_channel_args* args_ = nullptr;

@@ -244,9 +244,11 @@ class PriorityLb : public LoadBalancingPolicy {

 PriorityLb::PriorityLb(Args args)
     : LoadBalancingPolicy(std::move(args)),
-      child_failover_timeout_ms_(grpc_channel_args_find_integer(
-          args.args, GRPC_ARG_PRIORITY_FAILOVER_TIMEOUT_MS,
-          {kDefaultChildFailoverTimeoutMs, 0, INT_MAX})) {
+      child_failover_timeout_(
+          Duration::Milliseconds(grpc_channel_args_find_integer(
+              args.args, GRPC_ARG_PRIORITY_FAILOVER_TIMEOUT_MS,
+              {static_cast<int>(kDefaultChildFailoverTimeout.millis()), 0,
+               INT_MAX}))) {
   if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
     gpr_log(GPR_INFO, "[priority_lb %p] created", this);
   }

@@ -634,15 +636,17 @@ void PriorityLb::ChildPriority::OnConnectivityStateUpdateLocked(

 void PriorityLb::ChildPriority::StartFailoverTimerLocked() {
   if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
-    gpr_log(GPR_INFO,
-            "[priority_lb %p] child %s (%p): starting failover timer for %d ms",
-            priority_policy_.get(), name_.c_str(), this,
-            priority_policy_->child_failover_timeout_ms_);
+    gpr_log(
+        GPR_INFO,
+        "[priority_lb %p] child %s (%p): starting failover timer for %" PRId64
+        "ms",
+        priority_policy_.get(), name_.c_str(), this,
+        priority_policy_->child_failover_timeout_.millis());
   }
   Ref(DEBUG_LOCATION, "ChildPriority+OnFailoverTimerLocked").release();
   grpc_timer_init(
       &failover_timer_,
-      ExecCtx::Get()->Now() + priority_policy_->child_failover_timeout_ms_,
+      ExecCtx::Get()->Now() + priority_policy_->child_failover_timeout_,
       &on_failover_timer_);
   failover_timer_callback_pending_ = true;
 }

@@ -691,16 +695,16 @@ void PriorityLb::ChildPriority::DeactivateLocked() {
   if (deactivation_timer_callback_pending_) return;
   if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
     gpr_log(GPR_INFO,
-            "[priority_lb %p] child %s (%p): deactivating -- will remove in %d "
-            "ms.",
+            "[priority_lb %p] child %s (%p): deactivating -- will remove in "
+            "%" PRId64 "ms.",
             priority_policy_.get(), name_.c_str(), this,
-            kChildRetentionIntervalMs);
+            kChildRetentionInterval.millis());
   }
   MaybeCancelFailoverTimerLocked();
   // Start a timer to delete the child.
   Ref(DEBUG_LOCATION, "ChildPriority+timer").release();
   grpc_timer_init(&deactivation_timer_,
-                  ExecCtx::Get()->Now() + kChildRetentionIntervalMs,
+                  ExecCtx::Get()->Now() + kChildRetentionInterval,
                   &on_deactivation_timer_);
   deactivation_timer_callback_pending_ = true;
 }

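The priority hunks lean on the algebra the new types enforce: adding a `Duration` to a `Timestamp` yields a `Timestamp` (a timer deadline), and subtracting two `Timestamp`s yields a `Duration`. A compilable `std::chrono` sketch of that algebra (the retention constant matches the diff; the rest is illustrative):

```cpp
#include <chrono>
#include <cstdio>

int main() {
  using Clock = std::chrono::steady_clock;
  constexpr auto kChildRetentionInterval = std::chrono::minutes(15);

  // Timestamp + Duration -> Timestamp: the deadline handed to a timer.
  Clock::time_point deadline = Clock::now() + kChildRetentionInterval;

  // Timestamp - Timestamp -> Duration: time remaining until the deadline.
  auto remaining = std::chrono::duration_cast<std::chrono::milliseconds>(
      deadline - Clock::now());
  std::printf("child removed in ~%lld ms\n",
              static_cast<long long>(remaining.count()));

  // Mixing the two kinds no longer compiles, unlike with raw grpc_millis:
  //   auto bogus = deadline + deadline;  // error: no such operator+
  return 0;
}
```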
src/core/ext/filters/client_channel/lb_policy/rls/rls.cc

@@ -89,17 +89,17 @@ const char* kRlsRequestPath = "/grpc.lookup.v1.RouteLookupService/RouteLookup";
 const char* kFakeTargetFieldValue = "fake_target_field_value";
 const char* kRlsHeaderKey = "X-Google-RLS-Data";

-const grpc_millis kDefaultLookupServiceTimeout = 10000;
-const grpc_millis kMaxMaxAge = 5 * 60 * GPR_MS_PER_SEC;
-const grpc_millis kMinExpirationTime = 5 * GPR_MS_PER_SEC;
-const grpc_millis kCacheBackoffInitial = 1 * GPR_MS_PER_SEC;
+const Duration kDefaultLookupServiceTimeout = Duration::Seconds(10);
+const Duration kMaxMaxAge = Duration::Minutes(5);
+const Duration kMinExpirationTime = Duration::Seconds(5);
+const Duration kCacheBackoffInitial = Duration::Seconds(1);
 const double kCacheBackoffMultiplier = 1.6;
 const double kCacheBackoffJitter = 0.2;
-const grpc_millis kCacheBackoffMax = 120 * GPR_MS_PER_SEC;
-const grpc_millis kDefaultThrottleWindowSizeMs = 30 * GPR_MS_PER_SEC;
-const float kDefaultThrottleRatioForSuccesses = 2.0;
+const Duration kCacheBackoffMax = Duration::Minutes(2);
+const Duration kDefaultThrottleWindowSize = Duration::Seconds(30);
+const double kDefaultThrottleRatioForSuccesses = 2.0;
 const int kDefaultThrottlePadding = 8;
-const grpc_millis kCacheCleanupTimerInterval = 60 * GPR_MS_PER_SEC;
+const Duration kCacheCleanupTimerInterval = Duration::Minutes(1);
 const int64_t kMaxCacheSizeBytes = 5 * 1024 * 1024;

 // Parsed RLS LB policy configuration.

@@ -118,9 +118,9 @@ class RlsLbConfig : public LoadBalancingPolicy::Config {
   struct RouteLookupConfig {
     KeyBuilderMap key_builder_map;
     std::string lookup_service;
-    grpc_millis lookup_service_timeout = 0;
-    grpc_millis max_age = 0;
-    grpc_millis stale_age = 0;
+    Duration lookup_service_timeout;
+    Duration max_age;
+    Duration stale_age;
     int64_t cache_size_bytes = 0;
     std::string default_target;
   };

@@ -146,11 +146,11 @@ class RlsLbConfig : public LoadBalancingPolicy::Config {
   const std::string& lookup_service() const {
     return route_lookup_config_.lookup_service;
   }
-  grpc_millis lookup_service_timeout() const {
+  Duration lookup_service_timeout() const {
     return route_lookup_config_.lookup_service_timeout;
   }
-  grpc_millis max_age() const { return route_lookup_config_.max_age; }
-  grpc_millis stale_age() const { return route_lookup_config_.stale_age; }
+  Duration max_age() const { return route_lookup_config_.max_age; }
+  Duration stale_age() const { return route_lookup_config_.stale_age; }
   int64_t cache_size_bytes() const {
     return route_lookup_config_.cache_size_bytes;
   }

@@ -362,15 +362,15 @@ class RlsLb : public LoadBalancingPolicy {
         ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_) {
       return status_;
     }
-    grpc_millis backoff_time() const
+    Timestamp backoff_time() const
         ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_) {
       return backoff_time_;
     }
-    grpc_millis backoff_expiration_time() const
+    Timestamp backoff_expiration_time() const
         ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_) {
       return backoff_expiration_time_;
     }
-    grpc_millis data_expiration_time() const
+    Timestamp data_expiration_time() const
        ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_) {
       return data_expiration_time_;
     }

@@ -378,11 +378,10 @@ class RlsLb : public LoadBalancingPolicy {
         ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_) {
       return header_data_;
     }
-    grpc_millis stale_time() const
-        ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_) {
+    Timestamp stale_time() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_) {
      return stale_time_;
     }
-    grpc_millis min_expiration_time() const
+    Timestamp min_expiration_time() const
         ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_) {
       return min_expiration_time_;
     }

@@ -423,7 +422,7 @@ class RlsLb : public LoadBalancingPolicy {
    private:
     class BackoffTimer : public InternallyRefCounted<BackoffTimer> {
      public:
-      BackoffTimer(RefCountedPtr<Entry> entry, grpc_millis backoff_time);
+      BackoffTimer(RefCountedPtr<Entry> entry, Timestamp backoff_time);

       // Note: We are forced to disable lock analysis here because
       // Orphan() is called by OrphanablePtr<>, which cannot have lock

@@ -446,22 +445,21 @@ class RlsLb : public LoadBalancingPolicy {
     // Backoff states
     absl::Status status_ ABSL_GUARDED_BY(&RlsLb::mu_);
     std::unique_ptr<BackOff> backoff_state_ ABSL_GUARDED_BY(&RlsLb::mu_);
-    grpc_millis backoff_time_ ABSL_GUARDED_BY(&RlsLb::mu_) =
-        GRPC_MILLIS_INF_PAST;
-    grpc_millis backoff_expiration_time_ ABSL_GUARDED_BY(&RlsLb::mu_) =
-        GRPC_MILLIS_INF_PAST;
+    Timestamp backoff_time_ ABSL_GUARDED_BY(&RlsLb::mu_) =
+        Timestamp::InfPast();
+    Timestamp backoff_expiration_time_ ABSL_GUARDED_BY(&RlsLb::mu_) =
+        Timestamp::InfPast();
     OrphanablePtr<BackoffTimer> backoff_timer_;

     // RLS response states
     std::vector<RefCountedPtr<ChildPolicyWrapper>> child_policy_wrappers_
         ABSL_GUARDED_BY(&RlsLb::mu_);
     std::string header_data_ ABSL_GUARDED_BY(&RlsLb::mu_);
-    grpc_millis data_expiration_time_ ABSL_GUARDED_BY(&RlsLb::mu_) =
-        GRPC_MILLIS_INF_PAST;
-    grpc_millis stale_time_ ABSL_GUARDED_BY(&RlsLb::mu_) =
-        GRPC_MILLIS_INF_PAST;
-    grpc_millis min_expiration_time_ ABSL_GUARDED_BY(&RlsLb::mu_);
+    Timestamp data_expiration_time_ ABSL_GUARDED_BY(&RlsLb::mu_) =
+        Timestamp::InfPast();
+    Timestamp stale_time_ ABSL_GUARDED_BY(&RlsLb::mu_) = Timestamp::InfPast();
+    Timestamp min_expiration_time_ ABSL_GUARDED_BY(&RlsLb::mu_);
     Cache::Iterator lru_iterator_ ABSL_GUARDED_BY(&RlsLb::mu_);
   };

@@ -566,10 +564,10 @@ class RlsLb : public LoadBalancingPolicy {
     class Throttle {
      public:
       explicit Throttle(
-          int window_size_ms = kDefaultThrottleWindowSizeMs,
+          Duration window_size = kDefaultThrottleWindowSize,
           float ratio_for_successes = kDefaultThrottleRatioForSuccesses,
           int padding = kDefaultThrottlePadding)
-          : window_size_ms_(window_size_ms),
+          : window_size_(window_size),
             ratio_for_successes_(ratio_for_successes),
             padding_(padding) {}

@@ -579,16 +577,16 @@ class RlsLb : public LoadBalancingPolicy {
           ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);

      private:
-      grpc_millis window_size_ms_;
-      float ratio_for_successes_;
+      Duration window_size_;
+      double ratio_for_successes_;
       int padding_;
       std::mt19937 rng_{std::random_device()()};

       // Logged timestamps of requests.
-      std::deque<grpc_millis> requests_ ABSL_GUARDED_BY(&RlsLb::mu_);
+      std::deque<Timestamp> requests_ ABSL_GUARDED_BY(&RlsLb::mu_);

       // Logged timestamps of failures.
-      std::deque<grpc_millis> failures_ ABSL_GUARDED_BY(&RlsLb::mu_);
+      std::deque<Timestamp> failures_ ABSL_GUARDED_BY(&RlsLb::mu_);
     };

     RefCountedPtr<RlsLb> lb_policy_;

@@ -641,7 +639,7 @@ class RlsLb : public LoadBalancingPolicy {
   std::string stale_header_data_;

   // RLS call state.
-  grpc_millis deadline_;
+  Timestamp deadline_;
   grpc_closure call_start_cb_;
   grpc_closure call_complete_cb_;
   grpc_call* call_ = nullptr;

@@ -991,7 +989,7 @@ LoadBalancingPolicy::PickResult RlsLb::Picker::Pick(PickArgs args) {
     gpr_log(GPR_INFO, "[rlslb %p] picker=%p: request keys: %s",
             lb_policy_.get(), this, key.ToString().c_str());
   }
-  grpc_millis now = ExecCtx::Get()->Now();
+  Timestamp now = ExecCtx::Get()->Now();
   MutexLock lock(&lb_policy_->mu_);
   if (lb_policy_->is_shutdown_) {
     return PickResult::Fail(

@@ -1077,7 +1075,7 @@ LoadBalancingPolicy::PickResult RlsLb::Picker::Pick(PickArgs args) {
 //

 RlsLb::Cache::Entry::BackoffTimer::BackoffTimer(RefCountedPtr<Entry> entry,
-                                                grpc_millis backoff_time)
+                                                Timestamp backoff_time)
     : entry_(std::move(entry)) {
   GRPC_CLOSURE_INIT(&backoff_timer_callback_, OnBackoffTimer, this, nullptr);
   Ref(DEBUG_LOCATION, "BackoffTimer").release();

@@ -1211,17 +1209,17 @@ LoadBalancingPolicy::PickResult RlsLb::Cache::Entry::Pick(PickArgs args) {
 }

 void RlsLb::Cache::Entry::ResetBackoff() {
-  backoff_time_ = GRPC_MILLIS_INF_PAST;
+  backoff_time_ = Timestamp::InfPast();
   backoff_timer_.reset();
 }

 bool RlsLb::Cache::Entry::ShouldRemove() const {
-  grpc_millis now = ExecCtx::Get()->Now();
+  Timestamp now = ExecCtx::Get()->Now();
   return data_expiration_time_ < now && backoff_expiration_time_ < now;
 }

 bool RlsLb::Cache::Entry::CanEvict() const {
-  grpc_millis now = ExecCtx::Get()->Now();
+  Timestamp now = ExecCtx::Get()->Now();
   return min_expiration_time_ < now;
 }

@@ -1247,7 +1245,7 @@ RlsLb::Cache::Entry::OnRlsResponseLocked(
       backoff_state_ = MakeCacheEntryBackoff();
     }
     backoff_time_ = backoff_state_->NextAttemptTime();
-    grpc_millis now = ExecCtx::Get()->Now();
+    Timestamp now = ExecCtx::Get()->Now();
     backoff_expiration_time_ = now + (backoff_time_ - now) * 2;
     backoff_timer_ = MakeOrphanable<BackoffTimer>(
         Ref(DEBUG_LOCATION, "BackoffTimer"), backoff_time_);

@@ -1256,13 +1254,13 @@ RlsLb::Cache::Entry::OnRlsResponseLocked(
   }
   // Request succeeded, so store the result.
   header_data_ = std::move(response.header_data);
-  grpc_millis now = ExecCtx::Get()->Now();
+  Timestamp now = ExecCtx::Get()->Now();
   data_expiration_time_ = now + lb_policy_->config_->max_age();
   stale_time_ = now + lb_policy_->config_->stale_age();
   status_ = absl::OkStatus();
   backoff_state_.reset();
-  backoff_time_ = GRPC_MILLIS_INF_PAST;
-  backoff_expiration_time_ = GRPC_MILLIS_INF_PAST;
+  backoff_time_ = Timestamp::InfPast();
+  backoff_expiration_time_ = Timestamp::InfPast();
   // Check if we need to update this list of targets.
   bool targets_changed = [&]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_) {
     if (child_policy_wrappers_.size() != response.targets.size()) return true;

@@ -1322,7 +1320,7 @@ RlsLb::Cache::Entry::OnRlsResponseLocked(
 //

 RlsLb::Cache::Cache(RlsLb* lb_policy) : lb_policy_(lb_policy) {
-  grpc_millis now = ExecCtx::Get()->Now();
+  Timestamp now = ExecCtx::Get()->Now();
   lb_policy_->Ref(DEBUG_LOCATION, "CacheCleanupTimer").release();
   GRPC_CLOSURE_INIT(&timer_callback_, OnCleanupTimer, this, nullptr);
   grpc_timer_init(&cleanup_timer_, now + kCacheCleanupTimerInterval,

@@ -1405,7 +1403,7 @@ void RlsLb::Cache::OnCleanupTimer(void* arg, grpc_error_handle error) {
       ++it;
     }
   }
-  grpc_millis now = ExecCtx::Get()->Now();
+  Timestamp now = ExecCtx::Get()->Now();
   lb_policy.release();
   grpc_timer_init(&cache->cleanup_timer_,
                   now + kCacheCleanupTimerInterval,

@@ -1474,11 +1472,11 @@ void RlsLb::RlsChannel::StateWatcher::OnConnectivityStateChange(
 //

 bool RlsLb::RlsChannel::Throttle::ShouldThrottle() {
-  grpc_millis now = ExecCtx::Get()->Now();
-  while (!requests_.empty() && now - requests_.front() > window_size_ms_) {
+  Timestamp now = ExecCtx::Get()->Now();
+  while (!requests_.empty() && now - requests_.front() > window_size_) {
     requests_.pop_front();
   }
-  while (!failures_.empty() && now - failures_.front() > window_size_ms_) {
+  while (!failures_.empty() && now - failures_.front() > window_size_) {
     failures_.pop_front();
   }
   // Compute probability of throttling.

@@ -1502,7 +1500,7 @@ bool RlsLb::RlsChannel::Throttle::ShouldThrottle() {
 }

 void RlsLb::RlsChannel::Throttle::RegisterResponse(bool success) {
-  grpc_millis now = ExecCtx::Get()->Now();
+  Timestamp now = ExecCtx::Get()->Now();
   requests_.push_back(now);
   if (!success) failures_.push_back(now);
 }

@@ -1690,7 +1688,7 @@ void RlsLb::RlsRequest::StartCallLocked() {
     MutexLock lock(&lb_policy_->mu_);
     if (lb_policy_->is_shutdown_) return;
   }
-  grpc_millis now = ExecCtx::Get()->Now();
+  Timestamp now = ExecCtx::Get()->Now();
   deadline_ = now + lb_policy_->config_->lookup_service_timeout();
   grpc_metadata_array_init(&recv_initial_metadata_);
   grpc_metadata_array_init(&recv_trailing_metadata_);

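`Throttle::ShouldThrottle()` above evicts entries older than `window_size_` from two timestamp deques before computing a throttle probability. A self-contained sketch of that sliding-window bookkeeping; the probability formula follows the adaptive client-throttling scheme the diff's constants suggest (requests versus ratio-scaled successes plus padding), but treat it as illustrative rather than a copy of the RLS implementation:

```cpp
#include <chrono>
#include <deque>
#include <random>

using Clock = std::chrono::steady_clock;

class Throttle {
 public:
  bool ShouldThrottle() {
    const Clock::time_point now = Clock::now();
    // Drop observations that have aged out of the window, exactly as the
    // two while-loops in ShouldThrottle() do.
    auto evict = [&](std::deque<Clock::time_point>& q) {
      while (!q.empty() && now - q.front() > kWindowSize) q.pop_front();
    };
    evict(requests_);
    evict(failures_);
    const double requests = static_cast<double>(requests_.size());
    const double successes = requests - static_cast<double>(failures_.size());
    // P(throttle) = max(0, (requests - ratio * successes) / (requests + padding))
    const double p =
        (requests - kRatioForSuccesses * successes) / (requests + kPadding);
    if (p <= 0) return false;
    return std::uniform_real_distribution<double>(0.0, 1.0)(rng_) < p;
  }

  void RegisterResponse(bool success) {
    const Clock::time_point now = Clock::now();
    requests_.push_back(now);
    if (!success) failures_.push_back(now);
  }

 private:
  static constexpr auto kWindowSize = std::chrono::seconds(30);
  static constexpr double kRatioForSuccesses = 2.0;
  static constexpr double kPadding = 8.0;
  std::mt19937 rng_{std::random_device()()};
  std::deque<Clock::time_point> requests_;
  std::deque<Clock::time_point> failures_;
};
```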
src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc

@@ -48,7 +48,7 @@ constexpr char kWeightedTarget[] = "weighted_target_experimental";

 // How long we keep a child around for after it has been removed from
 // the config.
-constexpr int kChildRetentionIntervalMs = 15 * 60 * 1000;
+constexpr Duration kChildRetentionInterval = Duration::Minutes(15);

 // Config for weighted_target LB policy.
 class WeightedTargetLbConfig : public LoadBalancingPolicy::Config {

@@ -564,7 +564,7 @@ void WeightedTargetLb::WeightedChild::DeactivateLocked() {
   Ref(DEBUG_LOCATION, "WeightedChild+timer").release();
   delayed_removal_timer_callback_pending_ = true;
   grpc_timer_init(&delayed_removal_timer_,
-                  ExecCtx::Get()->Now() + kChildRetentionIntervalMs,
+                  ExecCtx::Get()->Now() + kChildRetentionInterval,
                   &on_delayed_removal_timer_);
 }

src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc

@@ -487,7 +487,8 @@ void XdsClusterManagerLb::ClusterChild::DeactivateLocked() {
   Ref(DEBUG_LOCATION, "ClusterChild+timer").release();
   grpc_timer_init(&delayed_removal_timer_,
                   ExecCtx::Get()->Now() +
-                      GRPC_XDS_CLUSTER_MANAGER_CHILD_RETENTION_INTERVAL_MS,
+                      Duration::Milliseconds(
+                          GRPC_XDS_CLUSTER_MANAGER_CHILD_RETENTION_INTERVAL_MS),
                   &on_delayed_removal_timer_);
   delayed_removal_timer_callback_pending_ = true;
 }

src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc

@@ -101,7 +101,7 @@ class AresClientChannelDNSResolver : public Resolver {
   // timeout in milliseconds for active DNS queries
   int query_timeout_ms_;
   /// min interval between DNS requests
-  grpc_millis min_time_between_resolutions_;
+  Duration min_time_between_resolutions_;

   /// closures used by the work_serializer
   grpc_closure on_next_resolution_;

@@ -114,7 +114,7 @@ class AresClientChannelDNSResolver : public Resolver {
   bool have_next_resolution_timer_ = false;
   grpc_timer next_resolution_timer_;
   /// timestamp of last DNS request
-  grpc_millis last_resolution_timestamp_ = -1;
+  absl::optional<Timestamp> last_resolution_timestamp_;
   /// retry backoff state
   BackOff backoff_;
   /// currently resolving backend addresses

@@ -141,16 +141,17 @@ AresClientChannelDNSResolver::AresClientChannelDNSResolver(ResolverArgs args)
       query_timeout_ms_(grpc_channel_args_find_integer(
           channel_args_, GRPC_ARG_DNS_ARES_QUERY_TIMEOUT_MS,
           {GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS, 0, INT_MAX})),
-      min_time_between_resolutions_(grpc_channel_args_find_integer(
-          channel_args_, GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS,
-          {1000 * 30, 0, INT_MAX})),
-      backoff_(
-          BackOff::Options()
-              .set_initial_backoff(GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS *
-                                   1000)
-              .set_multiplier(GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER)
-              .set_jitter(GRPC_DNS_RECONNECT_JITTER)
-              .set_max_backoff(GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000)) {
+      min_time_between_resolutions_(
+          Duration::Milliseconds(grpc_channel_args_find_integer(
+              channel_args_, GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS,
+              {1000 * 30, 0, INT_MAX}))),
+      backoff_(BackOff::Options()
+                   .set_initial_backoff(Duration::Seconds(
+                       GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS))
+                   .set_multiplier(GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER)
+                   .set_jitter(GRPC_DNS_RECONNECT_JITTER)
+                   .set_max_backoff(Duration::Seconds(
+                       GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS))) {
   // Closure initialization.
   GRPC_CLOSURE_INIT(&on_next_resolution_, OnNextResolution, this,
                     grpc_schedule_on_exec_ctx);

@@ -388,8 +389,8 @@ void AresClientChannelDNSResolver::OnResolvedLocked(grpc_error_handle error) {
     // in a loop while draining the currently-held WorkSerializer.
     // Also see https://github.com/grpc/grpc/issues/26079.
     ExecCtx::Get()->InvalidateNow();
-    grpc_millis next_try = backoff_.NextAttemptTime();
-    grpc_millis timeout = next_try - ExecCtx::Get()->Now();
+    Timestamp next_try = backoff_.NextAttemptTime();
+    Duration timeout = next_try - ExecCtx::Get()->Now();
     GRPC_CARES_TRACE_LOG("resolver:%p dns resolution failed (will retry): %s",
                          this, grpc_error_std_string(error).c_str());
     GPR_ASSERT(!have_next_resolution_timer_);

@@ -398,9 +399,9 @@ void AresClientChannelDNSResolver::OnResolvedLocked(grpc_error_handle error) {
    // new closure API is done, find a way to track this ref with the timer
     // callback as part of the type system.
     Ref(DEBUG_LOCATION, "retry-timer").release();
-    if (timeout > 0) {
+    if (timeout > Duration::Zero()) {
       GRPC_CARES_TRACE_LOG("resolver:%p retrying in %" PRId64 " milliseconds",
-                           this, timeout);
+                           this, timeout.millis());
     } else {
       GRPC_CARES_TRACE_LOG("resolver:%p retrying immediately", this);
     }

@@ -414,29 +415,30 @@ void AresClientChannelDNSResolver::MaybeStartResolvingLocked() {
   // If there is an existing timer, the time it fires is the earliest time we
   // can start the next resolution.
   if (have_next_resolution_timer_) return;
-  if (last_resolution_timestamp_ >= 0) {
+  if (last_resolution_timestamp_.has_value()) {
     // InvalidateNow to avoid getting stuck re-initializing this timer
     // in a loop while draining the currently-held WorkSerializer.
     // Also see https://github.com/grpc/grpc/issues/26079.
     ExecCtx::Get()->InvalidateNow();
-    const grpc_millis earliest_next_resolution =
-        last_resolution_timestamp_ + min_time_between_resolutions_;
-    const grpc_millis ms_until_next_resolution =
-        earliest_next_resolution - ExecCtx::Get()->Now();
-    if (ms_until_next_resolution > 0) {
-      const grpc_millis last_resolution_ago =
-          ExecCtx::Get()->Now() - last_resolution_timestamp_;
+    const Timestamp earliest_next_resolution =
+        *last_resolution_timestamp_ + min_time_between_resolutions_;
+    const Duration time_until_next_resolution =
+        earliest_next_resolution - ExecCtx::Get()->Now();
+    if (time_until_next_resolution > Duration::Zero()) {
+      const Duration last_resolution_ago =
+          ExecCtx::Get()->Now() - *last_resolution_timestamp_;
       GRPC_CARES_TRACE_LOG(
           "resolver:%p In cooldown from last resolution (from %" PRId64
           " ms ago). Will resolve again in %" PRId64 " ms",
-          this, last_resolution_ago, ms_until_next_resolution);
+          this, last_resolution_ago.millis(),
+          time_until_next_resolution.millis());
       have_next_resolution_timer_ = true;
       // TODO(roth): We currently deal with this ref manually.  Once the
       // new closure API is done, find a way to track this ref with the timer
       // callback as part of the type system.
       Ref(DEBUG_LOCATION, "next_resolution_timer_cooldown").release();
       grpc_timer_init(&next_resolution_timer_,
-                      ExecCtx::Get()->Now() + ms_until_next_resolution,
+                      ExecCtx::Get()->Now() + time_until_next_resolution,
                       &on_next_resolution_);
       return;
     }

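Replacing the `-1` sentinel with `absl::optional<Timestamp>` turns "never resolved yet" into a distinct state rather than a magic value. A sketch of the cooldown decision from `MaybeStartResolvingLocked()` using `std::optional` and `std::chrono` (the free function and its names are hypothetical restatements of the member logic):

```cpp
#include <chrono>
#include <cstdio>
#include <optional>

using Clock = std::chrono::steady_clock;

// Returns how long to wait before the next DNS resolution, or nullopt if
// one may start immediately.
std::optional<Clock::duration> CooldownRemaining(
    const std::optional<Clock::time_point>& last_resolution_timestamp,
    Clock::duration min_time_between_resolutions) {
  // No previous resolution: no cooldown applies.
  if (!last_resolution_timestamp.has_value()) return std::nullopt;
  const Clock::time_point earliest_next_resolution =
      *last_resolution_timestamp + min_time_between_resolutions;
  const Clock::duration time_until_next_resolution =
      earliest_next_resolution - Clock::now();
  if (time_until_next_resolution <= Clock::duration::zero()) {
    return std::nullopt;  // Cooldown already elapsed.
  }
  return time_until_next_resolution;  // Caller schedules a timer for this.
}

int main() {
  std::optional<Clock::time_point> last;  // Never resolved yet.
  auto wait = CooldownRemaining(last, std::chrono::seconds(30));
  std::printf("resolve immediately: %s\n", wait ? "no" : "yes");
  last = Clock::now();  // Just resolved: ~30s cooldown now applies.
  wait = CooldownRemaining(last, std::chrono::seconds(30));
  std::printf("resolve immediately: %s\n", wait ? "no" : "yes");
  return 0;
}
```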
src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc

@@ -249,20 +249,20 @@ static fd_node* pop_fd_node_locked(fd_node** head, ares_socket_t as)
   return nullptr;
 }

-static grpc_millis calculate_next_ares_backup_poll_alarm_ms(
+static grpc_core::Timestamp calculate_next_ares_backup_poll_alarm(
     grpc_ares_ev_driver* driver)
     ABSL_EXCLUSIVE_LOCKS_REQUIRED(&grpc_ares_request::mu) {
   // An alternative here could be to use ares_timeout to try to be more
   // accurate, but that would require using "struct timeval"'s, which just makes
   // things a bit more complicated. So just poll every second, as suggested
   // by the c-ares code comments.
-  grpc_millis ms_until_next_ares_backup_poll_alarm = 1000;
+  grpc_core::Duration until_next_ares_backup_poll_alarm =
+      grpc_core::Duration::Seconds(1);
   GRPC_CARES_TRACE_LOG(
       "request:%p ev_driver=%p. next ares process poll time in "
       "%" PRId64 " ms",
-      driver->request, driver, ms_until_next_ares_backup_poll_alarm);
-  return ms_until_next_ares_backup_poll_alarm +
-         grpc_core::ExecCtx::Get()->Now();
+      driver->request, driver, until_next_ares_backup_poll_alarm.millis());
+  return grpc_core::ExecCtx::Get()->Now() + until_next_ares_backup_poll_alarm;
 }

 static void on_timeout(void* arg, grpc_error_handle error) {

@@ -317,8 +317,8 @@ static void on_ares_backup_poll_alarm(void* arg, grpc_error_handle error) {
     // in a loop while draining the currently-held WorkSerializer.
     // Also see https://github.com/grpc/grpc/issues/26079.
     grpc_core::ExecCtx::Get()->InvalidateNow();
-    grpc_millis next_ares_backup_poll_alarm =
-        calculate_next_ares_backup_poll_alarm_ms(driver);
+    grpc_core::Timestamp next_ares_backup_poll_alarm =
+        calculate_next_ares_backup_poll_alarm(driver);
     grpc_ares_ev_driver_ref(driver);
     GRPC_CLOSURE_INIT(&driver->on_ares_backup_poll_alarm_locked,
                       on_ares_backup_poll_alarm, driver,

@@ -462,22 +462,23 @@ void grpc_ares_ev_driver_start_locked(grpc_ares_ev_driver* ev_driver)
     ABSL_EXCLUSIVE_LOCKS_REQUIRED(&grpc_ares_request::mu) {
   grpc_ares_notify_on_event_locked(ev_driver);
   // Initialize overall DNS resolution timeout alarm
-  grpc_millis timeout =
-      ev_driver->query_timeout_ms == 0
-          ? GRPC_MILLIS_INF_FUTURE
-          : ev_driver->query_timeout_ms + grpc_core::ExecCtx::Get()->Now();
+  grpc_core::Duration timeout =
+      ev_driver->query_timeout_ms == 0
+          ? grpc_core::Duration::Infinity()
+          : grpc_core::Duration::Milliseconds(ev_driver->query_timeout_ms);
   GRPC_CARES_TRACE_LOG(
       "request:%p ev_driver=%p grpc_ares_ev_driver_start_locked. timeout in "
       "%" PRId64 " ms",
-      ev_driver->request, ev_driver, timeout);
+      ev_driver->request, ev_driver, timeout.millis());
   grpc_ares_ev_driver_ref(ev_driver);
   GRPC_CLOSURE_INIT(&ev_driver->on_timeout_locked, on_timeout, ev_driver,
                     grpc_schedule_on_exec_ctx);
-  grpc_timer_init(&ev_driver->query_timeout, timeout,
+  grpc_timer_init(&ev_driver->query_timeout,
+                  grpc_core::ExecCtx::Get()->Now() + timeout,
                   &ev_driver->on_timeout_locked);
   // Initialize the backup poll alarm
-  grpc_millis next_ares_backup_poll_alarm =
-      calculate_next_ares_backup_poll_alarm_ms(ev_driver);
+  grpc_core::Timestamp next_ares_backup_poll_alarm =
+      calculate_next_ares_backup_poll_alarm(ev_driver);
   grpc_ares_ev_driver_ref(ev_driver);
   GRPC_CLOSURE_INIT(&ev_driver->on_ares_backup_poll_alarm_locked,
                     on_ares_backup_poll_alarm, ev_driver,

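The `grpc_ares_ev_driver_start_locked` hunk maps a `query_timeout_ms` of 0 to `Duration::Infinity()` and only turns the duration into a concrete deadline at the `grpc_timer_init()` call site, where the infinity-aware `Timestamp` arithmetic absorbs the unbounded case. `std::chrono` has no `Infinity()`, so this standalone analogue uses `duration::max()` plus an explicit overflow guard:

```cpp
#include <chrono>
#include <cstdio>

using Clock = std::chrono::steady_clock;

// Map the "0 means no timeout" channel-arg convention to a typed duration.
Clock::duration QueryTimeout(int query_timeout_ms) {
  return query_timeout_ms == 0 ? Clock::duration::max()
                               : std::chrono::milliseconds(query_timeout_ms);
}

int main() {
  const Clock::duration timeout = QueryTimeout(0);
  // Convert to a deadline only where a timer needs one, guarding the
  // unbounded case so now() + max() cannot overflow (the grpc_core types
  // handle this internally; plain std::chrono does not).
  const Clock::time_point deadline = timeout == Clock::duration::max()
                                         ? Clock::time_point::max()
                                         : Clock::now() + timeout;
  std::printf("query timeout unbounded: %s\n",
              deadline == Clock::time_point::max() ? "yes" : "no");
  return 0;
}
```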
src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc

@@ -91,9 +91,9 @@ class NativeClientChannelDNSResolver : public Resolver {
   grpc_timer next_resolution_timer_;
   grpc_closure on_next_resolution_;
   /// min time between DNS requests
-  grpc_millis min_time_between_resolutions_;
+  Duration min_time_between_resolutions_;
   /// timestamp of last DNS request
-  grpc_millis last_resolution_timestamp_ = -1;
+  absl::optional<Timestamp> last_resolution_timestamp_;
   /// retry backoff state
   BackOff backoff_;
   /// tracks pending resolutions

@@ -107,16 +107,17 @@ NativeClientChannelDNSResolver::NativeClientChannelDNSResolver(
       work_serializer_(std::move(args.work_serializer)),
       result_handler_(std::move(args.result_handler)),
       interested_parties_(grpc_pollset_set_create()),
-      min_time_between_resolutions_(grpc_channel_args_find_integer(
-          channel_args_, GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS,
-          {1000 * 30, 0, INT_MAX})),
-      backoff_(
-          BackOff::Options()
-              .set_initial_backoff(GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS *
-                                   1000)
-              .set_multiplier(GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER)
-              .set_jitter(GRPC_DNS_RECONNECT_JITTER)
-              .set_max_backoff(GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000)) {
+      min_time_between_resolutions_(
+          Duration::Milliseconds(grpc_channel_args_find_integer(
+              channel_args_, GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS,
+              {1000 * 30, 0, INT_MAX}))),
+      backoff_(BackOff::Options()
+                   .set_initial_backoff(Duration::Seconds(
+                       GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS))
+                   .set_multiplier(GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER)
+                   .set_jitter(GRPC_DNS_RECONNECT_JITTER)
+                   .set_max_backoff(Duration::Seconds(
+                       GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS))) {
   if (args.pollset_set != nullptr) {
     grpc_pollset_set_add_pollset_set(interested_parties_, args.pollset_set);
   }

@@ -216,16 +217,17 @@ void NativeClientChannelDNSResolver::OnResolvedLocked(
     // in a loop while draining the currently-held WorkSerializer.
     // Also see https://github.com/grpc/grpc/issues/26079.
     ExecCtx::Get()->InvalidateNow();
-    grpc_millis next_try = backoff_.NextAttemptTime();
-    grpc_millis timeout = next_try - ExecCtx::Get()->Now();
+    Timestamp next_try = backoff_.NextAttemptTime();
+    Duration timeout = next_try - ExecCtx::Get()->Now();
     GPR_ASSERT(!have_next_resolution_timer_);
     have_next_resolution_timer_ = true;
     // TODO(roth): We currently deal with this ref manually.  Once the
     // new closure API is done, find a way to track this ref with the timer
     // callback as part of the type system.
     Ref(DEBUG_LOCATION, "next_resolution_timer").release();
-    if (timeout > 0) {
-      gpr_log(GPR_DEBUG, "retrying in %" PRId64 " milliseconds", timeout);
+    if (timeout > Duration::Zero()) {
+      gpr_log(GPR_DEBUG, "retrying in %" PRId64 " milliseconds",
+              timeout.millis());
     } else {
       gpr_log(GPR_DEBUG, "retrying immediately");
     }

@@ -241,22 +243,23 @@ void NativeClientChannelDNSResolver::MaybeStartResolvingLocked() {
   // If there is an existing timer, the time it fires is the earliest time we
   // can start the next resolution.
   if (have_next_resolution_timer_) return;
-  if (last_resolution_timestamp_ >= 0) {
+  if (last_resolution_timestamp_.has_value()) {
     // InvalidateNow to avoid getting stuck re-initializing this timer
     // in a loop while draining the currently-held WorkSerializer.
     // Also see https://github.com/grpc/grpc/issues/26079.
     ExecCtx::Get()->InvalidateNow();
-    const grpc_millis earliest_next_resolution =
-        last_resolution_timestamp_ + min_time_between_resolutions_;
-    const grpc_millis ms_until_next_resolution =
-        earliest_next_resolution - ExecCtx::Get()->Now();
-    if (ms_until_next_resolution > 0) {
-      const grpc_millis last_resolution_ago =
-          ExecCtx::Get()->Now() - last_resolution_timestamp_;
+    const Timestamp earliest_next_resolution =
+        *last_resolution_timestamp_ + min_time_between_resolutions_;
+    const Duration time_until_next_resolution =
+        earliest_next_resolution - ExecCtx::Get()->Now();
+    if (time_until_next_resolution > Duration::Zero()) {
+      const Duration last_resolution_ago =
+          ExecCtx::Get()->Now() - *last_resolution_timestamp_;
       gpr_log(GPR_DEBUG,
              "In cooldown from last resolution (from %" PRId64
               " ms ago). Will resolve again in %" PRId64 " ms",
-              last_resolution_ago, ms_until_next_resolution);
+              last_resolution_ago.millis(),
+              time_until_next_resolution.millis());
       have_next_resolution_timer_ = true;
       // TODO(roth): We currently deal with this ref manually.  Once the
       // new closure API is done, find a way to track this ref with the timer

@@ -266,7 +269,7 @@ void NativeClientChannelDNSResolver::MaybeStartResolvingLocked() {
                         NativeClientChannelDNSResolver::OnNextResolution, this,
                         grpc_schedule_on_exec_ctx);
       grpc_timer_init(&next_resolution_timer_,
-                      ExecCtx::Get()->Now() + ms_until_next_resolution,
+                      ExecCtx::Get()->Now() + time_until_next_resolution,
                       &on_next_resolution_);
       return;
     }

src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc

@@ -131,9 +131,9 @@ GoogleCloud2ProdResolver::MetadataQuery::MetadataQuery(
       const_cast<char*>(GRPC_ARG_RESOURCE_QUOTA),
       resolver_->resource_quota_.get(), grpc_resource_quota_arg_vtable());
   grpc_channel_args args = {1, &resource_quota_arg};
-  http_request_ =
-      HttpRequest::Get(std::move(*uri), &args, pollent, &request,
-                       ExecCtx::Get()->Now() + 10000,  // 10s timeout
-                       &on_done_, &response_,
-                       RefCountedPtr<grpc_channel_credentials>(
-                           grpc_insecure_credentials_create()));
+  http_request_ = HttpRequest::Get(
+      std::move(*uri), &args, pollent, &request,
+      ExecCtx::Get()->Now() + Duration::Seconds(10),  // 10s timeout
+      &on_done_, &response_,
+      RefCountedPtr<grpc_channel_credentials>(
+          grpc_insecure_credentials_create()));

src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc

@@ -472,17 +472,18 @@ grpc_error_handle XdsResolver::XdsConfigSelector::CreateMethodConfig(
   if (route_action.retry_policy.has_value() &&
       !route_action.retry_policy->retry_on.Empty()) {
     std::vector<std::string> retry_parts;
+    const auto base_interval =
+        route_action.retry_policy->retry_back_off.base_interval.as_timespec();
+    const auto max_interval =
+        route_action.retry_policy->retry_back_off.max_interval.as_timespec();
     retry_parts.push_back(absl::StrFormat(
         "\"retryPolicy\": {\n"
         "  \"maxAttempts\": %d,\n"
         "  \"initialBackoff\": \"%d.%09ds\",\n"
         "  \"maxBackoff\": \"%d.%09ds\",\n"
         "  \"backoffMultiplier\": 2,\n",
-        route_action.retry_policy->num_retries + 1,
-        route_action.retry_policy->retry_back_off.base_interval.seconds,
-        route_action.retry_policy->retry_back_off.base_interval.nanos,
-        route_action.retry_policy->retry_back_off.max_interval.seconds,
-        route_action.retry_policy->retry_back_off.max_interval.nanos));
+        route_action.retry_policy->num_retries + 1, base_interval.tv_sec,
+        base_interval.tv_nsec, max_interval.tv_sec, max_interval.tv_nsec));
     std::vector<std::string> code_parts;
     if (route_action.retry_policy->retry_on.Contains(GRPC_STATUS_CANCELLED)) {
       code_parts.push_back("    \"CANCELLED\"");

@@ -509,12 +510,10 @@ grpc_error_handle XdsResolver::XdsConfigSelector::CreateMethodConfig(
   }
   // Set timeout.
   if (route_action.max_stream_duration.has_value() &&
-      (route_action.max_stream_duration->seconds != 0 ||
-       route_action.max_stream_duration->nanos != 0)) {
-    fields.emplace_back(
-        absl::StrFormat("    \"timeout\": \"%d.%09ds\"",
-                        route_action.max_stream_duration->seconds,
-                        route_action.max_stream_duration->nanos));
+      (route_action.max_stream_duration != Duration::Zero())) {
+    gpr_timespec ts = route_action.max_stream_duration->as_timespec();
+    fields.emplace_back(absl::StrFormat("    \"timeout\": \"%d.%09ds\"",
+                                        ts.tv_sec, ts.tv_nsec));
   }
   // Handle xDS HTTP filters.
   XdsRouting::GeneratePerHttpFilterConfigsResult result =

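The xds_resolver hunk converts each typed duration to a `gpr_timespec` once and feeds `tv_sec`/`tv_nsec` into the `%d.%09ds` proto-JSON duration format. A sketch of the same split-and-format using `std::chrono` (the helper name is illustrative):

```cpp
#include <chrono>
#include <cstdio>

// Render a duration in proto-JSON style ("<seconds>.<9-digit nanos>s"),
// the format the generated service config uses for timeout/backoff fields.
void PrintProtoDuration(std::chrono::nanoseconds d) {
  const auto secs = std::chrono::duration_cast<std::chrono::seconds>(d);
  const auto nanos = d - secs;  // Remainder below one second.
  std::printf("\"timeout\": \"%lld.%09llds\"\n",
              static_cast<long long>(secs.count()),
              static_cast<long long>(nanos.count()));
}

int main() {
  // Prints "timeout": "3.500000000s". The %09 zero-padding is what keeps
  // 5 nanoseconds distinct from 500 milliseconds in the output.
  PrintProtoDuration(std::chrono::seconds(3) + std::chrono::milliseconds(500));
  return 0;
}
```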
src/core/ext/filters/client_channel/resolver_result_parsing.cc

@@ -170,7 +170,7 @@ ClientChannelServiceConfigParser::ParsePerMethodParams(
     }
   }
   // Parse timeout.
-  grpc_millis timeout = 0;
+  Duration timeout;
   ParseJsonObjectFieldAsDuration(json.object_value(), "timeout", &timeout,
                                  &error_list, false);
   // Return result.

src/core/ext/filters/client_channel/resolver_result_parsing.h

@@ -27,7 +27,7 @@
 #include "src/core/lib/config/core_configuration.h"
 #include "src/core/lib/gprpp/ref_counted.h"
 #include "src/core/lib/gprpp/ref_counted_ptr.h"
-#include "src/core/lib/iomgr/exec_ctx.h"  // for grpc_millis
+#include "src/core/lib/iomgr/exec_ctx.h"  // for grpc_core::Timestamp
 #include "src/core/lib/json/json.h"
 #include "src/core/lib/resolver/resolver.h"
 #include "src/core/lib/service_config/service_config_parser.h"

@@ -67,16 +67,16 @@ class ClientChannelGlobalParsedConfig
 class ClientChannelMethodParsedConfig
     : public ServiceConfigParser::ParsedConfig {
  public:
-  ClientChannelMethodParsedConfig(grpc_millis timeout,
+  ClientChannelMethodParsedConfig(Duration timeout,
                                   const absl::optional<bool>& wait_for_ready)
       : timeout_(timeout), wait_for_ready_(wait_for_ready) {}

-  grpc_millis timeout() const { return timeout_; }
+  Duration timeout() const { return timeout_; }

   absl::optional<bool> wait_for_ready() const { return wait_for_ready_; }

  private:
-  grpc_millis timeout_ = 0;
+  Duration timeout_;
   absl::optional<bool> wait_for_ready_;
 };

src/core/ext/filters/client_channel/retry_filter.cc

@@ -401,7 +401,7 @@ class RetryFilter::CallData {
     // Returns true if the call should be retried.
     bool ShouldRetry(absl::optional<grpc_status_code> status,
-                     absl::optional<grpc_millis> server_pushback_ms);
+                     absl::optional<Duration> server_pushback_ms);

     // Abandons the call attempt.  Unrefs any deferred batches.
     void Abandon();

@@ -511,8 +511,8 @@ class RetryFilter::CallData {
   void RetryCommit(CallAttempt* call_attempt);
   // Starts a timer to retry after appropriate back-off.
-  // If server_pushback_ms is nullopt, retry_backoff_ is used.
-  void StartRetryTimer(absl::optional<grpc_millis> server_pushback_ms);
+  // If server_pushback is nullopt, retry_backoff_ is used.
+  void StartRetryTimer(absl::optional<Duration> server_pushback);

   static void OnRetryTimer(void* arg, grpc_error_handle error);
   static void OnRetryTimerLocked(void* arg, grpc_error_handle error);

@@ -534,7 +534,7 @@ class RetryFilter::CallData {
   BackOff retry_backoff_;

   grpc_slice path_;  // Request path.
-  grpc_millis deadline_;
+  Timestamp deadline_;
   Arena* arena_;
   grpc_call_stack* owning_call_;
   CallCombiner* call_combiner_;

@@ -689,7 +689,7 @@ RetryFilter::CallData::CallAttempt::CallAttempt(CallData* calld,
   // If per_attempt_recv_timeout is set, start a timer.
   if (calld->retry_policy_ != nullptr &&
       calld->retry_policy_->per_attempt_recv_timeout().has_value()) {
-    grpc_millis per_attempt_recv_deadline =
+    Timestamp per_attempt_recv_deadline =
         ExecCtx::Get()->Now() +
         *calld->retry_policy_->per_attempt_recv_timeout();
     if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {

@@ -697,7 +697,7 @@ RetryFilter::CallData::CallAttempt::CallAttempt(CallData* calld,
               "chand=%p calld=%p attempt=%p: per-attempt timeout in %" PRId64
               " ms",
               calld->chand_, calld, this,
-              *calld->retry_policy_->per_attempt_recv_timeout());
+              calld->retry_policy_->per_attempt_recv_timeout()->millis());
     }
     // Schedule retry after computed delay.
     GRPC_CLOSURE_INIT(&on_per_attempt_recv_timer_, OnPerAttemptRecvTimer, this,

@@ -1086,7 +1086,7 @@ void RetryFilter::CallData::CallAttempt::CancelFromSurface(

 bool RetryFilter::CallData::CallAttempt::ShouldRetry(
     absl::optional<grpc_status_code> status,
-    absl::optional<grpc_millis> server_pushback_ms) {
+    absl::optional<Duration> server_pushback) {
   // If no retry policy, don't retry.
   if (calld_->retry_policy_ == nullptr) return false;
   // Check status.

@@ -1149,8 +1149,8 @@ bool RetryFilter::CallData::CallAttempt::ShouldRetry(
     return false;
   }
   // Check server push-back.
-  if (server_pushback_ms.has_value()) {
-    if (*server_pushback_ms < 0) {
+  if (server_pushback.has_value()) {
+    if (*server_pushback < Duration::Zero()) {
       if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
         gpr_log(GPR_INFO,
                 "chand=%p calld=%p attempt=%p: not retrying due to server "

@@ -1164,7 +1164,7 @@ bool RetryFilter::CallData::CallAttempt::ShouldRetry(
             GPR_INFO,
             "chand=%p calld=%p attempt=%p: server push-back: retry in %" PRIu64
             " ms",
-            calld_->chand_, calld_, this, *server_pushback_ms);
+            calld_->chand_, calld_, this, server_pushback->millis());
       }
     }
   }

@@ -1255,7 +1255,7 @@ void RetryFilter::CallData::CallAttempt::OnPerAttemptRecvTimerLocked(
     // Mark current attempt as abandoned.
     call_attempt->Abandon();
     // We are retrying.  Start backoff timer.
-    calld->StartRetryTimer(/*server_pushback_ms=*/absl::nullopt);
+    calld->StartRetryTimer(/*server_pushback=*/absl::nullopt);
   } else {
     // Not retrying, so commit the call.
     calld->RetryCommit(call_attempt);

@@ -1551,12 +1551,12 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvMessageReady(

 namespace {

-// Sets *status, *server_pushback_ms, and *is_lb_drop based on md_batch
+// Sets *status, *server_pushback, and *is_lb_drop based on md_batch
 // and error.
 void GetCallStatus(
-    grpc_millis deadline, grpc_metadata_batch* md_batch,
-    grpc_error_handle error, grpc_status_code* status,
-    absl::optional<grpc_millis>* server_pushback_ms, bool* is_lb_drop,
+    Timestamp deadline, grpc_metadata_batch* md_batch, grpc_error_handle error,
+    grpc_status_code* status, absl::optional<Duration>* server_pushback,
+    bool* is_lb_drop,
     absl::optional<GrpcStreamNetworkState::ValueType>* stream_network_state) {
   if (error != GRPC_ERROR_NONE) {
     grpc_error_get_status(error, deadline, status, nullptr, nullptr, nullptr);

@@ -1568,7 +1568,7 @@ void GetCallStatus(
   } else {
     *status = *md_batch->get(GrpcStatusMetadata());
   }
-  *server_pushback_ms = md_batch->get(GrpcRetryPushbackMsMetadata());
+  *server_pushback = md_batch->get(GrpcRetryPushbackMsMetadata());
   *stream_network_state = md_batch->get(GrpcStreamNetworkState());
   GRPC_ERROR_UNREF(error);
 }

@@ -1700,21 +1700,20 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvTrailingMetadataReady(
   call_attempt->MaybeCancelPerAttemptRecvTimer();
   // Get the call's status and check for server pushback metadata.
   grpc_status_code status = GRPC_STATUS_OK;
-  absl::optional<grpc_millis> server_pushback_ms;
+  absl::optional<Duration> server_pushback;
   bool is_lb_drop = false;
   absl::optional<GrpcStreamNetworkState::ValueType> stream_network_state;
   grpc_metadata_batch* md_batch =
       batch_data->batch_.payload->recv_trailing_metadata.recv_trailing_metadata;
   GetCallStatus(calld->deadline_, md_batch, GRPC_ERROR_REF(error), &status,
-                &server_pushback_ms, &is_lb_drop, &stream_network_state);
+                &server_pushback, &is_lb_drop, &stream_network_state);
   if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
     gpr_log(GPR_INFO,
             "chand=%p calld=%p attempt=%p: call finished, status=%s "
-            "server_pushback_ms=%s is_lb_drop=%d stream_network_state=%s",
+            "server_pushback=%s is_lb_drop=%d stream_network_state=%s",
             calld->chand_, calld, call_attempt,
             grpc_status_code_to_string(status),
-            server_pushback_ms.has_value()
-                ? absl::StrCat(*server_pushback_ms).c_str()
-                : "N/A",
+            server_pushback.has_value() ? server_pushback->ToString().c_str()
+                                        : "N/A",
             is_lb_drop,
             stream_network_state.has_value()

@@ -1739,7 +1738,7 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvTrailingMetadataReady(
   }
   // If not transparently retrying, check for configurable retry.
   if (retry == kNoRetry &&
-      call_attempt->ShouldRetry(status, server_pushback_ms)) {
+      call_attempt->ShouldRetry(status, server_pushback)) {
     retry = kConfigurableRetry;
   }
   // If we're retrying, do so.

@@ -1759,7 +1758,7 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvTrailingMetadataReady(
     if (retry == kTransparentRetry) {
       calld->AddClosureToStartTransparentRetry(&closures);
     } else {
-      calld->StartRetryTimer(server_pushback_ms);
+      calld->StartRetryTimer(server_pushback);
     }
     // Record that this attempt has been abandoned.
     call_attempt->Abandon();

@@ -2115,14 +2114,15 @@ RetryFilter::CallData::CallData(RetryFilter* chand,
       retry_backoff_(
           BackOff::Options()
              .set_initial_backoff(retry_policy_ == nullptr
-                                       ? 0
+                                       ? Duration::Zero()
                                        : retry_policy_->initial_backoff())
               .set_multiplier(retry_policy_ == nullptr
                                   ? 0
                                   : retry_policy_->backoff_multiplier())
               .set_jitter(RETRY_BACKOFF_JITTER)
-              .set_max_backoff(
-                  retry_policy_ == nullptr ? 0 : retry_policy_->max_backoff())),
+              .set_max_backoff(retry_policy_ == nullptr
+                                   ? Duration::Zero()
+                                   : retry_policy_->max_backoff())),
       path_(grpc_slice_ref_internal(args.path)),
       deadline_(args.deadline),
       arena_(args.arena),

@@ -2552,14 +2552,14 @@ void RetryFilter::CallData::RetryCommit(CallAttempt* call_attempt) {
 }

 void RetryFilter::CallData::StartRetryTimer(
-    absl::optional<grpc_millis> server_pushback_ms) {
+    absl::optional<Duration> server_pushback) {
   // Reset call attempt.
   call_attempt_.reset(DEBUG_LOCATION, "StartRetryTimer");
   // Compute backoff delay.
-  grpc_millis next_attempt_time;
-  if (server_pushback_ms.has_value()) {
-    GPR_ASSERT(*server_pushback_ms >= 0);
-    next_attempt_time = ExecCtx::Get()->Now() + *server_pushback_ms;
+  Timestamp next_attempt_time;
+  if (server_pushback.has_value()) {
+    GPR_ASSERT(*server_pushback >= Duration::Zero());
+    next_attempt_time = ExecCtx::Get()->Now() + *server_pushback;
     retry_backoff_.Reset();
   } else {
     next_attempt_time = retry_backoff_.NextAttemptTime();

@@ -2567,7 +2567,7 @@ void RetryFilter::CallData::StartRetryTimer(
   if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
     gpr_log(GPR_INFO,
             "chand=%p calld=%p: retrying failed call in %" PRId64 " ms", chand_,
-            this, next_attempt_time - ExecCtx::Get()->Now());
+            this, (next_attempt_time - ExecCtx::Get()->Now()).millis());
} }
// Schedule retry after computed delay. // Schedule retry after computed delay.
GRPC_CLOSURE_INIT(&retry_closure_, OnRetryTimer, this, nullptr); GRPC_CLOSURE_INIT(&retry_closure_, OnRetryTimer, this, nullptr);

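The retry_filter.cc hunks above swap raw millisecond arithmetic for typed values: a Duration for the optional server pushback and a Timestamp for the next attempt time, with pushback overriding (and resetting) the local exponential backoff. A minimal sketch of that decision, using std::chrono types as stand-ins for grpc_core::Duration/Timestamp (the helper name is hypothetical, not gRPC's API):

// Sketch only: std::chrono stands in for grpc_core::Duration/Timestamp,
// and ComputeNextAttemptTime is a hypothetical helper, not gRPC's API.
#include <chrono>
#include <optional>

using Clock = std::chrono::steady_clock;
using Millis = std::chrono::milliseconds;

Clock::time_point ComputeNextAttemptTime(
    std::optional<Millis> server_pushback, Millis local_backoff) {
  if (server_pushback.has_value()) {
    // As in StartRetryTimer above: pushback wins over local backoff
    // (the real code also asserts it is non-negative and resets BackOff).
    return Clock::now() + *server_pushback;
  }
  return Clock::now() + local_backoff;
}
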
@@ -158,9 +158,9 @@ namespace {
 grpc_error_handle ParseRetryPolicy(
     const grpc_channel_args* args, const Json& json, int* max_attempts,
-    grpc_millis* initial_backoff, grpc_millis* max_backoff,
-    float* backoff_multiplier, StatusCodeSet* retryable_status_codes,
-    absl::optional<grpc_millis>* per_attempt_recv_timeout) {
+    Duration* initial_backoff, Duration* max_backoff, float* backoff_multiplier,
+    StatusCodeSet* retryable_status_codes,
+    absl::optional<Duration>* per_attempt_recv_timeout) {
   if (json.type() != Json::Type::OBJECT) {
     return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
         "field:retryPolicy error:should be of type object");
@@ -192,14 +192,14 @@ grpc_error_handle ParseRetryPolicy(
   // Parse initialBackoff.
   if (ParseJsonObjectFieldAsDuration(json.object_value(), "initialBackoff",
                                      initial_backoff, &error_list) &&
-      *initial_backoff == 0) {
+      *initial_backoff == Duration::Zero()) {
     error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
         "field:initialBackoff error:must be greater than 0"));
   }
   // Parse maxBackoff.
   if (ParseJsonObjectFieldAsDuration(json.object_value(), "maxBackoff",
                                      max_backoff, &error_list) &&
-      *max_backoff == 0) {
+      *max_backoff == Duration::Zero()) {
     error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
         "field:maxBackoff error:must be greater than 0"));
   }
@@ -253,7 +253,7 @@ grpc_error_handle ParseRetryPolicy(
                          false)) {
     it = json.object_value().find("perAttemptRecvTimeout");
     if (it != json.object_value().end()) {
-      grpc_millis per_attempt_recv_timeout_value;
+      Duration per_attempt_recv_timeout_value;
       if (!ParseDurationFromJson(it->second, &per_attempt_recv_timeout_value)) {
         error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
             "field:perAttemptRecvTimeout error:type must be STRING of the "
@@ -262,7 +262,7 @@ grpc_error_handle ParseRetryPolicy(
       *per_attempt_recv_timeout = per_attempt_recv_timeout_value;
       // TODO(roth): As part of implementing hedging, relax this check such
       // that we allow a value of 0 if a hedging policy is specified.
-      if (per_attempt_recv_timeout_value == 0) {
+      if (per_attempt_recv_timeout_value == Duration::Zero()) {
         error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
             "field:perAttemptRecvTimeout error:must be greater than 0"));
       }
@@ -296,11 +296,11 @@ RetryServiceConfigParser::ParsePerMethodParams(const grpc_channel_args* args,
   auto it = json.object_value().find("retryPolicy");
   if (it == json.object_value().end()) return nullptr;
   int max_attempts = 0;
-  grpc_millis initial_backoff = 0;
-  grpc_millis max_backoff = 0;
+  Duration initial_backoff;
+  Duration max_backoff;
   float backoff_multiplier = 0;
   StatusCodeSet retryable_status_codes;
-  absl::optional<grpc_millis> per_attempt_recv_timeout;
+  absl::optional<Duration> per_attempt_recv_timeout;
   *error = ParseRetryPolicy(args, it->second, &max_attempts, &initial_backoff,
                             &max_backoff, &backoff_multiplier,
                             &retryable_status_codes, &per_attempt_recv_timeout);

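ParseRetryPolicy keeps its validation semantics; only the comparison target changes from the integer 0 to Duration::Zero(). A sketch of the same parse-then-reject-zero shape, under the assumption of a toy "<seconds>s" duration format (ParseDuration and ValidateBackoff below are hypothetical stand-ins for ParseJsonObjectFieldAsDuration and the checks above):

// Hypothetical stand-ins: ParseDuration accepts a toy "<seconds>s" format;
// ValidateBackoff mirrors the parse-succeeded-but-zero rejection above.
#include <chrono>
#include <optional>
#include <string>
#include <vector>

using Millis = std::chrono::milliseconds;

std::optional<Millis> ParseDuration(const std::string& text) {
  if (text.size() < 2 || text.back() != 's') return std::nullopt;
  return Millis(std::stoll(text.substr(0, text.size() - 1)) * 1000);
}

void ValidateBackoff(const std::string& text, const char* field,
                     std::vector<std::string>* errors) {
  auto parsed = ParseDuration(text);
  if (parsed.has_value() && *parsed == Millis::zero()) {
    errors->push_back(std::string("field:") + field +
                      " error:must be greater than 0");
  }
}
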
@@ -24,7 +24,7 @@
 #include "src/core/ext/filters/client_channel/retry_throttle.h"
 #include "src/core/lib/channel/status_util.h"
 #include "src/core/lib/config/core_configuration.h"
-#include "src/core/lib/iomgr/exec_ctx.h"  // for grpc_millis
+#include "src/core/lib/gprpp/time.h"
 #include "src/core/lib/service_config/service_config_parser.h"
 namespace grpc_core {
@@ -46,10 +46,10 @@ class RetryGlobalConfig : public ServiceConfigParser::ParsedConfig {
 class RetryMethodConfig : public ServiceConfigParser::ParsedConfig {
  public:
-  RetryMethodConfig(int max_attempts, grpc_millis initial_backoff,
-                    grpc_millis max_backoff, float backoff_multiplier,
+  RetryMethodConfig(int max_attempts, Duration initial_backoff,
+                    Duration max_backoff, float backoff_multiplier,
                     StatusCodeSet retryable_status_codes,
-                    absl::optional<grpc_millis> per_attempt_recv_timeout)
+                    absl::optional<Duration> per_attempt_recv_timeout)
       : max_attempts_(max_attempts),
         initial_backoff_(initial_backoff),
         max_backoff_(max_backoff),
@@ -58,23 +58,23 @@ class RetryMethodConfig : public ServiceConfigParser::ParsedConfig {
         per_attempt_recv_timeout_(per_attempt_recv_timeout) {}
   int max_attempts() const { return max_attempts_; }
-  grpc_millis initial_backoff() const { return initial_backoff_; }
-  grpc_millis max_backoff() const { return max_backoff_; }
+  Duration initial_backoff() const { return initial_backoff_; }
+  Duration max_backoff() const { return max_backoff_; }
   float backoff_multiplier() const { return backoff_multiplier_; }
   StatusCodeSet retryable_status_codes() const {
     return retryable_status_codes_;
   }
-  absl::optional<grpc_millis> per_attempt_recv_timeout() const {
+  absl::optional<Duration> per_attempt_recv_timeout() const {
     return per_attempt_recv_timeout_;
   }
  private:
   int max_attempts_ = 0;
-  grpc_millis initial_backoff_ = 0;
-  grpc_millis max_backoff_ = 0;
+  Duration initial_backoff_;
+  Duration max_backoff_;
   float backoff_multiplier_ = 0;
   StatusCodeSet retryable_status_codes_;
-  absl::optional<grpc_millis> per_attempt_recv_timeout_;
+  absl::optional<Duration> per_attempt_recv_timeout_;
 };
 class RetryServiceConfigParser : public ServiceConfigParser::Parser {

@@ -247,7 +247,7 @@ void SubchannelCall::MaybeInterceptRecvTrailingMetadata(
 namespace {
 // Sets *status based on the rest of the parameters.
-void GetCallStatus(grpc_status_code* status, grpc_millis deadline,
+void GetCallStatus(grpc_status_code* status, Timestamp deadline,
                    grpc_metadata_batch* md_batch, grpc_error_handle error) {
   if (error != GRPC_ERROR_NONE) {
     grpc_error_get_status(error, deadline, status, nullptr, nullptr, nullptr);
@@ -568,52 +568,55 @@ void Subchannel::HealthWatcherMap::ShutdownLocked() { map_.clear(); }
 namespace {
-BackOff::Options ParseArgsForBackoffValues(
-    const grpc_channel_args* args, grpc_millis* min_connect_timeout_ms) {
-  grpc_millis initial_backoff_ms =
-      GRPC_SUBCHANNEL_INITIAL_CONNECT_BACKOFF_SECONDS * 1000;
-  *min_connect_timeout_ms =
-      GRPC_SUBCHANNEL_RECONNECT_MIN_TIMEOUT_SECONDS * 1000;
-  grpc_millis max_backoff_ms =
-      GRPC_SUBCHANNEL_RECONNECT_MAX_BACKOFF_SECONDS * 1000;
+BackOff::Options ParseArgsForBackoffValues(const grpc_channel_args* args,
+                                           Duration* min_connect_timeout) {
+  Duration initial_backoff =
+      Duration::Seconds(GRPC_SUBCHANNEL_INITIAL_CONNECT_BACKOFF_SECONDS);
+  *min_connect_timeout =
+      Duration::Seconds(GRPC_SUBCHANNEL_RECONNECT_MIN_TIMEOUT_SECONDS);
+  Duration max_backoff =
+      Duration::Seconds(GRPC_SUBCHANNEL_RECONNECT_MAX_BACKOFF_SECONDS);
   bool fixed_reconnect_backoff = false;
   if (args != nullptr) {
     for (size_t i = 0; i < args->num_args; i++) {
       if (0 == strcmp(args->args[i].key,
                       "grpc.testing.fixed_reconnect_backoff_ms")) {
         fixed_reconnect_backoff = true;
-        initial_backoff_ms = *min_connect_timeout_ms = max_backoff_ms =
-            grpc_channel_arg_get_integer(
-                &args->args[i],
-                {static_cast<int>(initial_backoff_ms), 100, INT_MAX});
+        initial_backoff = *min_connect_timeout = max_backoff =
+            Duration::Milliseconds(grpc_channel_arg_get_integer(
                &args->args[i],
+                {static_cast<int>(initial_backoff.millis()), 100, INT_MAX}));
       } else if (0 ==
                  strcmp(args->args[i].key, GRPC_ARG_MIN_RECONNECT_BACKOFF_MS)) {
         fixed_reconnect_backoff = false;
-        *min_connect_timeout_ms = grpc_channel_arg_get_integer(
-            &args->args[i],
-            {static_cast<int>(*min_connect_timeout_ms), 100, INT_MAX});
+        *min_connect_timeout =
+            Duration::Milliseconds(grpc_channel_arg_get_integer(
                &args->args[i],
+                {static_cast<int>(min_connect_timeout->millis()), 100,
+                 INT_MAX}));
       } else if (0 ==
                  strcmp(args->args[i].key, GRPC_ARG_MAX_RECONNECT_BACKOFF_MS)) {
         fixed_reconnect_backoff = false;
-        max_backoff_ms = grpc_channel_arg_get_integer(
-            &args->args[i], {static_cast<int>(max_backoff_ms), 100, INT_MAX});
+        max_backoff = Duration::Milliseconds(grpc_channel_arg_get_integer(
+            &args->args[i],
+            {static_cast<int>(max_backoff.millis()), 100, INT_MAX}));
       } else if (0 == strcmp(args->args[i].key,
                              GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS)) {
         fixed_reconnect_backoff = false;
-        initial_backoff_ms = grpc_channel_arg_get_integer(
-            &args->args[i],
-            {static_cast<int>(initial_backoff_ms), 100, INT_MAX});
+        initial_backoff = Duration::Milliseconds(grpc_channel_arg_get_integer(
            &args->args[i],
+            {static_cast<int>(initial_backoff.millis()), 100, INT_MAX}));
       }
     }
   }
   return BackOff::Options()
-      .set_initial_backoff(initial_backoff_ms)
+      .set_initial_backoff(initial_backoff)
       .set_multiplier(fixed_reconnect_backoff
                           ? 1.0
                           : GRPC_SUBCHANNEL_RECONNECT_BACKOFF_MULTIPLIER)
       .set_jitter(fixed_reconnect_backoff ? 0.0
                                           : GRPC_SUBCHANNEL_RECONNECT_JITTER)
-      .set_max_backoff(max_backoff_ms);
+      .set_max_backoff(max_backoff);
 }
 }  // namespace
@@ -642,7 +645,7 @@ Subchannel::Subchannel(SubchannelKey key,
       key_(std::move(key)),
       pollset_set_(grpc_pollset_set_create()),
       connector_(std::move(connector)),
-      backoff_(ParseArgsForBackoffValues(args, &min_connect_timeout_ms_)) {
+      backoff_(ParseArgsForBackoffValues(args, &min_connect_timeout_)) {
   GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED();
   GRPC_CLOSURE_INIT(&on_connecting_finished_, OnConnectingFinished, this,
                     grpc_schedule_on_exec_ctx);
@@ -878,14 +881,14 @@ void Subchannel::MaybeStartConnectingLocked() {
   } else {
     GPR_ASSERT(!have_retry_alarm_);
     have_retry_alarm_ = true;
-    const grpc_millis time_til_next =
+    const Duration time_til_next =
         next_attempt_deadline_ - ExecCtx::Get()->Now();
-    if (time_til_next <= 0) {
+    if (time_til_next <= Duration::Zero()) {
       gpr_log(GPR_INFO, "subchannel %p %s: Retry immediately", this,
               key_.ToString().c_str());
     } else {
       gpr_log(GPR_INFO, "subchannel %p %s: Retry in %" PRId64 " milliseconds",
-              this, key_.ToString().c_str(), time_til_next);
+              this, key_.ToString().c_str(), time_til_next.millis());
     }
     GRPC_CLOSURE_INIT(&on_retry_alarm_, OnRetryAlarm, this,
                       grpc_schedule_on_exec_ctx);
@@ -922,8 +925,7 @@ void Subchannel::ContinueConnectingLocked() {
   SubchannelConnector::Args args;
   args.address = &address_for_connect_;
   args.interested_parties = pollset_set_;
-  const grpc_millis min_deadline =
-      min_connect_timeout_ms_ + ExecCtx::Get()->Now();
+  const Timestamp min_deadline = min_connect_timeout_ + ExecCtx::Get()->Now();
   next_attempt_deadline_ = backoff_.NextAttemptTime();
   args.deadline = std::max(next_attempt_deadline_, min_deadline);
   args.channel_args = args_;

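ParseArgsForBackoffValues still clamps each backoff channel arg to [100 ms, INT_MAX] as an integer and only then wraps it in Duration::Milliseconds, so the clamping semantics are unchanged. A sketch of that clamp-then-convert order, with std::chrono standing in for grpc_core::Duration:

// Sketch of the clamp-then-convert order used above; std::chrono stands
// in for grpc_core::Duration, and the 100 ms floor matches the diff.
#include <algorithm>
#include <chrono>
#include <climits>

using Millis = std::chrono::milliseconds;

Millis ClampedMillisArg(int value_ms, int min_ms = 100, int max_ms = INT_MAX) {
  // Clamp as an integer first, exactly like grpc_channel_arg_get_integer
  // with {default, 100, INT_MAX}, then wrap in a typed duration.
  return Millis(std::min(std::max(value_ms, min_ms), max_ms));
}
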
@@ -76,7 +76,7 @@ class SubchannelCall {
     grpc_polling_entity* pollent;
     Slice path;
     gpr_cycle_counter start_time;
-    grpc_millis deadline;
+    Timestamp deadline;
     Arena* arena;
     grpc_call_context_element* context;
     CallCombiner* call_combiner;
@@ -129,7 +129,7 @@ class SubchannelCall {
   grpc_closure recv_trailing_metadata_ready_;
   grpc_closure* original_recv_trailing_metadata_ = nullptr;
   grpc_metadata_batch* recv_trailing_metadata_ = nullptr;
-  grpc_millis deadline_;
+  Timestamp deadline_;
 };
 // A subchannel that knows how to connect to exactly one target address. It
@@ -360,10 +360,11 @@ class Subchannel : public DualRefCounted<Subchannel> {
   // The map of watchers with health check service names.
   HealthWatcherMap health_watcher_map_ ABSL_GUARDED_BY(mu_);
+  // Minimum connect timeout - must be located before backoff_.
+  Duration min_connect_timeout_ ABSL_GUARDED_BY(mu_);
   // Backoff state.
   BackOff backoff_ ABSL_GUARDED_BY(mu_);
-  grpc_millis next_attempt_deadline_ ABSL_GUARDED_BY(mu_);
-  grpc_millis min_connect_timeout_ms_ ABSL_GUARDED_BY(mu_);
+  Timestamp next_attempt_deadline_ ABSL_GUARDED_BY(mu_);
   bool backoff_begun_ ABSL_GUARDED_BY(mu_) = false;
   // Retry alarm.

@@ -54,12 +54,14 @@ TraceFlag grpc_trace_client_idle_filter(false, "client_idle_filter");
 namespace {
-grpc_millis GetClientIdleTimeout(const grpc_channel_args* args) {
-  return std::max(
+Duration GetClientIdleTimeout(const grpc_channel_args* args) {
+  auto millis = std::max(
       grpc_channel_arg_get_integer(
           grpc_channel_args_find(args, GRPC_ARG_CLIENT_IDLE_TIMEOUT_MS),
           {DEFAULT_IDLE_TIMEOUT_MS, 0, INT_MAX}),
       MIN_IDLE_TIMEOUT_MS);
+  if (millis == INT_MAX) return Duration::Infinity();
+  return Duration::Milliseconds(millis);
 }
 class ClientIdleFilter : public ChannelFilter {
@@ -82,7 +84,7 @@ class ClientIdleFilter : public ChannelFilter {
  private:
   ClientIdleFilter(grpc_channel_stack* channel_stack,
-                   grpc_millis client_idle_timeout)
+                   Duration client_idle_timeout)
       : channel_stack_(channel_stack),
         client_idle_timeout_(client_idle_timeout) {}
@@ -99,7 +101,7 @@ class ClientIdleFilter : public ChannelFilter {
   // The channel stack to which we take refs for pending callbacks.
   grpc_channel_stack* channel_stack_;
-  grpc_millis client_idle_timeout_;
+  Duration client_idle_timeout_;
   std::shared_ptr<IdleFilterState> idle_filter_state_{
       std::make_shared<IdleFilterState>(false)};
@@ -190,7 +192,7 @@ void RegisterClientIdleFilter(CoreConfiguration::Builder* builder) {
       [](ChannelStackBuilder* builder) {
         const grpc_channel_args* channel_args = builder->channel_args();
         if (!grpc_channel_args_want_minimal_stack(channel_args) &&
-            GetClientIdleTimeout(channel_args) != INT_MAX) {
+            GetClientIdleTimeout(channel_args) != Duration::Infinity()) {
           builder->PrependFilter(&grpc_client_idle_filter, nullptr);
         }
         return true;

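GetClientIdleTimeout introduces a convention this change uses throughout: an INT_MAX millisecond arg means "disabled" and maps to Duration::Infinity(); everything else becomes a finite Duration. std::chrono has no infinity, so the sketch below uses nullopt as a hypothetical stand-in for that sentinel:

// nullopt is a hypothetical stand-in for Duration::Infinity(): callers
// skip installing the idle filter when the timeout is unbounded.
#include <chrono>
#include <climits>
#include <optional>

using Millis = std::chrono::milliseconds;

std::optional<Millis> ClientIdleTimeout(int arg_ms) {
  if (arg_ms == INT_MAX) return std::nullopt;  // "disabled" sentinel
  return Millis(arg_ms);
}
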
@@ -38,7 +38,7 @@ namespace grpc_core {
 // Allocated on the call arena.
 class TimerState {
  public:
-  TimerState(grpc_call_element* elem, grpc_millis deadline) : elem_(elem) {
+  TimerState(grpc_call_element* elem, Timestamp deadline) : elem_(elem) {
     grpc_deadline_state* deadline_state =
         static_cast<grpc_deadline_state*>(elem_->call_data);
     GRPC_CALL_STACK_REF(deadline_state->call_stack, "DeadlineTimerState");
@@ -113,8 +113,8 @@ class TimerState {
 // This is called via the call combiner, so access to deadline_state is
 // synchronized.
 static void start_timer_if_needed(grpc_call_element* elem,
-                                  grpc_millis deadline) {
-  if (deadline == GRPC_MILLIS_INF_FUTURE) return;
+                                  grpc_core::Timestamp deadline) {
+  if (deadline == grpc_core::Timestamp::InfFuture()) return;
   grpc_deadline_state* deadline_state =
       static_cast<grpc_deadline_state*>(elem->call_data);
   GPR_ASSERT(deadline_state->timer_state == nullptr);
@@ -157,13 +157,14 @@ static void inject_recv_trailing_metadata_ready(
 // Callback and associated state for starting the timer after call stack
 // initialization has been completed.
 struct start_timer_after_init_state {
-  start_timer_after_init_state(grpc_call_element* elem, grpc_millis deadline)
+  start_timer_after_init_state(grpc_call_element* elem,
+                               grpc_core::Timestamp deadline)
       : elem(elem), deadline(deadline) {}
   ~start_timer_after_init_state() { start_timer_if_needed(elem, deadline); }
   bool in_call_combiner = false;
   grpc_call_element* elem;
-  grpc_millis deadline;
+  grpc_core::Timestamp deadline;
   grpc_closure closure;
 };
 static void start_timer_after_init(void* arg, grpc_error_handle error) {
@@ -187,13 +188,13 @@ static void start_timer_after_init(void* arg, grpc_error_handle error) {
 grpc_deadline_state::grpc_deadline_state(grpc_call_element* elem,
                                          const grpc_call_element_args& args,
-                                         grpc_millis deadline)
+                                         grpc_core::Timestamp deadline)
     : call_stack(args.call_stack),
       call_combiner(args.call_combiner),
       arena(args.arena) {
   // Deadline will always be infinite on servers, so the timer will only be
   // set on clients with a finite deadline.
-  if (deadline != GRPC_MILLIS_INF_FUTURE) {
+  if (deadline != grpc_core::Timestamp::InfFuture()) {
     // When the deadline passes, we indicate the failure by sending down
     // an op with cancel_error set. However, we can't send down any ops
     // until after the call stack is fully initialized. If we start the
@@ -212,7 +213,7 @@ grpc_deadline_state::grpc_deadline_state(grpc_call_element* elem,
 grpc_deadline_state::~grpc_deadline_state() { cancel_timer_if_needed(this); }
 void grpc_deadline_state_reset(grpc_call_element* elem,
-                               grpc_millis new_deadline) {
+                               grpc_core::Timestamp new_deadline) {
   grpc_deadline_state* deadline_state =
       static_cast<grpc_deadline_state*>(elem->call_data);
   cancel_timer_if_needed(deadline_state);
@@ -295,7 +296,7 @@ static void recv_initial_metadata_ready(void* arg, grpc_error_handle error) {
   server_call_data* calld = static_cast<server_call_data*>(elem->call_data);
   start_timer_if_needed(
       elem, calld->recv_initial_metadata->get(grpc_core::GrpcTimeoutMetadata())
-                .value_or(GRPC_MILLIS_INF_FUTURE));
+                .value_or(grpc_core::Timestamp::InfFuture()));
   // Invoke the next callback.
   grpc_core::Closure::Run(DEBUG_LOCATION,
                           calld->next_recv_initial_metadata_ready,

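The deadline-filter hunks lean on Timestamp::InfFuture() as the "no deadline" sentinel: servers always see an infinite deadline, so the timer is only armed for clients with a finite one. A sketch of that guard, with time_point::max() as an assumed stand-in for InfFuture():

// time_point::max() is an assumed stand-in for Timestamp::InfFuture().
#include <chrono>

using Clock = std::chrono::system_clock;

// Mirrors start_timer_if_needed's early return: an infinite deadline
// (always the case server-side) means the timer is never armed.
bool ShouldArmDeadlineTimer(Clock::time_point deadline) {
  return deadline != Clock::time_point::max();
}
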
@@ -30,7 +30,8 @@ class TimerState;
 // Must be the first field in the filter's call_data.
 struct grpc_deadline_state {
   grpc_deadline_state(grpc_call_element* elem,
-                      const grpc_call_element_args& args, grpc_millis deadline);
+                      const grpc_call_element_args& args,
+                      grpc_core::Timestamp deadline);
   ~grpc_deadline_state();
   // We take a reference to the call stack for the timer callback.
@@ -61,7 +62,7 @@ struct grpc_deadline_state {
 //
 // Note: Must be called while holding the call combiner.
 void grpc_deadline_state_reset(grpc_call_element* elem,
-                               grpc_millis new_deadline);
+                               grpc_core::Timestamp new_deadline);
 // To be called from the client-side filter's start_transport_stream_op_batch()
 // method. Ensures that the deadline timer is cancelled when the call

@@ -357,12 +357,13 @@ void CallData::DecideWhetherToInjectFaults(
       }
     }
     if (!fi_policy_->delay_header.empty() &&
-        (copied_policy == nullptr || copied_policy->delay == 0)) {
+        (copied_policy == nullptr ||
+         copied_policy->delay == Duration::Zero())) {
       auto value =
           initial_metadata->GetStringValue(fi_policy_->delay_header, &buffer);
       if (value.has_value()) {
         maybe_copy_policy_func();
-        copied_policy->delay = static_cast<grpc_millis>(
+        copied_policy->delay = Duration::Milliseconds(
             std::max(AsInt<int64_t>(*value).value_or(0), int64_t(0)));
       }
     }
@@ -379,7 +380,7 @@ void CallData::DecideWhetherToInjectFaults(
     if (copied_policy != nullptr) fi_policy_ = copied_policy;
   }
   // Roll the dice
-  delay_request_ = fi_policy_->delay != 0 &&
+  delay_request_ = fi_policy_->delay != Duration::Zero() &&
                    UnderFraction(fi_policy_->delay_percentage_numerator,
                                  fi_policy_->delay_percentage_denominator);
   abort_request_ = fi_policy_->abort_code != GRPC_STATUS_OK &&
@@ -423,7 +424,7 @@ void CallData::DelayBatch(grpc_call_element* elem,
   MutexLock lock(&delay_mu_);
   delayed_batch_ = batch;
   resume_batch_canceller_ = new ResumeBatchCanceller(elem);
-  grpc_millis resume_time = ExecCtx::Get()->Now() + fi_policy_->delay;
+  Timestamp resume_time = ExecCtx::Get()->Now() + fi_policy_->delay;
   GRPC_CLOSURE_INIT(&batch->handler_private.closure, ResumeBatch, elem,
                     grpc_schedule_on_exec_ctx);
   grpc_timer_init(&delay_timer_, resume_time, &batch->handler_private.closure);

@@ -38,7 +38,7 @@ class FaultInjectionMethodParsedConfig
     uint32_t abort_percentage_numerator = 0;
     uint32_t abort_percentage_denominator = 100;
-    grpc_millis delay = 0;
+    Duration delay;
     std::string delay_header;
     std::string delay_percentage_header;
     uint32_t delay_percentage_numerator = 0;

@@ -68,11 +68,11 @@ struct channel_data {
      max_connection_idle */
   grpc_timer max_idle_timer;
   /* Allowed max time a channel may have no outstanding rpcs */
-  grpc_millis max_connection_idle;
+  grpc_core::Duration max_connection_idle;
   /* Allowed max time a channel may exist */
-  grpc_millis max_connection_age;
+  grpc_core::Duration max_connection_age;
   /* Allowed grace period after the channel reaches its max age */
-  grpc_millis max_connection_age_grace;
+  grpc_core::Duration max_connection_age_grace;
   /* Closure to run when the channel's idle duration reaches max_connection_idle
      and should be closed gracefully */
   grpc_closure max_idle_timer_cb;
@@ -142,7 +142,8 @@ struct channel_data {
      For 2, 7 : See decrease_call_count() function
      For 4, 6 : See increase_call_count() function */
   gpr_atm idle_state;
-  /* Time when the channel finished its last outstanding call, in grpc_millis */
+  /* Time when the channel finished its last outstanding call, in
+   * grpc_core::Timestamp */
   gpr_atm last_enter_idle_time_millis;
 };
 } // namespace
@@ -179,7 +180,9 @@ static void decrease_call_count(channel_data* chand) {
   /* Enter idle */
   if (gpr_atm_full_fetch_add(&chand->call_count, -1) == 1) {
     gpr_atm_no_barrier_store(&chand->last_enter_idle_time_millis,
-                             (gpr_atm)grpc_core::ExecCtx::Get()->Now());
+                             (gpr_atm)grpc_core::ExecCtx::Get()
+                                 ->Now()
+                                 .milliseconds_after_process_epoch());
     while (true) {
       gpr_atm idle_state = gpr_atm_acq_load(&chand->idle_state);
       switch (idle_state) {
@@ -286,11 +289,9 @@ static void start_max_age_grace_timer_after_goaway_op(
     grpc_core::MutexLock lock(&chand->max_age_timer_mu);
     chand->max_age_grace_timer_pending = true;
     GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_age_grace_timer");
-    grpc_timer_init(&chand->max_age_grace_timer,
-                    chand->max_connection_age_grace == GRPC_MILLIS_INF_FUTURE
-                        ? GRPC_MILLIS_INF_FUTURE
-                        : grpc_core::ExecCtx::Get()->Now() +
-                              chand->max_connection_age_grace,
+    grpc_timer_init(
+        &chand->max_age_grace_timer,
+        grpc_core::ExecCtx::Get()->Now() + chand->max_connection_age_grace,
         &chand->force_close_max_age_channel);
   }
   GRPC_CHANNEL_STACK_UNREF(chand->channel_stack,
@@ -332,8 +333,10 @@ static void max_idle_timer_cb(void* arg, grpc_error_handle error) {
       case MAX_IDLE_STATE_SEEN_ENTER_IDLE:
         GRPC_CHANNEL_STACK_REF(chand->channel_stack,
                                "max_age max_idle_timer");
-        grpc_timer_init(&chand->max_idle_timer,
-                        static_cast<grpc_millis>(gpr_atm_no_barrier_load(
-                            &chand->last_enter_idle_time_millis)) +
-                            chand->max_connection_idle,
-                        &chand->max_idle_timer_cb);
+        grpc_timer_init(
+            &chand->max_idle_timer,
+            grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(
+                gpr_atm_no_barrier_load(
+                    &chand->last_enter_idle_time_millis)) +
+                chand->max_connection_idle,
+            &chand->max_idle_timer_cb);
@@ -399,8 +402,8 @@ static void force_close_max_age_channel(void* arg, grpc_error_handle error) {
    connection storms. Note that the MAX_CONNECTION_AGE option without jitter
    would not create connection storms by itself, but if there happened to be a
    connection storm it could cause it to repeat at a fixed period. */
-static grpc_millis
-add_random_max_connection_age_jitter_and_convert_to_grpc_millis(int value) {
+static grpc_core::Duration
+add_random_max_connection_age_jitter_and_convert_to_duration(int value) {
   /* generate a random number between 1 - MAX_CONNECTION_AGE_JITTER and
      1 + MAX_CONNECTION_AGE_JITTER */
   double multiplier = rand() * MAX_CONNECTION_AGE_JITTER * 2.0 / RAND_MAX +
@@ -408,9 +411,11 @@ add_random_max_connection_age_jitter_and_convert_to_duration(int value) {
   double result = multiplier * value;
   /* INT_MAX - 0.5 converts the value to float, so that result will not be
      cast to int implicitly before the comparison. */
-  return result > (static_cast<double>(GRPC_MILLIS_INF_FUTURE)) - 0.5
-             ? GRPC_MILLIS_INF_FUTURE
-             : static_cast<grpc_millis>(result);
+  return result > (static_cast<double>(
+                      grpc_core::Duration::Infinity().millis())) -
+                      0.5
+             ? grpc_core::Duration::Infinity()
+             : grpc_core::Duration::Milliseconds(result);
 }
 /* Constructor for call_data. */
@@ -436,15 +441,17 @@ static grpc_error_handle max_age_init_channel_elem(
   new (chand) channel_data();
   chand->channel_stack = args->channel_stack;
   chand->max_connection_age =
-      add_random_max_connection_age_jitter_and_convert_to_grpc_millis(
+      add_random_max_connection_age_jitter_and_convert_to_duration(
           DEFAULT_MAX_CONNECTION_AGE_MS);
   chand->max_connection_age_grace =
       DEFAULT_MAX_CONNECTION_AGE_GRACE_MS == INT_MAX
-          ? GRPC_MILLIS_INF_FUTURE
-          : DEFAULT_MAX_CONNECTION_AGE_GRACE_MS;
-  chand->max_connection_idle = DEFAULT_MAX_CONNECTION_IDLE_MS == INT_MAX
-                                   ? GRPC_MILLIS_INF_FUTURE
-                                   : DEFAULT_MAX_CONNECTION_IDLE_MS;
+          ? grpc_core::Duration::Infinity()
+          : grpc_core::Duration::Milliseconds(
+                DEFAULT_MAX_CONNECTION_AGE_GRACE_MS);
+  chand->max_connection_idle =
+      DEFAULT_MAX_CONNECTION_IDLE_MS == INT_MAX
+          ? grpc_core::Duration::Infinity()
+          : grpc_core::Duration::Milliseconds(DEFAULT_MAX_CONNECTION_IDLE_MS);
   chand->idle_state = MAX_IDLE_STATE_INIT;
   gpr_atm_no_barrier_store(&chand->last_enter_idle_time_millis, GPR_ATM_MIN);
   for (size_t i = 0; i < args->channel_args->num_args; ++i) {
@@ -453,21 +460,22 @@ static grpc_error_handle max_age_init_channel_elem(
       const int value = grpc_channel_arg_get_integer(
           &args->channel_args->args[i], MAX_CONNECTION_AGE_INTEGER_OPTIONS);
       chand->max_connection_age =
-          add_random_max_connection_age_jitter_and_convert_to_grpc_millis(
-              value);
+          add_random_max_connection_age_jitter_and_convert_to_duration(value);
     } else if (0 == strcmp(args->channel_args->args[i].key,
                            GRPC_ARG_MAX_CONNECTION_AGE_GRACE_MS)) {
       const int value = grpc_channel_arg_get_integer(
           &args->channel_args->args[i],
          {DEFAULT_MAX_CONNECTION_AGE_GRACE_MS, 0, INT_MAX});
       chand->max_connection_age_grace =
-          value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
+          value == INT_MAX ? grpc_core::Duration::Infinity()
+                           : grpc_core::Duration::Milliseconds(value);
     } else if (0 == strcmp(args->channel_args->args[i].key,
                            GRPC_ARG_MAX_CONNECTION_IDLE_MS)) {
       const int value = grpc_channel_arg_get_integer(
           &args->channel_args->args[i], MAX_CONNECTION_IDLE_INTEGER_OPTIONS);
       chand->max_connection_idle =
-          value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
+          value == INT_MAX ? grpc_core::Duration::Infinity()
+                           : grpc_core::Duration::Milliseconds(value);
     }
   }
   GRPC_CLOSURE_INIT(&chand->max_idle_timer_cb, max_idle_timer_cb, chand,
@@ -487,7 +495,7 @@ static grpc_error_handle max_age_init_channel_elem(
                     start_max_age_grace_timer_after_goaway_op, chand,
                     grpc_schedule_on_exec_ctx);
-  if (chand->max_connection_age != GRPC_MILLIS_INF_FUTURE) {
+  if (chand->max_connection_age != grpc_core::Duration::Infinity()) {
     /* When the channel reaches its max age, we send down an op with
        goaway_error set. However, we can't send down any ops until after the
       channel stack is fully initialized. If we start the timer here, we have
@@ -505,7 +513,7 @@ static grpc_error_handle max_age_init_channel_elem(
   /* Initialize the number of calls as 1, so that the max_idle_timer will not
      start until start_max_idle_timer_after_init is invoked. */
   gpr_atm_rel_store(&chand->call_count, 1);
-  if (chand->max_connection_idle != GRPC_MILLIS_INF_FUTURE) {
+  if (chand->max_connection_idle != grpc_core::Duration::Infinity()) {
     GRPC_CHANNEL_STACK_REF(chand->channel_stack,
                            "max_age start_max_idle_timer_after_init");
     grpc_core::ExecCtx::Run(DEBUG_LOCATION,

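The max-age jitter helper multiplies the configured age by a random factor in [1 - MAX_CONNECTION_AGE_JITTER, 1 + MAX_CONNECTION_AGE_JITTER], saturating to infinity on overflow, so connections don't all expire and reconnect at once. A sketch of the computation; the 0.1 jitter constant is an assumption, and duration max() stands in for Duration::Infinity():

// Sketch of add_random_max_connection_age_jitter_and_convert_to_duration;
// kJitter = 0.1 is an assumed value for MAX_CONNECTION_AGE_JITTER, and
// Millis::max() stands in for grpc_core::Duration::Infinity().
#include <chrono>
#include <cstdint>
#include <cstdlib>
#include <limits>

using Millis = std::chrono::milliseconds;

constexpr double kJitter = 0.1;

Millis JitteredMaxAge(int value_ms) {
  // Random multiplier in [1 - kJitter, 1 + kJitter].
  double multiplier =
      std::rand() * kJitter * 2.0 / RAND_MAX + 1.0 - kJitter;
  double result = multiplier * value_ms;
  // Saturate instead of overflowing the integer representation.
  if (result >
      static_cast<double>(std::numeric_limits<std::int64_t>::max()) - 0.5) {
    return Millis::max();
  }
  return Millis(static_cast<std::int64_t>(result));
}
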
@@ -147,7 +147,7 @@ class Chttp2ServerListener : public Server::ListenerInterface {
     RefCountedPtr<HandshakeManager> handshake_mgr_
         ABSL_GUARDED_BY(&connection_->mu_);
     // State for enforcing handshake timeout on receiving HTTP/2 settings.
-    grpc_millis const deadline_;
+    Timestamp const deadline_;
     grpc_timer timer_ ABSL_GUARDED_BY(&connection_->mu_);
     grpc_closure on_timeout_ ABSL_GUARDED_BY(&connection_->mu_);
     grpc_closure on_receive_settings_ ABSL_GUARDED_BY(&connection_->mu_);
@@ -334,10 +334,10 @@ void Chttp2ServerListener::ConfigFetcherWatcher::StopServing() {
 // Chttp2ServerListener::ActiveConnection::HandshakingState
 //
-grpc_millis GetConnectionDeadline(const grpc_channel_args* args) {
-  int timeout_ms =
+Timestamp GetConnectionDeadline(const grpc_channel_args* args) {
+  auto timeout_ms = Duration::Milliseconds(
       grpc_channel_args_find_integer(args, GRPC_ARG_SERVER_HANDSHAKE_TIMEOUT_MS,
-                                     {120 * GPR_MS_PER_SEC, 1, INT_MAX});
+                                     {120 * GPR_MS_PER_SEC, 1, INT_MAX}));
   return ExecCtx::Get()->Now() + timeout_ms;
 }
@@ -566,10 +566,10 @@ void Chttp2ServerListener::ActiveConnection::SendGoAway() {
                       this, nullptr);
     grpc_timer_init(&drain_grace_timer_,
                     ExecCtx::Get()->Now() +
-                        grpc_channel_args_find_integer(
+                        Duration::Milliseconds(grpc_channel_args_find_integer(
                             listener_->args_,
                             GRPC_ARG_SERVER_CONFIG_CHANGE_DRAIN_GRACE_TIME_MS,
-                            {10 * 60 * GPR_MS_PER_SEC, 0, INT_MAX}),
+                            {10 * 60 * GPR_MS_PER_SEC, 0, INT_MAX})),
                     &on_drain_grace_time_expiry_);
     drain_grace_timer_expiry_callback_pending_ = true;
     shutdown_ = true;

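GetConnectionDeadline now builds a typed Duration from the handshake-timeout channel arg (defaulting to 120 seconds) and adds it to Now() to produce a Timestamp. The same now-plus-timeout shape in miniature (channel-arg lookup omitted):

// Sketch of the now-plus-timeout pattern in GetConnectionDeadline; the
// channel-arg lookup and its {default, 1, INT_MAX} clamp are omitted.
#include <chrono>

using Clock = std::chrono::steady_clock;

Clock::time_point ConnectionDeadline(
    std::chrono::milliseconds timeout = std::chrono::seconds(120)) {
  return Clock::now() + timeout;
}
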
@@ -301,11 +301,11 @@ static bool read_channel_args(grpc_chttp2_transport* t,
                strcmp(channel_args->args[i].key,
                       GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS)) {
       t->ping_policy.min_recv_ping_interval_without_data =
-          grpc_channel_arg_get_integer(
+          grpc_core::Duration::Milliseconds(grpc_channel_arg_get_integer(
               &channel_args->args[i],
               grpc_integer_options{
                   g_default_min_recv_ping_interval_without_data_ms, 0,
-                  INT_MAX});
+                  INT_MAX}));
     } else if (0 == strcmp(channel_args->args[i].key,
                            GRPC_ARG_HTTP2_WRITE_BUFFER_SIZE)) {
       t->write_buffer_size = static_cast<uint32_t>(grpc_channel_arg_get_integer(
@@ -321,7 +321,9 @@ static bool read_channel_args(grpc_chttp2_transport* t,
                            ? g_default_client_keepalive_time_ms
                            : g_default_server_keepalive_time_ms,
                        1, INT_MAX});
-      t->keepalive_time = value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
+      t->keepalive_time = value == INT_MAX
+                              ? grpc_core::Duration::Infinity()
+                              : grpc_core::Duration::Milliseconds(value);
     } else if (0 == strcmp(channel_args->args[i].key,
                            GRPC_ARG_KEEPALIVE_TIMEOUT_MS)) {
       const int value = grpc_channel_arg_get_integer(
@@ -330,7 +332,9 @@ static bool read_channel_args(grpc_chttp2_transport* t,
                            ? g_default_client_keepalive_timeout_ms
                            : g_default_server_keepalive_timeout_ms,
                        0, INT_MAX});
-      t->keepalive_timeout = value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
+      t->keepalive_timeout = value == INT_MAX
+                                 ? grpc_core::Duration::Infinity()
+                                 : grpc_core::Duration::Milliseconds(value);
     } else if (0 == strcmp(channel_args->args[i].key,
                            GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS)) {
       t->keepalive_permit_without_calls = static_cast<uint32_t>(
@@ -406,20 +410,24 @@ static bool read_channel_args(grpc_chttp2_transport* t,
 static void init_transport_keepalive_settings(grpc_chttp2_transport* t) {
   if (t->is_client) {
     t->keepalive_time = g_default_client_keepalive_time_ms == INT_MAX
-                            ? GRPC_MILLIS_INF_FUTURE
-                            : g_default_client_keepalive_time_ms;
+                            ? grpc_core::Duration::Infinity()
+                            : grpc_core::Duration::Milliseconds(
+                                  g_default_client_keepalive_time_ms);
     t->keepalive_timeout = g_default_client_keepalive_timeout_ms == INT_MAX
-                               ? GRPC_MILLIS_INF_FUTURE
-                               : g_default_client_keepalive_timeout_ms;
+                               ? grpc_core::Duration::Infinity()
+                               : grpc_core::Duration::Milliseconds(
+                                     g_default_client_keepalive_timeout_ms);
     t->keepalive_permit_without_calls =
         g_default_client_keepalive_permit_without_calls;
   } else {
     t->keepalive_time = g_default_server_keepalive_time_ms == INT_MAX
-                            ? GRPC_MILLIS_INF_FUTURE
-                            : g_default_server_keepalive_time_ms;
+                            ? grpc_core::Duration::Infinity()
+                            : grpc_core::Duration::Milliseconds(
+                                  g_default_server_keepalive_time_ms);
     t->keepalive_timeout = g_default_server_keepalive_timeout_ms == INT_MAX
-                               ? GRPC_MILLIS_INF_FUTURE
-                               : g_default_server_keepalive_timeout_ms;
+                               ? grpc_core::Duration::Infinity()
+                               : grpc_core::Duration::Milliseconds(
+                                     g_default_server_keepalive_timeout_ms);
     t->keepalive_permit_without_calls =
         g_default_server_keepalive_permit_without_calls;
   }
@@ -429,11 +437,12 @@ static void configure_transport_ping_policy(grpc_chttp2_transport* t) {
   t->ping_policy.max_pings_without_data = g_default_max_pings_without_data;
   t->ping_policy.max_ping_strikes = g_default_max_ping_strikes;
   t->ping_policy.min_recv_ping_interval_without_data =
-      g_default_min_recv_ping_interval_without_data_ms;
+      grpc_core::Duration::Milliseconds(
+          g_default_min_recv_ping_interval_without_data_ms);
 }
 static void init_keepalive_pings_if_enabled(grpc_chttp2_transport* t) {
-  if (t->keepalive_time != GRPC_MILLIS_INF_FUTURE) {
+  if (t->keepalive_time != grpc_core::Duration::Infinity()) {
     t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
     GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
     GRPC_CLOSURE_INIT(&t->init_keepalive_ping_locked, init_keepalive_ping, t,
@@ -525,9 +534,9 @@ grpc_chttp2_transport::grpc_chttp2_transport(
   // No pings allowed before receiving a header or data frame.
   ping_state.pings_before_data_required = 0;
   ping_state.is_delayed_ping_timer_set = false;
-  ping_state.last_ping_sent_time = GRPC_MILLIS_INF_PAST;
-  ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST;
+  ping_state.last_ping_sent_time = grpc_core::Timestamp::InfPast();
+  ping_recv_state.last_ping_recv_time = grpc_core::Timestamp::InfPast();
   ping_recv_state.ping_strikes = 0;
   init_keepalive_pings_if_enabled(this);
@@ -1118,16 +1127,14 @@ void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t,
     gpr_log(GPR_ERROR,
             "Received a GOAWAY with error code ENHANCE_YOUR_CALM and debug "
             "data equal to \"too_many_pings\"");
-    double current_keepalive_time_ms = static_cast<double>(t->keepalive_time);
-    constexpr int max_keepalive_time_ms =
-        INT_MAX / KEEPALIVE_TIME_BACKOFF_MULTIPLIER;
+    constexpr auto max_keepalive_time = grpc_core::Duration::Milliseconds(
+        INT_MAX / KEEPALIVE_TIME_BACKOFF_MULTIPLIER);
     t->keepalive_time =
-        current_keepalive_time_ms > static_cast<double>(max_keepalive_time_ms)
-            ? GRPC_MILLIS_INF_FUTURE
-            : static_cast<grpc_millis>(current_keepalive_time_ms *
-                                       KEEPALIVE_TIME_BACKOFF_MULTIPLIER);
+        t->keepalive_time > max_keepalive_time
+            ? grpc_core::Duration::Infinity()
+            : t->keepalive_time * KEEPALIVE_TIME_BACKOFF_MULTIPLIER;
     status.SetPayload(grpc_core::kKeepaliveThrottlingKey,
-                      absl::Cord(std::to_string(t->keepalive_time)));
+                      absl::Cord(std::to_string(t->keepalive_time.millis())));
   }
   // lie: use transient failure from the transport to indicate goaway has been
   // received.
@@ -1435,7 +1442,7 @@ static void perform_stream_op_locked(void* stream_op,
       s->deadline = std::min(
           s->deadline,
           s->send_initial_metadata->get(grpc_core::GrpcTimeoutMetadata())
-              .value_or(GRPC_MILLIS_INF_FUTURE));
+              .value_or(grpc_core::Timestamp::InfFuture()));
     }
     if (contains_non_ok_status(s->send_initial_metadata)) {
       s->seen_error = true;
@@ -1757,8 +1764,8 @@ static void send_goaway(grpc_chttp2_transport* t, grpc_error_handle error) {
   t->sent_goaway_state = GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED;
   grpc_http2_error_code http_error;
   std::string message;
-  grpc_error_get_status(error, GRPC_MILLIS_INF_FUTURE, nullptr, &message,
-                        &http_error, nullptr);
+  grpc_error_get_status(error, grpc_core::Timestamp::InfFuture(), nullptr,
+                        &message, &http_error, nullptr);
   grpc_chttp2_goaway_append(
       t->last_new_stream_id, static_cast<uint32_t>(http_error),
       grpc_slice_from_cpp_string(std::move(message)), &t->qbuf);
@@ -1783,7 +1790,7 @@ void grpc_chttp2_add_ping_strike(grpc_chttp2_transport* t) {
 void grpc_chttp2_reset_ping_clock(grpc_chttp2_transport* t) {
   if (!t->is_client) {
-    t->ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST;
+    t->ping_recv_state.last_ping_recv_time = grpc_core::Timestamp::InfPast();
     t->ping_recv_state.ping_strikes = 0;
   }
   t->ping_state.pings_before_data_required =
@@ -2605,7 +2612,8 @@ static void finish_bdp_ping_locked(void* tp, grpc_error_handle error) {
     return;
   }
   t->bdp_ping_started = false;
-  grpc_millis next_ping = t->flow_control->bdp_estimator()->CompletePing();
+  grpc_core::Timestamp next_ping =
+      t->flow_control->bdp_estimator()->CompletePing();
   grpc_chttp2_act_on_flowctl_action(t->flow_control->PeriodicUpdate(), t,
                                     nullptr);
   GPR_ASSERT(!t->have_next_bdp_ping_timer);

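The too_many_pings hunk gets simpler with typed durations: rather than round-tripping through double, it compares keepalive_time against INT_MAX / KEEPALIVE_TIME_BACKOFF_MULTIPLIER milliseconds and either saturates to infinity or multiplies in place. A sketch of that saturating multiply (the multiplier value 2 is an assumption; nullopt again stands in for Infinity):

// kBackoffMultiplier = 2 is an assumed value for
// KEEPALIVE_TIME_BACKOFF_MULTIPLIER; nullopt stands in for Infinity().
#include <chrono>
#include <climits>
#include <optional>

using Millis = std::chrono::milliseconds;

constexpr int kBackoffMultiplier = 2;

std::optional<Millis> ThrottledKeepaliveTime(Millis current) {
  constexpr Millis kMax{INT_MAX / kBackoffMultiplier};
  if (current > kMax) return std::nullopt;  // saturate: pings disabled
  return current * kBackoffMultiplier;
}
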
@@ -348,9 +348,9 @@ double TransportFlowControl::TargetLogBdp() {
 }
 double TransportFlowControl::SmoothLogBdp(double value) {
-  grpc_millis now = ExecCtx::Get()->Now();
+  Timestamp now = ExecCtx::Get()->Now();
   double bdp_error = value - pid_controller_.last_control_value();
-  const double dt = static_cast<double>(now - last_pid_update_) * 1e-3;
+  const double dt = (now - last_pid_update_).seconds();
   last_pid_update_ = now;
   // Limit dt to 100ms
   const double kMaxDt = 0.1;

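SmoothLogBdp replaces the hand-rolled `* 1e-3` conversion with Duration's seconds accessor; the 100 ms clamp on dt is unchanged. The dt computation in miniature:

// Elapsed time since the last PID update in fractional seconds, clamped
// to 100 ms as in SmoothLogBdp above; std::chrono stands in for the
// grpc_core types.
#include <algorithm>
#include <chrono>

using Clock = std::chrono::steady_clock;

double ClampedDtSeconds(Clock::time_point now, Clock::time_point last_update) {
  const double dt = std::chrono::duration<double>(now - last_update).count();
  return std::min(dt, 0.1);  // kMaxDt = 0.1 in the diff
}
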
@@ -335,7 +335,7 @@ class TransportFlowControl final : public TransportFlowControlBase {
   /* pid controller */
   PidController pid_controller_;
-  grpc_millis last_pid_update_ = 0;
+  Timestamp last_pid_update_;
 };
 // Fat interface with all methods a stream flow control implementation needs

@@ -90,8 +90,8 @@ grpc_error_handle grpc_chttp2_ping_parser_parse(void* parser,
     grpc_chttp2_ack_ping(t, p->opaque_8bytes);
   } else {
     if (!t->is_client) {
-      grpc_millis now = grpc_core::ExecCtx::Get()->Now();
-      grpc_millis next_allowed_ping =
+      grpc_core::Timestamp now = grpc_core::ExecCtx::Get()->Now();
+      grpc_core::Timestamp next_allowed_ping =
           t->ping_recv_state.last_ping_recv_time +
           t->ping_policy.min_recv_ping_interval_without_data;
@@ -100,8 +100,8 @@ grpc_error_handle grpc_chttp2_ping_parser_parse(void* parser,
         /* According to RFC1122, the interval of TCP Keep-Alive is default to
            no less than two hours. When there is no outstanding streams, we
           restrict the number of PINGS equivalent to TCP Keep-Alive. */
-        next_allowed_ping =
-            t->ping_recv_state.last_ping_recv_time + 7200 * GPR_MS_PER_SEC;
+        next_allowed_ping = t->ping_recv_state.last_ping_recv_time +
+                            grpc_core::Duration::Hours(2);
       }
       if (next_allowed_ping > now) {

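The server-side ping throttle above is now plain time arithmetic: add a Duration to the last-ping Timestamp and compare against now. The same pattern in portable std::chrono, as a sketch (gRPC's types behave analogously under `+` and `>`):

#include <chrono>
#include <cstdio>

using Clock = std::chrono::steady_clock;

// Returns true if enough time has elapsed since the last received ping.
bool PingAllowed(Clock::time_point last_ping_recv_time,
                 Clock::duration min_interval, Clock::time_point now) {
  return last_ping_recv_time + min_interval <= now;
}

int main() {
  const auto now = Clock::now();
  // With no outstanding streams, throttle to the TCP keep-alive rate (2h).
  std::printf("%d\n", PingAllowed(now, std::chrono::hours(2), now) ? 1 : 0);
  return 0;
}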
@@ -523,8 +523,7 @@ void HPackCompressor::Framer::EncodeIndexedKeyWithBinaryValue(
   }
 }
-void HPackCompressor::Framer::Encode(GrpcTimeoutMetadata,
-                                     grpc_millis deadline) {
+void HPackCompressor::Framer::Encode(GrpcTimeoutMetadata, Timestamp deadline) {
   Timeout timeout = Timeout::FromDuration(deadline - ExecCtx::Get()->Now());
   for (auto it = compressor_->previous_timeouts_.begin();
        it != compressor_->previous_timeouts_.end(); ++it) {

@@ -80,7 +80,7 @@ class HPackCompressor {
     void Encode(HttpPathMetadata, const Slice& value);
     void Encode(HttpAuthorityMetadata, const Slice& value);
     void Encode(HttpStatusMetadata, uint32_t status);
-    void Encode(GrpcTimeoutMetadata, grpc_millis deadline);
+    void Encode(GrpcTimeoutMetadata, Timestamp deadline);
     void Encode(TeMetadata, TeMetadata::ValueType value);
     void Encode(ContentTypeMetadata, ContentTypeMetadata::ValueType value);
     void Encode(HttpSchemeMetadata, HttpSchemeMetadata::ValueType value);

@@ -118,16 +118,16 @@ struct grpc_chttp2_ping_queue {
 struct grpc_chttp2_repeated_ping_policy {
   int max_pings_without_data;
   int max_ping_strikes;
-  grpc_millis min_recv_ping_interval_without_data;
+  grpc_core::Duration min_recv_ping_interval_without_data;
 };
 struct grpc_chttp2_repeated_ping_state {
-  grpc_millis last_ping_sent_time;
+  grpc_core::Timestamp last_ping_sent_time;
   int pings_before_data_required;
   grpc_timer delayed_ping_timer;
   bool is_delayed_ping_timer_set;
 };
 struct grpc_chttp2_server_ping_recv_state {
-  grpc_millis last_ping_recv_time;
+  grpc_core::Timestamp last_ping_recv_time;
   int ping_strikes;
 };
 /* deframer state for the overall http2 stream of bytes */
@@ -477,9 +477,9 @@ struct grpc_chttp2_transport {
   /** watchdog to kill the transport when waiting for the keepalive ping */
   grpc_timer keepalive_watchdog_timer;
   /** time duration in between pings */
-  grpc_millis keepalive_time;
+  grpc_core::Duration keepalive_time;
   /** grace period for a ping to complete before watchdog kicks in */
-  grpc_millis keepalive_timeout;
+  grpc_core::Duration keepalive_timeout;
   /** if keepalive pings are allowed when there's no outstanding streams */
   bool keepalive_permit_without_calls = false;
   /** If start_keepalive_ping_locked has been called */
@@ -609,7 +609,7 @@ struct grpc_chttp2_stream {
       GRPC_ERROR_NONE; /* protected by t combiner */
   bool received_last_frame = false; /* protected by t combiner */
-  grpc_millis deadline = GRPC_MILLIS_INF_FUTURE;
+  grpc_core::Timestamp deadline = grpc_core::Timestamp::InfFuture();
   /** saw some stream level error */
   grpc_error_handle forced_close_error = GRPC_ERROR_NONE;

@@ -400,7 +400,7 @@ error_handler:
     /* t->parser = grpc_chttp2_data_parser_parse;*/
     t->parser = grpc_chttp2_data_parser_parse;
     t->parser_data = &s->data_parser;
-    t->ping_state.last_ping_sent_time = GRPC_MILLIS_INF_PAST;
+    t->ping_state.last_ping_sent_time = grpc_core::Timestamp::InfPast();
     return GRPC_ERROR_NONE;
   } else if (grpc_error_get_int(err, GRPC_ERROR_INT_STREAM_ID, &unused)) {
     /* handle stream errors by closing the stream */
@@ -440,7 +440,7 @@ static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
           ? HPackParser::Priority::Included
           : HPackParser::Priority::None;
-  t->ping_state.last_ping_sent_time = GRPC_MILLIS_INF_PAST;
+  t->ping_state.last_ping_sent_time = grpc_core::Timestamp::InfPast();
   /* could be a new grpc_chttp2_stream or an existing grpc_chttp2_stream */
   s = grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);

@@ -77,15 +77,16 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
   // in a loop while draining the currently-held combiner. Also see
   // https://github.com/grpc/grpc/issues/26079.
   grpc_core::ExecCtx::Get()->InvalidateNow();
-  grpc_millis now = grpc_core::ExecCtx::Get()->Now();
-  grpc_millis next_allowed_ping_interval =
+  grpc_core::Timestamp now = grpc_core::ExecCtx::Get()->Now();
+  grpc_core::Duration next_allowed_ping_interval =
       (t->keepalive_permit_without_calls == 0 &&
        grpc_chttp2_stream_map_size(&t->stream_map) == 0)
-          ? 7200 * GPR_MS_PER_SEC
-          : (GPR_MS_PER_SEC); /* A second is added to deal with network delays
-                                 and timing imprecision */
-  grpc_millis next_allowed_ping =
+          ? grpc_core::Duration::Hours(2)
+          : grpc_core::Duration::Seconds(
+                1); /* A second is added to deal with network delays and timing
+                       imprecision */
+  grpc_core::Timestamp next_allowed_ping =
       t->ping_state.last_ping_sent_time + next_allowed_ping_interval;
   if (next_allowed_ping > now) {
@@ -93,12 +94,14 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
     if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) ||
         GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace) ||
         GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) {
-      gpr_log(GPR_INFO,
-              "%s: Ping delayed [%s]: not enough time elapsed since last ping. "
-              " Last ping %f: Next ping %f: Now %f",
-              t->is_client ? "CLIENT" : "SERVER", t->peer_string.c_str(),
-              static_cast<double>(t->ping_state.last_ping_sent_time),
-              static_cast<double>(next_allowed_ping), static_cast<double>(now));
+      gpr_log(
+          GPR_INFO,
+          "%s: Ping delayed [%s]: not enough time elapsed since last ping. "
+          " Last ping %" PRId64 ": Next ping %" PRId64 ": Now %" PRId64,
+          t->is_client ? "CLIENT" : "SERVER", t->peer_string.c_str(),
+          t->ping_state.last_ping_sent_time.milliseconds_after_process_epoch(),
+          next_allowed_ping.milliseconds_after_process_epoch(),
+          now.milliseconds_after_process_epoch());
     }
     if (!t->ping_state.is_delayed_ping_timer_set) {
       t->ping_state.is_delayed_ping_timer_set = true;

@@ -231,7 +231,8 @@ struct inproc_stream {
   grpc_metadata_batch write_buffer_initial_md{arena};
   bool write_buffer_initial_md_filled = false;
   uint32_t write_buffer_initial_md_flags = 0;
-  grpc_millis write_buffer_deadline = GRPC_MILLIS_INF_FUTURE;
+  grpc_core::Timestamp write_buffer_deadline =
+      grpc_core::Timestamp::InfFuture();
   grpc_metadata_batch write_buffer_trailing_md{arena};
   bool write_buffer_trailing_md_filled = false;
   grpc_error_handle write_buffer_cancel_error = GRPC_ERROR_NONE;
@@ -265,7 +266,7 @@ struct inproc_stream {
   grpc_error_handle cancel_self_error = GRPC_ERROR_NONE;
   grpc_error_handle cancel_other_error = GRPC_ERROR_NONE;
-  grpc_millis deadline = GRPC_MILLIS_INF_FUTURE;
+  grpc_core::Timestamp deadline = grpc_core::Timestamp::InfFuture();
   bool listed = true;
   struct inproc_stream* stream_list_prev;
@@ -705,7 +706,7 @@ void op_state_machine_locked(inproc_stream* s, grpc_error_handle error) {
                 .recv_initial_metadata,
             s->recv_initial_md_op->payload->recv_initial_metadata.recv_flags,
             nullptr);
-        if (s->deadline != GRPC_MILLIS_INF_FUTURE) {
+        if (s->deadline != grpc_core::Timestamp::InfFuture()) {
           s->recv_initial_md_op->payload->recv_initial_metadata
               .recv_initial_metadata->Set(grpc_core::GrpcTimeoutMetadata(),
                                           s->deadline);
@@ -1008,12 +1009,12 @@ void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
                     dest, destflags, destfilled);
       }
       if (s->t->is_client) {
-        grpc_millis* dl =
+        grpc_core::Timestamp* dl =
             (other == nullptr) ? &s->write_buffer_deadline : &other->deadline;
         *dl = std::min(
             *dl, op->payload->send_initial_metadata.send_initial_metadata
                      ->get(grpc_core::GrpcTimeoutMetadata())
-                     .value_or(GRPC_MILLIS_INF_FUTURE));
+                     .value_or(grpc_core::Timestamp::InfFuture()));
         s->initial_md_sent = true;
       }
     }

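The inproc deadline plumbing above relies on a small trick: an absent timeout defaults to `InfFuture()`, which makes it the identity element for `std::min`. A standalone restatement with `std::optional` standing in for the metadata lookup:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>
#include <optional>

constexpr int64_t kInfFuture = std::numeric_limits<int64_t>::max();

// Merge a possibly-absent per-call timeout into the current deadline.
int64_t MergeDeadline(int64_t current, std::optional<int64_t> from_metadata) {
  return std::min(current, from_metadata.value_or(kInfFuture));
}

int main() {
  assert(MergeDeadline(kInfFuture, 5000) == 5000);    // first deadline wins
  assert(MergeDeadline(3000, std::nullopt) == 3000);  // absent is a no-op
  return 0;
}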
@@ -58,7 +58,7 @@ std::string FileWatcherCertificateProviderFactory::Config::ToString() const {
         absl::StrFormat("ca_certificate_file=\"%s\", ", root_cert_file_));
   }
   parts.push_back(
-      absl::StrFormat("refresh_interval=%ldms}", refresh_interval_ms_));
+      absl::StrFormat("refresh_interval=%ldms}", refresh_interval_.millis()));
   return absl::StrJoin(parts, "");
 }
@@ -91,8 +91,8 @@ FileWatcherCertificateProviderFactory::Config::Parse(const Json& config_json,
   }
   if (!ParseJsonObjectFieldAsDuration(
           config_json.object_value(), "refresh_interval",
-          &config->refresh_interval_ms_, &error_list, false)) {
-    config->refresh_interval_ms_ = 10 * 60 * 1000;  // 10 minutes default
+          &config->refresh_interval_, &error_list, false)) {
+    config->refresh_interval_ = Duration::Minutes(10);  // 10 minutes default
   }
   if (!error_list.empty()) {
     *error = GRPC_ERROR_CREATE_FROM_VECTOR(
@@ -131,7 +131,7 @@ FileWatcherCertificateProviderFactory::CreateCertificateProvider(
       file_watcher_config->private_key_file(),
       file_watcher_config->identity_cert_file(),
       file_watcher_config->root_cert_file(),
-      file_watcher_config->refresh_interval_ms() / GPR_MS_PER_SEC);
+      file_watcher_config->refresh_interval().millis() / GPR_MS_PER_SEC);
 }
 void FileWatcherCertificateProviderInit() {

@@ -45,13 +45,13 @@ class FileWatcherCertificateProviderFactory
     const std::string& root_cert_file() const { return root_cert_file_; }
-    grpc_millis refresh_interval_ms() const { return refresh_interval_ms_; }
+    Duration refresh_interval() const { return refresh_interval_; }
   private:
    std::string identity_cert_file_;
    std::string private_key_file_;
    std::string root_cert_file_;
-    grpc_millis refresh_interval_ms_;
+    Duration refresh_interval_;
   };
   const char* name() const override;

@@ -153,7 +153,7 @@ GoogleMeshCaCertificateProviderFactory::Config::ParseJsonObjectGrpcServices(
   }
   if (!ParseJsonObjectFieldAsDuration(grpc_service, "timeout", &timeout_,
                                       &error_list_grpc_services, false)) {
-    timeout_ = 10 * 1000;  // 10sec default
+    timeout_ = Duration::Seconds(10);  // 10sec default
   }
   return error_list_grpc_services;
 }
@@ -216,12 +216,12 @@ GoogleMeshCaCertificateProviderFactory::Config::Parse(
   if (!ParseJsonObjectFieldAsDuration(
           config_json.object_value(), "certificate_lifetime",
           &config->certificate_lifetime_, &error_list, false)) {
-    config->certificate_lifetime_ = 24 * 60 * 60 * 1000;  // 24hrs default
+    config->certificate_lifetime_ = Duration::Hours(24);  // 24hrs default
   }
   if (!ParseJsonObjectFieldAsDuration(
           config_json.object_value(), "renewal_grace_period",
           &config->renewal_grace_period_, &error_list, false)) {
-    config->renewal_grace_period_ = 12 * 60 * 60 * 1000;  // 12hrs default
+    config->renewal_grace_period_ = Duration::Hours(12);  // 12hrs default
   }
   std::string key_type;
   if (ParseJsonObjectField(config_json.object_value(), "key_type", &key_type,

@@ -52,11 +52,11 @@ class GoogleMeshCaCertificateProviderFactory
     const StsConfig& sts_config() const { return sts_config_; }
-    grpc_millis timeout() const { return timeout_; }
+    Duration timeout() const { return timeout_; }
-    grpc_millis certificate_lifetime() const { return certificate_lifetime_; }
+    Duration certificate_lifetime() const { return certificate_lifetime_; }
-    grpc_millis renewal_grace_period() const { return renewal_grace_period_; }
+    Duration renewal_grace_period() const { return renewal_grace_period_; }
     uint32_t key_size() const { return key_size_; }
@@ -80,9 +80,9 @@ class GoogleMeshCaCertificateProviderFactory
     std::string endpoint_;
     StsConfig sts_config_;
-    grpc_millis timeout_;
-    grpc_millis certificate_lifetime_;
-    grpc_millis renewal_grace_period_;
+    Duration timeout_;
+    Duration certificate_lifetime_;
+    Duration renewal_grace_period_;
     uint32_t key_size_;
     std::string location_;
   };

@@ -568,8 +568,7 @@ grpc_slice XdsApi::CreateLrsRequest(
     envoy_config_endpoint_v3_ClusterStats_set_total_dropped_requests(
         cluster_stats, total_dropped_requests);
     // Set real load report interval.
-    gpr_timespec timespec =
-        grpc_millis_to_timespec(load_report.load_report_interval, GPR_TIMESPAN);
+    gpr_timespec timespec = load_report.load_report_interval.as_timespec();
     google_protobuf_Duration* load_report_interval =
         envoy_config_endpoint_v3_ClusterStats_mutable_load_report_interval(
             cluster_stats, arena.ptr());
@@ -580,10 +579,10 @@ grpc_slice XdsApi::CreateLrsRequest(
   return SerializeLrsRequest(context, request);
 }
-grpc_error_handle XdsApi::ParseLrsResponse(
-    const grpc_slice& encoded_response, bool* send_all_clusters,
-    std::set<std::string>* cluster_names,
-    grpc_millis* load_reporting_interval) {
+grpc_error_handle XdsApi::ParseLrsResponse(const grpc_slice& encoded_response,
+                                           bool* send_all_clusters,
+                                           std::set<std::string>* cluster_names,
+                                           Duration* load_reporting_interval) {
   upb::Arena arena;
   // Decode the response.
   const envoy_service_load_stats_v3_LoadStatsResponse* decoded_response =
@@ -612,21 +611,19 @@ grpc_error_handle XdsApi::ParseLrsResponse(
   const google_protobuf_Duration* load_reporting_interval_duration =
       envoy_service_load_stats_v3_LoadStatsResponse_load_reporting_interval(
           decoded_response);
-  gpr_timespec timespec{
-      google_protobuf_Duration_seconds(load_reporting_interval_duration),
-      google_protobuf_Duration_nanos(load_reporting_interval_duration),
-      GPR_TIMESPAN};
-  *load_reporting_interval = gpr_time_to_millis(timespec);
+  *load_reporting_interval = Duration::FromSecondsAndNanoseconds(
+      google_protobuf_Duration_seconds(load_reporting_interval_duration),
+      google_protobuf_Duration_nanos(load_reporting_interval_duration));
   return GRPC_ERROR_NONE;
 }
 namespace {
-google_protobuf_Timestamp* GrpcMillisToTimestamp(
-    const XdsEncodingContext& context, grpc_millis value) {
+google_protobuf_Timestamp* EncodeTimestamp(const XdsEncodingContext& context,
+                                           Timestamp value) {
   google_protobuf_Timestamp* timestamp =
       google_protobuf_Timestamp_new(context.arena);
-  gpr_timespec timespec = grpc_millis_to_timespec(value, GPR_CLOCK_REALTIME);
+  gpr_timespec timespec = value.as_timespec(GPR_CLOCK_REALTIME);
   google_protobuf_Timestamp_set_seconds(timestamp, timespec.tv_sec);
   google_protobuf_Timestamp_set_nanos(timestamp, timespec.tv_nsec);
   return timestamp;
@@ -677,7 +674,7 @@ std::string XdsApi::AssembleClientConfig(
     envoy_service_status_v3_ClientConfig_GenericXdsConfig_set_version_info(
         entry, StdStringToUpbString(metadata.version));
     envoy_service_status_v3_ClientConfig_GenericXdsConfig_set_last_updated(
-        entry, GrpcMillisToTimestamp(context, metadata.update_time));
+        entry, EncodeTimestamp(context, metadata.update_time));
     auto* any_field =
         envoy_service_status_v3_ClientConfig_GenericXdsConfig_mutable_xds_config(
             entry, context.arena);
@@ -697,7 +694,7 @@ std::string XdsApi::AssembleClientConfig(
           StdStringToUpbString(metadata.failed_version));
       envoy_admin_v3_UpdateFailureState_set_last_update_attempt(
           update_failure_state,
-          GrpcMillisToTimestamp(context, metadata.failed_update_time));
+          EncodeTimestamp(context, metadata.failed_update_time));
       envoy_service_status_v3_ClientConfig_GenericXdsConfig_set_error_state(
           entry, update_failure_state);
     }

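EncodeTimestamp above goes Timestamp -> gpr_timespec -> protobuf {seconds, nanos}. The split itself is simple arithmetic; a standalone sketch for non-negative instants (struct and names are illustrative, not the upb API):

#include <cassert>
#include <cstdint>

struct ProtoTimestamp {
  int64_t seconds;
  int32_t nanos;
};

// Split a realtime instant in milliseconds into seconds + nanoseconds.
ProtoTimestamp ToProto(int64_t unix_millis) {
  return {unix_millis / 1000,
          static_cast<int32_t>(unix_millis % 1000) * 1000000};
}

int main() {
  ProtoTimestamp ts = ToProto(1638316800123);  // some realtime instant
  assert(ts.seconds == 1638316800 && ts.nanos == 123000000);
  return 0;
}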
@@ -75,7 +75,7 @@ class XdsApi {
     std::map<RefCountedPtr<XdsLocalityName>, XdsClusterLocalityStats::Snapshot,
              XdsLocalityName::Less>
         locality_stats;
-    grpc_millis load_report_interval;
+    Duration load_report_interval;
   };
   using ClusterLoadReportMap = std::map<
       std::pair<std::string /*cluster_name*/, std::string /*eds_service_name*/>,
@@ -106,7 +106,7 @@ class XdsApi {
     // The serialized bytes of the last successfully updated raw xDS resource.
     std::string serialized_proto;
     // The timestamp when the resource was last successfully updated.
-    grpc_millis update_time = 0;
+    Timestamp update_time;
     // The last successfully updated version of the resource.
     std::string version;
     // The rejected version string of the last failed update attempt.
@@ -114,7 +114,7 @@ class XdsApi {
     // Details about the last failed update attempt.
     std::string failed_details;
     // Timestamp of the last failed update attempt.
-    grpc_millis failed_update_time = 0;
+    Timestamp failed_update_time;
   };
   using ResourceMetadataMap =
       std::map<std::string /*resource_name*/, const ResourceMetadata*>;
@@ -168,7 +168,7 @@ class XdsApi {
   grpc_error_handle ParseLrsResponse(const grpc_slice& encoded_response,
                                      bool* send_all_clusters,
                                      std::set<std::string>* cluster_names,
-                                     grpc_millis* load_reporting_interval);
+                                     Duration* load_reporting_interval);
   // Assemble the client config proto message and return the serialized result.
   std::string AssembleClientConfig(

@@ -216,7 +216,7 @@ class XdsClient::ChannelState::AdsCallState
     XdsClient* xds_client() const { return ads_call_state_->xds_client(); }
     AdsCallState* ads_call_state_;
-    const grpc_millis update_time_ = ExecCtx::Get()->Now();
+    const Timestamp update_time_ = ExecCtx::Get()->Now();
     Result result_;
   };
@@ -389,7 +389,7 @@ class XdsClient::ChannelState::LrsCallState
   // Reports client-side load stats according to a fixed interval.
   class Reporter : public InternallyRefCounted<Reporter> {
    public:
-    Reporter(RefCountedPtr<LrsCallState> parent, grpc_millis report_interval)
+    Reporter(RefCountedPtr<LrsCallState> parent, Duration report_interval)
         : parent_(std::move(parent)), report_interval_(report_interval) {
       GRPC_CLOSURE_INIT(&on_next_report_timer_, OnNextReportTimer, this,
                         grpc_schedule_on_exec_ctx);
@@ -420,7 +420,7 @@ class XdsClient::ChannelState::LrsCallState
     RefCountedPtr<LrsCallState> parent_;
     // The load reporting state.
-    const grpc_millis report_interval_;
+    const Duration report_interval_;
     bool last_report_counters_were_zero_ = false;
     bool next_report_timer_callback_pending_ = false;
     grpc_timer next_report_timer_;
@@ -467,7 +467,7 @@ class XdsClient::ChannelState::LrsCallState
   // Load reporting state.
   bool send_all_clusters_ = false;
   std::set<std::string> cluster_names_;  // Asked for by the LRS server.
-  grpc_millis load_reporting_interval_ = 0;
+  Duration load_reporting_interval_;
   OrphanablePtr<Reporter> reporter_;
 };
@@ -661,13 +661,13 @@ template <typename T>
 XdsClient::ChannelState::RetryableCall<T>::RetryableCall(
     WeakRefCountedPtr<ChannelState> chand)
     : chand_(std::move(chand)),
-      backoff_(
-          BackOff::Options()
-              .set_initial_backoff(GRPC_XDS_INITIAL_CONNECT_BACKOFF_SECONDS *
-                                   1000)
-              .set_multiplier(GRPC_XDS_RECONNECT_BACKOFF_MULTIPLIER)
-              .set_jitter(GRPC_XDS_RECONNECT_JITTER)
-              .set_max_backoff(GRPC_XDS_RECONNECT_MAX_BACKOFF_SECONDS * 1000)) {
+      backoff_(BackOff::Options()
+                   .set_initial_backoff(Duration::Seconds(
+                       GRPC_XDS_INITIAL_CONNECT_BACKOFF_SECONDS))
+                   .set_multiplier(GRPC_XDS_RECONNECT_BACKOFF_MULTIPLIER)
+                   .set_jitter(GRPC_XDS_RECONNECT_JITTER)
+                   .set_max_backoff(Duration::Seconds(
+                       GRPC_XDS_RECONNECT_MAX_BACKOFF_SECONDS))) {
   // Closure Initialization
   GRPC_CLOSURE_INIT(&on_retry_timer_, OnRetryTimer, this,
                     grpc_schedule_on_exec_ctx);
@@ -715,15 +715,15 @@ void XdsClient::ChannelState::RetryableCall<T>::StartNewCallLocked() {
 template <typename T>
 void XdsClient::ChannelState::RetryableCall<T>::StartRetryTimerLocked() {
   if (shutting_down_) return;
-  const grpc_millis next_attempt_time = backoff_.NextAttemptTime();
+  const Timestamp next_attempt_time = backoff_.NextAttemptTime();
   if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
-    grpc_millis timeout =
-        std::max(next_attempt_time - ExecCtx::Get()->Now(), grpc_millis(0));
+    Duration timeout =
+        std::max(next_attempt_time - ExecCtx::Get()->Now(), Duration::Zero());
     gpr_log(GPR_INFO,
             "[xds_client %p] xds server %s: call attempt failed; "
             "retry timer will fire in %" PRId64 "ms.",
             chand()->xds_client(), chand()->server_.server_uri.c_str(),
-            timeout);
+            timeout.millis());
   }
   this->Ref(DEBUG_LOCATION, "RetryableCall+retry_timer_start").release();
   grpc_timer_init(&retry_timer_, next_attempt_time, &on_retry_timer_);
@@ -789,8 +789,7 @@ namespace {
 // Build a resource metadata struct for ADS result accepting methods and CSDS.
 XdsApi::ResourceMetadata CreateResourceMetadataAcked(
-    std::string serialized_proto, std::string version,
-    grpc_millis update_time) {
+    std::string serialized_proto, std::string version, Timestamp update_time) {
   XdsApi::ResourceMetadata resource_metadata;
   resource_metadata.serialized_proto = std::move(serialized_proto);
   resource_metadata.update_time = update_time;
@@ -802,7 +801,7 @@ XdsApi::ResourceMetadata CreateResourceMetadataAcked(
 // Update resource_metadata for NACK.
 void UpdateResourceMetadataNacked(const std::string& version,
                                   const std::string& details,
-                                  grpc_millis update_time,
+                                  Timestamp update_time,
                                   XdsApi::ResourceMetadata* resource_metadata) {
   resource_metadata->client_status = XdsApi::ResourceMetadata::NACKED;
   resource_metadata->failed_version = version;
@@ -950,7 +949,7 @@ XdsClient::ChannelState::AdsCallState::AdsCallState(
       chand()->channel_, nullptr, GRPC_PROPAGATE_DEFAULTS,
       xds_client()->interested_parties_,
       StaticSlice::FromStaticString(method).c_slice(), nullptr,
-      GRPC_MILLIS_INF_FUTURE, nullptr);
+      Timestamp::InfFuture(), nullptr);
   GPR_ASSERT(call_ != nullptr);
   // Init data associated with the call.
   grpc_metadata_array_init(&initial_metadata_recv_);
@@ -1362,7 +1361,7 @@ void XdsClient::ChannelState::LrsCallState::Reporter::Orphan() {
 void XdsClient::ChannelState::LrsCallState::Reporter::
     ScheduleNextReportLocked() {
-  const grpc_millis next_report_time = ExecCtx::Get()->Now() + report_interval_;
+  const Timestamp next_report_time = ExecCtx::Get()->Now() + report_interval_;
   grpc_timer_init(&next_report_timer_, next_report_time,
                   &on_next_report_timer_);
   next_report_timer_callback_pending_ = true;
@@ -1509,8 +1508,8 @@ XdsClient::ChannelState::LrsCallState::LrsCallState(
   call_ = grpc_channel_create_pollset_set_call(
       chand()->channel_, nullptr, GRPC_PROPAGATE_DEFAULTS,
       xds_client()->interested_parties_,
-      StaticSlice::FromStaticString(method).c_slice(), nullptr,
-      GRPC_MILLIS_INF_FUTURE, nullptr);
+      Slice::FromStaticString(method).c_slice(), nullptr,
+      Timestamp::InfFuture(), nullptr);
   GPR_ASSERT(call_ != nullptr);
   // Init the request payload.
   grpc_slice request_payload_slice =
@@ -1680,7 +1679,7 @@ bool XdsClient::ChannelState::LrsCallState::OnResponseReceivedLocked() {
   // Parse the response.
   bool send_all_clusters = false;
   std::set<std::string> new_cluster_names;
-  grpc_millis new_load_reporting_interval;
+  Duration new_load_reporting_interval;
   grpc_error_handle parse_error = xds_client()->api_.ParseLrsResponse(
       response_slice, &send_all_clusters, &new_cluster_names,
       &new_load_reporting_interval);
@@ -1701,7 +1700,7 @@ bool XdsClient::ChannelState::LrsCallState::OnResponseReceivedLocked() {
             "ms",
             xds_client(), chand()->server_.server_uri.c_str(),
             new_cluster_names.size(), send_all_clusters,
-            new_load_reporting_interval);
+            new_load_reporting_interval.millis());
     size_t i = 0;
     for (const auto& name : new_cluster_names) {
       gpr_log(GPR_INFO, "[xds_client %p] cluster_name %" PRIuPTR ": %s",
@@ -1709,9 +1708,10 @@ bool XdsClient::ChannelState::LrsCallState::OnResponseReceivedLocked() {
     }
   }
   if (new_load_reporting_interval <
-      GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS) {
-    new_load_reporting_interval =
-        GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS;
+      Duration::Milliseconds(
+          GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS)) {
+    new_load_reporting_interval = Duration::Milliseconds(
+        GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS);
     if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
       gpr_log(GPR_INFO,
               "[xds_client %p] xds server %s: increased load_report_interval "
@@ -1804,10 +1804,10 @@ bool XdsClient::ChannelState::LrsCallState::IsCurrentCallOnChannel() const {
 namespace {
-grpc_millis GetRequestTimeout(const grpc_channel_args* args) {
-  return grpc_channel_args_find_integer(
-      args, GRPC_ARG_XDS_RESOURCE_DOES_NOT_EXIST_TIMEOUT_MS,
-      {15000, 0, INT_MAX});
+Duration GetRequestTimeout(const grpc_channel_args* args) {
+  return Duration::Milliseconds(grpc_channel_args_find_integer(
+      args, GRPC_ARG_XDS_RESOURCE_DOES_NOT_EXIST_TIMEOUT_MS,
+      {15000, 0, INT_MAX}));
 }
 grpc_channel_args* ModifyChannelArgs(const grpc_channel_args* args) {
@@ -2285,7 +2285,7 @@ XdsApi::ClusterLoadReportMap XdsClient::BuildLoadReportSnapshotLocked(
     }
   }
   // Compute load report interval.
-  const grpc_millis now = ExecCtx::Get()->Now();
+  const Timestamp now = ExecCtx::Get()->Now();
   snapshot.load_report_interval = now - load_report.last_report_time;
   load_report.last_report_time = now;
   // Record snapshot.

@@ -253,7 +253,7 @@ class XdsClient : public DualRefCounted<XdsClient> {
     std::map<RefCountedPtr<XdsLocalityName>, LocalityState,
              XdsLocalityName::Less>
         locality_stats;
-    grpc_millis last_report_time = ExecCtx::Get()->Now();
+    Timestamp last_report_time = ExecCtx::Get()->Now();
   };
   // Load report data.
@@ -294,7 +294,7 @@ class XdsClient : public DualRefCounted<XdsClient> {
   std::unique_ptr<XdsBootstrap> bootstrap_;
   grpc_channel_args* args_;
-  const grpc_millis request_timeout_;
+  const Duration request_timeout_;
   grpc_pollset_set* interested_parties_;
   OrphanablePtr<CertificateProviderStore> certificate_provider_store_;
   XdsApi api_;

@@ -32,26 +32,11 @@
 namespace grpc_core {
-struct Duration {
-  int64_t seconds = 0;
-  int32_t nanos = 0;
-  Duration() = default;
-  bool operator==(const Duration& other) const {
-    return seconds == other.seconds && nanos == other.nanos;
-  }
-  std::string ToString() const {
-    return absl::StrFormat("Duration seconds: %ld, nanos %d", seconds, nanos);
-  }
-  static Duration Parse(const google_protobuf_Duration* proto_duration) {
-    Duration duration;
-    duration.seconds = google_protobuf_Duration_seconds(proto_duration);
-    duration.nanos = google_protobuf_Duration_nanos(proto_duration);
-    return duration;
-  }
-};
+inline Duration ParseDuration(const google_protobuf_Duration* proto_duration) {
+  return Duration::FromSecondsAndNanoseconds(
+      google_protobuf_Duration_seconds(proto_duration),
+      google_protobuf_Duration_nanos(proto_duration));
+}
 struct CommonTlsContext {
   struct CertificateProviderPluginInstance {

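The replacement of the ad-hoc xDS `Duration` struct with `ParseDuration` above collapses protobuf-style `{seconds, nanos}` into the library-wide millisecond Duration. The core arithmetic, restated standalone (sketch only: gRPC's `FromSecondsAndNanoseconds` additionally guards the int64 extremes):

#include <cassert>
#include <cstdint>

// Collapse protobuf-style {seconds, nanos} into one millisecond count,
// truncating the sub-millisecond part.
int64_t MillisFromSecondsAndNanos(int64_t seconds, int32_t nanos) {
  return seconds * 1000 + nanos / 1000000;
}

int main() {
  assert(MillisFromSecondsAndNanos(2, 500000000) == 2500);  // 2.5s
  assert(MillisFromSecondsAndNanos(0, 999999) == 0);        // <1ms truncates
  return 0;
}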
@@ -37,6 +37,7 @@
 #include "upb/upb.h"
 #include "upb/upb.hpp"
+#include "src/core/ext/xds/xds_common_types.h"
 #include "src/core/lib/address_utils/parse_address.h"
 #include "src/core/lib/address_utils/sockaddr_utils.h"
 #include "src/core/lib/gprpp/host_port.h"
@@ -294,7 +295,7 @@ grpc_error_handle HttpConnectionManagerParse(
         envoy_config_core_v3_HttpProtocolOptions_max_stream_duration(options);
     if (duration != nullptr) {
       http_connection_manager->http_max_stream_duration =
-          Duration::Parse(duration);
+          ParseDuration(duration);
     }
   }
   // Parse filters.
// Parse filters. // Parse filters.

@@ -595,29 +595,21 @@ grpc_error_handle RetryPolicyParse(
           "RouteAction RetryPolicy RetryBackoff missing base interval."));
     } else {
       retry_to_return.retry_back_off.base_interval =
-          Duration::Parse(base_interval);
+          ParseDuration(base_interval);
     }
     const google_protobuf_Duration* max_interval =
         envoy_config_route_v3_RetryPolicy_RetryBackOff_max_interval(backoff);
     Duration max;
     if (max_interval != nullptr) {
-      max = Duration::Parse(max_interval);
+      max = ParseDuration(max_interval);
     } else {
-      // if max interval is not set, it is 10x the base, if the value in nanos
-      // can yield another second, adjust the value in seconds accordingly.
-      max.seconds = retry_to_return.retry_back_off.base_interval.seconds * 10;
-      max.nanos = retry_to_return.retry_back_off.base_interval.nanos * 10;
-      if (max.nanos > 1000000000) {
-        max.seconds += max.nanos / 1000000000;
-        max.nanos = max.nanos % 1000000000;
-      }
+      // if max interval is not set, it is 10x the base.
+      max = 10 * retry_to_return.retry_back_off.base_interval;
     }
     retry_to_return.retry_back_off.max_interval = max;
   } else {
-    retry_to_return.retry_back_off.base_interval.seconds = 0;
-    retry_to_return.retry_back_off.base_interval.nanos = 25000000;
-    retry_to_return.retry_back_off.max_interval.seconds = 0;
-    retry_to_return.retry_back_off.max_interval.nanos = 250000000;
+    retry_to_return.retry_back_off.base_interval = Duration::Milliseconds(25);
+    retry_to_return.retry_back_off.max_interval = Duration::Milliseconds(250);
   }
   if (errors.empty()) {
     *retry = retry_to_return;
@@ -718,7 +710,7 @@ grpc_error_handle RouteActionParse(
             max_stream_duration);
       }
       if (duration != nullptr) {
-        route->max_stream_duration = Duration::Parse(duration);
+        route->max_stream_duration = ParseDuration(duration);
       }
     }
   }

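The RetryPolicyParse simplification above is a good illustration of the payoff: with a single millisecond count, "10x the base interval" is one multiplication, and the old seconds/nanos carry fixup disappears entirely. A standalone demonstration with an illustrative Duration (overflow handling elided; gRPC's real operator guards the extremes):

#include <cassert>
#include <cstdint>

struct Duration {
  int64_t millis;
};
Duration operator*(int64_t mul, Duration d) {
  return Duration{d.millis * mul};  // overflow handling elided for brevity
}

int main() {
  Duration base{150};        // 0.15s, i.e. seconds=0 nanos=150000000 in the
  Duration max = 10 * base;  // old representation; now just one multiply
  assert(max.millis == 1500);  // no nanos-overflow fixup needed
  return 0;
}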
@@ -28,20 +28,17 @@ namespace grpc_core {
 BackOff::BackOff(const Options& options) : options_(options) { Reset(); }
-grpc_millis BackOff::NextAttemptTime() {
+Timestamp BackOff::NextAttemptTime() {
   if (initial_) {
     initial_ = false;
     return current_backoff_ + ExecCtx::Get()->Now();
   }
-  current_backoff_ = static_cast<grpc_millis>(
-      std::min(current_backoff_ * options_.multiplier(),
-               static_cast<double>(options_.max_backoff())));
-  const double jitter =
-      absl::Uniform(rand_gen_, -options_.jitter() * current_backoff_,
-                    options_.jitter() * current_backoff_);
-  const grpc_millis next_timeout =
-      static_cast<grpc_millis>(current_backoff_ + jitter);
-  return next_timeout + ExecCtx::Get()->Now();
+  current_backoff_ = std::min(current_backoff_ * options_.multiplier(),
+                              options_.max_backoff());
+  const Duration jitter = Duration::FromSecondsAsDouble(
+      absl::Uniform(rand_gen_, -options_.jitter() * current_backoff_.seconds(),
+                    options_.jitter() * current_backoff_.seconds()));
+  return ExecCtx::Get()->Now() + current_backoff_ + jitter;
 }
 void BackOff::Reset() {

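The rewritten NextAttemptTime above keeps the same shape: grow the delay geometrically, cap it at max_backoff, then add jitter drawn uniformly from ±jitter·current. A runnable standalone restatement in seconds, with std::mt19937 standing in for absl::BitGen and the first-attempt special case elided:

#include <algorithm>
#include <cstdio>
#include <random>

// One growth step of capped exponential backoff with proportional jitter.
double NextBackoffSeconds(double& current, double multiplier,
                          double max_backoff, double jitter,
                          std::mt19937& rng) {
  current = std::min(current * multiplier, max_backoff);
  std::uniform_real_distribution<double> dist(-jitter * current,
                                              jitter * current);
  return current + dist(rng);  // delay before the next attempt
}

int main() {
  std::mt19937 rng(42);
  double current = 1.0;  // initial backoff: 1s
  for (int i = 0; i < 5; ++i) {
    std::printf("attempt %d: wait %.2fs\n", i + 1,
                NextBackoffSeconds(current, 1.6, 120.0, 0.2, rng));
  }
  return 0;
}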
@@ -37,7 +37,7 @@ class BackOff {
   explicit BackOff(const Options& options);
   /// Returns the time at which the next attempt should start.
-  grpc_millis NextAttemptTime();
+  Timestamp NextAttemptTime();
   /// Reset the backoff, so the next value returned by NextAttemptTime()
   /// will be the time of the second attempt (rather than the Nth).
@@ -45,7 +45,7 @@ class BackOff {
   class Options {
    public:
-    Options& set_initial_backoff(grpc_millis initial_backoff) {
+    Options& set_initial_backoff(Duration initial_backoff) {
       initial_backoff_ = initial_backoff;
       return *this;
     }
@@ -57,24 +57,24 @@ class BackOff {
       jitter_ = jitter;
       return *this;
     }
-    Options& set_max_backoff(grpc_millis max_backoff) {
+    Options& set_max_backoff(Duration max_backoff) {
       max_backoff_ = max_backoff;
       return *this;
     }
     /// how long to wait after the first failure before retrying
-    grpc_millis initial_backoff() const { return initial_backoff_; }
+    Duration initial_backoff() const { return initial_backoff_; }
     /// factor with which to multiply backoff after a failed retry
     double multiplier() const { return multiplier_; }
     /// amount to randomize backoffs
     double jitter() const { return jitter_; }
     /// maximum time between retries
-    grpc_millis max_backoff() const { return max_backoff_; }
+    Duration max_backoff() const { return max_backoff_; }
    private:
-    grpc_millis initial_backoff_;
+    Duration initial_backoff_;
     double multiplier_;
     double jitter_;
-    grpc_millis max_backoff_;
+    Duration max_backoff_;
   };  // class Options
  private:
@@ -82,7 +82,7 @@ class BackOff {
   absl::BitGen rand_gen_;
   bool initial_;
   /// current delay before retries
-  grpc_millis current_backoff_;
+  Duration current_backoff_;
 };
 }  // namespace grpc_core

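Callers configure this through the chained setters, now Duration-typed, as the xds_client hunk earlier shows. A minimal standalone mirror of the builder pattern, just to show the call shape (the real class lives in backoff.h; this is an illustration, not gRPC's header):

#include <cassert>
#include <cstdint>

struct Duration {
  int64_t millis;
  static Duration Seconds(int64_t s) { return Duration{s * 1000}; }
};

class Options {
 public:
  Options& set_initial_backoff(Duration d) {
    initial_backoff_ = d;
    return *this;  // returning *this enables chaining
  }
  Options& set_multiplier(double m) {
    multiplier_ = m;
    return *this;
  }
  Duration initial_backoff() const { return initial_backoff_; }
  double multiplier() const { return multiplier_; }

 private:
  Duration initial_backoff_{0};
  double multiplier_ = 1.0;
};

int main() {
  Options opts =
      Options().set_initial_backoff(Duration::Seconds(1)).set_multiplier(1.6);
  assert(opts.initial_backoff().millis == 1000);
  assert(opts.multiplier() == 1.6);
  return 0;
}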
@@ -83,7 +83,7 @@ struct grpc_call_element_args {
   grpc_call_context_element* context;
   const grpc_slice& path;
   gpr_cycle_counter start_time;  // Note: not populated in subchannel stack.
-  grpc_millis deadline;
+  grpc_core::Timestamp deadline;
   grpc_core::Arena* arena;
   grpc_core::CallCombiner* call_combiner;
 };

@@ -46,8 +46,7 @@ ChannelTrace::TraceEvent::TraceEvent(Severity severity, const grpc_slice& data,
                                      RefCountedPtr<BaseNode> referenced_entity)
     : severity_(severity),
       data_(data),
-      timestamp_(
-          grpc_millis_to_timespec(ExecCtx::Get()->Now(), GPR_CLOCK_REALTIME)),
+      timestamp_(ExecCtx::Get()->Now().as_timespec(GPR_CLOCK_REALTIME)),
       next_(nullptr),
       referenced_entity_(std::move(referenced_entity)),
       memory_usage_(sizeof(TraceEvent) + grpc_slice_memory_usage(data)) {}
@@ -55,8 +54,7 @@ ChannelTrace::TraceEvent::TraceEvent(Severity severity, const grpc_slice& data,
 ChannelTrace::TraceEvent::TraceEvent(Severity severity, const grpc_slice& data)
     : severity_(severity),
       data_(data),
-      timestamp_(
-          grpc_millis_to_timespec(ExecCtx::Get()->Now(), GPR_CLOCK_REALTIME)),
+      timestamp_(ExecCtx::Get()->Now().as_timespec(GPR_CLOCK_REALTIME)),
       next_(nullptr),
       memory_usage_(sizeof(TraceEvent) + grpc_slice_memory_usage(data)) {}
@@ -72,8 +70,7 @@ ChannelTrace::ChannelTrace(size_t max_event_memory)
     return;  // tracing is disabled if max_event_memory_ == 0
   }
   gpr_mu_init(&tracer_mu_);
-  time_created_ =
-      grpc_millis_to_timespec(ExecCtx::Get()->Now(), GPR_CLOCK_REALTIME);
+  time_created_ = ExecCtx::Get()->Now().as_timespec(GPR_CLOCK_REALTIME);
 }
 ChannelTrace::~ChannelTrace() {

@@ -169,7 +169,7 @@ void HandshakeManager::OnTimeoutFn(void* arg, grpc_error_handle error) {
 void HandshakeManager::DoHandshake(grpc_endpoint* endpoint,
                                    const grpc_channel_args* channel_args,
-                                   grpc_millis deadline,
+                                   Timestamp deadline,
                                    grpc_tcp_server_acceptor* acceptor,
                                    grpc_iomgr_cb_func on_handshake_done,
                                    void* user_data) {

@@ -114,7 +114,7 @@ class HandshakeManager : public RefCounted<HandshakeManager> {
   /// the necessary clean-up.  Otherwise, the callback takes ownership of
   /// the arguments.
   void DoHandshake(grpc_endpoint* endpoint,
-                   const grpc_channel_args* channel_args, grpc_millis deadline,
+                   const grpc_channel_args* channel_args, Timestamp deadline,
                    grpc_tcp_server_acceptor* acceptor,
                    grpc_iomgr_cb_func on_handshake_done, void* user_data);

@@ -104,13 +104,13 @@ class BaseCallData {
   grpc_call_element* elem() const { return elem_; }
   CallCombiner* call_combiner() const { return call_combiner_; }
-  grpc_millis deadline() const { return deadline_; }
+  Timestamp deadline() const { return deadline_; }
  private:
   grpc_call_element* const elem_;
   Arena* const arena_;
   CallCombiner* const call_combiner_;
-  const grpc_millis deadline_;
+  const Timestamp deadline_;
 };
 // Specific call data per channel filter.

@@ -184,7 +184,8 @@ gpr_timespec gpr_time_sub(gpr_timespec a, gpr_timespec b) {
     dec++;
   }
   if (a.tv_sec == INT64_MAX || a.tv_sec == INT64_MIN) {
-    diff = a;
+    diff.tv_sec = a.tv_sec;
+    diff.tv_nsec = a.tv_nsec;
   } else if (b.tv_sec == INT64_MIN ||
              (b.tv_sec <= 0 && a.tv_sec >= INT64_MAX + b.tv_sec)) {
     diff = gpr_inf_future(GPR_CLOCK_REALTIME);

@@ -106,6 +106,20 @@ constexpr size_t HashPointer(T* p, size_t range) {
          range;
 }
+// Compute a+b.
+// If the result is greater than INT64_MAX, return INT64_MAX.
+// If the result is less than INT64_MIN, return INT64_MIN.
+inline int64_t SaturatingAdd(int64_t a, int64_t b) {
+  if (a > 0) {
+    if (b > INT64_MAX - a) {
+      return INT64_MAX;
+    }
+  } else if (b < INT64_MIN - a) {
+    return INT64_MIN;
+  }
+  return a + b;
+}
 inline uint32_t MixHash32(uint32_t a, uint32_t b) {
   return RotateLeft(a, 2u) ^ b;
 }

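A quick sanity check of SaturatingAdd's clamping at both extremes; the body is the one added above, with INT64_MAX/INT64_MIN spelled via <limits>. The MillisAdd helper in the new time.h below layers infinity-propagation on top of this primitive.

#include <cassert>
#include <cstdint>
#include <limits>

inline int64_t SaturatingAdd(int64_t a, int64_t b) {
  if (a > 0) {
    if (b > std::numeric_limits<int64_t>::max() - a) {
      return std::numeric_limits<int64_t>::max();
    }
  } else if (b < std::numeric_limits<int64_t>::min() - a) {
    return std::numeric_limits<int64_t>::min();
  }
  return a + b;
}

int main() {
  const int64_t kMax = std::numeric_limits<int64_t>::max();
  const int64_t kMin = std::numeric_limits<int64_t>::min();
  assert(SaturatingAdd(kMax, 1) == kMax);   // clamps instead of overflowing
  assert(SaturatingAdd(kMin, -1) == kMin);  // clamps at the bottom too
  assert(SaturatingAdd(2, 3) == 5);         // ordinary values are exact
  return 0;
}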
@@ -0,0 +1,186 @@
// Copyright 2021 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <grpc/support/port_platform.h>
#include "src/core/lib/gprpp/time.h"
#include <atomic>
#include <cstdint>
#include <limits>
#include <string>
#include <grpc/impl/codegen/gpr_types.h>
#include <grpc/support/log.h>
namespace grpc_core {
namespace {
std::atomic<int64_t> g_process_epoch_seconds;
std::atomic<gpr_cycle_counter> g_process_epoch_cycles;
GPR_ATTRIBUTE_NOINLINE std::pair<int64_t, gpr_cycle_counter> InitTime() {
gpr_cycle_counter cycles_start;
gpr_cycle_counter cycles_end;
int64_t process_epoch_seconds;
// Check the current time... if we end up with zero, try again after 100ms.
// If it doesn't advance after sleeping for 1100ms, crash the process.
for (int i = 0; i < 11; i++) {
cycles_start = gpr_get_cycle_counter();
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
cycles_end = gpr_get_cycle_counter();
process_epoch_seconds = now.tv_sec - 1;
if (process_epoch_seconds != 0) {
break;
}
gpr_sleep_until(gpr_time_add(now, gpr_time_from_millis(100, GPR_TIMESPAN)));
}
// Time does not seem to be increasing from zero...
GPR_ASSERT(process_epoch_seconds != 0);
int64_t expected = 0;
gpr_cycle_counter process_epoch_cycles = (cycles_start + cycles_end) / 2;
GPR_ASSERT(process_epoch_cycles != 0);
if (!g_process_epoch_seconds.compare_exchange_strong(
expected, process_epoch_seconds, std::memory_order_relaxed,
std::memory_order_relaxed)) {
process_epoch_seconds = expected;
do {
process_epoch_cycles =
g_process_epoch_cycles.load(std::memory_order_relaxed);
} while (process_epoch_cycles == 0);
} else {
g_process_epoch_cycles.store(process_epoch_cycles,
std::memory_order_relaxed);
}
return std::make_pair(process_epoch_seconds, process_epoch_cycles);
}
gpr_timespec StartTime() {
int64_t sec = g_process_epoch_seconds.load(std::memory_order_relaxed);
if (GPR_UNLIKELY(sec == 0)) sec = InitTime().first;
return {sec, 0, GPR_CLOCK_MONOTONIC};
}
gpr_cycle_counter StartCycleCounter() {
gpr_cycle_counter cycles =
g_process_epoch_cycles.load(std::memory_order_relaxed);
if (GPR_UNLIKELY(cycles == 0)) cycles = InitTime().second;
return cycles;
}
gpr_timespec MillisecondsAsTimespec(int64_t millis, gpr_clock_type clock_type) {
// special-case infinities as Timestamp can be 32bit on some
// platforms while gpr_time_from_millis always takes an int64_t.
if (millis == std::numeric_limits<int64_t>::max()) {
return gpr_inf_future(clock_type);
}
if (millis == std::numeric_limits<int64_t>::min()) {
return gpr_inf_past(clock_type);
}
if (clock_type == GPR_TIMESPAN) {
return gpr_time_from_millis(millis, GPR_TIMESPAN);
}
return gpr_time_add(gpr_convert_clock_type(StartTime(), clock_type),
gpr_time_from_millis(millis, GPR_TIMESPAN));
}
int64_t TimespanToMillisRoundUp(gpr_timespec ts) {
GPR_ASSERT(ts.clock_type == GPR_TIMESPAN);
double x = GPR_MS_PER_SEC * static_cast<double>(ts.tv_sec) +
static_cast<double>(ts.tv_nsec) / GPR_NS_PER_MS +
static_cast<double>(GPR_NS_PER_SEC - 1) /
static_cast<double>(GPR_NS_PER_SEC);
if (x <= static_cast<double>(std::numeric_limits<int64_t>::min())) {
return std::numeric_limits<int64_t>::min();
}
if (x >= static_cast<double>(std::numeric_limits<int64_t>::max())) {
return std::numeric_limits<int64_t>::max();
}
return static_cast<int64_t>(x);
}
int64_t TimespanToMillisRoundDown(gpr_timespec ts) {
GPR_ASSERT(ts.clock_type == GPR_TIMESPAN);
double x = GPR_MS_PER_SEC * static_cast<double>(ts.tv_sec) +
static_cast<double>(ts.tv_nsec) / GPR_NS_PER_MS;
if (x <= static_cast<double>(std::numeric_limits<int64_t>::min())) {
return std::numeric_limits<int64_t>::min();
}
if (x >= static_cast<double>(std::numeric_limits<int64_t>::max())) {
return std::numeric_limits<int64_t>::max();
}
return static_cast<int64_t>(x);
}
} // namespace
Timestamp Timestamp::FromTimespecRoundUp(gpr_timespec ts) {
return FromMillisecondsAfterProcessEpoch(TimespanToMillisRoundUp(gpr_time_sub(
gpr_convert_clock_type(ts, GPR_CLOCK_MONOTONIC), StartTime())));
}
Timestamp Timestamp::FromTimespecRoundDown(gpr_timespec ts) {
return FromMillisecondsAfterProcessEpoch(
TimespanToMillisRoundDown(gpr_time_sub(
gpr_convert_clock_type(ts, GPR_CLOCK_MONOTONIC), StartTime())));
}
Timestamp Timestamp::FromCycleCounterRoundUp(gpr_cycle_counter c) {
return Timestamp::FromMillisecondsAfterProcessEpoch(
TimespanToMillisRoundUp(gpr_cycle_counter_sub(c, StartCycleCounter())));
}
Timestamp Timestamp::FromCycleCounterRoundDown(gpr_cycle_counter c) {
return Timestamp::FromMillisecondsAfterProcessEpoch(
TimespanToMillisRoundDown(gpr_cycle_counter_sub(c, StartCycleCounter())));
}
gpr_timespec Timestamp::as_timespec(gpr_clock_type clock_type) const {
return MillisecondsAsTimespec(millis_, clock_type);
}
std::string Timestamp::ToString() const {
return "@" + std::to_string(millis_) + "ms";
}
gpr_timespec Duration::as_timespec() const {
return MillisecondsAsTimespec(millis_, GPR_TIMESPAN);
}
Duration Duration::FromTimespec(gpr_timespec t) {
return Duration::Milliseconds(TimespanToMillisRoundUp(t));
}
std::string Duration::ToString() const {
return std::to_string(millis_) + "ms";
}
void TestOnlySetProcessEpoch(gpr_timespec epoch) {
g_process_epoch_seconds.store(
gpr_convert_clock_type(epoch, GPR_CLOCK_MONOTONIC).tv_sec);
}
std::ostream& operator<<(std::ostream& out, Timestamp timestamp) {
return out << timestamp.ToString();
}
std::ostream& operator<<(std::ostream& out, Duration duration) {
return out << duration.ToString();
}
} // namespace grpc_core

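On TimespanToMillisRoundUp/RoundDown above: the round-up variant folds just under one millisecond's worth of nanoseconds into the value before truncating, so any nonzero sub-millisecond remainder bumps the result up by one. An integer restatement for non-negative inputs (the real code works in doubles and saturates at the int64 extremes):

#include <cassert>
#include <cstdint>

int64_t MillisRoundDown(int64_t sec, int64_t nsec) {
  return sec * 1000 + nsec / 1000000;
}

int64_t MillisRoundUp(int64_t sec, int64_t nsec) {
  return sec * 1000 + (nsec + 999999) / 1000000;
}

int main() {
  // 1s + 100ns: round-down keeps 1000ms, round-up charges the extra 100ns.
  assert(MillisRoundDown(1, 100) == 1000);
  assert(MillisRoundUp(1, 100) == 1001);
  // Exact milliseconds are unchanged by either.
  assert(MillisRoundDown(1, 0) == 1000 && MillisRoundUp(1, 0) == 1000);
  return 0;
}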
@@ -0,0 +1,292 @@
// Copyright 2021 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_CORE_LIB_GPRPP_TIME_H
#define GRPC_CORE_LIB_GPRPP_TIME_H
#include <grpc/support/port_platform.h>
#include <stdint.h>
#include <cstdint>
#include <limits>
#include <ostream>
#include <string>
#include <grpc/support/time.h>
#include "src/core/lib/gpr/time_precise.h"
#include "src/core/lib/gpr/useful.h"
namespace grpc_core {
namespace time_detail {
inline int64_t MillisAdd(int64_t a, int64_t b) {
if (a == std::numeric_limits<int64_t>::max() ||
b == std::numeric_limits<int64_t>::max()) {
return std::numeric_limits<int64_t>::max();
}
if (a == std::numeric_limits<int64_t>::min() ||
b == std::numeric_limits<int64_t>::min()) {
return std::numeric_limits<int64_t>::min();
}
return SaturatingAdd(a, b);
}
constexpr inline int64_t MillisMul(int64_t millis, int64_t mul) {
return millis >= std::numeric_limits<int64_t>::max() / mul
? std::numeric_limits<int64_t>::max()
: millis <= std::numeric_limits<int64_t>::min() / mul
? std::numeric_limits<int64_t>::min()
: millis * mul;
}
} // namespace time_detail
class Duration;
// Timestamp represents a discrete point in time.
class Timestamp {
public:
constexpr Timestamp() = default;
// Constructs a Timestamp from a gpr_timespec.
static Timestamp FromTimespecRoundDown(gpr_timespec t);
static Timestamp FromTimespecRoundUp(gpr_timespec t);
// Construct a Timestamp from a gpr_cycle_counter.
static Timestamp FromCycleCounterRoundUp(gpr_cycle_counter c);
static Timestamp FromCycleCounterRoundDown(gpr_cycle_counter c);
static constexpr Timestamp FromMillisecondsAfterProcessEpoch(int64_t millis) {
return Timestamp(millis);
}
static constexpr Timestamp ProcessEpoch() { return Timestamp(0); }
static constexpr Timestamp InfFuture() {
return Timestamp(std::numeric_limits<int64_t>::max());
}
static constexpr Timestamp InfPast() {
return Timestamp(std::numeric_limits<int64_t>::min());
}
constexpr bool operator==(Timestamp other) const {
return millis_ == other.millis_;
}
constexpr bool operator!=(Timestamp other) const {
return millis_ != other.millis_;
}
constexpr bool operator<(Timestamp other) const {
return millis_ < other.millis_;
}
constexpr bool operator<=(Timestamp other) const {
return millis_ <= other.millis_;
}
constexpr bool operator>(Timestamp other) const {
return millis_ > other.millis_;
}
constexpr bool operator>=(Timestamp other) const {
return millis_ >= other.millis_;
}
Timestamp& operator+=(Duration duration);
bool is_process_epoch() const { return millis_ == 0; }
uint64_t milliseconds_after_process_epoch() const { return millis_; }
gpr_timespec as_timespec(gpr_clock_type type) const;
std::string ToString() const;
private:
explicit constexpr Timestamp(int64_t millis) : millis_(millis) {}
int64_t millis_ = 0;
};
// Duration represents a span of time.
class Duration {
public:
constexpr Duration() : millis_(0) {}
static Duration FromTimespec(gpr_timespec t);
static Duration FromSecondsAndNanoseconds(int64_t seconds, int32_t nanos);
static Duration FromSecondsAsDouble(double seconds);
static constexpr Duration Zero() { return Duration(0); }
// Smallest representable positive duration.
static constexpr Duration Epsilon() { return Duration(1); }
static constexpr Duration NegativeInfinity() {
return Duration(std::numeric_limits<int64_t>::min());
}
static constexpr Duration Infinity() {
return Duration(std::numeric_limits<int64_t>::max());
}
static constexpr Duration Hours(int64_t hours) {
return Minutes(time_detail::MillisMul(hours, 60));
}
static constexpr Duration Minutes(int64_t minutes) {
return Seconds(time_detail::MillisMul(minutes, 60));
}
static constexpr Duration Seconds(int64_t seconds) {
return Milliseconds(time_detail::MillisMul(seconds, GPR_MS_PER_SEC));
}
static constexpr Duration Milliseconds(int64_t millis) {
return Duration(millis);
}
static constexpr Duration MicrosecondsRoundDown(int64_t micros) {
return Duration(micros / GPR_US_PER_MS);
}
static constexpr Duration NanosecondsRoundDown(int64_t nanos) {
return Duration(nanos / GPR_NS_PER_MS);
}
static constexpr Duration MicrosecondsRoundUp(int64_t micros) {
return Duration(micros / GPR_US_PER_MS + (micros % GPR_US_PER_MS != 0));
}
static constexpr Duration NanosecondsRoundUp(int64_t nanos) {
return Duration(nanos / GPR_NS_PER_MS + (nanos % GPR_NS_PER_MS != 0));
}
constexpr bool operator==(Duration other) const {
return millis_ == other.millis_;
}
constexpr bool operator!=(Duration other) const {
return millis_ != other.millis_;
}
constexpr bool operator<(Duration other) const {
return millis_ < other.millis_;
}
constexpr bool operator<=(Duration other) const {
return millis_ <= other.millis_;
}
constexpr bool operator>(Duration other) const {
return millis_ > other.millis_;
}
constexpr bool operator>=(Duration other) const {
return millis_ >= other.millis_;
}
Duration& operator/=(int64_t divisor) {
if (millis_ == std::numeric_limits<int64_t>::max()) {
*this = divisor < 0 ? NegativeInfinity() : Infinity();
} else if (millis_ == std::numeric_limits<int64_t>::min()) {
*this = divisor < 0 ? Infinity() : NegativeInfinity();
} else {
millis_ /= divisor;
}
return *this;
}
Duration& operator+=(Duration other) {
millis_ += other.millis_;
return *this;
}
constexpr int64_t millis() const { return millis_; }
double seconds() const { return static_cast<double>(millis_) / 1000.0; }
gpr_timespec as_timespec() const;
std::string ToString() const;
private:
explicit constexpr Duration(int64_t millis) : millis_(millis) {}
int64_t millis_;
};
inline Duration operator+(Duration lhs, Duration rhs) {
return Duration::Milliseconds(
time_detail::MillisAdd(lhs.millis(), rhs.millis()));
}
inline Duration operator-(Duration lhs, Duration rhs) {
return Duration::Milliseconds(
time_detail::MillisAdd(lhs.millis(), -rhs.millis()));
}
inline Timestamp operator+(Timestamp lhs, Duration rhs) {
return Timestamp::FromMillisecondsAfterProcessEpoch(time_detail::MillisAdd(
lhs.milliseconds_after_process_epoch(), rhs.millis()));
}
inline Timestamp operator-(Timestamp lhs, Duration rhs) {
return Timestamp::FromMillisecondsAfterProcessEpoch(time_detail::MillisAdd(
lhs.milliseconds_after_process_epoch(), -rhs.millis()));
}
inline Timestamp operator+(Duration lhs, Timestamp rhs) { return rhs + lhs; }
inline Duration operator-(Timestamp lhs, Timestamp rhs) {
return Duration::Milliseconds(
time_detail::MillisAdd(lhs.milliseconds_after_process_epoch(),
-rhs.milliseconds_after_process_epoch()));
}
inline Duration operator*(Duration lhs, double rhs) {
if (lhs == Duration::Infinity()) {
return rhs < 0 ? Duration::NegativeInfinity() : Duration::Infinity();
}
if (lhs == Duration::NegativeInfinity()) {
return rhs < 0 ? Duration::Infinity() : Duration::NegativeInfinity();
}
return Duration::FromSecondsAsDouble(lhs.millis() * rhs / 1000.0);
}
inline Duration operator*(double lhs, Duration rhs) { return rhs * lhs; }
inline Duration operator/(Duration lhs, int64_t rhs) {
lhs /= rhs;
return lhs;
}
inline Duration Duration::FromSecondsAndNanoseconds(int64_t seconds,
int32_t nanos) {
return Seconds(seconds) + NanosecondsRoundDown(nanos);
}
inline Duration Duration::FromSecondsAsDouble(double seconds) {
double millis = seconds * 1000.0;
if (millis >= static_cast<double>(std::numeric_limits<int64_t>::max())) {
return Infinity();
}
if (millis <= static_cast<double>(std::numeric_limits<int64_t>::min())) {
return NegativeInfinity();
}
return Milliseconds(static_cast<int64_t>(millis));
}
inline Timestamp& Timestamp::operator+=(Duration duration) {
return *this = (*this + duration);
}
void TestOnlySetProcessEpoch(gpr_timespec epoch);
std::ostream& operator<<(std::ostream& out, Timestamp timestamp);
std::ostream& operator<<(std::ostream& out, Duration duration);
} // namespace grpc_core
#endif // GRPC_CORE_LIB_GPRPP_TIME_H
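A minimal usage sketch of the new types (illustrative only; SaturationExample is a hypothetical function, not part of the diff), showing that arithmetic saturates at the infinities instead of wrapping:

#include <grpc/support/log.h>
#include "src/core/lib/gprpp/time.h"

void SaturationExample() {
  using grpc_core::Duration;
  using grpc_core::Timestamp;
  // MillisAdd special-cases INT64_MAX/INT64_MIN, so InfFuture stays InfFuture.
  GPR_ASSERT(Timestamp::InfFuture() + Duration::Hours(1) ==
             Timestamp::InfFuture());
  // Scaling an infinite Duration only flips its sign.
  GPR_ASSERT(Duration::Infinity() * -2.0 == Duration::NegativeInfinity());
  // Finite values behave like plain millisecond arithmetic.
  GPR_ASSERT(Duration::Seconds(2) / 2 == Duration::Seconds(1));
}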

@@ -59,7 +59,7 @@ grpc_httpcli_post_override g_post_override;
 OrphanablePtr<HttpRequest> HttpRequest::Get(
     URI uri, const grpc_channel_args* channel_args,
     grpc_polling_entity* pollent, const grpc_http_request* request,
-    grpc_millis deadline, grpc_closure* on_done, grpc_http_response* response,
+    Timestamp deadline, grpc_closure* on_done, grpc_http_response* response,
     RefCountedPtr<grpc_channel_credentials> channel_creds) {
   absl::optional<std::function<void()>> test_only_generate_response;
   if (g_get_override != nullptr) {
@@ -85,7 +85,7 @@ OrphanablePtr<HttpRequest> HttpRequest::Get(
 OrphanablePtr<HttpRequest> HttpRequest::Post(
     URI uri, const grpc_channel_args* channel_args,
     grpc_polling_entity* pollent, const grpc_http_request* request,
-    grpc_millis deadline, grpc_closure* on_done, grpc_http_response* response,
+    Timestamp deadline, grpc_closure* on_done, grpc_http_response* response,
     RefCountedPtr<grpc_channel_credentials> channel_creds) {
   absl::optional<std::function<void()>> test_only_generate_response;
   if (g_post_override != nullptr) {
@@ -114,7 +114,7 @@ void HttpRequest::SetOverride(grpc_httpcli_get_override get,
 HttpRequest::HttpRequest(
     URI uri, const grpc_slice& request_text, grpc_http_response* response,
-    grpc_millis deadline, const grpc_channel_args* channel_args,
+    Timestamp deadline, const grpc_channel_args* channel_args,
     grpc_closure* on_done, grpc_polling_entity* pollent, const char* name,
     absl::optional<std::function<void()>> test_only_generate_response,
     RefCountedPtr<grpc_channel_credentials> channel_creds)

@@ -43,12 +43,12 @@
 /* override functions return 1 if they handled the request, 0 otherwise */
 typedef int (*grpc_httpcli_get_override)(const grpc_http_request* request,
                                          const char* host, const char* path,
-                                         grpc_millis deadline,
+                                         grpc_core::Timestamp deadline,
                                          grpc_closure* on_complete,
                                          grpc_http_response* response);
 typedef int (*grpc_httpcli_post_override)(
     const grpc_http_request* request, const char* host, const char* path,
-    const char* body_bytes, size_t body_size, grpc_millis deadline,
+    const char* body_bytes, size_t body_size, grpc_core::Timestamp deadline,
     grpc_closure* on_complete, grpc_http_response* response);
 
 namespace grpc_core {
@@ -81,7 +81,7 @@ class HttpRequest : public InternallyRefCounted<HttpRequest> {
   // are removed.
   static OrphanablePtr<HttpRequest> Get(
       URI uri, const grpc_channel_args* args, grpc_polling_entity* pollent,
-      const grpc_http_request* request, grpc_millis deadline,
+      const grpc_http_request* request, Timestamp deadline,
       grpc_closure* on_done, grpc_http_response* response,
       RefCountedPtr<grpc_channel_credentials> channel_creds)
       GRPC_MUST_USE_RESULT;
@@ -107,13 +107,13 @@ class HttpRequest : public InternallyRefCounted<HttpRequest> {
   // Does not support ?var1=val1&var2=val2 in the path.
   static OrphanablePtr<HttpRequest> Post(
       URI uri, const grpc_channel_args* args, grpc_polling_entity* pollent,
-      const grpc_http_request* request, grpc_millis deadline,
+      const grpc_http_request* request, Timestamp deadline,
       grpc_closure* on_done, grpc_http_response* response,
       RefCountedPtr<grpc_channel_credentials> channel_creds)
       GRPC_MUST_USE_RESULT;
 
   HttpRequest(URI uri, const grpc_slice& request_text,
-              grpc_http_response* response, grpc_millis deadline,
+              grpc_http_response* response, Timestamp deadline,
               const grpc_channel_args* channel_args, grpc_closure* on_done,
               grpc_polling_entity* pollent, const char* name,
               absl::optional<std::function<void()>> test_only_generate_response,
@@ -185,7 +185,7 @@ class HttpRequest : public InternallyRefCounted<HttpRequest> {
   const URI uri_;
   const grpc_slice request_text_;
-  const grpc_millis deadline_;
+  const Timestamp deadline_;
   const grpc_channel_args* channel_args_;
   RefCountedPtr<grpc_channel_credentials> channel_creds_;
   grpc_closure on_read_;

@@ -82,16 +82,16 @@ struct ConnectionMetrics {
   absl::optional<uint64_t> sndbuf_limited_usec;
 };
 
-struct Timestamp {
+struct BufferTimestamp {
   gpr_timespec time;
   ConnectionMetrics metrics; /* Metrics collected with this timestamp */
 };
 
 struct Timestamps {
-  Timestamp sendmsg_time;
-  Timestamp scheduled_time;
-  Timestamp sent_time;
-  Timestamp acked_time;
+  BufferTimestamp sendmsg_time;
+  BufferTimestamp scheduled_time;
+  BufferTimestamp sent_time;
+  BufferTimestamp acked_time;
 
   uint32_t byte_offset; /* byte offset relative to the start of the RPC */

@@ -221,9 +221,10 @@ static void pollset_global_shutdown(void) {
 /// these events will eventually trigger the kick.
 static grpc_error_handle pollset_work(grpc_pollset* pollset,
                                       grpc_pollset_worker** worker,
-                                      grpc_millis deadline) {
+                                      grpc_core::Timestamp deadline) {
   GRPC_POLLING_TRACE("pollset work: %p, worker: %p, deadline: %" PRIu64,
-                     pollset, worker, deadline);
+                     pollset, worker,
+                     deadline.milliseconds_after_process_epoch());
   GrpcApplePollset* apple_pollset =
       reinterpret_cast<GrpcApplePollset*>(pollset);
   GrpcAppleWorker actual_worker;
@@ -241,8 +242,8 @@ static grpc_error_handle pollset_work(grpc_pollset* pollset,
   while (!actual_worker.kicked && !apple_pollset->is_shutdown) {
     if (actual_worker.cv.WaitWithDeadline(
-            &apple_pollset->mu, grpc_core::ToAbslTime(grpc_millis_to_timespec(
-                                    deadline, GPR_CLOCK_REALTIME)))) {
+            &apple_pollset->mu, grpc_core::ToAbslTime(deadline.as_timespec(
+                                    GPR_CLOCK_REALTIME)))) {
       // timed out
       break;
     }

@@ -639,9 +639,9 @@ static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
   pollset_maybe_finish_shutdown(pollset);
 }
 
-static int poll_deadline_to_millis_timeout(grpc_millis millis) {
-  if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
-  grpc_millis delta = millis - grpc_core::ExecCtx::Get()->Now();
+static int poll_deadline_to_millis_timeout(grpc_core::Timestamp millis) {
+  if (millis == grpc_core::Timestamp::InfFuture()) return -1;
+  int64_t delta = (millis - grpc_core::ExecCtx::Get()->Now()).millis();
   if (delta > INT_MAX) {
     return INT_MAX;
   } else if (delta < 0) {
@@ -711,7 +711,8 @@ static grpc_error_handle process_epoll_events(grpc_pollset* /*pollset*/) {
    NOTE ON SYNCHRONIZATION: At any point of time, only the g_active_poller
    (i.e the designated poller thread) will be calling this function. So there is
    no need for any synchronization when accesing fields in g_epoll_set */
-static grpc_error_handle do_epoll_wait(grpc_pollset* ps, grpc_millis deadline) {
+static grpc_error_handle do_epoll_wait(grpc_pollset* ps,
+                                       grpc_core::Timestamp deadline) {
   GPR_TIMER_SCOPE("do_epoll_wait", 0);
   int r;
@@ -744,7 +745,7 @@ static grpc_error_handle do_epoll_wait(grpc_pollset* ps, grpc_millis deadline) {
 static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                          grpc_pollset_worker** worker_hdl,
-                         grpc_millis deadline) {
+                         grpc_core::Timestamp deadline) {
   GPR_TIMER_SCOPE("begin_worker", 0);
   if (worker_hdl != nullptr) *worker_hdl = worker;
   worker->initialized_cv = false;
@@ -831,7 +832,7 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
   }
   if (gpr_cv_wait(&worker->cv, &pollset->mu,
-                  grpc_millis_to_timespec(deadline, GPR_CLOCK_MONOTONIC)) &&
+                  deadline.as_timespec(GPR_CLOCK_MONOTONIC)) &&
       worker->state == UNKICKED) {
     /* If gpr_cv_wait returns true (i.e a timeout), pretend that the worker
        received a kick */
@@ -1012,7 +1013,7 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
    ensure that it is held by the time the function returns */
 static grpc_error_handle pollset_work(grpc_pollset* ps,
                                       grpc_pollset_worker** worker_hdl,
-                                      grpc_millis deadline) {
+                                      grpc_core::Timestamp deadline) {
   GPR_TIMER_SCOPE("pollset_work", 0);
   grpc_pollset_worker worker;
   grpc_error_handle error = GRPC_ERROR_NONE;

@@ -798,9 +798,9 @@ static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
   *mu = &pollset->mu;
 }
 
-static int poll_deadline_to_millis_timeout(grpc_millis millis) {
-  if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
-  grpc_millis delta = millis - grpc_core::ExecCtx::Get()->Now();
+static int poll_deadline_to_millis_timeout(grpc_core::Timestamp millis) {
+  if (millis == grpc_core::Timestamp::InfFuture()) return -1;
+  int64_t delta = (millis - grpc_core::ExecCtx::Get()->Now()).millis();
   if (delta > INT_MAX) {
     return INT_MAX;
   } else if (delta < 0) {
@@ -926,7 +926,8 @@ static void pollset_destroy(grpc_pollset* pollset) {
   gpr_mu_destroy(&pollset->mu);
 }
 
-static grpc_error_handle pollable_epoll(pollable* p, grpc_millis deadline) {
+static grpc_error_handle pollable_epoll(pollable* p,
+                                        grpc_core::Timestamp deadline) {
   GPR_TIMER_SCOPE("pollable_epoll", 0);
   int timeout = poll_deadline_to_millis_timeout(deadline);
@@ -1001,7 +1002,7 @@ static worker_remove_result worker_remove(grpc_pollset_worker** root_worker,
 /* Return true if this thread should poll */
 static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                          grpc_pollset_worker** worker_hdl,
-                         grpc_millis deadline) {
+                         grpc_core::Timestamp deadline) {
   GPR_TIMER_SCOPE("begin_worker", 0);
   bool do_poll =
       (pollset->shutdown_closure == nullptr && !pollset->already_shutdown);
@@ -1027,7 +1028,7 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
   }
   while (do_poll && worker->pollable_obj->root_worker != worker) {
     if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->mu,
-                    grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) {
+                    deadline.as_timespec(GPR_CLOCK_REALTIME))) {
       if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
         gpr_log(GPR_INFO, "PS:%p timeout_wait %p w=%p", pollset,
                 worker->pollable_obj, worker);
@@ -1099,7 +1100,7 @@ static long sys_gettid(void) { return syscall(__NR_gettid); }
    ensure that it is held by the time the function returns */
 static grpc_error_handle pollset_work(grpc_pollset* pollset,
                                       grpc_pollset_worker** worker_hdl,
-                                      grpc_millis deadline) {
+                                      grpc_core::Timestamp deadline) {
   GPR_TIMER_SCOPE("pollset_work", 0);
 #ifdef GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP
   grpc_pollset_worker* worker =
@@ -1116,8 +1117,10 @@ static grpc_error_handle pollset_work(grpc_pollset* pollset,
     gpr_log(GPR_INFO,
             "PS:%p work hdl=%p worker=%p now=%" PRId64 " deadline=%" PRId64
             " kwp=%d pollable=%p",
-            pollset, worker_hdl, WORKER_PTR, grpc_core::ExecCtx::Get()->Now(),
-            deadline, pollset->kicked_without_poller, pollset->active_pollable);
+            pollset, worker_hdl, WORKER_PTR,
+            grpc_core::ExecCtx::Get()->Now().milliseconds_after_process_epoch(),
+            deadline.milliseconds_after_process_epoch(),
+            pollset->kicked_without_poller, pollset->active_pollable);
   }
   static const char* err_desc = "pollset_work";
   grpc_error_handle error = GRPC_ERROR_NONE;

@@ -221,7 +221,7 @@ static void pollset_set_add_fd(grpc_pollset_set* pollset_set, grpc_fd* fd);
    - longer than a millisecond polls are rounded up to the next nearest
      millisecond to avoid spinning
    - infinite timeouts are converted to -1 */
-static int poll_deadline_to_millis_timeout(grpc_millis deadline);
+static int poll_deadline_to_millis_timeout(grpc_core::Timestamp deadline);
 
 /* Allow kick to wakeup the currently polling worker */
 #define GRPC_POLLSET_CAN_KICK_SELF 1
@@ -913,7 +913,7 @@ static void work_combine_error(grpc_error_handle* composite,
 static grpc_error_handle pollset_work(grpc_pollset* pollset,
                                       grpc_pollset_worker** worker_hdl,
-                                      grpc_millis deadline) {
+                                      grpc_core::Timestamp deadline) {
   GPR_TIMER_SCOPE("pollset_work", 0);
   grpc_pollset_worker worker;
   if (worker_hdl) *worker_hdl = &worker;
@@ -1106,7 +1106,7 @@ static grpc_error_handle pollset_work(grpc_pollset* pollset,
       if (queued_work || worker.kicked_specifically) {
         /* If there's queued work on the list, then set the deadline to be
            immediate so we get back out of the polling loop quickly */
-        deadline = 0;
+        deadline = grpc_core::Timestamp();
       }
       keep_polling = 1;
     }
@@ -1151,10 +1151,10 @@ static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
   }
 }
 
-static int poll_deadline_to_millis_timeout(grpc_millis deadline) {
-  if (deadline == GRPC_MILLIS_INF_FUTURE) return -1;
-  if (deadline == 0) return 0;
-  grpc_millis n = deadline - grpc_core::ExecCtx::Get()->Now();
+static int poll_deadline_to_millis_timeout(grpc_core::Timestamp deadline) {
+  if (deadline == grpc_core::Timestamp::InfFuture()) return -1;
+  if (deadline.is_process_epoch()) return 0;
+  int64_t n = (deadline - grpc_core::ExecCtx::Get()->Now()).millis();
   if (n < 0) return 0;
   if (n > INT_MAX) return -1;
   return static_cast<int>(n);

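The pollers above all share the same conversion idiom; here is a standalone, hedged sketch of it (DeadlineToPollTimeout is illustrative, not a function in the tree):

#include <climits>

#include "src/core/lib/gprpp/time.h"
#include "src/core/lib/iomgr/exec_ctx.h"

// Clamp a Timestamp deadline into the int timeout expected by poll()/
// epoll_wait(): -1 means block forever, 0 means do not block at all.
static int DeadlineToPollTimeout(grpc_core::Timestamp deadline) {
  if (deadline == grpc_core::Timestamp::InfFuture()) return -1;
  int64_t delta = (deadline - grpc_core::ExecCtx::Get()->Now()).millis();
  if (delta < 0) return 0;
  if (delta > INT_MAX) return INT_MAX;
  return static_cast<int>(delta);
}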
@@ -316,13 +316,13 @@ static void pollset_destroy(grpc_pollset* pollset) {
 static grpc_error_handle pollset_work(grpc_pollset* pollset,
                                       grpc_pollset_worker** worker,
-                                      grpc_millis deadline) {
+                                      grpc_core::Timestamp deadline) {
   GRPC_POLLING_API_TRACE("pollset_work(%p, %" PRId64 ") begin", pollset,
-                         deadline);
+                         deadline.milliseconds_after_process_epoch());
   grpc_error_handle err =
       g_event_engine->pollset_work(pollset, worker, deadline);
   GRPC_POLLING_API_TRACE("pollset_work(%p, %" PRId64 ") end", pollset,
-                         deadline);
+                         deadline.milliseconds_after_process_epoch());
   return err;
 }

@@ -66,7 +66,7 @@ typedef struct grpc_event_engine_vtable {
   void (*pollset_destroy)(grpc_pollset* pollset);
   grpc_error_handle (*pollset_work)(grpc_pollset* pollset,
                                     grpc_pollset_worker** worker,
-                                    grpc_millis deadline);
+                                    grpc_core::Timestamp deadline);
   grpc_error_handle (*pollset_kick)(grpc_pollset* pollset,
                                     grpc_pollset_worker* specific_worker);
   void (*pollset_add_fd)(grpc_pollset* pollset, struct grpc_fd* fd);

@@ -42,10 +42,9 @@ void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
 void pollset_destroy(grpc_pollset* pollset) {}
 grpc_error_handle pollset_work(grpc_pollset* pollset,
                                grpc_pollset_worker** worker,
-                               grpc_millis deadline) {
+                               grpc_core::Timestamp deadline) {
   (void)worker;
-  gpr_cv_wait(&g_cv, &g_mu,
-              grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME));
+  gpr_cv_wait(&g_cv, &g_mu, deadline.as_timespec(GPR_CLOCK_REALTIME));
   return GRPC_ERROR_NONE;
 }
 grpc_error_handle pollset_kick(grpc_pollset* pollset,

@@ -139,7 +139,8 @@ void tcp_connect(grpc_closure* on_connect, grpc_endpoint** endpoint,
                  grpc_slice_allocator* slice_allocator,
                  grpc_pollset_set* /* interested_parties */,
                  const grpc_channel_args* channel_args,
-                 const grpc_resolved_address* addr, grpc_millis deadline) {
+                 const grpc_resolved_address* addr,
+                 grpc_core::Timestamp deadline) {
   grpc_event_engine_endpoint* ee_endpoint =
       reinterpret_cast<grpc_event_engine_endpoint*>(
           grpc_tcp_create(channel_args, grpc_sockaddr_to_uri(addr)));
@@ -150,8 +151,8 @@ void tcp_connect(grpc_closure* on_connect, grpc_endpoint** endpoint,
       absl::make_unique<WrappedInternalSliceAllocator>(slice_allocator);
   EventEngine::ResolvedAddress ra(reinterpret_cast<const sockaddr*>(addr->addr),
                                   addr->len);
-  absl::Time ee_deadline = grpc_core::ToAbslTime(
-      grpc_millis_to_timespec(deadline, GPR_CLOCK_MONOTONIC));
+  absl::Time ee_deadline =
+      grpc_core::ToAbslTime(deadline.as_timespec(GPR_CLOCK_MONOTONIC));
   ChannelArgsEndpointConfig endpoint_config(channel_args);
   absl::Status connected = GetDefaultEventEngine()->Connect(
       ee_on_connect, ra, endpoint_config, std::move(ee_slice_allocator),

@@ -29,11 +29,10 @@ using ::grpc_event_engine::experimental::EventEngine;
 using ::grpc_event_engine::experimental::GetDefaultEventEngine;
 using ::grpc_event_engine::experimental::GrpcClosureToCallback;
 
-void timer_init(grpc_timer* timer, grpc_millis deadline,
+void timer_init(grpc_timer* timer, grpc_core::Timestamp deadline,
                 grpc_closure* closure) {
   timer->ee_task_handle = GetDefaultEventEngine()->RunAt(
-      grpc_core::ToAbslTime(
-          grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME)),
+      grpc_core::ToAbslTime(deadline.as_timespec(GPR_CLOCK_REALTIME)),
       GrpcClosureToCallback(closure));
   timer->closure = closure;
 }
@@ -47,7 +46,7 @@ void timer_cancel(grpc_timer* timer) {
 }
 
 /* Internal API */
-grpc_timer_check_result timer_check(grpc_millis* /* next */) {
+grpc_timer_check_result timer_check(grpc_core::Timestamp* /* next */) {
   return GRPC_TIMERS_NOT_CHECKED;
 }
 void timer_list_init() {}

@@ -60,97 +60,12 @@ static void exec_ctx_sched(grpc_closure* closure) {
   grpc_closure_list_append(grpc_core::ExecCtx::Get()->closure_list(), closure);
 }
 
-static gpr_timespec g_start_time;
-static gpr_cycle_counter g_start_cycle;
-
-static grpc_millis timespan_to_millis_round_down(gpr_timespec ts) {
-  double x = GPR_MS_PER_SEC * static_cast<double>(ts.tv_sec) +
-             static_cast<double>(ts.tv_nsec) / GPR_NS_PER_MS;
-  if (x < 0) return 0;
-  if (x > static_cast<double>(GRPC_MILLIS_INF_FUTURE)) {
-    return GRPC_MILLIS_INF_FUTURE;
-  }
-  return static_cast<grpc_millis>(x);
-}
-
-static grpc_millis timespec_to_millis_round_down(gpr_timespec ts) {
-  return timespan_to_millis_round_down(gpr_time_sub(ts, g_start_time));
-}
-
-static grpc_millis timespan_to_millis_round_up(gpr_timespec ts) {
-  double x = GPR_MS_PER_SEC * static_cast<double>(ts.tv_sec) +
-             static_cast<double>(ts.tv_nsec) / GPR_NS_PER_MS +
-             static_cast<double>(GPR_NS_PER_SEC - 1) /
-                 static_cast<double>(GPR_NS_PER_SEC);
-  if (x < 0) return 0;
-  if (x > static_cast<double>(GRPC_MILLIS_INF_FUTURE)) {
-    return GRPC_MILLIS_INF_FUTURE;
-  }
-  return static_cast<grpc_millis>(x);
-}
-
-static grpc_millis timespec_to_millis_round_up(gpr_timespec ts) {
-  return timespan_to_millis_round_up(gpr_time_sub(ts, g_start_time));
-}
-
-gpr_timespec grpc_millis_to_timespec(grpc_millis millis,
-                                     gpr_clock_type clock_type) {
-  // special-case infinities as grpc_millis can be 32bit on some platforms
-  // while gpr_time_from_millis always takes an int64_t.
-  if (millis == GRPC_MILLIS_INF_FUTURE) {
-    return gpr_inf_future(clock_type);
-  }
-  if (millis == GRPC_MILLIS_INF_PAST) {
-    return gpr_inf_past(clock_type);
-  }
-  if (clock_type == GPR_TIMESPAN) {
-    return gpr_time_from_millis(millis, GPR_TIMESPAN);
-  }
-  return gpr_time_add(gpr_convert_clock_type(g_start_time, clock_type),
-                      gpr_time_from_millis(millis, GPR_TIMESPAN));
-}
-
-grpc_millis grpc_timespec_to_millis_round_down(gpr_timespec ts) {
-  return timespec_to_millis_round_down(
-      gpr_convert_clock_type(ts, g_start_time.clock_type));
-}
-
-grpc_millis grpc_timespec_to_millis_round_up(gpr_timespec ts) {
-  return timespec_to_millis_round_up(
-      gpr_convert_clock_type(ts, g_start_time.clock_type));
-}
-
-grpc_millis grpc_cycle_counter_to_millis_round_down(gpr_cycle_counter cycles) {
-  return timespan_to_millis_round_down(
-      gpr_cycle_counter_sub(cycles, g_start_cycle));
-}
-
-grpc_millis grpc_cycle_counter_to_millis_round_up(gpr_cycle_counter cycles) {
-  return timespan_to_millis_round_up(
-      gpr_cycle_counter_sub(cycles, g_start_cycle));
-}
-
 namespace grpc_core {
 GPR_THREAD_LOCAL(ExecCtx*) ExecCtx::exec_ctx_;
 GPR_THREAD_LOCAL(ApplicationCallbackExecCtx*)
 ApplicationCallbackExecCtx::callback_exec_ctx_;
 
-// WARNING: for testing purposes only!
-void ExecCtx::TestOnlyGlobalInit(gpr_timespec new_val) {
-  g_start_time = new_val;
-}
-
-void ExecCtx::GlobalInit(void) {
-  // gpr_now(GPR_CLOCK_MONOTONIC) incurs a syscall. We don't actually know the
-  // exact cycle the time was captured, so we use the average of cycles before
-  // and after the syscall as the starting cycle.
-  const gpr_cycle_counter cycle_before = gpr_get_cycle_counter();
-  g_start_time = gpr_now(GPR_CLOCK_MONOTONIC);
-  const gpr_cycle_counter cycle_after = gpr_get_cycle_counter();
-  g_start_cycle = (cycle_before + cycle_after) / 2;
-}
-
 bool ExecCtx::Flush() {
   bool did_something = false;
   GPR_TIMER_SCOPE("grpc_exec_ctx_flush", 0);
@@ -172,9 +87,9 @@ bool ExecCtx::Flush() {
   return did_something;
 }
 
-grpc_millis ExecCtx::Now() {
+Timestamp ExecCtx::Now() {
   if (!now_is_valid_) {
-    now_ = timespec_to_millis_round_down(gpr_now(GPR_CLOCK_MONOTONIC));
+    now_ = Timestamp::FromTimespecRoundDown(gpr_now(GPR_CLOCK_MONOTONIC));
     now_is_valid_ = true;
   }
   return now_;

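This deletion works because the epoch bookkeeping moved into time.cc, leaving ExecCtx::Now() as a cached clock read. A hedged sketch of that caching contract (NowCachingExample is hypothetical, not part of the diff):

#include <grpc/support/log.h>

#include "src/core/lib/iomgr/exec_ctx.h"

void NowCachingExample() {
  grpc_core::ExecCtx exec_ctx;
  grpc_core::Timestamp a = grpc_core::ExecCtx::Get()->Now();
  grpc_core::Timestamp b = grpc_core::ExecCtx::Get()->Now();
  GPR_ASSERT(a == b);  // cached: no second clock read
  grpc_core::ExecCtx::Get()->InvalidateNow();
  GPR_ASSERT(grpc_core::ExecCtx::Get()->Now() >= a);  // refreshed reading
}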
@@ -23,6 +23,7 @@
 #include <limits>
 
+#include <grpc/impl/codegen/gpr_types.h>
 #include <grpc/impl/codegen/grpc_types.h>
 #include <grpc/support/atm.h>
 #include <grpc/support/cpu.h>
@@ -32,13 +33,9 @@
 #include "src/core/lib/gpr/tls.h"
 #include "src/core/lib/gprpp/debug_location.h"
 #include "src/core/lib/gprpp/fork.h"
+#include "src/core/lib/gprpp/time.h"
 #include "src/core/lib/iomgr/closure.h"
 
-typedef int64_t grpc_millis;
-
-#define GRPC_MILLIS_INF_FUTURE INT64_MAX
-#define GRPC_MILLIS_INF_PAST INT64_MIN
-
 /** A combiner represents a list of work to be executed later.
     Forward declared here to avoid a circular dependency with combiner.h. */
 typedef struct grpc_combiner grpc_combiner;
@@ -57,12 +54,6 @@ typedef struct grpc_combiner grpc_combiner;
     should not be counted by fork handlers */
 #define GRPC_APP_CALLBACK_EXEC_CTX_FLAG_IS_INTERNAL_THREAD 1
 
-gpr_timespec grpc_millis_to_timespec(grpc_millis millis, gpr_clock_type clock);
-grpc_millis grpc_timespec_to_millis_round_down(gpr_timespec ts);
-grpc_millis grpc_timespec_to_millis_round_up(gpr_timespec ts);
-grpc_millis grpc_cycle_counter_to_millis_round_down(gpr_cycle_counter cycles);
-grpc_millis grpc_cycle_counter_to_millis_round_up(gpr_cycle_counter cycles);
-
 namespace grpc_core {
 class Combiner;
 /** Execution context.
@@ -189,7 +180,7 @@ class ExecCtx {
    * otherwise refreshes the stored time, sets it valid and returns the new
    * value.
    */
-  grpc_millis Now();
+  Timestamp Now();
 
   /** Invalidates the stored time value. A new time value will be set on calling
    * Now().
@@ -198,26 +189,18 @@ class ExecCtx {
   /** To be used only by shutdown code in iomgr */
   void SetNowIomgrShutdown() {
-    now_ = GRPC_MILLIS_INF_FUTURE;
+    now_ = Timestamp::InfFuture();
     now_is_valid_ = true;
   }
 
   /** To be used only for testing.
   * Sets the now value.
  */
-  void TestOnlySetNow(grpc_millis new_val) {
+  void TestOnlySetNow(Timestamp new_val) {
     now_ = new_val;
     now_is_valid_ = true;
   }
 
-  static void TestOnlyGlobalInit(gpr_timespec new_val);
-
-  /** Global initialization for ExecCtx. Called by iomgr. */
-  static void GlobalInit(void);
-
-  /** Global shutdown for ExecCtx. Called by iomgr. */
-  static void GlobalShutdown(void) {}
-
   /** Gets pointer to current exec_ctx. */
   static ExecCtx* Get() { return exec_ctx_; }
@@ -245,7 +228,7 @@ class ExecCtx {
   unsigned starting_cpu_ = std::numeric_limits<unsigned>::max();
 
   bool now_is_valid_ = false;
-  grpc_millis now_ = 0;
+  Timestamp now_;
 
   static GPR_THREAD_LOCAL(ExecCtx*) exec_ctx_;
   ExecCtx* last_exec_ctx_ = Get();
@@ -370,6 +353,7 @@ class ApplicationCallbackExecCtx {
   grpc_completion_queue_functor* tail_{nullptr};
   static GPR_THREAD_LOCAL(ApplicationCallbackExecCtx*) callback_exec_ctx_;
 };
+
 }  // namespace grpc_core
 
 #endif /* GRPC_CORE_LIB_IOMGR_EXEC_CTX_H */

@@ -44,18 +44,18 @@ static gpr_atm g_custom_events = 0;
 
 static HANDLE g_iocp;
 
-static DWORD deadline_to_millis_timeout(grpc_millis deadline) {
-  if (deadline == GRPC_MILLIS_INF_FUTURE) {
+static DWORD deadline_to_millis_timeout(grpc_core::Timestamp deadline) {
+  if (deadline == grpc_core::Timestamp::InfFuture()) {
     return INFINITE;
   }
-  grpc_millis now = grpc_core::ExecCtx::Get()->Now();
+  grpc_core::Timestamp now = grpc_core::ExecCtx::Get()->Now();
   if (deadline < now) return 0;
-  grpc_millis timeout = deadline - now;
-  if (timeout > std::numeric_limits<DWORD>::max()) return INFINITE;
-  return static_cast<DWORD>(deadline - now);
+  grpc_core::Duration timeout = deadline - now;
+  if (timeout.millis() > std::numeric_limits<DWORD>::max()) return INFINITE;
+  return static_cast<DWORD>(timeout.millis());
 }
 
-grpc_iocp_work_status grpc_iocp_work(grpc_millis deadline) {
+grpc_iocp_work_status grpc_iocp_work(grpc_core::Timestamp deadline) {
   BOOL success;
   DWORD bytes = 0;
   DWORD flags = 0;
@@ -124,7 +124,7 @@ void grpc_iocp_flush(void) {
   grpc_iocp_work_status work_status;
 
   do {
-    work_status = grpc_iocp_work(GRPC_MILLIS_INF_PAST);
+    work_status = grpc_iocp_work(grpc_core::Timestamp::InfPast());
   } while (work_status == GRPC_IOCP_WORK_KICK ||
            grpc_core::ExecCtx::Get()->Flush());
 }
@@ -132,7 +132,7 @@ void grpc_iocp_flush(void) {
 void grpc_iocp_shutdown(void) {
   grpc_core::ExecCtx exec_ctx;
   while (gpr_atm_acq_load(&g_custom_events)) {
-    grpc_iocp_work(GRPC_MILLIS_INF_FUTURE);
+    grpc_iocp_work(grpc_core::Timestamp::InfFuture());
     grpc_core::ExecCtx::Get()->Flush();
   }

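The Windows variant above follows the same shape, but the typed Duration now makes the narrowing to the OS timeout explicit. A portable, hedged sketch (uint32_t stands in for DWORD; DeadlineToDwordTimeout is illustrative only):

#include <cstdint>
#include <limits>

#include "src/core/lib/gprpp/time.h"

uint32_t DeadlineToDwordTimeout(grpc_core::Timestamp deadline,
                                grpc_core::Timestamp now) {
  constexpr uint32_t kInfinite = 0xFFFFFFFFu;  // Windows INFINITE
  if (deadline == grpc_core::Timestamp::InfFuture()) return kInfinite;
  if (deadline < now) return 0;
  grpc_core::Duration timeout = deadline - now;
  if (timeout.millis() > std::numeric_limits<uint32_t>::max()) return kInfinite;
  return static_cast<uint32_t>(timeout.millis());
}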
@@ -36,7 +36,7 @@ typedef enum {
   GRPC_IOCP_WORK_KICK
 } grpc_iocp_work_status;
 
-grpc_iocp_work_status grpc_iocp_work(grpc_millis deadline);
+grpc_iocp_work_status grpc_iocp_work(grpc_core::Timestamp deadline);
 void grpc_iocp_init(void);
 void grpc_iocp_kick(void);
 void grpc_iocp_flush(void);

@@ -44,7 +44,7 @@ void grpc_pollset_destroy(grpc_pollset* pollset) {
 
 grpc_error_handle grpc_pollset_work(grpc_pollset* pollset,
                                     grpc_pollset_worker** worker,
-                                    grpc_millis deadline) {
+                                    grpc_core::Timestamp deadline) {
   return grpc_pollset_impl->work(pollset, worker, deadline);
 }

@@ -45,7 +45,7 @@ typedef struct grpc_pollset_vtable {
   void (*shutdown)(grpc_pollset* pollset, grpc_closure* closure);
   void (*destroy)(grpc_pollset* pollset);
   grpc_error_handle (*work)(grpc_pollset* pollset, grpc_pollset_worker** worker,
-                            grpc_millis deadline);
+                            grpc_core::Timestamp deadline);
   grpc_error_handle (*kick)(grpc_pollset* pollset,
                             grpc_pollset_worker* specific_worker);
   size_t (*pollset_size)(void);
@@ -86,9 +86,9 @@ void grpc_pollset_destroy(grpc_pollset* pollset);
    May call grpc_closure_list_run on grpc_closure_list, without holding the
    pollset
    lock */
-grpc_error_handle grpc_pollset_work(grpc_pollset* pollset,
-                                    grpc_pollset_worker** worker,
-                                    grpc_millis deadline) GRPC_MUST_USE_RESULT;
+grpc_error_handle grpc_pollset_work(
+    grpc_pollset* pollset, grpc_pollset_worker** worker,
+    grpc_core::Timestamp deadline) GRPC_MUST_USE_RESULT;
 
 /* Break one polling thread out of polling work for this pollset.
    If specific_worker is non-NULL, then kick that worker. */

@@ -108,7 +108,7 @@ static void pollset_destroy(grpc_pollset* pollset) {}
 
 static grpc_error_handle pollset_work(grpc_pollset* pollset,
                                       grpc_pollset_worker** worker_hdl,
-                                      grpc_millis deadline) {
+                                      grpc_core::Timestamp deadline) {
   grpc_pollset_worker worker;
   if (worker_hdl) *worker_hdl = &worker;
@@ -159,7 +159,7 @@ static grpc_error_handle pollset_work(grpc_pollset* pollset,
       added_worker = 1;
       while (!worker.kicked) {
         if (gpr_cv_wait(&worker.cv, &grpc_polling_mu,
-                        grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) {
+                        deadline.as_timespec(GPR_CLOCK_REALTIME))) {
           grpc_core::ExecCtx::Get()->InvalidateNow();
           break;
         }

@@ -26,7 +26,7 @@ void grpc_tcp_client_connect(grpc_closure* on_connect, grpc_endpoint** endpoint,
                              grpc_pollset_set* interested_parties,
                              const grpc_channel_args* channel_args,
                              const grpc_resolved_address* addr,
-                             grpc_millis deadline) {
+                             grpc_core::Timestamp deadline) {
   grpc_tcp_client_impl->connect(on_connect, endpoint, interested_parties,
                                 channel_args, addr, deadline);
 }

@@ -33,7 +33,8 @@ typedef struct grpc_tcp_client_vtable {
   void (*connect)(grpc_closure* on_connect, grpc_endpoint** endpoint,
                   grpc_pollset_set* interested_parties,
                   const grpc_channel_args* channel_args,
-                  const grpc_resolved_address* addr, grpc_millis deadline);
+                  const grpc_resolved_address* addr,
+                  grpc_core::Timestamp deadline);
 } grpc_tcp_client_vtable;
 
 /* Asynchronously connect to an address (specified as (addr, len)), and call
@@ -45,7 +46,7 @@ void grpc_tcp_client_connect(grpc_closure* on_connect, grpc_endpoint** endpoint,
                              grpc_pollset_set* interested_parties,
                              const grpc_channel_args* channel_args,
                              const grpc_resolved_address* addr,
-                             grpc_millis deadline);
+                             grpc_core::Timestamp deadline);
 
 void grpc_tcp_client_global_init();

@@ -153,7 +153,7 @@ static void CFStreamClientConnect(grpc_closure* closure, grpc_endpoint** ep,
                                   grpc_pollset_set* interested_parties,
                                   const grpc_channel_args* channel_args,
                                   const grpc_resolved_address* resolved_addr,
-                                  grpc_millis deadline) {
+                                  grpc_core::Timestamp deadline) {
   CFStreamConnect* connect = new CFStreamConnect();
   connect->closure = closure;
   connect->endpoint = ep;

@@ -270,7 +270,7 @@ grpc_error_handle grpc_tcp_client_prepare_fd(
 void grpc_tcp_client_create_from_prepared_fd(
     grpc_pollset_set* interested_parties, grpc_closure* closure, const int fd,
     const grpc_channel_args* channel_args, const grpc_resolved_address* addr,
-    grpc_millis deadline, grpc_endpoint** ep) {
+    grpc_core::Timestamp deadline, grpc_endpoint** ep) {
   int err;
   do {
     err = connect(fd, reinterpret_cast<const grpc_sockaddr*>(addr->addr),
@@ -325,7 +325,7 @@ static void tcp_connect(grpc_closure* closure, grpc_endpoint** ep,
                         grpc_pollset_set* interested_parties,
                         const grpc_channel_args* channel_args,
                         const grpc_resolved_address* addr,
-                        grpc_millis deadline) {
+                        grpc_core::Timestamp deadline) {
   grpc_resolved_address mapped_addr;
   int fd = -1;
   grpc_error_handle error;

Some files were not shown because too many files have changed in this diff.