diff --git a/BUILD b/BUILD index 7a687bb7cef..b1797baa2e8 100644 --- a/BUILD +++ b/BUILD @@ -1664,6 +1664,20 @@ grpc_cc_library( ], ) +grpc_cc_library( + name = "time", + srcs = [ + "src/core/lib/gprpp/time.cc", + ], + hdrs = [ + "src/core/lib/gprpp/time.h", + ], + deps = [ + "gpr", + "gpr_codegen", + ], +) + grpc_cc_library( name = "exec_ctx", srcs = [ @@ -1683,6 +1697,7 @@ grpc_cc_library( "error", "gpr_base", "gpr_tls", + "time", "useful", ], ) @@ -2161,6 +2176,7 @@ grpc_cc_library( "slice_refcount", "sockaddr_utils", "table", + "time", "uri_parser", "useful", ], @@ -2466,6 +2482,7 @@ grpc_cc_library( "server_address", "slice", "sockaddr_utils", + "time", "uri_parser", "useful", "xds_orca_upb", diff --git a/CMakeLists.txt b/CMakeLists.txt index 7fddb466151..e58b5d73aab 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -998,6 +998,7 @@ if(gRPC_BUILD_TESTS) add_dependencies(buildtests_cxx streams_not_seen_test) add_dependencies(buildtests_cxx string_ref_test) add_dependencies(buildtests_cxx table_test) + add_dependencies(buildtests_cxx test_core_gprpp_time_test) add_dependencies(buildtests_cxx test_core_security_credentials_test) add_dependencies(buildtests_cxx test_core_slice_slice_test) add_dependencies(buildtests_cxx test_cpp_client_credentials_test) @@ -2001,6 +2002,7 @@ add_library(grpc src/core/lib/event_engine/memory_allocator.cc src/core/lib/event_engine/resolved_address.cc src/core/lib/event_engine/sockaddr.cc + src/core/lib/gprpp/time.cc src/core/lib/http/format_request.cc src/core/lib/http/httpcli.cc src/core/lib/http/httpcli_security_connector.cc @@ -2642,6 +2644,7 @@ add_library(grpc_unsecure src/core/lib/event_engine/memory_allocator.cc src/core/lib/event_engine/resolved_address.cc src/core/lib/event_engine/sockaddr.cc + src/core/lib/gprpp/time.cc src/core/lib/http/format_request.cc src/core/lib/http/httpcli.cc src/core/lib/http/parser.cc @@ -6077,6 +6080,7 @@ if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_POSIX) add_executable(memory_quota_stress_test src/core/lib/debug/trace.cc src/core/lib/event_engine/memory_allocator.cc + src/core/lib/gprpp/time.cc src/core/lib/iomgr/combiner.cc src/core/lib/iomgr/error.cc src/core/lib/iomgr/exec_ctx.cc @@ -7725,6 +7729,7 @@ if(gRPC_BUILD_TESTS) add_executable(arena_promise_test src/core/lib/debug/trace.cc src/core/lib/event_engine/memory_allocator.cc + src/core/lib/gprpp/time.cc src/core/lib/iomgr/combiner.cc src/core/lib/iomgr/error.cc src/core/lib/iomgr/exec_ctx.cc @@ -8973,6 +8978,7 @@ if(gRPC_BUILD_TESTS) add_executable(chunked_vector_test src/core/lib/debug/trace.cc src/core/lib/event_engine/memory_allocator.cc + src/core/lib/gprpp/time.cc src/core/lib/iomgr/combiner.cc src/core/lib/iomgr/error.cc src/core/lib/iomgr/exec_ctx.cc @@ -10356,6 +10362,7 @@ if(gRPC_BUILD_TESTS) add_executable(exec_ctx_wakeup_scheduler_test src/core/lib/debug/trace.cc + src/core/lib/gprpp/time.cc src/core/lib/iomgr/combiner.cc src/core/lib/iomgr/error.cc src/core/lib/iomgr/exec_ctx.cc @@ -10665,6 +10672,7 @@ if(gRPC_BUILD_TESTS) add_executable(for_each_test src/core/lib/debug/trace.cc src/core/lib/event_engine/memory_allocator.cc + src/core/lib/gprpp/time.cc src/core/lib/iomgr/combiner.cc src/core/lib/iomgr/error.cc src/core/lib/iomgr/exec_ctx.cc @@ -12845,6 +12853,7 @@ if(gRPC_BUILD_TESTS) add_executable(memory_quota_test src/core/lib/debug/trace.cc src/core/lib/event_engine/memory_allocator.cc + src/core/lib/gprpp/time.cc src/core/lib/iomgr/combiner.cc src/core/lib/iomgr/error.cc src/core/lib/iomgr/exec_ctx.cc @@ -13431,6 +13440,7 @@ if(gRPC_BUILD_TESTS) 
add_executable(pipe_test src/core/lib/debug/trace.cc src/core/lib/event_engine/memory_allocator.cc + src/core/lib/gprpp/time.cc src/core/lib/iomgr/combiner.cc src/core/lib/iomgr/error.cc src/core/lib/iomgr/exec_ctx.cc @@ -14263,6 +14273,7 @@ if(gRPC_BUILD_TESTS) add_executable(resource_quota_test src/core/lib/debug/trace.cc src/core/lib/event_engine/memory_allocator.cc + src/core/lib/gprpp/time.cc src/core/lib/iomgr/combiner.cc src/core/lib/iomgr/error.cc src/core/lib/iomgr/exec_ctx.cc @@ -15639,6 +15650,42 @@ target_link_libraries(table_test ) +endif() +if(gRPC_BUILD_TESTS) + +add_executable(test_core_gprpp_time_test + src/core/lib/gprpp/time.cc + test/core/gprpp/time_test.cc + third_party/googletest/googletest/src/gtest-all.cc + third_party/googletest/googlemock/src/gmock-all.cc +) + +target_include_directories(test_core_gprpp_time_test + PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR} + ${CMAKE_CURRENT_SOURCE_DIR}/include + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} + third_party/googletest/googletest/include + third_party/googletest/googletest + third_party/googletest/googlemock/include + third_party/googletest/googlemock + ${_gRPC_PROTO_GENS_DIR} +) + +target_link_libraries(test_core_gprpp_time_test + ${_gRPC_PROTOBUF_LIBRARIES} + ${_gRPC_ALLTARGETS_LIBRARIES} + gpr +) + + endif() if(gRPC_BUILD_TESTS) diff --git a/Makefile b/Makefile index d61ce89dbc8..4e29ddfdcfd 100644 --- a/Makefile +++ b/Makefile @@ -1443,6 +1443,7 @@ LIBGRPC_SRC = \ src/core/lib/event_engine/memory_allocator.cc \ src/core/lib/event_engine/resolved_address.cc \ src/core/lib/event_engine/sockaddr.cc \ + src/core/lib/gprpp/time.cc \ src/core/lib/http/format_request.cc \ src/core/lib/http/httpcli.cc \ src/core/lib/http/httpcli_security_connector.cc \ @@ -1933,6 +1934,7 @@ LIBGRPC_UNSECURE_SRC = \ src/core/lib/event_engine/memory_allocator.cc \ src/core/lib/event_engine/resolved_address.cc \ src/core/lib/event_engine/sockaddr.cc \ + src/core/lib/gprpp/time.cc \ src/core/lib/http/format_request.cc \ src/core/lib/http/httpcli.cc \ src/core/lib/http/parser.cc \ diff --git a/build_autogenerated.yaml b/build_autogenerated.yaml index 907579ee513..e95d8105bb1 100644 --- a/build_autogenerated.yaml +++ b/build_autogenerated.yaml @@ -850,6 +850,7 @@ libs: - src/core/lib/gprpp/ref_counted.h - src/core/lib/gprpp/ref_counted_ptr.h - src/core/lib/gprpp/table.h + - src/core/lib/gprpp/time.h - src/core/lib/http/format_request.h - src/core/lib/http/httpcli.h - src/core/lib/http/httpcli_ssl_credentials.h @@ -1500,6 +1501,7 @@ libs: - src/core/lib/event_engine/memory_allocator.cc - src/core/lib/event_engine/resolved_address.cc - src/core/lib/event_engine/sockaddr.cc + - src/core/lib/gprpp/time.cc - src/core/lib/http/format_request.cc - src/core/lib/http/httpcli.cc - src/core/lib/http/httpcli_security_connector.cc @@ -2015,6 +2017,7 @@ libs: - src/core/lib/gprpp/ref_counted.h - src/core/lib/gprpp/ref_counted_ptr.h - src/core/lib/gprpp/table.h + - src/core/lib/gprpp/time.h - src/core/lib/http/format_request.h - src/core/lib/http/httpcli.h - src/core/lib/http/parser.h @@ -2319,6 +2322,7 @@ libs: - src/core/lib/event_engine/memory_allocator.cc - src/core/lib/event_engine/resolved_address.cc - src/core/lib/event_engine/sockaddr.cc + - src/core/lib/gprpp/time.cc - src/core/lib/http/format_request.cc - src/core/lib/http/httpcli.cc - src/core/lib/http/parser.cc 
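For orientation before the remaining hunks: the new `time` target introduces `grpc_core::Duration` (a span of time) and `grpc_core::Timestamp` (a point on the `ExecCtx` clock), the pair that replaces raw `grpc_millis` throughout this diff. Below is a minimal sketch of the surface the call sites in this diff rely on, inferred only from those call sites; the real `src/core/lib/gprpp/time.h` additionally provides saturating arithmetic, `gpr_timespec`/cycle-counter conversions, and other factories omitted here.

```cpp
// Hypothetical sketch, NOT the real src/core/lib/gprpp/time.h.
#include <cstdint>
#include <limits>

namespace time_sketch {

// A span of time, stored in milliseconds. Factories mirror the names used
// in this diff: Zero, Milliseconds, Seconds, Minutes, Infinity,
// FromSecondsAndNanoseconds, plus millis() for logging with PRId64.
class Duration {
 public:
  constexpr Duration() : millis_(0) {}
  static constexpr Duration Zero() { return Duration(0); }
  static constexpr Duration Milliseconds(int64_t ms) { return Duration(ms); }
  static constexpr Duration Seconds(int64_t s) { return Duration(s * 1000); }
  static constexpr Duration Minutes(int64_t m) { return Seconds(m * 60); }
  static constexpr Duration Infinity() {
    // The real implementation saturates arithmetic near this bound.
    return Duration(std::numeric_limits<int64_t>::max());
  }
  static constexpr Duration FromSecondsAndNanoseconds(int64_t s, int32_t ns) {
    return Duration(s * 1000 + ns / 1000000);
  }
  constexpr int64_t millis() const { return millis_; }
  friend constexpr bool operator==(Duration a, Duration b) {
    return a.millis_ == b.millis_;
  }
  friend constexpr bool operator>(Duration a, Duration b) {
    return a.millis_ > b.millis_;
  }

 private:
  explicit constexpr Duration(int64_t ms) : millis_(ms) {}
  int64_t millis_;
};

// A point in time. Only Timestamp + Duration and Timestamp - Timestamp are
// defined, which is what makes deadline arithmetic unit-safe.
class Timestamp {
 public:
  constexpr Timestamp() : millis_(0) {}
  static constexpr Timestamp InfPast() {
    return Timestamp(std::numeric_limits<int64_t>::min());
  }
  static constexpr Timestamp InfFuture() {
    return Timestamp(std::numeric_limits<int64_t>::max());
  }
  friend constexpr Timestamp operator+(Timestamp t, Duration d) {
    return Timestamp(t.millis_ + d.millis());
  }
  friend constexpr Duration operator-(Timestamp a, Timestamp b) {
    return Duration::Milliseconds(a.millis_ - b.millis_);
  }
  friend constexpr bool operator<(Timestamp a, Timestamp b) {
    return a.millis_ < b.millis_;
  }

 private:
  explicit constexpr Timestamp(int64_t ms) : millis_(ms) {}
  int64_t millis_;
};

}  // namespace time_sketch
```

With this split, `ExecCtx::Get()->Now() + g_poll_interval` in the hunks below yields a `Timestamp` suitable for `grpc_timer_init()`, while `next_try - ExecCtx::Get()->Now()` yields a `Duration` for logging, with no `_ms` suffix conventions needed in variable names.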
@@ -3890,6 +3894,7 @@ targets: - src/core/lib/gprpp/orphanable.h - src/core/lib/gprpp/ref_counted.h - src/core/lib/gprpp/ref_counted_ptr.h + - src/core/lib/gprpp/time.h - src/core/lib/iomgr/closure.h - src/core/lib/iomgr/combiner.h - src/core/lib/iomgr/error.h @@ -3920,6 +3925,7 @@ targets: src: - src/core/lib/debug/trace.cc - src/core/lib/event_engine/memory_allocator.cc + - src/core/lib/gprpp/time.cc - src/core/lib/iomgr/combiner.cc - src/core/lib/iomgr/error.cc - src/core/lib/iomgr/exec_ctx.cc @@ -4627,6 +4633,7 @@ targets: - src/core/lib/gprpp/orphanable.h - src/core/lib/gprpp/ref_counted.h - src/core/lib/gprpp/ref_counted_ptr.h + - src/core/lib/gprpp/time.h - src/core/lib/iomgr/closure.h - src/core/lib/iomgr/combiner.h - src/core/lib/iomgr/error.h @@ -4661,6 +4668,7 @@ targets: src: - src/core/lib/debug/trace.cc - src/core/lib/event_engine/memory_allocator.cc + - src/core/lib/gprpp/time.cc - src/core/lib/iomgr/combiner.cc - src/core/lib/iomgr/error.cc - src/core/lib/iomgr/exec_ctx.cc @@ -5129,6 +5137,7 @@ targets: - src/core/lib/gprpp/orphanable.h - src/core/lib/gprpp/ref_counted.h - src/core/lib/gprpp/ref_counted_ptr.h + - src/core/lib/gprpp/time.h - src/core/lib/iomgr/closure.h - src/core/lib/iomgr/combiner.h - src/core/lib/iomgr/error.h @@ -5162,6 +5171,7 @@ targets: src: - src/core/lib/debug/trace.cc - src/core/lib/event_engine/memory_allocator.cc + - src/core/lib/gprpp/time.cc - src/core/lib/iomgr/combiner.cc - src/core/lib/iomgr/error.cc - src/core/lib/iomgr/exec_ctx.cc @@ -5682,6 +5692,7 @@ targets: - src/core/lib/gprpp/atomic_utils.h - src/core/lib/gprpp/ref_counted.h - src/core/lib/gprpp/ref_counted_ptr.h + - src/core/lib/gprpp/time.h - src/core/lib/iomgr/closure.h - src/core/lib/iomgr/combiner.h - src/core/lib/iomgr/error.h @@ -5703,6 +5714,7 @@ targets: - src/core/lib/slice/slice_string_helpers.h src: - src/core/lib/debug/trace.cc + - src/core/lib/gprpp/time.cc - src/core/lib/iomgr/combiner.cc - src/core/lib/iomgr/error.cc - src/core/lib/iomgr/exec_ctx.cc @@ -5873,6 +5885,7 @@ targets: - src/core/lib/gprpp/orphanable.h - src/core/lib/gprpp/ref_counted.h - src/core/lib/gprpp/ref_counted_ptr.h + - src/core/lib/gprpp/time.h - src/core/lib/iomgr/closure.h - src/core/lib/iomgr/combiner.h - src/core/lib/iomgr/error.h @@ -5914,6 +5927,7 @@ targets: src: - src/core/lib/debug/trace.cc - src/core/lib/event_engine/memory_allocator.cc + - src/core/lib/gprpp/time.cc - src/core/lib/iomgr/combiner.cc - src/core/lib/iomgr/error.cc - src/core/lib/iomgr/exec_ctx.cc @@ -6711,6 +6725,7 @@ targets: - src/core/lib/gprpp/orphanable.h - src/core/lib/gprpp/ref_counted.h - src/core/lib/gprpp/ref_counted_ptr.h + - src/core/lib/gprpp/time.h - src/core/lib/iomgr/closure.h - src/core/lib/iomgr/combiner.h - src/core/lib/iomgr/error.h @@ -6742,6 +6757,7 @@ targets: src: - src/core/lib/debug/trace.cc - src/core/lib/event_engine/memory_allocator.cc + - src/core/lib/gprpp/time.cc - src/core/lib/iomgr/combiner.cc - src/core/lib/iomgr/error.cc - src/core/lib/iomgr/exec_ctx.cc @@ -7008,6 +7024,7 @@ targets: - src/core/lib/gprpp/orphanable.h - src/core/lib/gprpp/ref_counted.h - src/core/lib/gprpp/ref_counted_ptr.h + - src/core/lib/gprpp/time.h - src/core/lib/iomgr/closure.h - src/core/lib/iomgr/combiner.h - src/core/lib/iomgr/error.h @@ -7047,6 +7064,7 @@ targets: src: - src/core/lib/debug/trace.cc - src/core/lib/event_engine/memory_allocator.cc + - src/core/lib/gprpp/time.cc - src/core/lib/iomgr/combiner.cc - src/core/lib/iomgr/error.cc - src/core/lib/iomgr/exec_ctx.cc @@ -7366,6 +7384,7 @@ targets: - 
src/core/lib/gprpp/orphanable.h - src/core/lib/gprpp/ref_counted.h - src/core/lib/gprpp/ref_counted_ptr.h + - src/core/lib/gprpp/time.h - src/core/lib/iomgr/closure.h - src/core/lib/iomgr/combiner.h - src/core/lib/iomgr/error.h @@ -7398,6 +7417,7 @@ targets: src: - src/core/lib/debug/trace.cc - src/core/lib/event_engine/memory_allocator.cc + - src/core/lib/gprpp/time.cc - src/core/lib/iomgr/combiner.cc - src/core/lib/iomgr/error.cc - src/core/lib/iomgr/exec_ctx.cc @@ -7829,6 +7849,18 @@ targets: - absl/types:optional - absl/utility:utility uses_polling: false +- name: test_core_gprpp_time_test + gtest: true + build: test + language: c++ + headers: + - src/core/lib/gprpp/time.h + src: + - src/core/lib/gprpp/time.cc + - test/core/gprpp/time_test.cc + deps: + - gpr + uses_polling: false - name: test_core_security_credentials_test gtest: true build: test diff --git a/config.m4 b/config.m4 index 0cb132821bb..b744d3e35d7 100644 --- a/config.m4 +++ b/config.m4 @@ -502,6 +502,7 @@ if test "$PHP_GRPC" != "no"; then src/core/lib/gprpp/status_helper.cc \ src/core/lib/gprpp/thd_posix.cc \ src/core/lib/gprpp/thd_windows.cc \ + src/core/lib/gprpp/time.cc \ src/core/lib/gprpp/time_util.cc \ src/core/lib/http/format_request.cc \ src/core/lib/http/httpcli.cc \ diff --git a/config.w32 b/config.w32 index 13b7a44ce21..7c1cf958c8f 100644 --- a/config.w32 +++ b/config.w32 @@ -468,6 +468,7 @@ if (PHP_GRPC != "no") { "src\\core\\lib\\gprpp\\status_helper.cc " + "src\\core\\lib\\gprpp\\thd_posix.cc " + "src\\core\\lib\\gprpp\\thd_windows.cc " + + "src\\core\\lib\\gprpp\\time.cc " + "src\\core\\lib\\gprpp\\time_util.cc " + "src\\core\\lib\\http\\format_request.cc " + "src\\core\\lib\\http\\httpcli.cc " + diff --git a/doc/core/grpc-polling-engines.md b/doc/core/grpc-polling-engines.md index e5b700c0b28..b7e36624da1 100644 --- a/doc/core/grpc-polling-engines.md +++ b/doc/core/grpc-polling-engines.md @@ -64,7 +64,7 @@ The following are the **Opaque** structures exposed by Polling Engine interface > **NOTE**: There is no `grpc_pollset_remove_fd`. This is because calling `grpc_fd_orphan()` will effectively remove the fd from all the pollsets it’s a part of - **grpc_pollset_work** - - Signature: `grpc_pollset_work(grpc_pollset* ps, grpc_pollset_worker** worker, grpc_millis deadline)` + - Signature: `grpc_pollset_work(grpc_pollset* ps, grpc_pollset_worker** worker, grpc_core::Timestamp deadline)` > **NOTE**: `grpc_pollset_work()` requires the pollset mutex to be locked before calling it. Shortly after calling `grpc_pollset_work()`, the function populates the `*worker` pointer (among other things) and releases the mutex. Once `grpc_pollset_work()` returns, the `*worker` pointer is **invalid** and should not be used anymore. See the code in `completion_queue.cc` to see how this is used. 
- Poll the fds in the pollset for events AND return when ANY of the following is true: - Deadline expired diff --git a/gRPC-C++.podspec b/gRPC-C++.podspec index a8c8941ef85..daa38fd9b43 100644 --- a/gRPC-C++.podspec +++ b/gRPC-C++.podspec @@ -694,6 +694,7 @@ Pod::Spec.new do |s| 'src/core/lib/gprpp/sync.h', 'src/core/lib/gprpp/table.h', 'src/core/lib/gprpp/thd.h', + 'src/core/lib/gprpp/time.h', 'src/core/lib/gprpp/time_util.h', 'src/core/lib/http/format_request.h', 'src/core/lib/http/httpcli.h', @@ -1491,6 +1492,7 @@ Pod::Spec.new do |s| 'src/core/lib/gprpp/sync.h', 'src/core/lib/gprpp/table.h', 'src/core/lib/gprpp/thd.h', + 'src/core/lib/gprpp/time.h', 'src/core/lib/gprpp/time_util.h', 'src/core/lib/http/format_request.h', 'src/core/lib/http/httpcli.h', diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec index 503a331b597..7ca112584b7 100644 --- a/gRPC-Core.podspec +++ b/gRPC-Core.podspec @@ -1095,6 +1095,8 @@ Pod::Spec.new do |s| 'src/core/lib/gprpp/thd.h', 'src/core/lib/gprpp/thd_posix.cc', 'src/core/lib/gprpp/thd_windows.cc', + 'src/core/lib/gprpp/time.cc', + 'src/core/lib/gprpp/time.h', 'src/core/lib/gprpp/time_util.cc', 'src/core/lib/gprpp/time_util.h', 'src/core/lib/http/format_request.cc', @@ -2085,6 +2087,7 @@ Pod::Spec.new do |s| 'src/core/lib/gprpp/sync.h', 'src/core/lib/gprpp/table.h', 'src/core/lib/gprpp/thd.h', + 'src/core/lib/gprpp/time.h', 'src/core/lib/gprpp/time_util.h', 'src/core/lib/http/format_request.h', 'src/core/lib/http/httpcli.h', diff --git a/grpc.gemspec b/grpc.gemspec index 5ed3637409d..1f9de679e9a 100644 --- a/grpc.gemspec +++ b/grpc.gemspec @@ -1014,6 +1014,8 @@ Gem::Specification.new do |s| s.files += %w( src/core/lib/gprpp/thd.h ) s.files += %w( src/core/lib/gprpp/thd_posix.cc ) s.files += %w( src/core/lib/gprpp/thd_windows.cc ) + s.files += %w( src/core/lib/gprpp/time.cc ) + s.files += %w( src/core/lib/gprpp/time.h ) s.files += %w( src/core/lib/gprpp/time_util.cc ) s.files += %w( src/core/lib/gprpp/time_util.h ) s.files += %w( src/core/lib/http/format_request.cc ) diff --git a/grpc.gyp b/grpc.gyp index 8baf95398ee..be77bb59c99 100644 --- a/grpc.gyp +++ b/grpc.gyp @@ -905,6 +905,7 @@ 'src/core/lib/event_engine/memory_allocator.cc', 'src/core/lib/event_engine/resolved_address.cc', 'src/core/lib/event_engine/sockaddr.cc', + 'src/core/lib/gprpp/time.cc', 'src/core/lib/http/format_request.cc', 'src/core/lib/http/httpcli.cc', 'src/core/lib/http/httpcli_security_connector.cc', @@ -1366,6 +1367,7 @@ 'src/core/lib/event_engine/memory_allocator.cc', 'src/core/lib/event_engine/resolved_address.cc', 'src/core/lib/event_engine/sockaddr.cc', + 'src/core/lib/gprpp/time.cc', 'src/core/lib/http/format_request.cc', 'src/core/lib/http/httpcli.cc', 'src/core/lib/http/parser.cc', diff --git a/package.xml b/package.xml index 8734c4b82ce..10e3e312618 100644 --- a/package.xml +++ b/package.xml @@ -994,6 +994,8 @@ <file baseinstalldir="/" name="src/core/lib/gprpp/thd.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/gprpp/thd_posix.cc" role="src" /> <file baseinstalldir="/" name="src/core/lib/gprpp/thd_windows.cc" role="src" /> + <file baseinstalldir="/" name="src/core/lib/gprpp/time.cc" role="src" /> + <file baseinstalldir="/" name="src/core/lib/gprpp/time.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/gprpp/time_util.cc" role="src" /> <file baseinstalldir="/" name="src/core/lib/gprpp/time_util.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/http/format_request.cc" role="src" /> diff --git a/src/core/ext/filters/client_channel/backup_poller.cc 
b/src/core/ext/filters/client_channel/backup_poller.cc index 1332e877dc2..ba3365cd788 100644 --- a/src/core/ext/filters/client_channel/backup_poller.cc +++ b/src/core/ext/filters/client_channel/backup_poller.cc @@ -29,6 +29,7 @@ #include "src/core/lib/gpr/string.h" #include "src/core/lib/gprpp/global_config.h" #include "src/core/lib/gprpp/memory.h" +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/iomgr/error.h" #include "src/core/lib/iomgr/iomgr.h" #include "src/core/lib/iomgr/pollset.h" @@ -57,7 +58,8 @@ static backup_poller* g_poller = nullptr; // guarded by g_poller_mu // g_poll_interval_ms is set only once at the first time // grpc_client_channel_start_backup_polling() is called, after that it is // treated as const. -static int g_poll_interval_ms = DEFAULT_POLL_INTERVAL_MS; +static grpc_core::Duration g_poll_interval = + grpc_core::Duration::Milliseconds(DEFAULT_POLL_INTERVAL_MS); GPR_GLOBAL_CONFIG_DEFINE_INT32( grpc_client_channel_backup_poll_interval_ms, DEFAULT_POLL_INTERVAL_MS, @@ -75,10 +77,10 @@ void grpc_client_channel_global_init_backup_polling() { if (poll_interval_ms < 0) { gpr_log(GPR_ERROR, "Invalid GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS: %d, " - "default value %d will be used.", - poll_interval_ms, g_poll_interval_ms); + "default value %" PRId64 " will be used.", + poll_interval_ms, g_poll_interval.millis()); } else { - g_poll_interval_ms = poll_interval_ms; + g_poll_interval = grpc_core::Duration::Milliseconds(poll_interval_ms); } } @@ -133,7 +135,7 @@ static void run_poller(void* arg, grpc_error_handle error) { gpr_mu_unlock(p->pollset_mu); GRPC_LOG_IF_ERROR("Run client channel backup poller", err); grpc_timer_init(&p->polling_timer, - grpc_core::ExecCtx::Get()->Now() + g_poll_interval_ms, + grpc_core::ExecCtx::Get()->Now() + g_poll_interval, &p->run_poller_closure); } @@ -150,14 +152,15 @@ static void g_poller_init_locked() { GRPC_CLOSURE_INIT(&g_poller->run_poller_closure, run_poller, g_poller, grpc_schedule_on_exec_ctx); grpc_timer_init(&g_poller->polling_timer, - grpc_core::ExecCtx::Get()->Now() + g_poll_interval_ms, + grpc_core::ExecCtx::Get()->Now() + g_poll_interval, &g_poller->run_poller_closure); } } void grpc_client_channel_start_backup_polling( grpc_pollset_set* interested_parties) { - if (g_poll_interval_ms == 0 || grpc_iomgr_run_in_background()) { + if (g_poll_interval == grpc_core::Duration::Zero() || + grpc_iomgr_run_in_background()) { return; } gpr_mu_lock(&g_poller_mu); @@ -175,7 +178,8 @@ void grpc_client_channel_start_backup_polling( void grpc_client_channel_stop_backup_polling( grpc_pollset_set* interested_parties) { - if (g_poll_interval_ms == 0 || grpc_iomgr_run_in_background()) { + if (g_poll_interval == grpc_core::Duration::Zero() || + grpc_iomgr_run_in_background()) { return; } grpc_pollset_set_del_pollset(interested_parties, g_poller->pollset); diff --git a/src/core/ext/filters/client_channel/channel_connectivity.cc b/src/core/ext/filters/client_channel/channel_connectivity.cc index a75b97462e1..e40cc0838ff 100644 --- a/src/core/ext/filters/client_channel/channel_connectivity.cc +++ b/src/core/ext/filters/client_channel/channel_connectivity.cc @@ -95,7 +95,7 @@ class StateWatcher : public DualRefCounted<StateWatcher> { // watch, but we are hiding that fact from the application. if (IsLameChannel(channel)) { // Ref from object creation is held by timer callback. 
- StartTimer(grpc_timespec_to_millis_round_up(deadline)); + StartTimer(Timestamp::FromTimespecRoundUp(deadline)); return; } gpr_log(GPR_ERROR, @@ -108,7 +108,7 @@ class StateWatcher : public DualRefCounted<StateWatcher> { // the other by the watcher callback. Ref().release(); auto* watcher_timer_init_state = new WatcherTimerInitState( - this, grpc_timespec_to_millis_round_up(deadline)); + this, Timestamp::FromTimespecRoundUp(deadline)); client_channel->AddExternalConnectivityWatcher( grpc_polling_entity_create_from_pollset(grpc_cq_pollset(cq)), &state_, &on_complete_, watcher_timer_init_state->closure()); @@ -123,7 +123,7 @@ class StateWatcher : public DualRefCounted<StateWatcher> { // ClientChannel actually starts the watch. class WatcherTimerInitState { public: - WatcherTimerInitState(StateWatcher* state_watcher, grpc_millis deadline) + WatcherTimerInitState(StateWatcher* state_watcher, Timestamp deadline) : state_watcher_(state_watcher), deadline_(deadline) { GRPC_CLOSURE_INIT(&closure_, WatcherTimerInit, this, nullptr); } @@ -138,11 +138,11 @@ class StateWatcher : public DualRefCounted<StateWatcher> { } StateWatcher* state_watcher_; - grpc_millis deadline_; + Timestamp deadline_; grpc_closure closure_; }; - void StartTimer(grpc_millis deadline) { + void StartTimer(Timestamp deadline) { grpc_timer_init(&timer_, deadline, &on_timeout_); } diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc index 56761074870..11c52d3a21b 100644 --- a/src/core/ext/filters/client_channel/client_channel.cc +++ b/src/core/ext/filters/client_channel/client_channel.cc @@ -190,7 +190,7 @@ class ClientChannel::CallData { grpc_slice path_; // Request path. gpr_cycle_counter call_start_time_; - grpc_millis deadline_; + Timestamp deadline_; Arena* arena_; grpc_call_stack* owning_call_; CallCombiner* call_combiner_; @@ -373,7 +373,7 @@ class DynamicTerminationFilter::CallData { ~CallData() { grpc_slice_unref_internal(path_); } grpc_slice path_; // Request path. - grpc_millis deadline_; + Timestamp deadline_; Arena* arena_; grpc_call_stack* owning_call_; CallCombiner* call_combiner_; @@ -1862,7 +1862,7 @@ ClientChannel::CallData::CallData(grpc_call_element* elem, : deadline_state_(elem, args, GPR_LIKELY(chand.deadline_checking_enabled_) ? args.deadline - : GRPC_MILLIS_INF_FUTURE), + : Timestamp::InfFuture()), path_(grpc_slice_ref_internal(args.path)), call_start_time_(args.start_time), deadline_(args.deadline), @@ -2227,9 +2227,10 @@ grpc_error_handle ClientChannel::CallData::ApplyServiceConfigToCallLocked( if (method_params != nullptr) { // If the deadline from the service config is shorter than the one // from the client API, reset the deadline timer. 
- if (chand->deadline_checking_enabled_ && method_params->timeout() != 0) { - const grpc_millis per_method_deadline = - grpc_cycle_counter_to_millis_round_up(call_start_time_) + + if (chand->deadline_checking_enabled_ && + method_params->timeout() != Duration::Zero()) { + const Timestamp per_method_deadline = + Timestamp::FromCycleCounterRoundUp(call_start_time_) + method_params->timeout(); if (per_method_deadline < deadline_) { deadline_ = per_method_deadline; @@ -2471,7 +2472,8 @@ class ClientChannel::LoadBalancedCall::Metadata std::string(value_slice.as_string_view())); } - void Encode(GrpcTimeoutMetadata, grpc_millis) {} + void Encode(GrpcTimeoutMetadata, + const typename GrpcTimeoutMetadata::ValueType&) {} void Encode(HttpPathMetadata, const Slice&) {} void Encode(HttpMethodMetadata, const typename HttpMethodMetadata::ValueType&) {} diff --git a/src/core/ext/filters/client_channel/client_channel.h b/src/core/ext/filters/client_channel/client_channel.h index 05929279386..b1f76f997d1 100644 --- a/src/core/ext/filters/client_channel/client_channel.h +++ b/src/core/ext/filters/client_channel/client_channel.h @@ -449,7 +449,7 @@ class ClientChannel::LoadBalancedCall // that uses any one of them, we should store them in the call // context. This will save per-call memory overhead. Slice path_; // Request path. - grpc_millis deadline_; + Timestamp deadline_; Arena* arena_; grpc_call_stack* owning_call_; CallCombiner* call_combiner_; diff --git a/src/core/ext/filters/client_channel/connector.h b/src/core/ext/filters/client_channel/connector.h index 3e193be4cd2..5777db70d53 100644 --- a/src/core/ext/filters/client_channel/connector.h +++ b/src/core/ext/filters/client_channel/connector.h @@ -38,7 +38,7 @@ class SubchannelConnector : public InternallyRefCounted<SubchannelConnector> { // Set of pollsets interested in this connection. grpc_pollset_set* interested_parties; // Deadline for connection. - grpc_millis deadline; + Timestamp deadline; // Channel args to be passed to handshakers and transport. 
const grpc_channel_args* channel_args; }; diff --git a/src/core/ext/filters/client_channel/dynamic_filters.h b/src/core/ext/filters/client_channel/dynamic_filters.h index 03211ec1085..515b9252f68 100644 --- a/src/core/ext/filters/client_channel/dynamic_filters.h +++ b/src/core/ext/filters/client_channel/dynamic_filters.h @@ -40,7 +40,7 @@ class DynamicFilters : public RefCounted<DynamicFilters> { grpc_polling_entity* pollent; grpc_slice path; gpr_cycle_counter start_time; - grpc_millis deadline; + Timestamp deadline; Arena* arena; grpc_call_context_element* context; CallCombiner* call_combiner; diff --git a/src/core/ext/filters/client_channel/health/health_check_client.cc b/src/core/ext/filters/client_channel/health/health_check_client.cc index ae0a5576ecf..82aac56cfc6 100644 --- a/src/core/ext/filters/client_channel/health/health_check_client.cc +++ b/src/core/ext/filters/client_channel/health/health_check_client.cc @@ -68,12 +68,12 @@ HealthCheckClient::HealthCheckClient( watcher_(std::move(watcher)), retry_backoff_( BackOff::Options() - .set_initial_backoff( - HEALTH_CHECK_INITIAL_CONNECT_BACKOFF_SECONDS * 1000) + .set_initial_backoff(Duration::Seconds( + HEALTH_CHECK_INITIAL_CONNECT_BACKOFF_SECONDS)) .set_multiplier(HEALTH_CHECK_RECONNECT_BACKOFF_MULTIPLIER) .set_jitter(HEALTH_CHECK_RECONNECT_JITTER) - .set_max_backoff(HEALTH_CHECK_RECONNECT_MAX_BACKOFF_SECONDS * - 1000)) { + .set_max_backoff(Duration::Seconds( + HEALTH_CHECK_RECONNECT_MAX_BACKOFF_SECONDS))) { if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) { gpr_log(GPR_INFO, "created HealthCheckClient %p", this); } @@ -144,14 +144,14 @@ void HealthCheckClient::StartCallLocked() { void HealthCheckClient::StartRetryTimerLocked() { SetHealthStatusLocked(GRPC_CHANNEL_TRANSIENT_FAILURE, "health check call failed; will retry after backoff"); - grpc_millis next_try = retry_backoff_.NextAttemptTime(); + Timestamp next_try = retry_backoff_.NextAttemptTime(); if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) { gpr_log(GPR_INFO, "HealthCheckClient %p: health check call lost...", this); - grpc_millis timeout = next_try - ExecCtx::Get()->Now(); - if (timeout > 0) { + Duration timeout = next_try - ExecCtx::Get()->Now(); + if (timeout > Duration::Zero()) { gpr_log(GPR_INFO, "HealthCheckClient %p: ... will retry in %" PRId64 "ms.", this, - timeout); + timeout.millis()); } else { gpr_log(GPR_INFO, "HealthCheckClient %p: ... 
retrying immediately.", this); @@ -295,7 +295,7 @@ void HealthCheckClient::CallState::StartCall() { &pollent_, Slice::FromStaticString("/grpc.health.v1.Health/Watch"), gpr_get_cycle_counter(), // start_time - GRPC_MILLIS_INF_FUTURE, // deadline + Timestamp::InfFuture(), // deadline arena_.get(), context_, &call_combiner_, @@ -560,7 +560,7 @@ void HealthCheckClient::CallState::RecvTrailingMetadataReady( self->recv_trailing_metadata_.get(GrpcStatusMetadata()) .value_or(GRPC_STATUS_UNKNOWN); if (error != GRPC_ERROR_NONE) { - grpc_error_get_status(error, GRPC_MILLIS_INF_FUTURE, &status, + grpc_error_get_status(error, Timestamp::InfFuture(), &status, nullptr /* slice */, nullptr /* http_error */, nullptr /* error_string */); } diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc index 69c33fcf993..55ee8ebb627 100644 --- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc @@ -214,7 +214,7 @@ class GrpcLb : public LoadBalancingPolicy { // The stats for client-side load reporting associated with this LB call. // Created after the first serverlist is received. RefCountedPtr<GrpcLbClientStats> client_stats_; - grpc_millis client_stats_report_interval_ = 0; + Duration client_stats_report_interval_; grpc_timer client_load_report_timer_; bool client_load_report_timer_callback_pending_ = false; bool last_client_load_report_counters_were_zero_ = false; @@ -452,8 +452,8 @@ class GrpcLb : public LoadBalancingPolicy { // is shutting down, or the LB call has ended). A non-NULL lb_calld_ always // contains a non-NULL lb_call_. OrphanablePtr<BalancerCallState> lb_calld_; - // Timeout in milliseconds for the LB call. 0 means no deadline. - const int lb_call_timeout_ms_ = 0; + // Timeout for the LB call. 0 means no deadline. + const Duration lb_call_timeout_; // Balancer call retry state. BackOff lb_call_backoff_; bool retry_timer_callback_pending_ = false; @@ -474,7 +474,7 @@ class GrpcLb : public LoadBalancingPolicy { // State for fallback-at-startup checks. // Timeout after startup after which we will go into fallback mode if // we have not received a serverlist from the balancer. - const int fallback_at_startup_timeout_ = 0; + const Duration fallback_at_startup_timeout_; bool fallback_at_startup_checks_pending_ = false; grpc_timer lb_fallback_timer_; grpc_closure lb_on_fallback_; @@ -485,8 +485,8 @@ class GrpcLb : public LoadBalancingPolicy { bool child_policy_ready_ = false; // Deleted subchannel caching. - const grpc_millis subchannel_cache_interval_ms_; - std::map<grpc_millis /*deletion time*/, + const Duration subchannel_cache_interval_; + std::map<Timestamp /*deletion time*/, std::vector<RefCountedPtr<SubchannelInterface>>> cached_subchannels_; grpc_timer subchannel_cache_timer_; @@ -784,10 +784,10 @@ GrpcLb::BalancerCallState::BalancerCallState( this, grpc_schedule_on_exec_ctx); GRPC_CLOSURE_INIT(&client_load_report_closure_, MaybeSendClientLoadReport, this, grpc_schedule_on_exec_ctx); - const grpc_millis deadline = - grpclb_policy()->lb_call_timeout_ms_ == 0 - ? GRPC_MILLIS_INF_FUTURE - : ExecCtx::Get()->Now() + grpclb_policy()->lb_call_timeout_ms_; + const Timestamp deadline = + grpclb_policy()->lb_call_timeout_ == Duration::Zero() + ? 
Timestamp::InfFuture() + : ExecCtx::Get()->Now() + grpclb_policy()->lb_call_timeout_; lb_call_ = grpc_channel_create_pollset_set_call( grpclb_policy()->lb_channel_, nullptr, GRPC_PROPAGATE_DEFAULTS, grpclb_policy_->interested_parties(), @@ -914,7 +914,7 @@ void GrpcLb::BalancerCallState::ScheduleNextClientLoadReportLocked() { // in a loop while draining the currently-held WorkSerializer. // Also see https://github.com/grpc/grpc/issues/26079. ExecCtx::Get()->InvalidateNow(); - const grpc_millis next_client_load_report_time = + const Timestamp next_client_load_report_time = ExecCtx::Get()->Now() + client_stats_report_interval_; GRPC_CLOSURE_INIT(&client_load_report_closure_, MaybeSendClientLoadReport, this, grpc_schedule_on_exec_ctx); @@ -1077,15 +1077,16 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked() { } else { switch (response.type) { case response.INITIAL: { - if (response.client_stats_report_interval != 0) { + if (response.client_stats_report_interval != Duration::Zero()) { client_stats_report_interval_ = std::max( - int64_t(GPR_MS_PER_SEC), response.client_stats_report_interval); + Duration::Seconds(1), response.client_stats_report_interval); if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) { gpr_log(GPR_INFO, "[grpclb %p] lb_calld=%p: Received initial LB response " "message; client load reporting interval = %" PRId64 " milliseconds", - grpclb_policy(), this, client_stats_report_interval_); + grpclb_policy(), this, + client_stats_report_interval_.millis()); } } else if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) { gpr_log(GPR_INFO, @@ -1111,7 +1112,8 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked() { seen_serverlist_ = true; // Start sending client load report only after we start using the // serverlist returned from the current LB call. - if (client_stats_report_interval_ > 0 && client_stats_ == nullptr) { + if (client_stats_report_interval_ > Duration::Zero() && + client_stats_ == nullptr) { client_stats_ = MakeRefCounted<GrpcLbClientStats>(); // Ref held by callback. 
Ref(DEBUG_LOCATION, "client_load_report").release(); @@ -1364,22 +1366,25 @@ GrpcLb::GrpcLb(Args args) : LoadBalancingPolicy(std::move(args)), server_name_(GetServerNameFromChannelArgs(args.args)), response_generator_(MakeRefCounted<FakeResolverResponseGenerator>()), - lb_call_timeout_ms_(grpc_channel_args_find_integer( - args.args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS, {0, 0, INT_MAX})), + lb_call_timeout_(Duration::Milliseconds(grpc_channel_args_find_integer( + args.args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS, {0, 0, INT_MAX}))), lb_call_backoff_( BackOff::Options() - .set_initial_backoff(GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS * - 1000) + .set_initial_backoff(Duration::Seconds( + GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS)) .set_multiplier(GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER) .set_jitter(GRPC_GRPCLB_RECONNECT_JITTER) - .set_max_backoff(GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * - 1000)), - fallback_at_startup_timeout_(grpc_channel_args_find_integer( - args.args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS, - {GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0, INT_MAX})), - subchannel_cache_interval_ms_(grpc_channel_args_find_integer( - args.args, GRPC_ARG_GRPCLB_SUBCHANNEL_CACHE_INTERVAL_MS, - {GRPC_GRPCLB_DEFAULT_SUBCHANNEL_DELETION_DELAY_MS, 0, INT_MAX})) { + .set_max_backoff(Duration::Seconds( + GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS))), + fallback_at_startup_timeout_( + Duration::Milliseconds(grpc_channel_args_find_integer( + args.args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS, + {GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0, INT_MAX}))), + subchannel_cache_interval_( + Duration::Milliseconds(grpc_channel_args_find_integer( + args.args, GRPC_ARG_GRPCLB_SUBCHANNEL_CACHE_INTERVAL_MS, + {GRPC_GRPCLB_DEFAULT_SUBCHANNEL_DELETION_DELAY_MS, 0, + INT_MAX}))) { if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) { gpr_log(GPR_INFO, "[grpclb %p] Will use '%s' as the server name for LB request.", @@ -1470,7 +1475,7 @@ void GrpcLb::UpdateLocked(UpdateArgs args) { if (is_initial_update) { fallback_at_startup_checks_pending_ = true; // Start timer. - grpc_millis deadline = ExecCtx::Get()->Now() + fallback_at_startup_timeout_; + Timestamp deadline = ExecCtx::Get()->Now() + fallback_at_startup_timeout_; Ref(DEBUG_LOCATION, "on_fallback_timer").release(); // Ref for callback grpc_timer_init(&lb_fallback_timer_, deadline, &lb_on_fallback_); // Start watching the channel's connectivity state. If the channel @@ -1561,13 +1566,13 @@ void GrpcLb::StartBalancerCallLocked() { } void GrpcLb::StartBalancerCallRetryTimerLocked() { - grpc_millis next_try = lb_call_backoff_.NextAttemptTime(); + Timestamp next_try = lb_call_backoff_.NextAttemptTime(); if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) { gpr_log(GPR_INFO, "[grpclb %p] Connection to LB server lost...", this); - grpc_millis timeout = next_try - ExecCtx::Get()->Now(); - if (timeout > 0) { + Duration timeout = next_try - ExecCtx::Get()->Now(); + if (timeout > Duration::Zero()) { gpr_log(GPR_INFO, "[grpclb %p] ... retry_timer_active in %" PRId64 "ms.", - this, timeout); + this, timeout.millis()); } else { gpr_log(GPR_INFO, "[grpclb %p] ... 
retry_timer_active immediately.", this); @@ -1735,8 +1740,7 @@ void GrpcLb::CreateOrUpdateChildPolicyLocked() { void GrpcLb::CacheDeletedSubchannelLocked( RefCountedPtr<SubchannelInterface> subchannel) { - grpc_millis deletion_time = - ExecCtx::Get()->Now() + subchannel_cache_interval_ms_; + Timestamp deletion_time = ExecCtx::Get()->Now() + subchannel_cache_interval_; cached_subchannels_[deletion_time].push_back(std::move(subchannel)); if (!subchannel_cache_timer_pending_) { Ref(DEBUG_LOCATION, "OnSubchannelCacheTimer").release(); diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc index 6f7ae366f73..c9d54a988eb 100644 --- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc @@ -146,11 +146,10 @@ bool ParseServerList(const grpc_lb_v1_LoadBalanceResponse& response, return true; } -grpc_millis grpc_grpclb_duration_to_millis( - const google_protobuf_Duration* duration_pb) { - return static_cast<grpc_millis>( - (google_protobuf_Duration_seconds(duration_pb) * GPR_MS_PER_SEC) + - (google_protobuf_Duration_nanos(duration_pb) / GPR_NS_PER_MS)); +Duration ParseDuration(const google_protobuf_Duration* duration_pb) { + return Duration::FromSecondsAndNanoseconds( + google_protobuf_Duration_seconds(duration_pb), + google_protobuf_Duration_nanos(duration_pb)); } } // namespace @@ -177,7 +176,7 @@ bool GrpcLbResponseParse(const grpc_slice& serialized_response, initial_response); if (client_stats_report_interval != nullptr) { result->client_stats_report_interval = - grpc_grpclb_duration_to_millis(client_stats_report_interval); + ParseDuration(client_stats_report_interval); } return true; } diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h index bbee01c38a6..a9ad62fa2ff 100644 --- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h @@ -49,7 +49,7 @@ struct GrpcLbServer { struct GrpcLbResponse { enum { INITIAL, SERVERLIST, FALLBACK } type; - grpc_millis client_stats_report_interval = 0; + Duration client_stats_report_interval; std::vector<GrpcLbServer> serverlist; }; diff --git a/src/core/ext/filters/client_channel/lb_policy/priority/priority.cc b/src/core/ext/filters/client_channel/lb_policy/priority/priority.cc index 8203ef09655..257c36b4bea 100644 --- a/src/core/ext/filters/client_channel/lb_policy/priority/priority.cc +++ b/src/core/ext/filters/client_channel/lb_policy/priority/priority.cc @@ -47,11 +47,11 @@ constexpr char kPriority[] = "priority_experimental"; // How long we keep a child around for after it is no longer being used // (either because it has been removed from the config or because we // have switched to a higher-priority child). -constexpr int kChildRetentionIntervalMs = 15 * 60 * 1000; +constexpr Duration kChildRetentionInterval = Duration::Minutes(15); // Default for how long we wait for a newly created child to get connected // before starting to attempt the next priority. Overridable via channel arg. -constexpr int kDefaultChildFailoverTimeoutMs = 10000; +constexpr Duration kDefaultChildFailoverTimeout = Duration::Seconds(10); // Config for priority LB policy. 
class PriorityLbConfig : public LoadBalancingPolicy::Config { @@ -219,7 +219,7 @@ class PriorityLb : public LoadBalancingPolicy { void TryNextPriorityLocked(bool report_connecting); void SelectPriorityLocked(uint32_t priority); - const int child_failover_timeout_ms_; + const Duration child_failover_timeout_; // Current channel args and config from the resolver. const grpc_channel_args* args_ = nullptr; @@ -244,9 +244,11 @@ class PriorityLb : public LoadBalancingPolicy { PriorityLb::PriorityLb(Args args) : LoadBalancingPolicy(std::move(args)), - child_failover_timeout_ms_(grpc_channel_args_find_integer( - args.args, GRPC_ARG_PRIORITY_FAILOVER_TIMEOUT_MS, - {kDefaultChildFailoverTimeoutMs, 0, INT_MAX})) { + child_failover_timeout_( + Duration::Milliseconds(grpc_channel_args_find_integer( + args.args, GRPC_ARG_PRIORITY_FAILOVER_TIMEOUT_MS, + {static_cast<int>(kDefaultChildFailoverTimeout.millis()), 0, + INT_MAX}))) { if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { gpr_log(GPR_INFO, "[priority_lb %p] created", this); } @@ -634,15 +636,17 @@ void PriorityLb::ChildPriority::OnConnectivityStateUpdateLocked( void PriorityLb::ChildPriority::StartFailoverTimerLocked() { if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { - gpr_log(GPR_INFO, - "[priority_lb %p] child %s (%p): starting failover timer for %d ms", - priority_policy_.get(), name_.c_str(), this, - priority_policy_->child_failover_timeout_ms_); + gpr_log( + GPR_INFO, + "[priority_lb %p] child %s (%p): starting failover timer for %" PRId64 + "ms", + priority_policy_.get(), name_.c_str(), this, + priority_policy_->child_failover_timeout_.millis()); } Ref(DEBUG_LOCATION, "ChildPriority+OnFailoverTimerLocked").release(); grpc_timer_init( &failover_timer_, - ExecCtx::Get()->Now() + priority_policy_->child_failover_timeout_ms_, + ExecCtx::Get()->Now() + priority_policy_->child_failover_timeout_, &on_failover_timer_); failover_timer_callback_pending_ = true; } @@ -691,16 +695,16 @@ void PriorityLb::ChildPriority::DeactivateLocked() { if (deactivation_timer_callback_pending_) return; if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { gpr_log(GPR_INFO, - "[priority_lb %p] child %s (%p): deactivating -- will remove in %d " - "ms.", + "[priority_lb %p] child %s (%p): deactivating -- will remove in " + "%" PRId64 "ms.", priority_policy_.get(), name_.c_str(), this, - kChildRetentionIntervalMs); + kChildRetentionInterval.millis()); } MaybeCancelFailoverTimerLocked(); // Start a timer to delete the child. 
Ref(DEBUG_LOCATION, "ChildPriority+timer").release(); grpc_timer_init(&deactivation_timer_, - ExecCtx::Get()->Now() + kChildRetentionIntervalMs, + ExecCtx::Get()->Now() + kChildRetentionInterval, &on_deactivation_timer_); deactivation_timer_callback_pending_ = true; } diff --git a/src/core/ext/filters/client_channel/lb_policy/rls/rls.cc b/src/core/ext/filters/client_channel/lb_policy/rls/rls.cc index fac335adec3..068af6102a1 100644 --- a/src/core/ext/filters/client_channel/lb_policy/rls/rls.cc +++ b/src/core/ext/filters/client_channel/lb_policy/rls/rls.cc @@ -89,17 +89,17 @@ const char* kRlsRequestPath = "/grpc.lookup.v1.RouteLookupService/RouteLookup"; const char* kFakeTargetFieldValue = "fake_target_field_value"; const char* kRlsHeaderKey = "X-Google-RLS-Data"; -const grpc_millis kDefaultLookupServiceTimeout = 10000; -const grpc_millis kMaxMaxAge = 5 * 60 * GPR_MS_PER_SEC; -const grpc_millis kMinExpirationTime = 5 * GPR_MS_PER_SEC; -const grpc_millis kCacheBackoffInitial = 1 * GPR_MS_PER_SEC; +const Duration kDefaultLookupServiceTimeout = Duration::Seconds(10); +const Duration kMaxMaxAge = Duration::Minutes(5); +const Duration kMinExpirationTime = Duration::Seconds(5); +const Duration kCacheBackoffInitial = Duration::Seconds(1); const double kCacheBackoffMultiplier = 1.6; const double kCacheBackoffJitter = 0.2; -const grpc_millis kCacheBackoffMax = 120 * GPR_MS_PER_SEC; -const grpc_millis kDefaultThrottleWindowSizeMs = 30 * GPR_MS_PER_SEC; -const float kDefaultThrottleRatioForSuccesses = 2.0; +const Duration kCacheBackoffMax = Duration::Minutes(2); +const Duration kDefaultThrottleWindowSize = Duration::Seconds(30); +const double kDefaultThrottleRatioForSuccesses = 2.0; const int kDefaultThrottlePadding = 8; -const grpc_millis kCacheCleanupTimerInterval = 60 * GPR_MS_PER_SEC; +const Duration kCacheCleanupTimerInterval = Duration::Minutes(1); const int64_t kMaxCacheSizeBytes = 5 * 1024 * 1024; // Parsed RLS LB policy configuration. 
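The RLS hunks that follow are where the Timestamp/Duration split pays off most visibly: points in time (`backoff_time_`, `data_expiration_time_`, `stale_time_`, the logged request timestamps) become `Timestamp`, while widths (`window_size_`, `max_age`, `stale_age`, `lookup_service_timeout`) become `Duration`. A hedged sketch of the sliding-window pruning pattern that `Throttle::ShouldThrottle()` applies to both its `requests_` and `failures_` deques below, using stand-in template parameters rather than the real grpc_core types:

```cpp
#include <deque>

// Drop logged events that have aged out of the sliding window. Subtracting
// two time points yields a span, so the comparison against the
// Duration-typed window is checked by the type system; a raw millisecond
// integer on either side would fail to compile.
template <typename Timestamp, typename Duration>
void PruneWindow(std::deque<Timestamp>& events, Timestamp now,
                 Duration window) {
  while (!events.empty() && now - events.front() > window) {
    events.pop_front();
  }
}
```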
@@ -118,9 +118,9 @@ class RlsLbConfig : public LoadBalancingPolicy::Config { struct RouteLookupConfig { KeyBuilderMap key_builder_map; std::string lookup_service; - grpc_millis lookup_service_timeout = 0; - grpc_millis max_age = 0; - grpc_millis stale_age = 0; + Duration lookup_service_timeout; + Duration max_age; + Duration stale_age; int64_t cache_size_bytes = 0; std::string default_target; }; @@ -146,11 +146,11 @@ class RlsLbConfig : public LoadBalancingPolicy::Config { const std::string& lookup_service() const { return route_lookup_config_.lookup_service; } - grpc_millis lookup_service_timeout() const { + Duration lookup_service_timeout() const { return route_lookup_config_.lookup_service_timeout; } - grpc_millis max_age() const { return route_lookup_config_.max_age; } - grpc_millis stale_age() const { return route_lookup_config_.stale_age; } + Duration max_age() const { return route_lookup_config_.max_age; } + Duration stale_age() const { return route_lookup_config_.stale_age; } int64_t cache_size_bytes() const { return route_lookup_config_.cache_size_bytes; } @@ -362,15 +362,15 @@ class RlsLb : public LoadBalancingPolicy { ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_) { return status_; } - grpc_millis backoff_time() const + Timestamp backoff_time() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_) { return backoff_time_; } - grpc_millis backoff_expiration_time() const + Timestamp backoff_expiration_time() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_) { return backoff_expiration_time_; } - grpc_millis data_expiration_time() const + Timestamp data_expiration_time() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_) { return data_expiration_time_; } @@ -378,11 +378,10 @@ class RlsLb : public LoadBalancingPolicy { ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_) { return header_data_; } - grpc_millis stale_time() const - ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_) { + Timestamp stale_time() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_) { return stale_time_; } - grpc_millis min_expiration_time() const + Timestamp min_expiration_time() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_) { return min_expiration_time_; } @@ -423,7 +422,7 @@ class RlsLb : public LoadBalancingPolicy { private: class BackoffTimer : public InternallyRefCounted<BackoffTimer> { public: - BackoffTimer(RefCountedPtr<Entry> entry, grpc_millis backoff_time); + BackoffTimer(RefCountedPtr<Entry> entry, Timestamp backoff_time); // Note: We are forced to disable lock analysis here because // Orphan() is called by OrphanablePtr<>, which cannot have lock @@ -446,22 +445,21 @@ class RlsLb : public LoadBalancingPolicy { // Backoff states absl::Status status_ ABSL_GUARDED_BY(&RlsLb::mu_); std::unique_ptr<BackOff> backoff_state_ ABSL_GUARDED_BY(&RlsLb::mu_); - grpc_millis backoff_time_ ABSL_GUARDED_BY(&RlsLb::mu_) = - GRPC_MILLIS_INF_PAST; - grpc_millis backoff_expiration_time_ ABSL_GUARDED_BY(&RlsLb::mu_) = - GRPC_MILLIS_INF_PAST; + Timestamp backoff_time_ ABSL_GUARDED_BY(&RlsLb::mu_) = + Timestamp::InfPast(); + Timestamp backoff_expiration_time_ ABSL_GUARDED_BY(&RlsLb::mu_) = + Timestamp::InfPast(); OrphanablePtr<BackoffTimer> backoff_timer_; // RLS response states std::vector<RefCountedPtr<ChildPolicyWrapper>> child_policy_wrappers_ ABSL_GUARDED_BY(&RlsLb::mu_); std::string header_data_ ABSL_GUARDED_BY(&RlsLb::mu_); - grpc_millis data_expiration_time_ ABSL_GUARDED_BY(&RlsLb::mu_) = - GRPC_MILLIS_INF_PAST; - grpc_millis stale_time_ ABSL_GUARDED_BY(&RlsLb::mu_) = - GRPC_MILLIS_INF_PAST; + Timestamp data_expiration_time_ 
ABSL_GUARDED_BY(&RlsLb::mu_) = + Timestamp::InfPast(); + Timestamp stale_time_ ABSL_GUARDED_BY(&RlsLb::mu_) = Timestamp::InfPast(); - grpc_millis min_expiration_time_ ABSL_GUARDED_BY(&RlsLb::mu_); + Timestamp min_expiration_time_ ABSL_GUARDED_BY(&RlsLb::mu_); Cache::Iterator lru_iterator_ ABSL_GUARDED_BY(&RlsLb::mu_); }; @@ -566,10 +564,10 @@ class RlsLb : public LoadBalancingPolicy { class Throttle { public: explicit Throttle( - int window_size_ms = kDefaultThrottleWindowSizeMs, + Duration window_size = kDefaultThrottleWindowSize, float ratio_for_successes = kDefaultThrottleRatioForSuccesses, int padding = kDefaultThrottlePadding) - : window_size_ms_(window_size_ms), + : window_size_(window_size), ratio_for_successes_(ratio_for_successes), padding_(padding) {} @@ -579,16 +577,16 @@ class RlsLb : public LoadBalancingPolicy { ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_); private: - grpc_millis window_size_ms_; - float ratio_for_successes_; + Duration window_size_; + double ratio_for_successes_; int padding_; std::mt19937 rng_{std::random_device()()}; - // Logged timestamps of requests. - std::deque<grpc_millis> requests_ ABSL_GUARDED_BY(&RlsLb::mu_); + // Logged timestamp of requests. + std::deque<Timestamp> requests_ ABSL_GUARDED_BY(&RlsLb::mu_); // Logged timestamps of failures. - std::deque<grpc_millis> failures_ ABSL_GUARDED_BY(&RlsLb::mu_); + std::deque<Timestamp> failures_ ABSL_GUARDED_BY(&RlsLb::mu_); }; RefCountedPtr<RlsLb> lb_policy_; @@ -641,7 +639,7 @@ class RlsLb : public LoadBalancingPolicy { std::string stale_header_data_; // RLS call state. - grpc_millis deadline_; + Timestamp deadline_; grpc_closure call_start_cb_; grpc_closure call_complete_cb_; grpc_call* call_ = nullptr; @@ -991,7 +989,7 @@ LoadBalancingPolicy::PickResult RlsLb::Picker::Pick(PickArgs args) { gpr_log(GPR_INFO, "[rlslb %p] picker=%p: request keys: %s", lb_policy_.get(), this, key.ToString().c_str()); } - grpc_millis now = ExecCtx::Get()->Now(); + Timestamp now = ExecCtx::Get()->Now(); MutexLock lock(&lb_policy_->mu_); if (lb_policy_->is_shutdown_) { return PickResult::Fail( @@ -1077,7 +1075,7 @@ LoadBalancingPolicy::PickResult RlsLb::Picker::Pick(PickArgs args) { // RlsLb::Cache::Entry::BackoffTimer::BackoffTimer(RefCountedPtr<Entry> entry, - grpc_millis backoff_time) + Timestamp backoff_time) : entry_(std::move(entry)) { GRPC_CLOSURE_INIT(&backoff_timer_callback_, OnBackoffTimer, this, nullptr); Ref(DEBUG_LOCATION, "BackoffTimer").release(); @@ -1211,17 +1209,17 @@ LoadBalancingPolicy::PickResult RlsLb::Cache::Entry::Pick(PickArgs args) { } void RlsLb::Cache::Entry::ResetBackoff() { - backoff_time_ = GRPC_MILLIS_INF_PAST; + backoff_time_ = Timestamp::InfPast(); backoff_timer_.reset(); } bool RlsLb::Cache::Entry::ShouldRemove() const { - grpc_millis now = ExecCtx::Get()->Now(); + Timestamp now = ExecCtx::Get()->Now(); return data_expiration_time_ < now && backoff_expiration_time_ < now; } bool RlsLb::Cache::Entry::CanEvict() const { - grpc_millis now = ExecCtx::Get()->Now(); + Timestamp now = ExecCtx::Get()->Now(); return min_expiration_time_ < now; } @@ -1247,7 +1245,7 @@ RlsLb::Cache::Entry::OnRlsResponseLocked( backoff_state_ = MakeCacheEntryBackoff(); } backoff_time_ = backoff_state_->NextAttemptTime(); - grpc_millis now = ExecCtx::Get()->Now(); + Timestamp now = ExecCtx::Get()->Now(); backoff_expiration_time_ = now + (backoff_time_ - now) * 2; backoff_timer_ = MakeOrphanable<BackoffTimer>( Ref(DEBUG_LOCATION, "BackoffTimer"), backoff_time_); @@ -1256,13 +1254,13 @@ 
RlsLb::Cache::Entry::OnRlsResponseLocked( } // Request succeeded, so store the result. header_data_ = std::move(response.header_data); - grpc_millis now = ExecCtx::Get()->Now(); + Timestamp now = ExecCtx::Get()->Now(); data_expiration_time_ = now + lb_policy_->config_->max_age(); stale_time_ = now + lb_policy_->config_->stale_age(); status_ = absl::OkStatus(); backoff_state_.reset(); - backoff_time_ = GRPC_MILLIS_INF_PAST; - backoff_expiration_time_ = GRPC_MILLIS_INF_PAST; + backoff_time_ = Timestamp::InfPast(); + backoff_expiration_time_ = Timestamp::InfPast(); // Check if we need to update this list of targets. bool targets_changed = [&]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_) { if (child_policy_wrappers_.size() != response.targets.size()) return true; @@ -1322,7 +1320,7 @@ RlsLb::Cache::Entry::OnRlsResponseLocked( // RlsLb::Cache::Cache(RlsLb* lb_policy) : lb_policy_(lb_policy) { - grpc_millis now = ExecCtx::Get()->Now(); + Timestamp now = ExecCtx::Get()->Now(); lb_policy_->Ref(DEBUG_LOCATION, "CacheCleanupTimer").release(); GRPC_CLOSURE_INIT(&timer_callback_, OnCleanupTimer, this, nullptr); grpc_timer_init(&cleanup_timer_, now + kCacheCleanupTimerInterval, @@ -1405,7 +1403,7 @@ void RlsLb::Cache::OnCleanupTimer(void* arg, grpc_error_handle error) { ++it; } } - grpc_millis now = ExecCtx::Get()->Now(); + Timestamp now = ExecCtx::Get()->Now(); lb_policy.release(); grpc_timer_init(&cache->cleanup_timer_, now + kCacheCleanupTimerInterval, @@ -1474,11 +1472,11 @@ void RlsLb::RlsChannel::StateWatcher::OnConnectivityStateChange( // bool RlsLb::RlsChannel::Throttle::ShouldThrottle() { - grpc_millis now = ExecCtx::Get()->Now(); - while (!requests_.empty() && now - requests_.front() > window_size_ms_) { + Timestamp now = ExecCtx::Get()->Now(); + while (!requests_.empty() && now - requests_.front() > window_size_) { requests_.pop_front(); } - while (!failures_.empty() && now - failures_.front() > window_size_ms_) { + while (!failures_.empty() && now - failures_.front() > window_size_) { failures_.pop_front(); } // Compute probability of throttling. @@ -1502,7 +1500,7 @@ bool RlsLb::RlsChannel::Throttle::ShouldThrottle() { } void RlsLb::RlsChannel::Throttle::RegisterResponse(bool success) { - grpc_millis now = ExecCtx::Get()->Now(); + Timestamp now = ExecCtx::Get()->Now(); requests_.push_back(now); if (!success) failures_.push_back(now); } @@ -1690,7 +1688,7 @@ void RlsLb::RlsRequest::StartCallLocked() { MutexLock lock(&lb_policy_->mu_); if (lb_policy_->is_shutdown_) return; } - grpc_millis now = ExecCtx::Get()->Now(); + Timestamp now = ExecCtx::Get()->Now(); deadline_ = now + lb_policy_->config_->lookup_service_timeout(); grpc_metadata_array_init(&recv_initial_metadata_); grpc_metadata_array_init(&recv_trailing_metadata_); diff --git a/src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc b/src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc index 0b02a139468..d09f9fe1421 100644 --- a/src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc +++ b/src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc @@ -48,7 +48,7 @@ constexpr char kWeightedTarget[] = "weighted_target_experimental"; // How long we keep a child around for after it has been removed from // the config. -constexpr int kChildRetentionIntervalMs = 15 * 60 * 1000; +constexpr Duration kChildRetentionInterval = Duration::Minutes(15); // Config for weighted_target LB policy. 
class WeightedTargetLbConfig : public LoadBalancingPolicy::Config { @@ -564,7 +564,7 @@ void WeightedTargetLb::WeightedChild::DeactivateLocked() { Ref(DEBUG_LOCATION, "WeightedChild+timer").release(); delayed_removal_timer_callback_pending_ = true; grpc_timer_init(&delayed_removal_timer_, - ExecCtx::Get()->Now() + kChildRetentionIntervalMs, + ExecCtx::Get()->Now() + kChildRetentionInterval, &on_delayed_removal_timer_); } diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc index 45356d9a404..35938e910f0 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc +++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc @@ -487,7 +487,8 @@ void XdsClusterManagerLb::ClusterChild::DeactivateLocked() { Ref(DEBUG_LOCATION, "ClusterChild+timer").release(); grpc_timer_init(&delayed_removal_timer_, ExecCtx::Get()->Now() + - GRPC_XDS_CLUSTER_MANAGER_CHILD_RETENTION_INTERVAL_MS, + Duration::Milliseconds( + GRPC_XDS_CLUSTER_MANAGER_CHILD_RETENTION_INTERVAL_MS), &on_delayed_removal_timer_); delayed_removal_timer_callback_pending_ = true; } diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc index 3ecbfcfe4de..c306dc726a5 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc @@ -101,7 +101,7 @@ class AresClientChannelDNSResolver : public Resolver { // timeout in milliseconds for active DNS queries int query_timeout_ms_; /// min interval between DNS requests - grpc_millis min_time_between_resolutions_; + Duration min_time_between_resolutions_; /// closures used by the work_serializer grpc_closure on_next_resolution_; @@ -114,7 +114,7 @@ class AresClientChannelDNSResolver : public Resolver { bool have_next_resolution_timer_ = false; grpc_timer next_resolution_timer_; /// timestamp of last DNS request - grpc_millis last_resolution_timestamp_ = -1; + absl::optional<Timestamp> last_resolution_timestamp_; /// retry backoff state BackOff backoff_; /// currently resolving backend addresses @@ -141,16 +141,17 @@ AresClientChannelDNSResolver::AresClientChannelDNSResolver(ResolverArgs args) query_timeout_ms_(grpc_channel_args_find_integer( channel_args_, GRPC_ARG_DNS_ARES_QUERY_TIMEOUT_MS, {GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS, 0, INT_MAX})), - min_time_between_resolutions_(grpc_channel_args_find_integer( - channel_args_, GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS, - {1000 * 30, 0, INT_MAX})), - backoff_( - BackOff::Options() - .set_initial_backoff(GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS * - 1000) - .set_multiplier(GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER) - .set_jitter(GRPC_DNS_RECONNECT_JITTER) - .set_max_backoff(GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000)) { + min_time_between_resolutions_( + Duration::Milliseconds(grpc_channel_args_find_integer( + channel_args_, GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS, + {1000 * 30, 0, INT_MAX}))), + backoff_(BackOff::Options() + .set_initial_backoff(Duration::Seconds( + GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS)) + .set_multiplier(GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER) + .set_jitter(GRPC_DNS_RECONNECT_JITTER) + .set_max_backoff(Duration::Seconds( + GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS))) { // Closure initialization.
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
index 3ecbfcfe4de..c306dc726a5 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
@@ -101,7 +101,7 @@ class AresClientChannelDNSResolver : public Resolver {
   // timeout in milliseconds for active DNS queries
   int query_timeout_ms_;
   /// min interval between DNS requests
-  grpc_millis min_time_between_resolutions_;
+  Duration min_time_between_resolutions_;
 
   /// closures used by the work_serializer
   grpc_closure on_next_resolution_;
@@ -114,7 +114,7 @@ class AresClientChannelDNSResolver : public Resolver {
   bool have_next_resolution_timer_ = false;
   grpc_timer next_resolution_timer_;
   /// timestamp of last DNS request
-  grpc_millis last_resolution_timestamp_ = -1;
+  absl::optional<Timestamp> last_resolution_timestamp_;
   /// retry backoff state
   BackOff backoff_;
   /// currently resolving backend addresses
@@ -141,16 +141,17 @@ AresClientChannelDNSResolver::AresClientChannelDNSResolver(ResolverArgs args)
       query_timeout_ms_(grpc_channel_args_find_integer(
          channel_args_, GRPC_ARG_DNS_ARES_QUERY_TIMEOUT_MS,
          {GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS, 0, INT_MAX})),
-      min_time_between_resolutions_(grpc_channel_args_find_integer(
-          channel_args_, GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS,
-          {1000 * 30, 0, INT_MAX})),
-      backoff_(
-          BackOff::Options()
-              .set_initial_backoff(GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS *
-                                   1000)
-              .set_multiplier(GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER)
-              .set_jitter(GRPC_DNS_RECONNECT_JITTER)
-              .set_max_backoff(GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000)) {
+      min_time_between_resolutions_(
+          Duration::Milliseconds(grpc_channel_args_find_integer(
+              channel_args_, GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS,
+              {1000 * 30, 0, INT_MAX}))),
+      backoff_(BackOff::Options()
+                   .set_initial_backoff(Duration::Seconds(
+                       GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS))
+                   .set_multiplier(GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER)
+                   .set_jitter(GRPC_DNS_RECONNECT_JITTER)
+                   .set_max_backoff(Duration::Seconds(
+                       GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS))) {
   // Closure initialization.
   GRPC_CLOSURE_INIT(&on_next_resolution_, OnNextResolution, this,
                     grpc_schedule_on_exec_ctx);
@@ -388,8 +389,8 @@ void AresClientChannelDNSResolver::OnResolvedLocked(grpc_error_handle error) {
     // in a loop while draining the currently-held WorkSerializer.
     // Also see https://github.com/grpc/grpc/issues/26079.
     ExecCtx::Get()->InvalidateNow();
-    grpc_millis next_try = backoff_.NextAttemptTime();
-    grpc_millis timeout = next_try - ExecCtx::Get()->Now();
+    Timestamp next_try = backoff_.NextAttemptTime();
+    Duration timeout = next_try - ExecCtx::Get()->Now();
     GRPC_CARES_TRACE_LOG("resolver:%p dns resolution failed (will retry): %s",
                         this, grpc_error_std_string(error).c_str());
     GPR_ASSERT(!have_next_resolution_timer_);
@@ -398,9 +399,9 @@ void AresClientChannelDNSResolver::OnResolvedLocked(grpc_error_handle error) {
     // new closure API is done, find a way to track this ref with the timer
     // callback as part of the type system.
     Ref(DEBUG_LOCATION, "retry-timer").release();
-    if (timeout > 0) {
-      GRPC_CARES_TRACE_LOG("resolver:%p retrying in %" PRId64 " milliseconds",
-                           this, timeout);
+    if (timeout > Duration::Zero()) {
+      GRPC_CARES_TRACE_LOG("resolver:%p retrying in %" PRId64 " milliseconds",
+                           this, timeout.millis());
     } else {
       GRPC_CARES_TRACE_LOG("resolver:%p retrying immediately", this);
     }
@@ -414,29 +415,30 @@ void AresClientChannelDNSResolver::MaybeStartResolvingLocked() {
   // If there is an existing timer, the time it fires is the earliest time we
   // can start the next resolution.
   if (have_next_resolution_timer_) return;
-  if (last_resolution_timestamp_ >= 0) {
+  if (last_resolution_timestamp_.has_value()) {
     // InvalidateNow to avoid getting stuck re-initializing this timer
     // in a loop while draining the currently-held WorkSerializer.
     // Also see https://github.com/grpc/grpc/issues/26079.
     ExecCtx::Get()->InvalidateNow();
-    const grpc_millis earliest_next_resolution =
-        last_resolution_timestamp_ + min_time_between_resolutions_;
-    const grpc_millis ms_until_next_resolution =
+    const Timestamp earliest_next_resolution =
+        *last_resolution_timestamp_ + min_time_between_resolutions_;
+    const Duration time_until_next_resolution =
         earliest_next_resolution - ExecCtx::Get()->Now();
-    if (ms_until_next_resolution > 0) {
-      const grpc_millis last_resolution_ago =
-          ExecCtx::Get()->Now() - last_resolution_timestamp_;
+    if (time_until_next_resolution > Duration::Zero()) {
+      const Duration last_resolution_ago =
+          ExecCtx::Get()->Now() - *last_resolution_timestamp_;
       GRPC_CARES_TRACE_LOG(
           "resolver:%p In cooldown from last resolution (from %" PRId64
          " ms ago). Will resolve again in %" PRId64 " ms",
-          this, last_resolution_ago, ms_until_next_resolution);
+          this, last_resolution_ago.millis(),
+          time_until_next_resolution.millis());
       have_next_resolution_timer_ = true;
       // TODO(roth): We currently deal with this ref manually.  Once the
       // new closure API is done, find a way to track this ref with the timer
       // callback as part of the type system.
       Ref(DEBUG_LOCATION, "next_resolution_timer_cooldown").release();
       grpc_timer_init(&next_resolution_timer_,
-                      ExecCtx::Get()->Now() + ms_until_next_resolution,
+                      ExecCtx::Get()->Now() + time_until_next_resolution,
                       &on_next_resolution_);
       return;
     }
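Editorial note on the constructor hunk: the flattened text wrapped the seconds-valued macros GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS and GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS in Duration::Milliseconds, which would shrink the backoff a thousandfold; it is rendered here as Duration::Seconds, matching both the pre-change `* 1000` arithmetic and the native resolver later in this diff. The other notable change is `last_resolution_timestamp_ = -1` becoming an empty optional; a standalone sketch of that sentinel-to-optional move (std::optional here; the diff uses the equivalent absl::optional):

#include <cstdint>
#include <cstdio>
#include <optional>

struct Timestamp { int64_t ms; };

std::optional<Timestamp> last_resolution_timestamp;  // empty: never resolved

void MaybeStartResolving(Timestamp now, int64_t min_gap_ms) {
  if (last_resolution_timestamp.has_value()) {
    // Safe: the value is only read once it is known to exist, where the
    // old code compared a raw integer against the magic value -1.
    int64_t since_last = now.ms - last_resolution_timestamp->ms;
    if (since_last < min_gap_ms) {
      std::printf("in cooldown, %lld ms left\n",
                  (long long)(min_gap_ms - since_last));
      return;
    }
  }
  last_resolution_timestamp = now;
  std::printf("resolving\n");
}

int main() {
  MaybeStartResolving({1000}, 30000);  // first call: resolves immediately
  MaybeStartResolving({2000}, 30000);  // within cooldown window
}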
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
index 1b462bfe7a1..38fd38bfd3e 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
@@ -249,20 +249,20 @@ static fd_node* pop_fd_node_locked(fd_node** head, ares_socket_t as)
   return nullptr;
 }
 
-static grpc_millis calculate_next_ares_backup_poll_alarm_ms(
+static grpc_core::Timestamp calculate_next_ares_backup_poll_alarm(
     grpc_ares_ev_driver* driver)
     ABSL_EXCLUSIVE_LOCKS_REQUIRED(&grpc_ares_request::mu) {
   // An alternative here could be to use ares_timeout to try to be more
   // accurate, but that would require using "struct timeval"'s, which just makes
   // things a bit more complicated. So just poll every second, as suggested
   // by the c-ares code comments.
-  grpc_millis ms_until_next_ares_backup_poll_alarm = 1000;
+  grpc_core::Duration until_next_ares_backup_poll_alarm =
+      grpc_core::Duration::Seconds(1);
   GRPC_CARES_TRACE_LOG(
       "request:%p ev_driver=%p. next ares process poll time in "
       "%" PRId64 " ms",
-      driver->request, driver, ms_until_next_ares_backup_poll_alarm);
-  return ms_until_next_ares_backup_poll_alarm +
-         grpc_core::ExecCtx::Get()->Now();
+      driver->request, driver, until_next_ares_backup_poll_alarm.millis());
+  return grpc_core::ExecCtx::Get()->Now() + until_next_ares_backup_poll_alarm;
 }
 
 static void on_timeout(void* arg, grpc_error_handle error) {
@@ -317,8 +317,8 @@ static void on_ares_backup_poll_alarm(void* arg, grpc_error_handle error) {
     // in a loop while draining the currently-held WorkSerializer.
     // Also see https://github.com/grpc/grpc/issues/26079.
     grpc_core::ExecCtx::Get()->InvalidateNow();
-    grpc_millis next_ares_backup_poll_alarm =
-        calculate_next_ares_backup_poll_alarm_ms(driver);
+    grpc_core::Timestamp next_ares_backup_poll_alarm =
+        calculate_next_ares_backup_poll_alarm(driver);
     grpc_ares_ev_driver_ref(driver);
     GRPC_CLOSURE_INIT(&driver->on_ares_backup_poll_alarm_locked,
                       on_ares_backup_poll_alarm, driver,
@@ -462,22 +462,23 @@ void grpc_ares_ev_driver_start_locked(grpc_ares_ev_driver* ev_driver)
     ABSL_EXCLUSIVE_LOCKS_REQUIRED(&grpc_ares_request::mu) {
   grpc_ares_notify_on_event_locked(ev_driver);
   // Initialize overall DNS resolution timeout alarm
-  grpc_millis timeout =
+  grpc_core::Duration timeout =
       ev_driver->query_timeout_ms == 0
-          ? GRPC_MILLIS_INF_FUTURE
-          : ev_driver->query_timeout_ms + grpc_core::ExecCtx::Get()->Now();
+          ? grpc_core::Duration::Infinity()
+          : grpc_core::Duration::Milliseconds(ev_driver->query_timeout_ms);
   GRPC_CARES_TRACE_LOG(
      "request:%p ev_driver=%p grpc_ares_ev_driver_start_locked. timeout in "
      "%" PRId64 " ms",
-      ev_driver->request, ev_driver, timeout);
+      ev_driver->request, ev_driver, timeout.millis());
   grpc_ares_ev_driver_ref(ev_driver);
   GRPC_CLOSURE_INIT(&ev_driver->on_timeout_locked, on_timeout, ev_driver,
                     grpc_schedule_on_exec_ctx);
-  grpc_timer_init(&ev_driver->query_timeout, timeout,
+  grpc_timer_init(&ev_driver->query_timeout,
+                  grpc_core::ExecCtx::Get()->Now() + timeout,
                   &ev_driver->on_timeout_locked);
   // Initialize the backup poll alarm
-  grpc_millis next_ares_backup_poll_alarm =
-      calculate_next_ares_backup_poll_alarm_ms(ev_driver);
+  grpc_core::Timestamp next_ares_backup_poll_alarm =
+      calculate_next_ares_backup_poll_alarm(ev_driver);
   grpc_ares_ev_driver_ref(ev_driver);
   GRPC_CLOSURE_INIT(&ev_driver->on_ares_backup_poll_alarm_locked,
                     on_ares_backup_poll_alarm, ev_driver,
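The query-timeout hunk keeps the value as a Duration (Infinity when disabled) and converts to an absolute Timestamp only at grpc_timer_init. For that to be safe, adding an infinite Duration to Now() must saturate rather than overflow. A standalone sketch of that saturating addition (assumption: grpc_core's types behave this way internally; this is a model, not the library code):

#include <cstdint>
#include <cstdio>
#include <limits>

constexpr int64_t kInf = std::numeric_limits<int64_t>::max();

int64_t SaturatingAdd(int64_t timestamp_ms, int64_t duration_ms) {
  if (duration_ms == kInf) return kInf;                // deadline "never"
  if (timestamp_ms > kInf - duration_ms) return kInf;  // would overflow
  return timestamp_ms + duration_ms;
}

int main() {
  int64_t now = 1700000000000;
  std::printf("finite:   %lld\n", (long long)SaturatingAdd(now, 120000));
  std::printf("infinite: %lld\n", (long long)SaturatingAdd(now, kInf));
}

With plain grpc_millis ints, `query_timeout_ms + Now()` silently relied on the caller checking the INF_FUTURE sentinel first; the typed version centralizes that rule.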
timeout in " "%" PRId64 " ms", - ev_driver->request, ev_driver, timeout); + ev_driver->request, ev_driver, timeout.millis()); grpc_ares_ev_driver_ref(ev_driver); GRPC_CLOSURE_INIT(&ev_driver->on_timeout_locked, on_timeout, ev_driver, grpc_schedule_on_exec_ctx); - grpc_timer_init(&ev_driver->query_timeout, timeout, + grpc_timer_init(&ev_driver->query_timeout, + grpc_core::ExecCtx::Get()->Now() + timeout, &ev_driver->on_timeout_locked); // Initialize the backup poll alarm - grpc_millis next_ares_backup_poll_alarm = - calculate_next_ares_backup_poll_alarm_ms(ev_driver); + grpc_core::Timestamp next_ares_backup_poll_alarm = + calculate_next_ares_backup_poll_alarm(ev_driver); grpc_ares_ev_driver_ref(ev_driver); GRPC_CLOSURE_INIT(&ev_driver->on_ares_backup_poll_alarm_locked, on_ares_backup_poll_alarm, ev_driver, diff --git a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc index 55e14760384..fc8ed8c221b 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc @@ -91,9 +91,9 @@ class NativeClientChannelDNSResolver : public Resolver { grpc_timer next_resolution_timer_; grpc_closure on_next_resolution_; /// min time between DNS requests - grpc_millis min_time_between_resolutions_; + Duration min_time_between_resolutions_; /// timestamp of last DNS request - grpc_millis last_resolution_timestamp_ = -1; + absl::optional<Timestamp> last_resolution_timestamp_; /// retry backoff state BackOff backoff_; /// tracks pending resolutions @@ -107,16 +107,17 @@ NativeClientChannelDNSResolver::NativeClientChannelDNSResolver( work_serializer_(std::move(args.work_serializer)), result_handler_(std::move(args.result_handler)), interested_parties_(grpc_pollset_set_create()), - min_time_between_resolutions_(grpc_channel_args_find_integer( - channel_args_, GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS, - {1000 * 30, 0, INT_MAX})), - backoff_( - BackOff::Options() - .set_initial_backoff(GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS * - 1000) - .set_multiplier(GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER) - .set_jitter(GRPC_DNS_RECONNECT_JITTER) - .set_max_backoff(GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000)) { + min_time_between_resolutions_( + Duration::Milliseconds(grpc_channel_args_find_integer( + channel_args_, GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS, + {1000 * 30, 0, INT_MAX}))), + backoff_(BackOff::Options() + .set_initial_backoff(Duration::Seconds( + GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS)) + .set_multiplier(GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER) + .set_jitter(GRPC_DNS_RECONNECT_JITTER) + .set_max_backoff(Duration::Seconds( + GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS))) { if (args.pollset_set != nullptr) { grpc_pollset_set_add_pollset_set(interested_parties_, args.pollset_set); } @@ -216,16 +217,17 @@ void NativeClientChannelDNSResolver::OnResolvedLocked( // in a loop while draining the currently-held WorkSerializer. // Also see https://github.com/grpc/grpc/issues/26079. ExecCtx::Get()->InvalidateNow(); - grpc_millis next_try = backoff_.NextAttemptTime(); - grpc_millis timeout = next_try - ExecCtx::Get()->Now(); + Timestamp next_try = backoff_.NextAttemptTime(); + Duration timeout = next_try - ExecCtx::Get()->Now(); GPR_ASSERT(!have_next_resolution_timer_); have_next_resolution_timer_ = true; // TODO(roth): We currently deal with this ref manually. 
Once the // new closure API is done, find a way to track this ref with the timer // callback as part of the type system. Ref(DEBUG_LOCATION, "next_resolution_timer").release(); - if (timeout > 0) { - gpr_log(GPR_DEBUG, "retrying in %" PRId64 " milliseconds", timeout); + if (timeout > Duration::Zero()) { + gpr_log(GPR_DEBUG, "retrying in %" PRId64 " milliseconds", + timeout.millis()); } else { gpr_log(GPR_DEBUG, "retrying immediately"); } @@ -241,22 +243,23 @@ void NativeClientChannelDNSResolver::MaybeStartResolvingLocked() { // If there is an existing timer, the time it fires is the earliest time we // can start the next resolution. if (have_next_resolution_timer_) return; - if (last_resolution_timestamp_ >= 0) { + if (last_resolution_timestamp_.has_value()) { // InvalidateNow to avoid getting stuck re-initializing this timer // in a loop while draining the currently-held WorkSerializer. // Also see https://github.com/grpc/grpc/issues/26079. ExecCtx::Get()->InvalidateNow(); - const grpc_millis earliest_next_resolution = - last_resolution_timestamp_ + min_time_between_resolutions_; - const grpc_millis ms_until_next_resolution = + const Timestamp earliest_next_resolution = + *last_resolution_timestamp_ + min_time_between_resolutions_; + const Duration time_until_next_resolution = earliest_next_resolution - ExecCtx::Get()->Now(); - if (ms_until_next_resolution > 0) { - const grpc_millis last_resolution_ago = - ExecCtx::Get()->Now() - last_resolution_timestamp_; + if (time_until_next_resolution > Duration::Zero()) { + const Duration last_resolution_ago = + ExecCtx::Get()->Now() - *last_resolution_timestamp_; gpr_log(GPR_DEBUG, "In cooldown from last resolution (from %" PRId64 " ms ago). Will resolve again in %" PRId64 " ms", - last_resolution_ago, ms_until_next_resolution); + last_resolution_ago.millis(), + time_until_next_resolution.millis()); have_next_resolution_timer_ = true; // TODO(roth): We currently deal with this ref manually. 
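The native resolver mirrors the c-ares changes: BackOff is now configured with typed Durations and returns a Timestamp from NextAttemptTime(). A standalone sketch of a backoff whose knobs are Durations rather than raw millisecond ints (illustrative model; the real BackOff also applies jitter and returns an absolute attempt time):

#include <algorithm>
#include <cstdint>
#include <cstdio>

struct Duration {
  int64_t ms;
  static Duration Seconds(int64_t s) { return {s * 1000}; }
};

class BackOff {
 public:
  BackOff(Duration initial, double multiplier, Duration max)
      : current_(initial), multiplier_(multiplier), max_(max) {}
  // Returns the delay before the next attempt, then grows it geometrically.
  Duration NextDelay() {
    Duration d = current_;
    current_.ms = std::min<int64_t>(
        static_cast<int64_t>(current_.ms * multiplier_), max_.ms);
    return d;
  }

 private:
  Duration current_;
  double multiplier_;
  Duration max_;
};

int main() {
  BackOff b(Duration::Seconds(1), 1.6, Duration::Seconds(120));
  for (int i = 0; i < 4; ++i) {
    std::printf("attempt %d: wait %lld ms\n", i, (long long)b.NextDelay().ms);
  }
}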
diff --git a/src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc b/src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc
index 2c85e6e25a8..807a2c74baa 100644
--- a/src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc
@@ -131,12 +131,12 @@ GoogleCloud2ProdResolver::MetadataQuery::MetadataQuery(
       const_cast<char*>(GRPC_ARG_RESOURCE_QUOTA),
       resolver_->resource_quota_.get(), grpc_resource_quota_arg_vtable());
   grpc_channel_args args = {1, &resource_quota_arg};
-  http_request_ =
-      HttpRequest::Get(std::move(*uri), &args, pollent, &request,
-                       ExecCtx::Get()->Now() + 10000,  // 10s timeout
-                       &on_done_, &response_,
-                       RefCountedPtr<grpc_channel_credentials>(
-                           grpc_insecure_credentials_create()));
+  http_request_ = HttpRequest::Get(
+      std::move(*uri), &args, pollent, &request,
+      ExecCtx::Get()->Now() + Duration::Seconds(10),  // 10s timeout
+      &on_done_, &response_,
+      RefCountedPtr<grpc_channel_credentials>(
+          grpc_insecure_credentials_create()));
   http_request_->Start();
 }
diff --git a/src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc b/src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc
index a4e57646545..45af0d43314 100644
--- a/src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc
@@ -472,17 +472,18 @@ grpc_error_handle XdsResolver::XdsConfigSelector::CreateMethodConfig(
   if (route_action.retry_policy.has_value() &&
       !route_action.retry_policy->retry_on.Empty()) {
     std::vector<std::string> retry_parts;
+    const auto base_interval =
+        route_action.retry_policy->retry_back_off.base_interval.as_timespec();
+    const auto max_interval =
+        route_action.retry_policy->retry_back_off.max_interval.as_timespec();
     retry_parts.push_back(absl::StrFormat(
         "\"retryPolicy\": {\n"
         "  \"maxAttempts\": %d,\n"
         "  \"initialBackoff\": \"%d.%09ds\",\n"
         "  \"maxBackoff\": \"%d.%09ds\",\n"
         "  \"backoffMultiplier\": 2,\n",
-        route_action.retry_policy->num_retries + 1,
-        route_action.retry_policy->retry_back_off.base_interval.seconds,
-        route_action.retry_policy->retry_back_off.base_interval.nanos,
-        route_action.retry_policy->retry_back_off.max_interval.seconds,
-        route_action.retry_policy->retry_back_off.max_interval.nanos));
+        route_action.retry_policy->num_retries + 1, base_interval.tv_sec,
+        base_interval.tv_nsec, max_interval.tv_sec, max_interval.tv_nsec));
     std::vector<std::string> code_parts;
     if (route_action.retry_policy->retry_on.Contains(GRPC_STATUS_CANCELLED)) {
       code_parts.push_back("    \"CANCELLED\"");
@@ -509,12 +510,10 @@ grpc_error_handle XdsResolver::XdsConfigSelector::CreateMethodConfig(
   }
   // Set timeout.
   if (route_action.max_stream_duration.has_value() &&
-      (route_action.max_stream_duration->seconds != 0 ||
-       route_action.max_stream_duration->nanos != 0)) {
-    fields.emplace_back(
-        absl::StrFormat("    \"timeout\": \"%d.%09ds\"",
-                        route_action.max_stream_duration->seconds,
-                        route_action.max_stream_duration->nanos));
+      (route_action.max_stream_duration != Duration::Zero())) {
+    gpr_timespec ts = route_action.max_stream_duration->as_timespec();
+    fields.emplace_back(absl::StrFormat("    \"timeout\": \"%d.%09ds\"",
+                                        ts.tv_sec, ts.tv_nsec));
   }
   // Handle xDS HTTP filters.
   XdsRouting::GeneratePerHttpFilterConfigsResult result =
diff --git a/src/core/ext/filters/client_channel/resolver_result_parsing.cc b/src/core/ext/filters/client_channel/resolver_result_parsing.cc
index 64a468f844f..23294742e39 100644
--- a/src/core/ext/filters/client_channel/resolver_result_parsing.cc
+++ b/src/core/ext/filters/client_channel/resolver_result_parsing.cc
@@ -170,7 +170,7 @@ ClientChannelServiceConfigParser::ParsePerMethodParams(
     }
   }
   // Parse timeout.
-  grpc_millis timeout = 0;
+  Duration timeout;
   ParseJsonObjectFieldAsDuration(json.object_value(), "timeout", &timeout,
                                  &error_list, false);
   // Return result.
diff --git a/src/core/ext/filters/client_channel/resolver_result_parsing.h b/src/core/ext/filters/client_channel/resolver_result_parsing.h
index fea59971ecd..855ebbde643 100644
--- a/src/core/ext/filters/client_channel/resolver_result_parsing.h
+++ b/src/core/ext/filters/client_channel/resolver_result_parsing.h
@@ -27,7 +27,7 @@
 #include "src/core/lib/config/core_configuration.h"
 #include "src/core/lib/gprpp/ref_counted.h"
 #include "src/core/lib/gprpp/ref_counted_ptr.h"
-#include "src/core/lib/iomgr/exec_ctx.h"  // for grpc_millis
+#include "src/core/lib/iomgr/exec_ctx.h"  // for grpc_core::Timestamp
 #include "src/core/lib/json/json.h"
 #include "src/core/lib/resolver/resolver.h"
 #include "src/core/lib/service_config/service_config_parser.h"
@@ -67,16 +67,16 @@ class ClientChannelGlobalParsedConfig
 class ClientChannelMethodParsedConfig
     : public ServiceConfigParser::ParsedConfig {
  public:
-  ClientChannelMethodParsedConfig(grpc_millis timeout,
+  ClientChannelMethodParsedConfig(Duration timeout,
                                   const absl::optional<bool>& wait_for_ready)
       : timeout_(timeout), wait_for_ready_(wait_for_ready) {}
 
-  grpc_millis timeout() const { return timeout_; }
+  Duration timeout() const { return timeout_; }
 
   absl::optional<bool> wait_for_ready() const { return wait_for_ready_; }
 
  private:
-  grpc_millis timeout_ = 0;
+  Duration timeout_;
   absl::optional<bool> wait_for_ready_;
 };
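The xds_resolver hunks trade hand-carried seconds/nanos pairs for a single Duration converted once via as_timespec() when the service-config JSON is emitted. A standalone sketch of that split into the "%d.%09ds" proto-duration form (illustrative; negative durations and gpr_timespec's real layout are out of scope here):

#include <cstdint>
#include <cstdio>

struct Timespec {
  int64_t sec;
  int32_t nsec;
};

// Model of Duration::as_timespec() for non-negative millisecond values.
Timespec AsTimespec(int64_t millis) {
  return {millis / 1000, static_cast<int32_t>((millis % 1000) * 1000000)};
}

int main() {
  Timespec ts = AsTimespec(2500);  // 2.5 seconds
  char buf[64];
  std::snprintf(buf, sizeof(buf), "\"timeout\": \"%lld.%09ds\"",
                (long long)ts.sec, ts.nsec);
  std::puts(buf);  // "timeout": "2.500000000s"
}

Keeping one typed value until the serialization boundary removes the risk of updating `seconds` but not `nanos` (or vice versa) in a future edit.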
diff --git a/src/core/ext/filters/client_channel/retry_filter.cc b/src/core/ext/filters/client_channel/retry_filter.cc
index b873fe64200..3fa13bdf8b7 100644
--- a/src/core/ext/filters/client_channel/retry_filter.cc
+++ b/src/core/ext/filters/client_channel/retry_filter.cc
@@ -401,7 +401,7 @@ class RetryFilter::CallData {
     // Returns true if the call should be retried.
     bool ShouldRetry(absl::optional<grpc_status_code> status,
-                     absl::optional<grpc_millis> server_pushback_ms);
+                     absl::optional<Duration> server_pushback_ms);
 
     // Abandons the call attempt.  Unrefs any deferred batches.
     void Abandon();
@@ -511,8 +511,8 @@ class RetryFilter::CallData {
   void RetryCommit(CallAttempt* call_attempt);
 
   // Starts a timer to retry after appropriate back-off.
-  // If server_pushback_ms is nullopt, retry_backoff_ is used.
-  void StartRetryTimer(absl::optional<grpc_millis> server_pushback_ms);
+  // If server_pushback is nullopt, retry_backoff_ is used.
+  void StartRetryTimer(absl::optional<Duration> server_pushback);
 
   static void OnRetryTimer(void* arg, grpc_error_handle error);
   static void OnRetryTimerLocked(void* arg, grpc_error_handle error);
@@ -534,7 +534,7 @@ class RetryFilter::CallData {
   BackOff retry_backoff_;
 
   grpc_slice path_;  // Request path.
-  grpc_millis deadline_;
+  Timestamp deadline_;
   Arena* arena_;
   grpc_call_stack* owning_call_;
   CallCombiner* call_combiner_;
@@ -689,7 +689,7 @@ RetryFilter::CallData::CallAttempt::CallAttempt(CallData* calld,
   // If per_attempt_recv_timeout is set, start a timer.
   if (calld->retry_policy_ != nullptr &&
       calld->retry_policy_->per_attempt_recv_timeout().has_value()) {
-    grpc_millis per_attempt_recv_deadline =
+    Timestamp per_attempt_recv_deadline =
         ExecCtx::Get()->Now() +
         *calld->retry_policy_->per_attempt_recv_timeout();
     if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
@@ -697,7 +697,7 @@ RetryFilter::CallData::CallAttempt::CallAttempt(CallData* calld,
               "chand=%p calld=%p attempt=%p: per-attempt timeout in %" PRId64
               " ms",
               calld->chand_, calld, this,
-              *calld->retry_policy_->per_attempt_recv_timeout());
+              calld->retry_policy_->per_attempt_recv_timeout()->millis());
     }
     // Schedule retry after computed delay.
     GRPC_CLOSURE_INIT(&on_per_attempt_recv_timer_, OnPerAttemptRecvTimer, this,
@@ -1086,7 +1086,7 @@ void RetryFilter::CallData::CallAttempt::CancelFromSurface(
 
 bool RetryFilter::CallData::CallAttempt::ShouldRetry(
     absl::optional<grpc_status_code> status,
-    absl::optional<grpc_millis> server_pushback_ms) {
+    absl::optional<Duration> server_pushback) {
   // If no retry policy, don't retry.
   if (calld_->retry_policy_ == nullptr) return false;
   // Check status.
@@ -1149,8 +1149,8 @@ bool RetryFilter::CallData::CallAttempt::ShouldRetry(
     return false;
   }
   // Check server push-back.
-  if (server_pushback_ms.has_value()) {
-    if (*server_pushback_ms < 0) {
+  if (server_pushback.has_value()) {
+    if (*server_pushback < Duration::Zero()) {
       if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
         gpr_log(GPR_INFO,
                 "chand=%p calld=%p attempt=%p: not retrying due to server "
@@ -1164,7 +1164,7 @@ bool RetryFilter::CallData::CallAttempt::ShouldRetry(
             GPR_INFO,
             "chand=%p calld=%p attempt=%p: server push-back: retry in %" PRIu64
             " ms",
-            calld_->chand_, calld_, this, *server_pushback_ms);
+            calld_->chand_, calld_, this, server_pushback->millis());
       }
     }
   }
@@ -1255,7 +1255,7 @@ void RetryFilter::CallData::CallAttempt::OnPerAttemptRecvTimerLocked(
     // Mark current attempt as abandoned.
     call_attempt->Abandon();
     // We are retrying. Start backoff timer.
-    calld->StartRetryTimer(/*server_pushback_ms=*/absl::nullopt);
+    calld->StartRetryTimer(/*server_pushback=*/absl::nullopt);
   } else {
     // Not retrying, so commit the call.
     calld->RetryCommit(call_attempt);
@@ -1551,12 +1551,12 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvMessageReady(
 
 namespace {
 
-// Sets *status, *server_pushback_ms, and *is_lb_drop based on md_batch
+// Sets *status, *server_pushback, and *is_lb_drop based on md_batch
 // and error.
 void GetCallStatus(
-    grpc_millis deadline, grpc_metadata_batch* md_batch,
-    grpc_error_handle error, grpc_status_code* status,
-    absl::optional<grpc_millis>* server_pushback_ms, bool* is_lb_drop,
+    Timestamp deadline, grpc_metadata_batch* md_batch, grpc_error_handle error,
+    grpc_status_code* status, absl::optional<Duration>* server_pushback,
+    bool* is_lb_drop,
    absl::optional<GrpcStreamNetworkState::ValueType>* stream_network_state) {
   if (error != GRPC_ERROR_NONE) {
     grpc_error_get_status(error, deadline, status, nullptr, nullptr, nullptr);
@@ -1568,7 +1568,7 @@ void GetCallStatus(
   } else {
     *status = *md_batch->get(GrpcStatusMetadata());
   }
-  *server_pushback_ms = md_batch->get(GrpcRetryPushbackMsMetadata());
+  *server_pushback = md_batch->get(GrpcRetryPushbackMsMetadata());
   *stream_network_state = md_batch->get(GrpcStreamNetworkState());
   GRPC_ERROR_UNREF(error);
 }
@@ -1700,22 +1700,21 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvTrailingMetadataReady(
   call_attempt->MaybeCancelPerAttemptRecvTimer();
   // Get the call's status and check for server pushback metadata.
   grpc_status_code status = GRPC_STATUS_OK;
-  absl::optional<grpc_millis> server_pushback_ms;
+  absl::optional<Duration> server_pushback;
   bool is_lb_drop = false;
   absl::optional<GrpcStreamNetworkState::ValueType> stream_network_state;
   grpc_metadata_batch* md_batch =
       batch_data->batch_.payload->recv_trailing_metadata.recv_trailing_metadata;
   GetCallStatus(calld->deadline_, md_batch, GRPC_ERROR_REF(error), &status,
-                &server_pushback_ms, &is_lb_drop, &stream_network_state);
+                &server_pushback, &is_lb_drop, &stream_network_state);
   if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
     gpr_log(GPR_INFO,
             "chand=%p calld=%p attempt=%p: call finished, status=%s "
-            "server_pushback_ms=%s is_lb_drop=%d stream_network_state=%s",
+            "server_pushback=%s is_lb_drop=%d stream_network_state=%s",
             calld->chand_, calld, call_attempt,
             grpc_status_code_to_string(status),
-            server_pushback_ms.has_value()
-                ? absl::StrCat(*server_pushback_ms).c_str()
-                : "N/A",
+            server_pushback.has_value() ? server_pushback->ToString().c_str()
+                                        : "N/A",
             is_lb_drop,
             stream_network_state.has_value()
                 ? absl::StrCat(*stream_network_state).c_str()
@@ -1739,7 +1738,7 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvTrailingMetadataReady(
   }
   // If not transparently retrying, check for configurable retry.
   if (retry == kNoRetry &&
-      call_attempt->ShouldRetry(status, server_pushback_ms)) {
+      call_attempt->ShouldRetry(status, server_pushback)) {
     retry = kConfigurableRetry;
   }
   // If we're retrying, do so.
@@ -1759,7 +1758,7 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvTrailingMetadataReady(
     if (retry == kTransparentRetry) {
       calld->AddClosureToStartTransparentRetry(&closures);
     } else {
-      calld->StartRetryTimer(server_pushback_ms);
+      calld->StartRetryTimer(server_pushback);
     }
     // Record that this attempt has been abandoned.
     call_attempt->Abandon();
@@ -2115,14 +2114,15 @@ RetryFilter::CallData::CallData(RetryFilter* chand,
       retry_backoff_(
          BackOff::Options()
              .set_initial_backoff(retry_policy_ == nullptr
-                                       ? 0
+                                       ? Duration::Zero()
                                        : retry_policy_->initial_backoff())
              .set_multiplier(retry_policy_ == nullptr
                                  ? 0
                                  : retry_policy_->backoff_multiplier())
              .set_jitter(RETRY_BACKOFF_JITTER)
-             .set_max_backoff(
-                 retry_policy_ == nullptr ? 0 : retry_policy_->max_backoff())),
+             .set_max_backoff(retry_policy_ == nullptr
+                                  ? Duration::Zero()
+                                  : retry_policy_->max_backoff())),
       path_(grpc_slice_ref_internal(args.path)),
       deadline_(args.deadline),
       arena_(args.arena),
@@ -2552,14 +2552,14 @@ void RetryFilter::CallData::RetryCommit(CallAttempt* call_attempt) {
 }
 
 void RetryFilter::CallData::StartRetryTimer(
-    absl::optional<grpc_millis> server_pushback_ms) {
+    absl::optional<Duration> server_pushback) {
   // Reset call attempt.
   call_attempt_.reset(DEBUG_LOCATION, "StartRetryTimer");
   // Compute backoff delay.
-  grpc_millis next_attempt_time;
-  if (server_pushback_ms.has_value()) {
-    GPR_ASSERT(*server_pushback_ms >= 0);
-    next_attempt_time = ExecCtx::Get()->Now() + *server_pushback_ms;
+  Timestamp next_attempt_time;
+  if (server_pushback.has_value()) {
+    GPR_ASSERT(*server_pushback >= Duration::Zero());
+    next_attempt_time = ExecCtx::Get()->Now() + *server_pushback;
     retry_backoff_.Reset();
   } else {
     next_attempt_time = retry_backoff_.NextAttemptTime();
@@ -2567,7 +2567,7 @@ void RetryFilter::CallData::StartRetryTimer(
   if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
     gpr_log(GPR_INFO,
             "chand=%p calld=%p: retrying failed call in %" PRId64 " ms", chand_,
-            this, next_attempt_time - ExecCtx::Get()->Now());
+            this, (next_attempt_time - ExecCtx::Get()->Now()).millis());
   }
   // Schedule retry after computed delay.
   GRPC_CLOSURE_INIT(&retry_closure_, OnRetryTimer, this, nullptr);
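StartRetryTimer above encodes a small policy: an explicit server pushback (a Duration carried in trailing metadata) overrides the local exponential backoff, and the backoff is reset so a later locally-scheduled retry starts over. A standalone sketch of just that decision (illustrative model, not the filter itself):

#include <cstdint>
#include <cstdio>
#include <optional>

struct Duration { int64_t ms; };

struct RetryState {
  int64_t backoff_ms = 1000;
  int64_t NextAttemptDelayMs(std::optional<Duration> server_pushback) {
    if (server_pushback.has_value()) {
      backoff_ms = 1000;           // retry_backoff_.Reset() in the diff
      return server_pushback->ms;  // honor the server's schedule
    }
    int64_t d = backoff_ms;
    backoff_ms *= 2;               // NextAttemptTime() grows the delay
    return d;
  }
};

int main() {
  RetryState st;
  std::printf("%lld\n", (long long)st.NextAttemptDelayMs(std::nullopt));   // 1000
  std::printf("%lld\n", (long long)st.NextAttemptDelayMs(Duration{250})); // 250
  std::printf("%lld\n", (long long)st.NextAttemptDelayMs(std::nullopt));  // 1000
}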
diff --git a/src/core/ext/filters/client_channel/retry_service_config.cc b/src/core/ext/filters/client_channel/retry_service_config.cc
index 0a670cc3fd6..9382904b6aa 100644
--- a/src/core/ext/filters/client_channel/retry_service_config.cc
+++ b/src/core/ext/filters/client_channel/retry_service_config.cc
@@ -158,9 +158,9 @@ namespace {
 
 grpc_error_handle ParseRetryPolicy(
     const grpc_channel_args* args, const Json& json, int* max_attempts,
-    grpc_millis* initial_backoff, grpc_millis* max_backoff,
-    float* backoff_multiplier, StatusCodeSet* retryable_status_codes,
-    absl::optional<grpc_millis>* per_attempt_recv_timeout) {
+    Duration* initial_backoff, Duration* max_backoff, float* backoff_multiplier,
+    StatusCodeSet* retryable_status_codes,
+    absl::optional<Duration>* per_attempt_recv_timeout) {
   if (json.type() != Json::Type::OBJECT) {
     return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
         "field:retryPolicy error:should be of type object");
@@ -192,14 +192,14 @@ grpc_error_handle ParseRetryPolicy(
   // Parse initialBackoff.
   if (ParseJsonObjectFieldAsDuration(json.object_value(), "initialBackoff",
                                      initial_backoff, &error_list) &&
-      *initial_backoff == 0) {
+      *initial_backoff == Duration::Zero()) {
     error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
         "field:initialBackoff error:must be greater than 0"));
   }
   // Parse maxBackoff.
   if (ParseJsonObjectFieldAsDuration(json.object_value(), "maxBackoff",
                                      max_backoff, &error_list) &&
-      *max_backoff == 0) {
+      *max_backoff == Duration::Zero()) {
     error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
         "field:maxBackoff error:must be greater than 0"));
   }
@@ -253,7 +253,7 @@ grpc_error_handle ParseRetryPolicy(
                                 false)) {
     it = json.object_value().find("perAttemptRecvTimeout");
     if (it != json.object_value().end()) {
-      grpc_millis per_attempt_recv_timeout_value;
+      Duration per_attempt_recv_timeout_value;
       if (!ParseDurationFromJson(it->second, &per_attempt_recv_timeout_value)) {
         error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
             "field:perAttemptRecvTimeout error:type must be STRING of the "
@@ -262,7 +262,7 @@ grpc_error_handle ParseRetryPolicy(
         *per_attempt_recv_timeout = per_attempt_recv_timeout_value;
         // TODO(roth): As part of implementing hedging, relax this check such
         // that we allow a value of 0 if a hedging policy is specified.
-        if (per_attempt_recv_timeout_value == 0) {
+        if (per_attempt_recv_timeout_value == Duration::Zero()) {
          error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
              "field:perAttemptRecvTimeout error:must be greater than 0"));
        }
@@ -296,11 +296,11 @@ RetryServiceConfigParser::ParsePerMethodParams(const grpc_channel_args* args,
   auto it = json.object_value().find("retryPolicy");
   if (it == json.object_value().end()) return nullptr;
   int max_attempts = 0;
-  grpc_millis initial_backoff = 0;
-  grpc_millis max_backoff = 0;
+  Duration initial_backoff;
+  Duration max_backoff;
   float backoff_multiplier = 0;
   StatusCodeSet retryable_status_codes;
-  absl::optional<grpc_millis> per_attempt_recv_timeout;
+  absl::optional<Duration> per_attempt_recv_timeout;
   *error = ParseRetryPolicy(args, it->second, &max_attempts, &initial_backoff,
                             &max_backoff, &backoff_multiplier,
                             &retryable_status_codes, &per_attempt_recv_timeout);
diff --git a/src/core/ext/filters/client_channel/retry_service_config.h b/src/core/ext/filters/client_channel/retry_service_config.h
index d9a8d5b114a..c576c6f63d7 100644
--- a/src/core/ext/filters/client_channel/retry_service_config.h
+++ b/src/core/ext/filters/client_channel/retry_service_config.h
@@ -24,7 +24,7 @@
 #include "src/core/ext/filters/client_channel/retry_throttle.h"
 #include "src/core/lib/channel/status_util.h"
 #include "src/core/lib/config/core_configuration.h"
-#include "src/core/lib/iomgr/exec_ctx.h"  // for grpc_millis
+#include "src/core/lib/gprpp/time.h"
 #include "src/core/lib/service_config/service_config_parser.h"
 
 namespace grpc_core {
@@ -46,10 +46,10 @@ class RetryGlobalConfig : public ServiceConfigParser::ParsedConfig {
 
 class RetryMethodConfig : public ServiceConfigParser::ParsedConfig {
  public:
-  RetryMethodConfig(int max_attempts, grpc_millis initial_backoff,
-                    grpc_millis max_backoff, float backoff_multiplier,
+  RetryMethodConfig(int max_attempts, Duration initial_backoff,
+                    Duration max_backoff, float backoff_multiplier,
                     StatusCodeSet retryable_status_codes,
-                    absl::optional<grpc_millis> per_attempt_recv_timeout)
+                    absl::optional<Duration> per_attempt_recv_timeout)
       : max_attempts_(max_attempts),
         initial_backoff_(initial_backoff),
         max_backoff_(max_backoff),
@@ -58,23 +58,23 @@ class RetryMethodConfig : public ServiceConfigParser::ParsedConfig {
         per_attempt_recv_timeout_(per_attempt_recv_timeout) {}
 
   int max_attempts() const { return max_attempts_; }
-  grpc_millis initial_backoff() const { return initial_backoff_; }
-  grpc_millis max_backoff() const { return max_backoff_; }
+  Duration initial_backoff() const { return initial_backoff_; }
+  Duration max_backoff() const { return max_backoff_; }
   float backoff_multiplier() const { return backoff_multiplier_; }
   StatusCodeSet retryable_status_codes() const {
     return retryable_status_codes_;
   }
-  absl::optional<grpc_millis> per_attempt_recv_timeout() const {
+  absl::optional<Duration> per_attempt_recv_timeout() const {
     return per_attempt_recv_timeout_;
   }
 
 private:
  int max_attempts_ = 0;
-  grpc_millis initial_backoff_ = 0;
-  grpc_millis max_backoff_ = 0;
+  Duration initial_backoff_;
+  Duration max_backoff_;
  float backoff_multiplier_ = 0;
  StatusCodeSet retryable_status_codes_;
-  absl::optional<grpc_millis> per_attempt_recv_timeout_;
+  absl::optional<Duration> per_attempt_recv_timeout_;
 };
 
 class RetryServiceConfigParser : public ServiceConfigParser::Parser {
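A note on the parsing hunks: Duration default-constructs to zero, so `Duration timeout;` replaces `grpc_millis timeout = 0;`, and the validity checks become typed comparisons against Duration::Zero(). A standalone sketch of the validate-after-parse pattern (ParseDuration below is a hypothetical stand-in; gRPC's ParseDurationFromJson operates on Json values, not raw strings):

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <optional>
#include <string>

struct Duration {
  int64_t ms;
  static Duration Zero() { return {0}; }
  bool operator==(Duration o) const { return ms == o.ms; }
};

// Hypothetical helper: accepts proto-style strings such as "1.5s".
std::optional<Duration> ParseDuration(const std::string& s) {
  if (s.empty() || s.back() != 's') return std::nullopt;
  double seconds = std::atof(s.substr(0, s.size() - 1).c_str());
  return Duration{static_cast<int64_t>(seconds * 1000)};
}

int main() {
  auto d = ParseDuration("0s");
  if (!d.has_value()) {
    std::puts("field:initialBackoff error:bad format");
  } else if (*d == Duration::Zero()) {
    // Typed comparison replaces "*initial_backoff == 0" in the diff.
    std::puts("field:initialBackoff error:must be greater than 0");
  }
}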
diff --git a/src/core/ext/filters/client_channel/subchannel.cc b/src/core/ext/filters/client_channel/subchannel.cc
index d44afaa22e2..ba45e1bff12 100644
--- a/src/core/ext/filters/client_channel/subchannel.cc
+++ b/src/core/ext/filters/client_channel/subchannel.cc
@@ -247,7 +247,7 @@ void SubchannelCall::MaybeInterceptRecvTrailingMetadata(
 namespace {
 
 // Sets *status based on the rest of the parameters.
-void GetCallStatus(grpc_status_code* status, grpc_millis deadline,
+void GetCallStatus(grpc_status_code* status, Timestamp deadline,
                    grpc_metadata_batch* md_batch, grpc_error_handle error) {
   if (error != GRPC_ERROR_NONE) {
     grpc_error_get_status(error, deadline, status, nullptr, nullptr, nullptr);
@@ -568,52 +568,55 @@ void Subchannel::HealthWatcherMap::ShutdownLocked() { map_.clear(); }
 
 namespace {
 
-BackOff::Options ParseArgsForBackoffValues(
-    const grpc_channel_args* args, grpc_millis* min_connect_timeout_ms) {
-  grpc_millis initial_backoff_ms =
-      GRPC_SUBCHANNEL_INITIAL_CONNECT_BACKOFF_SECONDS * 1000;
-  *min_connect_timeout_ms =
-      GRPC_SUBCHANNEL_RECONNECT_MIN_TIMEOUT_SECONDS * 1000;
-  grpc_millis max_backoff_ms =
-      GRPC_SUBCHANNEL_RECONNECT_MAX_BACKOFF_SECONDS * 1000;
+BackOff::Options ParseArgsForBackoffValues(const grpc_channel_args* args,
+                                           Duration* min_connect_timeout) {
+  Duration initial_backoff =
+      Duration::Seconds(GRPC_SUBCHANNEL_INITIAL_CONNECT_BACKOFF_SECONDS);
+  *min_connect_timeout =
+      Duration::Seconds(GRPC_SUBCHANNEL_RECONNECT_MIN_TIMEOUT_SECONDS);
+  Duration max_backoff =
+      Duration::Seconds(GRPC_SUBCHANNEL_RECONNECT_MAX_BACKOFF_SECONDS);
   bool fixed_reconnect_backoff = false;
   if (args != nullptr) {
     for (size_t i = 0; i < args->num_args; i++) {
       if (0 == strcmp(args->args[i].key,
                       "grpc.testing.fixed_reconnect_backoff_ms")) {
         fixed_reconnect_backoff = true;
-        initial_backoff_ms = *min_connect_timeout_ms = max_backoff_ms =
-            grpc_channel_arg_get_integer(
+        initial_backoff = *min_connect_timeout = max_backoff =
+            Duration::Milliseconds(grpc_channel_arg_get_integer(
                 &args->args[i],
-                {static_cast<int>(initial_backoff_ms), 100, INT_MAX});
+                {static_cast<int>(initial_backoff.millis()), 100, INT_MAX}));
       } else if (0 ==
                  strcmp(args->args[i].key, GRPC_ARG_MIN_RECONNECT_BACKOFF_MS)) {
         fixed_reconnect_backoff = false;
-        *min_connect_timeout_ms = grpc_channel_arg_get_integer(
-            &args->args[i],
-            {static_cast<int>(*min_connect_timeout_ms), 100, INT_MAX});
+        *min_connect_timeout =
+            Duration::Milliseconds(grpc_channel_arg_get_integer(
+                &args->args[i],
+                {static_cast<int>(min_connect_timeout->millis()), 100,
+                 INT_MAX}));
       } else if (0 ==
                  strcmp(args->args[i].key, GRPC_ARG_MAX_RECONNECT_BACKOFF_MS)) {
         fixed_reconnect_backoff = false;
-        max_backoff_ms = grpc_channel_arg_get_integer(
-            &args->args[i],
-            {static_cast<int>(max_backoff_ms), 100, INT_MAX});
+        max_backoff = Duration::Milliseconds(grpc_channel_arg_get_integer(
+            &args->args[i],
+            {static_cast<int>(max_backoff.millis()), 100, INT_MAX}));
       } else if (0 == strcmp(args->args[i].key,
                              GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS)) {
         fixed_reconnect_backoff = false;
-        initial_backoff_ms = grpc_channel_arg_get_integer(
+        initial_backoff = Duration::Milliseconds(grpc_channel_arg_get_integer(
            &args->args[i],
-            {static_cast<int>(initial_backoff_ms), 100, INT_MAX});
+            {static_cast<int>(initial_backoff.millis()), 100, INT_MAX}));
       }
     }
   }
   return BackOff::Options()
-      .set_initial_backoff(initial_backoff_ms)
+      .set_initial_backoff(initial_backoff)
       .set_multiplier(fixed_reconnect_backoff
                           ? 1.0
                           : GRPC_SUBCHANNEL_RECONNECT_BACKOFF_MULTIPLIER)
      .set_jitter(fixed_reconnect_backoff ? 0.0
                                          : GRPC_SUBCHANNEL_RECONNECT_JITTER)
-      .set_max_backoff(max_backoff_ms);
+      .set_max_backoff(max_backoff);
 }
 
 }  // namespace
@@ -642,7 +645,7 @@ Subchannel::Subchannel(SubchannelKey key,
       key_(std::move(key)),
       pollset_set_(grpc_pollset_set_create()),
       connector_(std::move(connector)),
-      backoff_(ParseArgsForBackoffValues(args, &min_connect_timeout_ms_)) {
+      backoff_(ParseArgsForBackoffValues(args, &min_connect_timeout_)) {
   GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED();
   GRPC_CLOSURE_INIT(&on_connecting_finished_, OnConnectingFinished, this,
                     grpc_schedule_on_exec_ctx);
@@ -878,14 +881,14 @@ void Subchannel::MaybeStartConnectingLocked() {
   } else {
     GPR_ASSERT(!have_retry_alarm_);
     have_retry_alarm_ = true;
-    const grpc_millis time_til_next =
+    const Duration time_til_next =
         next_attempt_deadline_ - ExecCtx::Get()->Now();
-    if (time_til_next <= 0) {
+    if (time_til_next <= Duration::Zero()) {
       gpr_log(GPR_INFO, "subchannel %p %s: Retry immediately", this,
               key_.ToString().c_str());
     } else {
       gpr_log(GPR_INFO, "subchannel %p %s: Retry in %" PRId64 " milliseconds",
-              this, key_.ToString().c_str(), time_til_next);
+              this, key_.ToString().c_str(), time_til_next.millis());
     }
     GRPC_CLOSURE_INIT(&on_retry_alarm_, OnRetryAlarm, this,
                       grpc_schedule_on_exec_ctx);
@@ -922,8 +925,7 @@ void Subchannel::ContinueConnectingLocked() {
   SubchannelConnector::Args args;
   args.address = &address_for_connect_;
   args.interested_parties = pollset_set_;
-  const grpc_millis min_deadline =
-      min_connect_timeout_ms_ + ExecCtx::Get()->Now();
+  const Timestamp min_deadline = min_connect_timeout_ + ExecCtx::Get()->Now();
   next_attempt_deadline_ = backoff_.NextAttemptTime();
   args.deadline = std::max(next_attempt_deadline_, min_deadline);
   args.channel_args = args_;
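The subchannel header below carries the comment "must be located before backoff_": ParseArgsForBackoffValues fills min_connect_timeout_ through an out-parameter while the backoff_ member is being initialized, and C++ initializes members in declaration order. A standalone sketch of why that ordering matters (illustrative; names mirror the diff but the types are toy stand-ins):

#include <cstdio>

struct Options { int initial_ms; };

Options ParseArgs(int arg_ms, int* min_connect_timeout_ms) {
  *min_connect_timeout_ms = arg_ms * 2;  // side effect consumed via backoff_
  return Options{arg_ms};
}

class Subchannel {
 public:
  explicit Subchannel(int arg_ms)
      // min_connect_timeout_ is declared first, so it is initialized before
      // backoff_'s initializer runs and writes through the pointer.
      : backoff_(ParseArgs(arg_ms, &min_connect_timeout_)) {}
  int min_connect_timeout_ = 0;  // must precede backoff_ in the class
  Options backoff_;
};

int main() {
  Subchannel s(100);
  std::printf("timeout=%d initial=%d\n", s.min_connect_timeout_,
              s.backoff_.initial_ms);
}

Declared the other way around, the out-parameter would target a not-yet-initialized member and the write would be clobbered (or worse) depending on the member's own initializer.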
diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h
index 55dd31e03c7..9e3ddd39b4d 100644
--- a/src/core/ext/filters/client_channel/subchannel.h
+++ b/src/core/ext/filters/client_channel/subchannel.h
@@ -76,7 +76,7 @@ class SubchannelCall {
     grpc_polling_entity* pollent;
     Slice path;
     gpr_cycle_counter start_time;
-    grpc_millis deadline;
+    Timestamp deadline;
     Arena* arena;
     grpc_call_context_element* context;
     CallCombiner* call_combiner;
@@ -129,7 +129,7 @@ class SubchannelCall {
   grpc_closure recv_trailing_metadata_ready_;
   grpc_closure* original_recv_trailing_metadata_ = nullptr;
   grpc_metadata_batch* recv_trailing_metadata_ = nullptr;
-  grpc_millis deadline_;
+  Timestamp deadline_;
 };
 
 // A subchannel that knows how to connect to exactly one target address. It
@@ -360,10 +360,11 @@ class Subchannel : public DualRefCounted<Subchannel> {
   // The map of watchers with health check service names.
   HealthWatcherMap health_watcher_map_ ABSL_GUARDED_BY(mu_);
 
+  // Minimum connect timeout - must be located before backoff_.
+  Duration min_connect_timeout_ ABSL_GUARDED_BY(mu_);
   // Backoff state.
   BackOff backoff_ ABSL_GUARDED_BY(mu_);
-  grpc_millis next_attempt_deadline_ ABSL_GUARDED_BY(mu_);
-  grpc_millis min_connect_timeout_ms_ ABSL_GUARDED_BY(mu_);
+  Timestamp next_attempt_deadline_ ABSL_GUARDED_BY(mu_);
   bool backoff_begun_ ABSL_GUARDED_BY(mu_) = false;
 
   // Retry alarm.
diff --git a/src/core/ext/filters/client_idle/client_idle_filter.cc b/src/core/ext/filters/client_idle/client_idle_filter.cc
index b0637d04fb4..631499ade95 100644
--- a/src/core/ext/filters/client_idle/client_idle_filter.cc
+++ b/src/core/ext/filters/client_idle/client_idle_filter.cc
@@ -54,12 +54,14 @@ TraceFlag grpc_trace_client_idle_filter(false, "client_idle_filter");
 
 namespace {
 
-grpc_millis GetClientIdleTimeout(const grpc_channel_args* args) {
-  return std::max(
+Duration GetClientIdleTimeout(const grpc_channel_args* args) {
+  auto millis = std::max(
       grpc_channel_arg_get_integer(
          grpc_channel_args_find(args, GRPC_ARG_CLIENT_IDLE_TIMEOUT_MS),
          {DEFAULT_IDLE_TIMEOUT_MS, 0, INT_MAX}),
      MIN_IDLE_TIMEOUT_MS);
+  if (millis == INT_MAX) return Duration::Infinity();
+  return Duration::Milliseconds(millis);
 }
 
 class ClientIdleFilter : public ChannelFilter {
@@ -82,7 +84,7 @@ class ClientIdleFilter : public ChannelFilter {
 
  private:
  ClientIdleFilter(grpc_channel_stack* channel_stack,
-                   grpc_millis client_idle_timeout)
+                   Duration client_idle_timeout)
      : channel_stack_(channel_stack),
        client_idle_timeout_(client_idle_timeout) {}
 
@@ -99,7 +101,7 @@ class ClientIdleFilter : public ChannelFilter {
 
   // The channel stack to which we take refs for pending callbacks.
   grpc_channel_stack* channel_stack_;
-  grpc_millis client_idle_timeout_;
+  Duration client_idle_timeout_;
   std::shared_ptr<IdleFilterState> idle_filter_state_{
       std::make_shared<IdleFilterState>(false)};
 
@@ -190,7 +192,7 @@ void RegisterClientIdleFilter(CoreConfiguration::Builder* builder) {
       [](ChannelStackBuilder* builder) {
         const grpc_channel_args* channel_args = builder->channel_args();
         if (!grpc_channel_args_want_minimal_stack(channel_args) &&
-            GetClientIdleTimeout(channel_args) != INT_MAX) {
+            GetClientIdleTimeout(channel_args) != Duration::Infinity()) {
           builder->PrependFilter(&grpc_client_idle_filter, nullptr);
         }
         return true;
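GetClientIdleTimeout shows the recurring channel-arg convention in this diff: args still carry milliseconds as int, and INT_MAX means "disabled", which now maps to Duration::Infinity() once at the boundary instead of being compared as a magic int at every use site. A standalone sketch (illustrative types):

#include <climits>
#include <cstdint>
#include <cstdio>
#include <limits>

struct Duration {
  int64_t ms;
  static Duration Infinity() { return {std::numeric_limits<int64_t>::max()}; }
  static Duration Milliseconds(int64_t v) { return {v}; }
  bool operator!=(Duration o) const { return ms != o.ms; }
};

Duration GetClientIdleTimeout(int arg_ms) {
  if (arg_ms == INT_MAX) return Duration::Infinity();
  return Duration::Milliseconds(arg_ms);
}

int main() {
  // Filter registration now reads as "is there a finite timeout?".
  if (GetClientIdleTimeout(INT_MAX) != Duration::Infinity()) {
    std::puts("install client_idle filter");
  } else {
    std::puts("idle timeout disabled");
  }
}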
diff --git a/src/core/ext/filters/deadline/deadline_filter.cc b/src/core/ext/filters/deadline/deadline_filter.cc
index 384ea8de977..8ff05762c12 100644
--- a/src/core/ext/filters/deadline/deadline_filter.cc
+++ b/src/core/ext/filters/deadline/deadline_filter.cc
@@ -38,7 +38,7 @@ namespace grpc_core {
 // Allocated on the call arena.
 class TimerState {
  public:
-  TimerState(grpc_call_element* elem, grpc_millis deadline) : elem_(elem) {
+  TimerState(grpc_call_element* elem, Timestamp deadline) : elem_(elem) {
     grpc_deadline_state* deadline_state =
         static_cast<grpc_deadline_state*>(elem_->call_data);
     GRPC_CALL_STACK_REF(deadline_state->call_stack, "DeadlineTimerState");
@@ -113,8 +113,8 @@ class TimerState {
 
 // This is called via the call combiner, so access to deadline_state is
 // synchronized.
 static void start_timer_if_needed(grpc_call_element* elem,
-                                  grpc_millis deadline) {
-  if (deadline == GRPC_MILLIS_INF_FUTURE) return;
+                                  grpc_core::Timestamp deadline) {
+  if (deadline == grpc_core::Timestamp::InfFuture()) return;
   grpc_deadline_state* deadline_state =
       static_cast<grpc_deadline_state*>(elem->call_data);
   GPR_ASSERT(deadline_state->timer_state == nullptr);
@@ -157,13 +157,14 @@ static void inject_recv_trailing_metadata_ready(
 // Callback and associated state for starting the timer after call stack
 // initialization has been completed.
 struct start_timer_after_init_state {
-  start_timer_after_init_state(grpc_call_element* elem, grpc_millis deadline)
+  start_timer_after_init_state(grpc_call_element* elem,
+                               grpc_core::Timestamp deadline)
       : elem(elem), deadline(deadline) {}
   ~start_timer_after_init_state() { start_timer_if_needed(elem, deadline); }
 
   bool in_call_combiner = false;
   grpc_call_element* elem;
-  grpc_millis deadline;
+  grpc_core::Timestamp deadline;
   grpc_closure closure;
 };
 static void start_timer_after_init(void* arg, grpc_error_handle error) {
@@ -187,13 +188,13 @@ static void start_timer_after_init(void* arg, grpc_error_handle error) {
 
 grpc_deadline_state::grpc_deadline_state(grpc_call_element* elem,
                                          const grpc_call_element_args& args,
-                                         grpc_millis deadline)
+                                         grpc_core::Timestamp deadline)
     : call_stack(args.call_stack),
       call_combiner(args.call_combiner),
       arena(args.arena) {
   // Deadline will always be infinite on servers, so the timer will only be
   // set on clients with a finite deadline.
-  if (deadline != GRPC_MILLIS_INF_FUTURE) {
+  if (deadline != grpc_core::Timestamp::InfFuture()) {
     // When the deadline passes, we indicate the failure by sending down
     // an op with cancel_error set.  However, we can't send down any ops
     // until after the call stack is fully initialized.  If we start the
@@ -212,7 +213,7 @@ grpc_deadline_state::grpc_deadline_state(grpc_call_element* elem,
 grpc_deadline_state::~grpc_deadline_state() { cancel_timer_if_needed(this); }
 
 void grpc_deadline_state_reset(grpc_call_element* elem,
-                               grpc_millis new_deadline) {
+                               grpc_core::Timestamp new_deadline) {
   grpc_deadline_state* deadline_state =
       static_cast<grpc_deadline_state*>(elem->call_data);
   cancel_timer_if_needed(deadline_state);
@@ -295,7 +296,7 @@ static void recv_initial_metadata_ready(void* arg, grpc_error_handle error) {
   server_call_data* calld = static_cast<server_call_data*>(elem->call_data);
   start_timer_if_needed(
       elem, calld->recv_initial_metadata->get(grpc_core::GrpcTimeoutMetadata())
-                .value_or(GRPC_MILLIS_INF_FUTURE));
+                .value_or(grpc_core::Timestamp::InfFuture()));
   // Invoke the next callback.
   grpc_core::Closure::Run(DEBUG_LOCATION,
                           calld->next_recv_initial_metadata_ready,
diff --git a/src/core/ext/filters/deadline/deadline_filter.h b/src/core/ext/filters/deadline/deadline_filter.h
index fd6fd457213..6aaa1a05dfb 100644
--- a/src/core/ext/filters/deadline/deadline_filter.h
+++ b/src/core/ext/filters/deadline/deadline_filter.h
@@ -30,7 +30,8 @@ class TimerState;
 // Must be the first field in the filter's call_data.
 struct grpc_deadline_state {
   grpc_deadline_state(grpc_call_element* elem,
-                      const grpc_call_element_args& args, grpc_millis deadline);
+                      const grpc_call_element_args& args,
+                      grpc_core::Timestamp deadline);
   ~grpc_deadline_state();
 
   // We take a reference to the call stack for the timer callback.
@@ -61,7 +62,7 @@ struct grpc_deadline_state {
 //
 // Note: Must be called while holding the call combiner.
 void grpc_deadline_state_reset(grpc_call_element* elem,
-                               grpc_millis new_deadline);
+                               grpc_core::Timestamp new_deadline);
 
 // To be called from the client-side filter's start_transport_stream_op_batch()
 // method.  Ensures that the deadline timer is cancelled when the call
diff --git a/src/core/ext/filters/fault_injection/fault_injection_filter.cc b/src/core/ext/filters/fault_injection/fault_injection_filter.cc
index 3c05d955ba4..1d065e4b512 100644
--- a/src/core/ext/filters/fault_injection/fault_injection_filter.cc
+++ b/src/core/ext/filters/fault_injection/fault_injection_filter.cc
@@ -357,12 +357,13 @@ void CallData::DecideWhetherToInjectFaults(
       }
     }
     if (!fi_policy_->delay_header.empty() &&
-        (copied_policy == nullptr || copied_policy->delay == 0)) {
+        (copied_policy == nullptr ||
+         copied_policy->delay == Duration::Zero())) {
       auto value =
           initial_metadata->GetStringValue(fi_policy_->delay_header, &buffer);
       if (value.has_value()) {
         maybe_copy_policy_func();
-        copied_policy->delay = static_cast<grpc_millis>(
+        copied_policy->delay = Duration::Milliseconds(
             std::max(AsInt<int64_t>(*value).value_or(0), int64_t(0)));
       }
     }
@@ -379,7 +380,7 @@ void CallData::DecideWhetherToInjectFaults(
     if (copied_policy != nullptr) fi_policy_ = copied_policy;
   }
   // Roll the dice
-  delay_request_ = fi_policy_->delay != 0 &&
+  delay_request_ = fi_policy_->delay != Duration::Zero() &&
                    UnderFraction(fi_policy_->delay_percentage_numerator,
                                  fi_policy_->delay_percentage_denominator);
   abort_request_ = fi_policy_->abort_code != GRPC_STATUS_OK &&
@@ -423,7 +424,7 @@ void CallData::DelayBatch(grpc_call_element* elem,
   MutexLock lock(&delay_mu_);
   delayed_batch_ = batch;
   resume_batch_canceller_ = new ResumeBatchCanceller(elem);
-  grpc_millis resume_time = ExecCtx::Get()->Now() + fi_policy_->delay;
+  Timestamp resume_time = ExecCtx::Get()->Now() + fi_policy_->delay;
   GRPC_CLOSURE_INIT(&batch->handler_private.closure, ResumeBatch, elem,
                     grpc_schedule_on_exec_ctx);
   grpc_timer_init(&delay_timer_, resume_time, &batch->handler_private.closure);
diff --git a/src/core/ext/filters/fault_injection/service_config_parser.h b/src/core/ext/filters/fault_injection/service_config_parser.h
index 2a01fba7532..8354b0f4bc6 100644
--- a/src/core/ext/filters/fault_injection/service_config_parser.h
+++ b/src/core/ext/filters/fault_injection/service_config_parser.h
@@ -38,7 +38,7 @@ class FaultInjectionMethodParsedConfig
     uint32_t abort_percentage_numerator = 0;
     uint32_t abort_percentage_denominator = 100;
 
-    grpc_millis delay = 0;
+    Duration delay;
     std::string delay_header;
     std::string delay_percentage_header;
    uint32_t delay_percentage_numerator = 0;
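The fault-injection hunk converts a header-supplied delay string into a non-negative Duration, clamping parse failures and negative values to zero. A standalone sketch of that parsing and clamping (illustrative; in the diff, AsInt<int64_t> does the integer parsing):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <optional>
#include <string>

struct Duration {
  int64_t ms;
  static Duration Milliseconds(int64_t v) { return {v}; }
};

std::optional<int64_t> AsInt(const std::string& s) {
  char* end = nullptr;
  long long v = std::strtoll(s.c_str(), &end, 10);
  if (end == s.c_str() || *end != '\0') return std::nullopt;
  return v;
}

Duration DelayFromHeader(const std::string& value) {
  // Unparseable -> 0; negative -> 0; otherwise the requested delay.
  return Duration::Milliseconds(
      std::max<int64_t>(AsInt(value).value_or(0), 0));
}

int main() {
  std::printf("%lld\n", (long long)DelayFromHeader("250").ms);   // 250
  std::printf("%lld\n", (long long)DelayFromHeader("-5").ms);    // 0
  std::printf("%lld\n", (long long)DelayFromHeader("junk").ms);  // 0
}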
diff --git a/src/core/ext/filters/max_age/max_age_filter.cc b/src/core/ext/filters/max_age/max_age_filter.cc
index 2b09879e433..2b70aac5b5a 100644
--- a/src/core/ext/filters/max_age/max_age_filter.cc
+++ b/src/core/ext/filters/max_age/max_age_filter.cc
@@ -68,11 +68,11 @@ struct channel_data {
      max_connection_idle */
   grpc_timer max_idle_timer;
   /* Allowed max time a channel may have no outstanding rpcs */
-  grpc_millis max_connection_idle;
+  grpc_core::Duration max_connection_idle;
   /* Allowed max time a channel may exist */
-  grpc_millis max_connection_age;
+  grpc_core::Duration max_connection_age;
   /* Allowed grace period after the channel reaches its max age */
-  grpc_millis max_connection_age_grace;
+  grpc_core::Duration max_connection_age_grace;
   /* Closure to run when the channel's idle duration reaches
      max_connection_idle and should be closed gracefully */
   grpc_closure max_idle_timer_cb;
@@ -142,7 +142,8 @@ struct channel_data {
      For 2, 7 : See decrease_call_count() function
      For 4, 6 : See increase_call_count() function */
   gpr_atm idle_state;
-  /* Time when the channel finished its last outstanding call, in grpc_millis */
+  /* Time when the channel finished its last outstanding call, in
+   * grpc_core::Timestamp */
   gpr_atm last_enter_idle_time_millis;
 };
 }  // namespace
@@ -179,7 +180,9 @@ static void decrease_call_count(channel_data* chand) {
   /* Enter idle */
   if (gpr_atm_full_fetch_add(&chand->call_count, -1) == 1) {
     gpr_atm_no_barrier_store(&chand->last_enter_idle_time_millis,
-                             (gpr_atm)grpc_core::ExecCtx::Get()->Now());
+                             (gpr_atm)grpc_core::ExecCtx::Get()
+                                 ->Now()
+                                 .milliseconds_after_process_epoch());
     while (true) {
       gpr_atm idle_state = gpr_atm_acq_load(&chand->idle_state);
       switch (idle_state) {
@@ -286,12 +289,10 @@ static void start_max_age_grace_timer_after_goaway_op(
     grpc_core::MutexLock lock(&chand->max_age_timer_mu);
     chand->max_age_grace_timer_pending = true;
     GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_age_grace_timer");
-    grpc_timer_init(&chand->max_age_grace_timer,
-                    chand->max_connection_age_grace == GRPC_MILLIS_INF_FUTURE
-                        ? GRPC_MILLIS_INF_FUTURE
-                        : grpc_core::ExecCtx::Get()->Now() +
-                              chand->max_connection_age_grace,
-                    &chand->force_close_max_age_channel);
+    grpc_timer_init(
+        &chand->max_age_grace_timer,
+        grpc_core::ExecCtx::Get()->Now() + chand->max_connection_age_grace,
+        &chand->force_close_max_age_channel);
   }
   GRPC_CHANNEL_STACK_UNREF(chand->channel_stack,
                            "max_age start_max_age_grace_timer_after_goaway_op");
@@ -332,11 +333,13 @@ static void max_idle_timer_cb(void* arg, grpc_error_handle error) {
       case MAX_IDLE_STATE_SEEN_ENTER_IDLE:
         GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_idle_timer");
-        grpc_timer_init(&chand->max_idle_timer,
-                        static_cast<grpc_millis>(gpr_atm_no_barrier_load(
-                            &chand->last_enter_idle_time_millis)) +
-                            chand->max_connection_idle,
-                        &chand->max_idle_timer_cb);
+        grpc_timer_init(
+            &chand->max_idle_timer,
+            grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(
+                gpr_atm_no_barrier_load(
+                    &chand->last_enter_idle_time_millis)) +
+                chand->max_connection_idle,
+            &chand->max_idle_timer_cb);
         /* idle_state may have already been set to MAX_IDLE_STATE_SEEN_EXIT_IDLE
            by increase_call_count(), in this case, we don't need to set it to
            MAX_IDLE_STATE_TIMER_SET */
@@ -399,8 +402,8 @@ static void force_close_max_age_channel(void* arg, grpc_error_handle error) {
    connection storms. Note that the MAX_CONNECTION_AGE option without jitter
    would not create connection storms by itself, but if there happened to be a
    connection storm it could cause it to repeat at a fixed period. */
-static grpc_millis
-add_random_max_connection_age_jitter_and_convert_to_grpc_millis(int value) {
+static grpc_core::Duration
+add_random_max_connection_age_jitter_and_convert_to_duration(int value) {
   /* generate a random number between 1 - MAX_CONNECTION_AGE_JITTER and
      1 + MAX_CONNECTION_AGE_JITTER */
   double multiplier = rand() * MAX_CONNECTION_AGE_JITTER * 2.0 / RAND_MAX +
@@ -408,9 +411,11 @@ add_random_max_connection_age_jitter_and_convert_to_duration(int value) {
   double result = multiplier * value;
   /* INT_MAX - 0.5 converts the value to float, so that result will not be
      cast to int implicitly before the comparison. */
-  return result > (static_cast<double>(GRPC_MILLIS_INF_FUTURE)) - 0.5
-             ? GRPC_MILLIS_INF_FUTURE
-             : static_cast<grpc_millis>(result);
+  return result > (static_cast<double>(
+                      grpc_core::Duration::Infinity().millis())) -
+                     0.5
+             ? grpc_core::Duration::Infinity()
+             : grpc_core::Duration::Milliseconds(result);
 }
 
 /* Constructor for call_data. */
@@ -436,15 +441,17 @@ static grpc_error_handle max_age_init_channel_elem(
   new (chand) channel_data();
   chand->channel_stack = args->channel_stack;
   chand->max_connection_age =
-      add_random_max_connection_age_jitter_and_convert_to_grpc_millis(
+      add_random_max_connection_age_jitter_and_convert_to_duration(
          DEFAULT_MAX_CONNECTION_AGE_MS);
   chand->max_connection_age_grace =
       DEFAULT_MAX_CONNECTION_AGE_GRACE_MS == INT_MAX
-          ? GRPC_MILLIS_INF_FUTURE
-          : DEFAULT_MAX_CONNECTION_AGE_GRACE_MS;
-  chand->max_connection_idle = DEFAULT_MAX_CONNECTION_IDLE_MS == INT_MAX
-                                   ? GRPC_MILLIS_INF_FUTURE
-                                   : DEFAULT_MAX_CONNECTION_IDLE_MS;
+          ? grpc_core::Duration::Infinity()
+          : grpc_core::Duration::Milliseconds(
+                DEFAULT_MAX_CONNECTION_AGE_GRACE_MS);
+  chand->max_connection_idle =
+      DEFAULT_MAX_CONNECTION_IDLE_MS == INT_MAX
+          ? grpc_core::Duration::Infinity()
+          : grpc_core::Duration::Milliseconds(DEFAULT_MAX_CONNECTION_IDLE_MS);
   chand->idle_state = MAX_IDLE_STATE_INIT;
   gpr_atm_no_barrier_store(&chand->last_enter_idle_time_millis, GPR_ATM_MIN);
   for (size_t i = 0; i < args->channel_args->num_args; ++i) {
@@ -453,21 +460,22 @@ static grpc_error_handle max_age_init_channel_elem(
       const int value = grpc_channel_arg_get_integer(
          &args->channel_args->args[i], MAX_CONNECTION_AGE_INTEGER_OPTIONS);
       chand->max_connection_age =
-          add_random_max_connection_age_jitter_and_convert_to_grpc_millis(
-              value);
+          add_random_max_connection_age_jitter_and_convert_to_duration(value);
     } else if (0 == strcmp(args->channel_args->args[i].key,
                            GRPC_ARG_MAX_CONNECTION_AGE_GRACE_MS)) {
       const int value = grpc_channel_arg_get_integer(
          &args->channel_args->args[i],
          {DEFAULT_MAX_CONNECTION_AGE_GRACE_MS, 0, INT_MAX});
       chand->max_connection_age_grace =
-          value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
+          value == INT_MAX ? grpc_core::Duration::Infinity()
+                           : grpc_core::Duration::Milliseconds(value);
     } else if (0 == strcmp(args->channel_args->args[i].key,
                            GRPC_ARG_MAX_CONNECTION_IDLE_MS)) {
       const int value = grpc_channel_arg_get_integer(
          &args->channel_args->args[i], MAX_CONNECTION_IDLE_INTEGER_OPTIONS);
       chand->max_connection_idle =
-          value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
+          value == INT_MAX ? grpc_core::Duration::Infinity()
+                           : grpc_core::Duration::Milliseconds(value);
     }
   }
   GRPC_CLOSURE_INIT(&chand->max_idle_timer_cb, max_idle_timer_cb, chand,
@@ -487,7 +495,7 @@ static grpc_error_handle max_age_init_channel_elem(
                     start_max_age_grace_timer_after_goaway_op, chand,
                     grpc_schedule_on_exec_ctx);
 
-  if (chand->max_connection_age != GRPC_MILLIS_INF_FUTURE) {
+  if (chand->max_connection_age != grpc_core::Duration::Infinity()) {
     /* When the channel reaches its max age, we send down an op with
       goaway_error set.  However, we can't send down any ops until after the
       channel stack is fully initialized.  If we start the timer here, we have
@@ -505,7 +513,7 @@ static grpc_error_handle max_age_init_channel_elem(
   /* Initialize the number of calls as 1, so that the max_idle_timer will not
      start until start_max_idle_timer_after_init is invoked. */
   gpr_atm_rel_store(&chand->call_count, 1);
-  if (chand->max_connection_idle != GRPC_MILLIS_INF_FUTURE) {
+  if (chand->max_connection_idle != grpc_core::Duration::Infinity()) {
     GRPC_CHANNEL_STACK_REF(chand->channel_stack,
                           "max_age start_max_idle_timer_after_init");
     grpc_core::ExecCtx::Run(DEBUG_LOCATION,
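The jitter helper above is one of the few places where double arithmetic still meets the typed Durations: the configured max age is scaled by a random factor and saturated to the infinite Duration when the double result would exceed it. A standalone sketch of exactly that computation (illustrative; MAX_CONNECTION_AGE_JITTER is a gRPC constant, modeled here as 0.1):

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <limits>

constexpr double kJitter = 0.1;  // stand-in for MAX_CONNECTION_AGE_JITTER
constexpr int64_t kInfMs = std::numeric_limits<int64_t>::max();

int64_t JitteredMaxAgeMs(int value_ms) {
  // Random factor in [1 - kJitter, 1 + kJitter], i.e. [0.9, 1.1].
  double multiplier = rand() * kJitter * 2.0 / RAND_MAX + 1.0 - kJitter;
  double result = multiplier * value_ms;
  // The -0.5 keeps the comparison in double, as the original comment notes,
  // so result is not implicitly truncated before comparing.
  return result > static_cast<double>(kInfMs) - 0.5
             ? kInfMs
             : static_cast<int64_t>(result);
}

int main() {
  std::printf("jittered: %lld ms\n", (long long)JitteredMaxAgeMs(30000));
}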
grpc_core::Duration::Infinity() + : grpc_core::Duration::Milliseconds(value); } else if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_KEEPALIVE_TIMEOUT_MS)) { const int value = grpc_channel_arg_get_integer( @@ -330,7 +332,9 @@ static bool read_channel_args(grpc_chttp2_transport* t, ? g_default_client_keepalive_timeout_ms : g_default_server_keepalive_timeout_ms, 0, INT_MAX}); - t->keepalive_timeout = value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value; + t->keepalive_timeout = value == INT_MAX + ? grpc_core::Duration::Infinity() + : grpc_core::Duration::Milliseconds(value); } else if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS)) { t->keepalive_permit_without_calls = static_cast<uint32_t>( @@ -406,20 +410,24 @@ static bool read_channel_args(grpc_chttp2_transport* t, static void init_transport_keepalive_settings(grpc_chttp2_transport* t) { if (t->is_client) { t->keepalive_time = g_default_client_keepalive_time_ms == INT_MAX - ? GRPC_MILLIS_INF_FUTURE - : g_default_client_keepalive_time_ms; + ? grpc_core::Duration::Infinity() + : grpc_core::Duration::Milliseconds( + g_default_client_keepalive_time_ms); t->keepalive_timeout = g_default_client_keepalive_timeout_ms == INT_MAX - ? GRPC_MILLIS_INF_FUTURE - : g_default_client_keepalive_timeout_ms; + ? grpc_core::Duration::Infinity() + : grpc_core::Duration::Milliseconds( + g_default_client_keepalive_timeout_ms); t->keepalive_permit_without_calls = g_default_client_keepalive_permit_without_calls; } else { t->keepalive_time = g_default_server_keepalive_time_ms == INT_MAX - ? GRPC_MILLIS_INF_FUTURE - : g_default_server_keepalive_time_ms; + ? grpc_core::Duration::Infinity() + : grpc_core::Duration::Milliseconds( + g_default_server_keepalive_time_ms); t->keepalive_timeout = g_default_server_keepalive_timeout_ms == INT_MAX - ? GRPC_MILLIS_INF_FUTURE - : g_default_server_keepalive_timeout_ms; + ? grpc_core::Duration::Infinity() + : grpc_core::Duration::Milliseconds( + g_default_server_keepalive_timeout_ms); t->keepalive_permit_without_calls = g_default_server_keepalive_permit_without_calls; } @@ -429,11 +437,12 @@ static void configure_transport_ping_policy(grpc_chttp2_transport* t) { t->ping_policy.max_pings_without_data = g_default_max_pings_without_data; t->ping_policy.max_ping_strikes = g_default_max_ping_strikes; t->ping_policy.min_recv_ping_interval_without_data = - g_default_min_recv_ping_interval_without_data_ms; + grpc_core::Duration::Milliseconds( + g_default_min_recv_ping_interval_without_data_ms); } static void init_keepalive_pings_if_enabled(grpc_chttp2_transport* t) { - if (t->keepalive_time != GRPC_MILLIS_INF_FUTURE) { + if (t->keepalive_time != grpc_core::Duration::Infinity()) { t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING; GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping"); GRPC_CLOSURE_INIT(&t->init_keepalive_ping_locked, init_keepalive_ping, t, @@ -525,9 +534,9 @@ grpc_chttp2_transport::grpc_chttp2_transport( // No pings allowed before receiving a header or data frame. 
ping_state.pings_before_data_required = 0; ping_state.is_delayed_ping_timer_set = false; - ping_state.last_ping_sent_time = GRPC_MILLIS_INF_PAST; + ping_state.last_ping_sent_time = grpc_core::Timestamp::InfPast(); - ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST; + ping_recv_state.last_ping_recv_time = grpc_core::Timestamp::InfPast(); ping_recv_state.ping_strikes = 0; init_keepalive_pings_if_enabled(this); @@ -1118,16 +1127,14 @@ void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t, gpr_log(GPR_ERROR, "Received a GOAWAY with error code ENHANCE_YOUR_CALM and debug " "data equal to \"too_many_pings\""); - double current_keepalive_time_ms = static_cast<double>(t->keepalive_time); - constexpr int max_keepalive_time_ms = - INT_MAX / KEEPALIVE_TIME_BACKOFF_MULTIPLIER; + constexpr auto max_keepalive_time = grpc_core::Duration::Milliseconds( + INT_MAX / KEEPALIVE_TIME_BACKOFF_MULTIPLIER); t->keepalive_time = - current_keepalive_time_ms > static_cast<double>(max_keepalive_time_ms) - ? GRPC_MILLIS_INF_FUTURE - : static_cast<grpc_millis>(current_keepalive_time_ms * - KEEPALIVE_TIME_BACKOFF_MULTIPLIER); + t->keepalive_time > max_keepalive_time + ? grpc_core::Duration::Infinity() + : t->keepalive_time * KEEPALIVE_TIME_BACKOFF_MULTIPLIER; status.SetPayload(grpc_core::kKeepaliveThrottlingKey, - absl::Cord(std::to_string(t->keepalive_time))); + absl::Cord(std::to_string(t->keepalive_time.millis()))); } // lie: use transient failure from the transport to indicate goaway has been // received. @@ -1435,7 +1442,7 @@ static void perform_stream_op_locked(void* stream_op, s->deadline = std::min( s->deadline, s->send_initial_metadata->get(grpc_core::GrpcTimeoutMetadata()) - .value_or(GRPC_MILLIS_INF_FUTURE)); + .value_or(grpc_core::Timestamp::InfFuture())); } if (contains_non_ok_status(s->send_initial_metadata)) { s->seen_error = true; @@ -1757,8 +1764,8 @@ static void send_goaway(grpc_chttp2_transport* t, grpc_error_handle error) { t->sent_goaway_state = GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED; grpc_http2_error_code http_error; std::string message; - grpc_error_get_status(error, GRPC_MILLIS_INF_FUTURE, nullptr, &message, - &http_error, nullptr); + grpc_error_get_status(error, grpc_core::Timestamp::InfFuture(), nullptr, + &message, &http_error, nullptr); grpc_chttp2_goaway_append( t->last_new_stream_id, static_cast<uint32_t>(http_error), grpc_slice_from_cpp_string(std::move(message)), &t->qbuf); @@ -1783,7 +1790,7 @@ void grpc_chttp2_add_ping_strike(grpc_chttp2_transport* t) { void grpc_chttp2_reset_ping_clock(grpc_chttp2_transport* t) { if (!t->is_client) { - t->ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST; + t->ping_recv_state.last_ping_recv_time = grpc_core::Timestamp::InfPast(); t->ping_recv_state.ping_strikes = 0; } t->ping_state.pings_before_data_required = @@ -2605,7 +2612,8 @@ static void finish_bdp_ping_locked(void* tp, grpc_error_handle error) { return; } t->bdp_ping_started = false; - grpc_millis next_ping = t->flow_control->bdp_estimator()->CompletePing(); + grpc_core::Timestamp next_ping = + t->flow_control->bdp_estimator()->CompletePing(); grpc_chttp2_act_on_flowctl_action(t->flow_control->PeriodicUpdate(), t, nullptr); GPR_ASSERT(!t->have_next_bdp_ping_timer); diff --git a/src/core/ext/transport/chttp2/transport/flow_control.cc b/src/core/ext/transport/chttp2/transport/flow_control.cc index 6daf00a448b..1ab72ca2d57 100644 --- a/src/core/ext/transport/chttp2/transport/flow_control.cc +++ b/src/core/ext/transport/chttp2/transport/flow_control.cc @@ -348,9 +348,9 @@ double 
TransportFlowControl::TargetLogBdp() { } double TransportFlowControl::SmoothLogBdp(double value) { - grpc_millis now = ExecCtx::Get()->Now(); + Timestamp now = ExecCtx::Get()->Now(); double bdp_error = value - pid_controller_.last_control_value(); - const double dt = static_cast<double>(now - last_pid_update_) * 1e-3; + const double dt = (now - last_pid_update_).seconds(); last_pid_update_ = now; // Limit dt to 100ms const double kMaxDt = 0.1; diff --git a/src/core/ext/transport/chttp2/transport/flow_control.h b/src/core/ext/transport/chttp2/transport/flow_control.h index ae56ff1cf39..9bce7c8a947 100644 --- a/src/core/ext/transport/chttp2/transport/flow_control.h +++ b/src/core/ext/transport/chttp2/transport/flow_control.h @@ -335,7 +335,7 @@ class TransportFlowControl final : public TransportFlowControlBase { /* pid controller */ PidController pid_controller_; - grpc_millis last_pid_update_ = 0; + Timestamp last_pid_update_; }; // Fat interface with all methods a stream flow control implementation needs diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.cc b/src/core/ext/transport/chttp2/transport/frame_ping.cc index 3ee99ebacb9..b87c221dc7a 100644 --- a/src/core/ext/transport/chttp2/transport/frame_ping.cc +++ b/src/core/ext/transport/chttp2/transport/frame_ping.cc @@ -90,8 +90,8 @@ grpc_error_handle grpc_chttp2_ping_parser_parse(void* parser, grpc_chttp2_ack_ping(t, p->opaque_8bytes); } else { if (!t->is_client) { - grpc_millis now = grpc_core::ExecCtx::Get()->Now(); - grpc_millis next_allowed_ping = + grpc_core::Timestamp now = grpc_core::ExecCtx::Get()->Now(); + grpc_core::Timestamp next_allowed_ping = t->ping_recv_state.last_ping_recv_time + t->ping_policy.min_recv_ping_interval_without_data; @@ -100,8 +100,8 @@ grpc_error_handle grpc_chttp2_ping_parser_parse(void* parser, /* According to RFC1122, the interval of TCP Keep-Alive is default to no less than two hours. When there is no outstanding streams, we restrict the number of PINGS equivalent to TCP Keep-Alive. 
*/ - next_allowed_ping = - t->ping_recv_state.last_ping_recv_time + 7200 * GPR_MS_PER_SEC; + next_allowed_ping = t->ping_recv_state.last_ping_recv_time + + grpc_core::Duration::Hours(2); } if (next_allowed_ping > now) { diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.cc b/src/core/ext/transport/chttp2/transport/hpack_encoder.cc index f0f7104270a..7ad68ab1b1f 100644 --- a/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.cc @@ -523,8 +523,7 @@ void HPackCompressor::Framer::EncodeIndexedKeyWithBinaryValue( } } -void HPackCompressor::Framer::Encode(GrpcTimeoutMetadata, - grpc_millis deadline) { +void HPackCompressor::Framer::Encode(GrpcTimeoutMetadata, Timestamp deadline) { Timeout timeout = Timeout::FromDuration(deadline - ExecCtx::Get()->Now()); for (auto it = compressor_->previous_timeouts_.begin(); it != compressor_->previous_timeouts_.end(); ++it) { diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.h b/src/core/ext/transport/chttp2/transport/hpack_encoder.h index 71c6dcc06cf..f44a33b8255 100644 --- a/src/core/ext/transport/chttp2/transport/hpack_encoder.h +++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.h @@ -80,7 +80,7 @@ class HPackCompressor { void Encode(HttpPathMetadata, const Slice& value); void Encode(HttpAuthorityMetadata, const Slice& value); void Encode(HttpStatusMetadata, uint32_t status); - void Encode(GrpcTimeoutMetadata, grpc_millis deadline); + void Encode(GrpcTimeoutMetadata, Timestamp deadline); void Encode(TeMetadata, TeMetadata::ValueType value); void Encode(ContentTypeMetadata, ContentTypeMetadata::ValueType value); void Encode(HttpSchemeMetadata, HttpSchemeMetadata::ValueType value); diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h index dcb0adf7b8d..fe2b437552f 100644 --- a/src/core/ext/transport/chttp2/transport/internal.h +++ b/src/core/ext/transport/chttp2/transport/internal.h @@ -118,16 +118,16 @@ struct grpc_chttp2_ping_queue { struct grpc_chttp2_repeated_ping_policy { int max_pings_without_data; int max_ping_strikes; - grpc_millis min_recv_ping_interval_without_data; + grpc_core::Duration min_recv_ping_interval_without_data; }; struct grpc_chttp2_repeated_ping_state { - grpc_millis last_ping_sent_time; + grpc_core::Timestamp last_ping_sent_time; int pings_before_data_required; grpc_timer delayed_ping_timer; bool is_delayed_ping_timer_set; }; struct grpc_chttp2_server_ping_recv_state { - grpc_millis last_ping_recv_time; + grpc_core::Timestamp last_ping_recv_time; int ping_strikes; }; /* deframer state for the overall http2 stream of bytes */ @@ -477,9 +477,9 @@ struct grpc_chttp2_transport { /** watchdog to kill the transport when waiting for the keepalive ping */ grpc_timer keepalive_watchdog_timer; /** time duration in between pings */ - grpc_millis keepalive_time; + grpc_core::Duration keepalive_time; /** grace period for a ping to complete before watchdog kicks in */ - grpc_millis keepalive_timeout; + grpc_core::Duration keepalive_timeout; /** if keepalive pings are allowed when there's no outstanding streams */ bool keepalive_permit_without_calls = false; /** If start_keepalive_ping_locked has been called */ @@ -609,7 +609,7 @@ struct grpc_chttp2_stream { GRPC_ERROR_NONE; /* protected by t combiner */ bool received_last_frame = false; /* protected by t combiner */ - grpc_millis deadline = GRPC_MILLIS_INF_FUTURE; + grpc_core::Timestamp deadline = 
grpc_core::Timestamp::InfFuture(); /** saw some stream level error */ grpc_error_handle forced_close_error = GRPC_ERROR_NONE; diff --git a/src/core/ext/transport/chttp2/transport/parsing.cc b/src/core/ext/transport/chttp2/transport/parsing.cc index d05f27b98c5..1864a416a1e 100644 --- a/src/core/ext/transport/chttp2/transport/parsing.cc +++ b/src/core/ext/transport/chttp2/transport/parsing.cc @@ -400,7 +400,7 @@ error_handler: /* t->parser = grpc_chttp2_data_parser_parse;*/ t->parser = grpc_chttp2_data_parser_parse; t->parser_data = &s->data_parser; - t->ping_state.last_ping_sent_time = GRPC_MILLIS_INF_PAST; + t->ping_state.last_ping_sent_time = grpc_core::Timestamp::InfPast(); return GRPC_ERROR_NONE; } else if (grpc_error_get_int(err, GRPC_ERROR_INT_STREAM_ID, &unused)) { /* handle stream errors by closing the stream */ @@ -440,7 +440,7 @@ static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t, ? HPackParser::Priority::Included : HPackParser::Priority::None; - t->ping_state.last_ping_sent_time = GRPC_MILLIS_INF_PAST; + t->ping_state.last_ping_sent_time = grpc_core::Timestamp::InfPast(); /* could be a new grpc_chttp2_stream or an existing grpc_chttp2_stream */ s = grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id); diff --git a/src/core/ext/transport/chttp2/transport/writing.cc b/src/core/ext/transport/chttp2/transport/writing.cc index d465400d2e3..41c7d0f7fd2 100644 --- a/src/core/ext/transport/chttp2/transport/writing.cc +++ b/src/core/ext/transport/chttp2/transport/writing.cc @@ -77,15 +77,16 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) { // in a loop while draining the currently-held combiner. Also see // https://github.com/grpc/grpc/issues/26079. grpc_core::ExecCtx::Get()->InvalidateNow(); - grpc_millis now = grpc_core::ExecCtx::Get()->Now(); + grpc_core::Timestamp now = grpc_core::ExecCtx::Get()->Now(); - grpc_millis next_allowed_ping_interval = + grpc_core::Duration next_allowed_ping_interval = (t->keepalive_permit_without_calls == 0 && grpc_chttp2_stream_map_size(&t->stream_map) == 0) - ? 7200 * GPR_MS_PER_SEC - : (GPR_MS_PER_SEC); /* A second is added to deal with network delays - and timing imprecision */ - grpc_millis next_allowed_ping = + ? grpc_core::Duration::Hours(2) + : grpc_core::Duration::Seconds( + 1); /* A second is added to deal with network delays and timing + imprecision */ + grpc_core::Timestamp next_allowed_ping = t->ping_state.last_ping_sent_time + next_allowed_ping_interval; if (next_allowed_ping > now) { @@ -93,12 +94,14 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) { if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) || GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace) || GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) { - gpr_log(GPR_INFO, - "%s: Ping delayed [%s]: not enough time elapsed since last ping. " - " Last ping %f: Next ping %f: Now %f", - t->is_client ? "CLIENT" : "SERVER", t->peer_string.c_str(), - static_cast<double>(t->ping_state.last_ping_sent_time), - static_cast<double>(next_allowed_ping), static_cast<double>(now)); + gpr_log( + GPR_INFO, + "%s: Ping delayed [%s]: not enough time elapsed since last ping. " + " Last ping %" PRId64 ": Next ping %" PRId64 ": Now %" PRId64, + t->is_client ? 
"CLIENT" : "SERVER", t->peer_string.c_str(), + t->ping_state.last_ping_sent_time.milliseconds_after_process_epoch(), + next_allowed_ping.milliseconds_after_process_epoch(), + now.milliseconds_after_process_epoch()); } if (!t->ping_state.is_delayed_ping_timer_set) { t->ping_state.is_delayed_ping_timer_set = true; diff --git a/src/core/ext/transport/inproc/inproc_transport.cc b/src/core/ext/transport/inproc/inproc_transport.cc index 6ff1d43ccd8..989d4236c5d 100644 --- a/src/core/ext/transport/inproc/inproc_transport.cc +++ b/src/core/ext/transport/inproc/inproc_transport.cc @@ -231,7 +231,8 @@ struct inproc_stream { grpc_metadata_batch write_buffer_initial_md{arena}; bool write_buffer_initial_md_filled = false; uint32_t write_buffer_initial_md_flags = 0; - grpc_millis write_buffer_deadline = GRPC_MILLIS_INF_FUTURE; + grpc_core::Timestamp write_buffer_deadline = + grpc_core::Timestamp::InfFuture(); grpc_metadata_batch write_buffer_trailing_md{arena}; bool write_buffer_trailing_md_filled = false; grpc_error_handle write_buffer_cancel_error = GRPC_ERROR_NONE; @@ -265,7 +266,7 @@ struct inproc_stream { grpc_error_handle cancel_self_error = GRPC_ERROR_NONE; grpc_error_handle cancel_other_error = GRPC_ERROR_NONE; - grpc_millis deadline = GRPC_MILLIS_INF_FUTURE; + grpc_core::Timestamp deadline = grpc_core::Timestamp::InfFuture(); bool listed = true; struct inproc_stream* stream_list_prev; @@ -705,7 +706,7 @@ void op_state_machine_locked(inproc_stream* s, grpc_error_handle error) { .recv_initial_metadata, s->recv_initial_md_op->payload->recv_initial_metadata.recv_flags, nullptr); - if (s->deadline != GRPC_MILLIS_INF_FUTURE) { + if (s->deadline != grpc_core::Timestamp::InfFuture()) { s->recv_initial_md_op->payload->recv_initial_metadata .recv_initial_metadata->Set(grpc_core::GrpcTimeoutMetadata(), s->deadline); @@ -1008,12 +1009,12 @@ void perform_stream_op(grpc_transport* gt, grpc_stream* gs, dest, destflags, destfilled); } if (s->t->is_client) { - grpc_millis* dl = + grpc_core::Timestamp* dl = (other == nullptr) ? 
&s->write_buffer_deadline : &other->deadline; *dl = std::min( *dl, op->payload->send_initial_metadata.send_initial_metadata ->get(grpc_core::GrpcTimeoutMetadata()) - .value_or(GRPC_MILLIS_INF_FUTURE)); + .value_or(grpc_core::Timestamp::InfFuture())); s->initial_md_sent = true; } } diff --git a/src/core/ext/xds/file_watcher_certificate_provider_factory.cc b/src/core/ext/xds/file_watcher_certificate_provider_factory.cc index 7a793b06737..939eec249c8 100644 --- a/src/core/ext/xds/file_watcher_certificate_provider_factory.cc +++ b/src/core/ext/xds/file_watcher_certificate_provider_factory.cc @@ -58,7 +58,7 @@ std::string FileWatcherCertificateProviderFactory::Config::ToString() const { absl::StrFormat("ca_certificate_file=\"%s\", ", root_cert_file_)); } parts.push_back( - absl::StrFormat("refresh_interval=%ldms}", refresh_interval_ms_)); + absl::StrFormat("refresh_interval=%ldms}", refresh_interval_.millis())); return absl::StrJoin(parts, ""); } @@ -91,8 +91,8 @@ FileWatcherCertificateProviderFactory::Config::Parse(const Json& config_json, } if (!ParseJsonObjectFieldAsDuration( config_json.object_value(), "refresh_interval", - &config->refresh_interval_ms_, &error_list, false)) { - config->refresh_interval_ms_ = 10 * 60 * 1000; // 10 minutes default + &config->refresh_interval_, &error_list, false)) { + config->refresh_interval_ = Duration::Minutes(10); // 10 minutes default } if (!error_list.empty()) { *error = GRPC_ERROR_CREATE_FROM_VECTOR( @@ -131,7 +131,7 @@ FileWatcherCertificateProviderFactory::CreateCertificateProvider( file_watcher_config->private_key_file(), file_watcher_config->identity_cert_file(), file_watcher_config->root_cert_file(), - file_watcher_config->refresh_interval_ms() / GPR_MS_PER_SEC); + file_watcher_config->refresh_interval().millis() / GPR_MS_PER_SEC); } void FileWatcherCertificateProviderInit() { diff --git a/src/core/ext/xds/file_watcher_certificate_provider_factory.h b/src/core/ext/xds/file_watcher_certificate_provider_factory.h index 13e10debb3a..10b0037efbd 100644 --- a/src/core/ext/xds/file_watcher_certificate_provider_factory.h +++ b/src/core/ext/xds/file_watcher_certificate_provider_factory.h @@ -45,13 +45,13 @@ class FileWatcherCertificateProviderFactory const std::string& root_cert_file() const { return root_cert_file_; } - grpc_millis refresh_interval_ms() const { return refresh_interval_ms_; } + Duration refresh_interval() const { return refresh_interval_; } private: std::string identity_cert_file_; std::string private_key_file_; std::string root_cert_file_; - grpc_millis refresh_interval_ms_; + Duration refresh_interval_; }; const char* name() const override; diff --git a/src/core/ext/xds/google_mesh_ca_certificate_provider_factory.cc b/src/core/ext/xds/google_mesh_ca_certificate_provider_factory.cc index 6e63ae4ef3c..95137bb3159 100644 --- a/src/core/ext/xds/google_mesh_ca_certificate_provider_factory.cc +++ b/src/core/ext/xds/google_mesh_ca_certificate_provider_factory.cc @@ -153,7 +153,7 @@ GoogleMeshCaCertificateProviderFactory::Config::ParseJsonObjectGrpcServices( } if (!ParseJsonObjectFieldAsDuration(grpc_service, "timeout", &timeout_, &error_list_grpc_services, false)) { - timeout_ = 10 * 1000; // 10sec default + timeout_ = Duration::Seconds(10); // 10sec default } return error_list_grpc_services; } @@ -216,12 +216,12 @@ GoogleMeshCaCertificateProviderFactory::Config::Parse( if (!ParseJsonObjectFieldAsDuration( config_json.object_value(), "certificate_lifetime", &config->certificate_lifetime_, &error_list, false)) { - config->certificate_lifetime_ 
= 24 * 60 * 60 * 1000; // 24hrs default + config->certificate_lifetime_ = Duration::Hours(24); // 24hrs default } if (!ParseJsonObjectFieldAsDuration( config_json.object_value(), "renewal_grace_period", &config->renewal_grace_period_, &error_list, false)) { - config->renewal_grace_period_ = 12 * 60 * 60 * 1000; // 12hrs default + config->renewal_grace_period_ = Duration::Hours(12); // 12hrs default } std::string key_type; if (ParseJsonObjectField(config_json.object_value(), "key_type", &key_type, diff --git a/src/core/ext/xds/google_mesh_ca_certificate_provider_factory.h b/src/core/ext/xds/google_mesh_ca_certificate_provider_factory.h index 7a33f977a4a..8d1f19a817e 100644 --- a/src/core/ext/xds/google_mesh_ca_certificate_provider_factory.h +++ b/src/core/ext/xds/google_mesh_ca_certificate_provider_factory.h @@ -52,11 +52,11 @@ class GoogleMeshCaCertificateProviderFactory const StsConfig& sts_config() const { return sts_config_; } - grpc_millis timeout() const { return timeout_; } + Duration timeout() const { return timeout_; } - grpc_millis certificate_lifetime() const { return certificate_lifetime_; } + Duration certificate_lifetime() const { return certificate_lifetime_; } - grpc_millis renewal_grace_period() const { return renewal_grace_period_; } + Duration renewal_grace_period() const { return renewal_grace_period_; } uint32_t key_size() const { return key_size_; } @@ -80,9 +80,9 @@ class GoogleMeshCaCertificateProviderFactory std::string endpoint_; StsConfig sts_config_; - grpc_millis timeout_; - grpc_millis certificate_lifetime_; - grpc_millis renewal_grace_period_; + Duration timeout_; + Duration certificate_lifetime_; + Duration renewal_grace_period_; uint32_t key_size_; std::string location_; }; diff --git a/src/core/ext/xds/xds_api.cc b/src/core/ext/xds/xds_api.cc index 426ac0270fd..c918c8218c0 100644 --- a/src/core/ext/xds/xds_api.cc +++ b/src/core/ext/xds/xds_api.cc @@ -568,8 +568,7 @@ grpc_slice XdsApi::CreateLrsRequest( envoy_config_endpoint_v3_ClusterStats_set_total_dropped_requests( cluster_stats, total_dropped_requests); // Set real load report interval. - gpr_timespec timespec = - grpc_millis_to_timespec(load_report.load_report_interval, GPR_TIMESPAN); + gpr_timespec timespec = load_report.load_report_interval.as_timespec(); google_protobuf_Duration* load_report_interval = envoy_config_endpoint_v3_ClusterStats_mutable_load_report_interval( cluster_stats, arena.ptr()); @@ -580,10 +579,10 @@ grpc_slice XdsApi::CreateLrsRequest( return SerializeLrsRequest(context, request); } -grpc_error_handle XdsApi::ParseLrsResponse( - const grpc_slice& encoded_response, bool* send_all_clusters, - std::set<std::string>* cluster_names, - grpc_millis* load_reporting_interval) { +grpc_error_handle XdsApi::ParseLrsResponse(const grpc_slice& encoded_response, + bool* send_all_clusters, + std::set<std::string>* cluster_names, + Duration* load_reporting_interval) { upb::Arena arena; // Decode the response. 
const envoy_service_load_stats_v3_LoadStatsResponse* decoded_response = @@ -612,21 +611,19 @@ grpc_error_handle XdsApi::ParseLrsResponse( const google_protobuf_Duration* load_reporting_interval_duration = envoy_service_load_stats_v3_LoadStatsResponse_load_reporting_interval( decoded_response); - gpr_timespec timespec{ + *load_reporting_interval = Duration::FromSecondsAndNanoseconds( google_protobuf_Duration_seconds(load_reporting_interval_duration), - google_protobuf_Duration_nanos(load_reporting_interval_duration), - GPR_TIMESPAN}; - *load_reporting_interval = gpr_time_to_millis(timespec); + google_protobuf_Duration_nanos(load_reporting_interval_duration)); return GRPC_ERROR_NONE; } namespace { -google_protobuf_Timestamp* GrpcMillisToTimestamp( - const XdsEncodingContext& context, grpc_millis value) { +google_protobuf_Timestamp* EncodeTimestamp(const XdsEncodingContext& context, + Timestamp value) { google_protobuf_Timestamp* timestamp = google_protobuf_Timestamp_new(context.arena); - gpr_timespec timespec = grpc_millis_to_timespec(value, GPR_CLOCK_REALTIME); + gpr_timespec timespec = value.as_timespec(GPR_CLOCK_REALTIME); google_protobuf_Timestamp_set_seconds(timestamp, timespec.tv_sec); google_protobuf_Timestamp_set_nanos(timestamp, timespec.tv_nsec); return timestamp; @@ -677,7 +674,7 @@ std::string XdsApi::AssembleClientConfig( envoy_service_status_v3_ClientConfig_GenericXdsConfig_set_version_info( entry, StdStringToUpbString(metadata.version)); envoy_service_status_v3_ClientConfig_GenericXdsConfig_set_last_updated( - entry, GrpcMillisToTimestamp(context, metadata.update_time)); + entry, EncodeTimestamp(context, metadata.update_time)); auto* any_field = envoy_service_status_v3_ClientConfig_GenericXdsConfig_mutable_xds_config( entry, context.arena); @@ -697,7 +694,7 @@ std::string XdsApi::AssembleClientConfig( StdStringToUpbString(metadata.failed_version)); envoy_admin_v3_UpdateFailureState_set_last_update_attempt( update_failure_state, - GrpcMillisToTimestamp(context, metadata.failed_update_time)); + EncodeTimestamp(context, metadata.failed_update_time)); envoy_service_status_v3_ClientConfig_GenericXdsConfig_set_error_state( entry, update_failure_state); } diff --git a/src/core/ext/xds/xds_api.h b/src/core/ext/xds/xds_api.h index 5e57c313425..959c2d5a075 100644 --- a/src/core/ext/xds/xds_api.h +++ b/src/core/ext/xds/xds_api.h @@ -75,7 +75,7 @@ class XdsApi { std::map<RefCountedPtr<XdsLocalityName>, XdsClusterLocalityStats::Snapshot, XdsLocalityName::Less> locality_stats; - grpc_millis load_report_interval; + Duration load_report_interval; }; using ClusterLoadReportMap = std::map< std::pair<std::string /*cluster_name*/, std::string /*eds_service_name*/>, @@ -106,7 +106,7 @@ class XdsApi { // The serialized bytes of the last successfully updated raw xDS resource. std::string serialized_proto; // The timestamp when the resource was last successfully updated. - grpc_millis update_time = 0; + Timestamp update_time; // The last successfully updated version of the resource. std::string version; // The rejected version string of the last failed update attempt. @@ -114,7 +114,7 @@ class XdsApi { // Details about the last failed update attempt. std::string failed_details; // Timestamp of the last failed update attempt. 
- grpc_millis failed_update_time = 0; + Timestamp failed_update_time; }; using ResourceMetadataMap = std::map<std::string /*resource_name*/, const ResourceMetadata*>; @@ -168,7 +168,7 @@ class XdsApi { grpc_error_handle ParseLrsResponse(const grpc_slice& encoded_response, bool* send_all_clusters, std::set<std::string>* cluster_names, - grpc_millis* load_reporting_interval); + Duration* load_reporting_interval); // Assemble the client config proto message and return the serialized result. std::string AssembleClientConfig( diff --git a/src/core/ext/xds/xds_client.cc b/src/core/ext/xds/xds_client.cc index a689de4e762..52781b39590 100644 --- a/src/core/ext/xds/xds_client.cc +++ b/src/core/ext/xds/xds_client.cc @@ -216,7 +216,7 @@ class XdsClient::ChannelState::AdsCallState XdsClient* xds_client() const { return ads_call_state_->xds_client(); } AdsCallState* ads_call_state_; - const grpc_millis update_time_ = ExecCtx::Get()->Now(); + const Timestamp update_time_ = ExecCtx::Get()->Now(); Result result_; }; @@ -389,7 +389,7 @@ class XdsClient::ChannelState::LrsCallState // Reports client-side load stats according to a fixed interval. class Reporter : public InternallyRefCounted<Reporter> { public: - Reporter(RefCountedPtr<LrsCallState> parent, grpc_millis report_interval) + Reporter(RefCountedPtr<LrsCallState> parent, Duration report_interval) : parent_(std::move(parent)), report_interval_(report_interval) { GRPC_CLOSURE_INIT(&on_next_report_timer_, OnNextReportTimer, this, grpc_schedule_on_exec_ctx); @@ -420,7 +420,7 @@ class XdsClient::ChannelState::LrsCallState RefCountedPtr<LrsCallState> parent_; // The load reporting state. - const grpc_millis report_interval_; + const Duration report_interval_; bool last_report_counters_were_zero_ = false; bool next_report_timer_callback_pending_ = false; grpc_timer next_report_timer_; @@ -467,7 +467,7 @@ class XdsClient::ChannelState::LrsCallState // Load reporting state. bool send_all_clusters_ = false; std::set<std::string> cluster_names_; // Asked for by the LRS server. 
- grpc_millis load_reporting_interval_ = 0; + Duration load_reporting_interval_; OrphanablePtr<Reporter> reporter_; }; @@ -661,13 +661,13 @@ template <typename T> XdsClient::ChannelState::RetryableCall<T>::RetryableCall( WeakRefCountedPtr<ChannelState> chand) : chand_(std::move(chand)), - backoff_( - BackOff::Options() - .set_initial_backoff(GRPC_XDS_INITIAL_CONNECT_BACKOFF_SECONDS * - 1000) - .set_multiplier(GRPC_XDS_RECONNECT_BACKOFF_MULTIPLIER) - .set_jitter(GRPC_XDS_RECONNECT_JITTER) - .set_max_backoff(GRPC_XDS_RECONNECT_MAX_BACKOFF_SECONDS * 1000)) { + backoff_(BackOff::Options() + .set_initial_backoff(Duration::Seconds( + GRPC_XDS_INITIAL_CONNECT_BACKOFF_SECONDS)) + .set_multiplier(GRPC_XDS_RECONNECT_BACKOFF_MULTIPLIER) + .set_jitter(GRPC_XDS_RECONNECT_JITTER) + .set_max_backoff(Duration::Seconds( + GRPC_XDS_RECONNECT_MAX_BACKOFF_SECONDS))) { // Closure Initialization GRPC_CLOSURE_INIT(&on_retry_timer_, OnRetryTimer, this, grpc_schedule_on_exec_ctx); @@ -715,15 +715,15 @@ void XdsClient::ChannelState::RetryableCall<T>::StartNewCallLocked() { template <typename T> void XdsClient::ChannelState::RetryableCall<T>::StartRetryTimerLocked() { if (shutting_down_) return; - const grpc_millis next_attempt_time = backoff_.NextAttemptTime(); + const Timestamp next_attempt_time = backoff_.NextAttemptTime(); if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) { - grpc_millis timeout = - std::max(next_attempt_time - ExecCtx::Get()->Now(), grpc_millis(0)); + Duration timeout = + std::max(next_attempt_time - ExecCtx::Get()->Now(), Duration::Zero()); gpr_log(GPR_INFO, "[xds_client %p] xds server %s: call attempt failed; " "retry timer will fire in %" PRId64 "ms.", chand()->xds_client(), chand()->server_.server_uri.c_str(), - timeout); + timeout.millis()); } this->Ref(DEBUG_LOCATION, "RetryableCall+retry_timer_start").release(); grpc_timer_init(&retry_timer_, next_attempt_time, &on_retry_timer_); @@ -789,8 +789,7 @@ namespace { // Build a resource metadata struct for ADS result accepting methods and CSDS. XdsApi::ResourceMetadata CreateResourceMetadataAcked( - std::string serialized_proto, std::string version, - grpc_millis update_time) { + std::string serialized_proto, std::string version, Timestamp update_time) { XdsApi::ResourceMetadata resource_metadata; resource_metadata.serialized_proto = std::move(serialized_proto); resource_metadata.update_time = update_time; @@ -802,7 +801,7 @@ XdsApi::ResourceMetadata CreateResourceMetadataAcked( // Update resource_metadata for NACK. void UpdateResourceMetadataNacked(const std::string& version, const std::string& details, - grpc_millis update_time, + Timestamp update_time, XdsApi::ResourceMetadata* resource_metadata) { resource_metadata->client_status = XdsApi::ResourceMetadata::NACKED; resource_metadata->failed_version = version; @@ -950,7 +949,7 @@ XdsClient::ChannelState::AdsCallState::AdsCallState( chand()->channel_, nullptr, GRPC_PROPAGATE_DEFAULTS, xds_client()->interested_parties_, StaticSlice::FromStaticString(method).c_slice(), nullptr, - GRPC_MILLIS_INF_FUTURE, nullptr); + Timestamp::InfFuture(), nullptr); GPR_ASSERT(call_ != nullptr); // Init data associated with the call. 
grpc_metadata_array_init(&initial_metadata_recv_); @@ -1362,7 +1361,7 @@ void XdsClient::ChannelState::LrsCallState::Reporter::Orphan() { void XdsClient::ChannelState::LrsCallState::Reporter:: ScheduleNextReportLocked() { - const grpc_millis next_report_time = ExecCtx::Get()->Now() + report_interval_; + const Timestamp next_report_time = ExecCtx::Get()->Now() + report_interval_; grpc_timer_init(&next_report_timer_, next_report_time, &on_next_report_timer_); next_report_timer_callback_pending_ = true; @@ -1509,8 +1508,8 @@ XdsClient::ChannelState::LrsCallState::LrsCallState( call_ = grpc_channel_create_pollset_set_call( chand()->channel_, nullptr, GRPC_PROPAGATE_DEFAULTS, xds_client()->interested_parties_, - StaticSlice::FromStaticString(method).c_slice(), nullptr, - GRPC_MILLIS_INF_FUTURE, nullptr); + Slice::FromStaticString(method).c_slice(), nullptr, + Timestamp::InfFuture(), nullptr); GPR_ASSERT(call_ != nullptr); // Init the request payload. grpc_slice request_payload_slice = @@ -1680,7 +1679,7 @@ bool XdsClient::ChannelState::LrsCallState::OnResponseReceivedLocked() { // Parse the response. bool send_all_clusters = false; std::set<std::string> new_cluster_names; - grpc_millis new_load_reporting_interval; + Duration new_load_reporting_interval; grpc_error_handle parse_error = xds_client()->api_.ParseLrsResponse( response_slice, &send_all_clusters, &new_cluster_names, &new_load_reporting_interval); @@ -1701,7 +1700,7 @@ bool XdsClient::ChannelState::LrsCallState::OnResponseReceivedLocked() { "ms", xds_client(), chand()->server_.server_uri.c_str(), new_cluster_names.size(), send_all_clusters, - new_load_reporting_interval); + new_load_reporting_interval.millis()); size_t i = 0; for (const auto& name : new_cluster_names) { gpr_log(GPR_INFO, "[xds_client %p] cluster_name %" PRIuPTR ": %s", @@ -1709,9 +1708,10 @@ bool XdsClient::ChannelState::LrsCallState::OnResponseReceivedLocked() { } } if (new_load_reporting_interval < - GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS) { - new_load_reporting_interval = - GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS; + Duration::Milliseconds( + GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS)) { + new_load_reporting_interval = Duration::Milliseconds( + GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS); if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) { gpr_log(GPR_INFO, "[xds_client %p] xds server %s: increased load_report_interval " @@ -1804,10 +1804,10 @@ bool XdsClient::ChannelState::LrsCallState::IsCurrentCallOnChannel() const { namespace { -grpc_millis GetRequestTimeout(const grpc_channel_args* args) { - return grpc_channel_args_find_integer( +Duration GetRequestTimeout(const grpc_channel_args* args) { + return Duration::Milliseconds(grpc_channel_args_find_integer( args, GRPC_ARG_XDS_RESOURCE_DOES_NOT_EXIST_TIMEOUT_MS, - {15000, 0, INT_MAX}); + {15000, 0, INT_MAX})); } grpc_channel_args* ModifyChannelArgs(const grpc_channel_args* args) { @@ -2285,7 +2285,7 @@ XdsApi::ClusterLoadReportMap XdsClient::BuildLoadReportSnapshotLocked( } } // Compute load report interval. - const grpc_millis now = ExecCtx::Get()->Now(); + const Timestamp now = ExecCtx::Get()->Now(); snapshot.load_report_interval = now - load_report.last_report_time; load_report.last_report_time = now; // Record snapshot. 
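Note on the arithmetic the xds_client.cc hunks above rely on: with the new types, Timestamp + Duration yields a Timestamp, Timestamp - Timestamp yields a Duration, and the infinity values saturate instead of overflowing, replacing the old GRPC_MILLIS_INF_FUTURE/GRPC_MILLIS_INF_PAST sentinels. A minimal sketch, not part of this patch (the Sketch* helper names are hypothetical), using only operations already exercised above:

    #include "src/core/lib/gprpp/time.h"

    namespace grpc_core {
    // Mirrors BuildLoadReportSnapshotLocked: elapsed time between reports.
    Duration SketchReportInterval(Timestamp now, Timestamp last_report) {
      return now - last_report;  // Timestamp - Timestamp => Duration
    }
    // Mirrors Reporter::ScheduleNextReportLocked: absolute timer deadline.
    Timestamp SketchNextReport(Timestamp now, Duration report_interval) {
      return now + report_interval;  // Timestamp + Duration => Timestamp
    }
    }  // namespace grpc_core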
diff --git a/src/core/ext/xds/xds_client.h b/src/core/ext/xds/xds_client.h index 1dd12efbf7c..e38e50b716d 100644 --- a/src/core/ext/xds/xds_client.h +++ b/src/core/ext/xds/xds_client.h @@ -253,7 +253,7 @@ class XdsClient : public DualRefCounted<XdsClient> { std::map<RefCountedPtr<XdsLocalityName>, LocalityState, XdsLocalityName::Less> locality_stats; - grpc_millis last_report_time = ExecCtx::Get()->Now(); + Timestamp last_report_time = ExecCtx::Get()->Now(); }; // Load report data. @@ -294,7 +294,7 @@ class XdsClient : public DualRefCounted<XdsClient> { std::unique_ptr<XdsBootstrap> bootstrap_; grpc_channel_args* args_; - const grpc_millis request_timeout_; + const Duration request_timeout_; grpc_pollset_set* interested_parties_; OrphanablePtr<CertificateProviderStore> certificate_provider_store_; XdsApi api_; diff --git a/src/core/ext/xds/xds_common_types.h b/src/core/ext/xds/xds_common_types.h index d5d3f5a00c0..df96e8c93ff 100644 --- a/src/core/ext/xds/xds_common_types.h +++ b/src/core/ext/xds/xds_common_types.h @@ -32,26 +32,11 @@ namespace grpc_core { -struct Duration { - int64_t seconds = 0; - int32_t nanos = 0; - - Duration() = default; - - bool operator==(const Duration& other) const { - return seconds == other.seconds && nanos == other.nanos; - } - std::string ToString() const { - return absl::StrFormat("Duration seconds: %ld, nanos %d", seconds, nanos); - } - - static Duration Parse(const google_protobuf_Duration* proto_duration) { - Duration duration; - duration.seconds = google_protobuf_Duration_seconds(proto_duration); - duration.nanos = google_protobuf_Duration_nanos(proto_duration); - return duration; - } -}; +inline Duration ParseDuration(const google_protobuf_Duration* proto_duration) { + return Duration::FromSecondsAndNanoseconds( + google_protobuf_Duration_seconds(proto_duration), + google_protobuf_Duration_nanos(proto_duration)); +} struct CommonTlsContext { struct CertificateProviderPluginInstance { diff --git a/src/core/ext/xds/xds_listener.cc b/src/core/ext/xds/xds_listener.cc index 3e221158289..24cde3609fe 100644 --- a/src/core/ext/xds/xds_listener.cc +++ b/src/core/ext/xds/xds_listener.cc @@ -37,6 +37,7 @@ #include "upb/upb.h" #include "upb/upb.hpp" +#include "src/core/ext/xds/xds_common_types.h" #include "src/core/lib/address_utils/parse_address.h" #include "src/core/lib/address_utils/sockaddr_utils.h" #include "src/core/lib/gprpp/host_port.h" @@ -294,7 +295,7 @@ grpc_error_handle HttpConnectionManagerParse( envoy_config_core_v3_HttpProtocolOptions_max_stream_duration(options); if (duration != nullptr) { http_connection_manager->http_max_stream_duration = - Duration::Parse(duration); + ParseDuration(duration); } } // Parse filters. 
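The xds_common_types.h change above deletes the hand-rolled {seconds, nanos} Duration struct in favor of the shared grpc_core::Duration, so protobuf durations fold into a single millisecond count via ParseDuration. A small illustrative sketch (SketchParse is hypothetical, and the values are chosen so no sub-millisecond rounding is involved):

    #include "src/core/lib/gprpp/time.h"

    grpc_core::Duration SketchParse() {
      // 1s + 500000000ns normalizes to 1500ms in one step; the old manual
      // carry of nanos overflowing into seconds is no longer needed.
      return grpc_core::Duration::FromSecondsAndNanoseconds(1, 500000000);
    }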
diff --git a/src/core/ext/xds/xds_route_config.cc b/src/core/ext/xds/xds_route_config.cc index 7f70ec13a0f..c894ad6800d 100644 --- a/src/core/ext/xds/xds_route_config.cc +++ b/src/core/ext/xds/xds_route_config.cc @@ -595,29 +595,21 @@ grpc_error_handle RetryPolicyParse( "RouteAction RetryPolicy RetryBackoff missing base interval.")); } else { retry_to_return.retry_back_off.base_interval = - Duration::Parse(base_interval); + ParseDuration(base_interval); } const google_protobuf_Duration* max_interval = envoy_config_route_v3_RetryPolicy_RetryBackOff_max_interval(backoff); Duration max; if (max_interval != nullptr) { - max = Duration::Parse(max_interval); + max = ParseDuration(max_interval); } else { - // if max interval is not set, it is 10x the base, if the value in nanos - // can yield another second, adjust the value in seconds accordingly. - max.seconds = retry_to_return.retry_back_off.base_interval.seconds * 10; - max.nanos = retry_to_return.retry_back_off.base_interval.nanos * 10; - if (max.nanos > 1000000000) { - max.seconds += max.nanos / 1000000000; - max.nanos = max.nanos % 1000000000; - } + // if max interval is not set, it is 10x the base. + max = 10 * retry_to_return.retry_back_off.base_interval; } retry_to_return.retry_back_off.max_interval = max; } else { - retry_to_return.retry_back_off.base_interval.seconds = 0; - retry_to_return.retry_back_off.base_interval.nanos = 25000000; - retry_to_return.retry_back_off.max_interval.seconds = 0; - retry_to_return.retry_back_off.max_interval.nanos = 250000000; + retry_to_return.retry_back_off.base_interval = Duration::Milliseconds(25); + retry_to_return.retry_back_off.max_interval = Duration::Milliseconds(250); } if (errors.empty()) { *retry = retry_to_return; @@ -718,7 +710,7 @@ grpc_error_handle RouteActionParse( max_stream_duration); } if (duration != nullptr) { - route->max_stream_duration = Duration::Parse(duration); + route->max_stream_duration = ParseDuration(duration); } } } diff --git a/src/core/lib/backoff/backoff.cc b/src/core/lib/backoff/backoff.cc index 9c7f4e58a7e..c3b7e8565ab 100644 --- a/src/core/lib/backoff/backoff.cc +++ b/src/core/lib/backoff/backoff.cc @@ -28,20 +28,17 @@ namespace grpc_core { BackOff::BackOff(const Options& options) : options_(options) { Reset(); } -grpc_millis BackOff::NextAttemptTime() { +Timestamp BackOff::NextAttemptTime() { if (initial_) { initial_ = false; return current_backoff_ + ExecCtx::Get()->Now(); } - current_backoff_ = static_cast<grpc_millis>( - std::min(current_backoff_ * options_.multiplier(), - static_cast<double>(options_.max_backoff()))); - const double jitter = - absl::Uniform(rand_gen_, -options_.jitter() * current_backoff_, - options_.jitter() * current_backoff_); - const grpc_millis next_timeout = - static_cast<grpc_millis>(current_backoff_ + jitter); - return next_timeout + ExecCtx::Get()->Now(); + current_backoff_ = std::min(current_backoff_ * options_.multiplier(), + options_.max_backoff()); + const Duration jitter = Duration::FromSecondsAsDouble( + absl::Uniform(rand_gen_, -options_.jitter() * current_backoff_.seconds(), + options_.jitter() * current_backoff_.seconds())); + return ExecCtx::Get()->Now() + current_backoff_ + jitter; } void BackOff::Reset() { diff --git a/src/core/lib/backoff/backoff.h b/src/core/lib/backoff/backoff.h index ee0ff4b3a12..f94c2df22e7 100644 --- a/src/core/lib/backoff/backoff.h +++ b/src/core/lib/backoff/backoff.h @@ -37,7 +37,7 @@ class BackOff { explicit BackOff(const Options& options); /// Returns the time at which the next attempt should 
start. - grpc_millis NextAttemptTime(); + Timestamp NextAttemptTime(); /// Reset the backoff, so the next value returned by NextAttemptTime() /// will be the time of the second attempt (rather than the Nth). @@ -45,7 +45,7 @@ class BackOff { class Options { public: - Options& set_initial_backoff(grpc_millis initial_backoff) { + Options& set_initial_backoff(Duration initial_backoff) { initial_backoff_ = initial_backoff; return *this; } @@ -57,24 +57,24 @@ class BackOff { jitter_ = jitter; return *this; } - Options& set_max_backoff(grpc_millis max_backoff) { + Options& set_max_backoff(Duration max_backoff) { max_backoff_ = max_backoff; return *this; } /// how long to wait after the first failure before retrying - grpc_millis initial_backoff() const { return initial_backoff_; } + Duration initial_backoff() const { return initial_backoff_; } /// factor with which to multiply backoff after a failed retry double multiplier() const { return multiplier_; } /// amount to randomize backoffs double jitter() const { return jitter_; } /// maximum time between retries - grpc_millis max_backoff() const { return max_backoff_; } + Duration max_backoff() const { return max_backoff_; } private: - grpc_millis initial_backoff_; + Duration initial_backoff_; double multiplier_; double jitter_; - grpc_millis max_backoff_; + Duration max_backoff_; }; // class Options private: @@ -82,7 +82,7 @@ class BackOff { absl::BitGen rand_gen_; bool initial_; /// current delay before retries - grpc_millis current_backoff_; + Duration current_backoff_; }; } // namespace grpc_core diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h index e78c2d5db4d..00a09db96fd 100644 --- a/src/core/lib/channel/channel_stack.h +++ b/src/core/lib/channel/channel_stack.h @@ -83,7 +83,7 @@ struct grpc_call_element_args { grpc_call_context_element* context; const grpc_slice& path; gpr_cycle_counter start_time; // Note: not populated in subchannel stack. 
- grpc_millis deadline; + grpc_core::Timestamp deadline; grpc_core::Arena* arena; grpc_core::CallCombiner* call_combiner; }; diff --git a/src/core/lib/channel/channel_trace.cc b/src/core/lib/channel/channel_trace.cc index 9015d0fb2e1..2d9e747c757 100644 --- a/src/core/lib/channel/channel_trace.cc +++ b/src/core/lib/channel/channel_trace.cc @@ -46,8 +46,7 @@ ChannelTrace::TraceEvent::TraceEvent(Severity severity, const grpc_slice& data, RefCountedPtr<BaseNode> referenced_entity) : severity_(severity), data_(data), - timestamp_( - grpc_millis_to_timespec(ExecCtx::Get()->Now(), GPR_CLOCK_REALTIME)), + timestamp_(ExecCtx::Get()->Now().as_timespec(GPR_CLOCK_REALTIME)), next_(nullptr), referenced_entity_(std::move(referenced_entity)), memory_usage_(sizeof(TraceEvent) + grpc_slice_memory_usage(data)) {} @@ -55,8 +54,7 @@ ChannelTrace::TraceEvent::TraceEvent(Severity severity, const grpc_slice& data, ChannelTrace::TraceEvent::TraceEvent(Severity severity, const grpc_slice& data) : severity_(severity), data_(data), - timestamp_( - grpc_millis_to_timespec(ExecCtx::Get()->Now(), GPR_CLOCK_REALTIME)), + timestamp_(ExecCtx::Get()->Now().as_timespec(GPR_CLOCK_REALTIME)), next_(nullptr), memory_usage_(sizeof(TraceEvent) + grpc_slice_memory_usage(data)) {} @@ -72,8 +70,7 @@ ChannelTrace::ChannelTrace(size_t max_event_memory) return; // tracing is disabled if max_event_memory_ == 0 } gpr_mu_init(&tracer_mu_); - time_created_ = - grpc_millis_to_timespec(ExecCtx::Get()->Now(), GPR_CLOCK_REALTIME); + time_created_ = ExecCtx::Get()->Now().as_timespec(GPR_CLOCK_REALTIME); } ChannelTrace::~ChannelTrace() { diff --git a/src/core/lib/channel/handshaker.cc b/src/core/lib/channel/handshaker.cc index 7ebed4b92bb..5ad0db31b1d 100644 --- a/src/core/lib/channel/handshaker.cc +++ b/src/core/lib/channel/handshaker.cc @@ -169,7 +169,7 @@ void HandshakeManager::OnTimeoutFn(void* arg, grpc_error_handle error) { void HandshakeManager::DoHandshake(grpc_endpoint* endpoint, const grpc_channel_args* channel_args, - grpc_millis deadline, + Timestamp deadline, grpc_tcp_server_acceptor* acceptor, grpc_iomgr_cb_func on_handshake_done, void* user_data) { diff --git a/src/core/lib/channel/handshaker.h b/src/core/lib/channel/handshaker.h index ad2606d2e94..30f56239cc0 100644 --- a/src/core/lib/channel/handshaker.h +++ b/src/core/lib/channel/handshaker.h @@ -114,7 +114,7 @@ class HandshakeManager : public RefCounted<HandshakeManager> { /// the necessary clean-up. Otherwise, the callback takes ownership of /// the arguments. void DoHandshake(grpc_endpoint* endpoint, - const grpc_channel_args* channel_args, grpc_millis deadline, + const grpc_channel_args* channel_args, Timestamp deadline, grpc_tcp_server_acceptor* acceptor, grpc_iomgr_cb_func on_handshake_done, void* user_data); diff --git a/src/core/lib/channel/promise_based_filter.h b/src/core/lib/channel/promise_based_filter.h index 96114bba45a..001eac288eb 100644 --- a/src/core/lib/channel/promise_based_filter.h +++ b/src/core/lib/channel/promise_based_filter.h @@ -104,13 +104,13 @@ class BaseCallData { grpc_call_element* elem() const { return elem_; } CallCombiner* call_combiner() const { return call_combiner_; } - grpc_millis deadline() const { return deadline_; } + Timestamp deadline() const { return deadline_; } private: grpc_call_element* const elem_; Arena* const arena_; CallCombiner* const call_combiner_; - const grpc_millis deadline_; + const Timestamp deadline_; }; // Specific call data per channel filter. 
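The channel_trace.cc and handshaker hunks above show the pattern at the iomgr boundary: core code carries Timestamp, and conversion to gpr_timespec happens only at the edge, with the target clock made explicit. A hedged sketch of that round trip (illustrative only; SketchRoundTrip is not part of the patch):

    #include "src/core/lib/gprpp/time.h"

    grpc_core::Timestamp SketchRoundTrip(grpc_core::Timestamp deadline) {
      // Timestamp -> wall clock for an external API...
      gpr_timespec ts = deadline.as_timespec(GPR_CLOCK_REALTIME);
      // ...and back, rounding up so the deadline never fires early.
      return grpc_core::Timestamp::FromTimespecRoundUp(ts);
    }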
diff --git a/src/core/lib/gpr/time.cc b/src/core/lib/gpr/time.cc index d796f41422b..630e9872371 100644 --- a/src/core/lib/gpr/time.cc +++ b/src/core/lib/gpr/time.cc @@ -184,7 +184,8 @@ gpr_timespec gpr_time_sub(gpr_timespec a, gpr_timespec b) { dec++; } if (a.tv_sec == INT64_MAX || a.tv_sec == INT64_MIN) { - diff = a; + diff.tv_sec = a.tv_sec; + diff.tv_nsec = a.tv_nsec; } else if (b.tv_sec == INT64_MIN || (b.tv_sec <= 0 && a.tv_sec >= INT64_MAX + b.tv_sec)) { diff = gpr_inf_future(GPR_CLOCK_REALTIME); diff --git a/src/core/lib/gpr/useful.h b/src/core/lib/gpr/useful.h index b151ca37382..dcd457bcaf4 100644 --- a/src/core/lib/gpr/useful.h +++ b/src/core/lib/gpr/useful.h @@ -106,6 +106,20 @@ constexpr size_t HashPointer(T* p, size_t range) { range; } +// Compute a+b. +// If the result is greater than INT64_MAX, return INT64_MAX. +// If the result is less than INT64_MIN, return INT64_MIN. +inline int64_t SaturatingAdd(int64_t a, int64_t b) { + if (a > 0) { + if (b > INT64_MAX - a) { + return INT64_MAX; + } + } else if (b < INT64_MIN - a) { + return INT64_MIN; + } + return a + b; +} + inline uint32_t MixHash32(uint32_t a, uint32_t b) { return RotateLeft(a, 2u) ^ b; } diff --git a/src/core/lib/gprpp/time.cc b/src/core/lib/gprpp/time.cc new file mode 100644 index 00000000000..81b91954c0f --- /dev/null +++ b/src/core/lib/gprpp/time.cc @@ -0,0 +1,186 @@ +// Copyright 2021 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include <grpc/support/port_platform.h> + +#include "src/core/lib/gprpp/time.h" + +#include <atomic> +#include <cstdint> +#include <limits> +#include <string> + +#include <grpc/impl/codegen/gpr_types.h> +#include <grpc/support/log.h> + +namespace grpc_core { + +namespace { + +std::atomic<int64_t> g_process_epoch_seconds; +std::atomic<gpr_cycle_counter> g_process_epoch_cycles; + +GPR_ATTRIBUTE_NOINLINE std::pair<int64_t, gpr_cycle_counter> InitTime() { + gpr_cycle_counter cycles_start; + gpr_cycle_counter cycles_end; + int64_t process_epoch_seconds; + + // Check the current time... if we end up with zero, try again after 100ms. + // If it doesn't advance after sleeping for 1100ms, crash the process. + for (int i = 0; i < 11; i++) { + cycles_start = gpr_get_cycle_counter(); + gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); + cycles_end = gpr_get_cycle_counter(); + process_epoch_seconds = now.tv_sec - 1; + if (process_epoch_seconds != 0) { + break; + } + gpr_sleep_until(gpr_time_add(now, gpr_time_from_millis(100, GPR_TIMESPAN))); + } + + // Time does not seem to be increasing from zero... 
+ GPR_ASSERT(process_epoch_seconds != 0); + int64_t expected = 0; + gpr_cycle_counter process_epoch_cycles = (cycles_start + cycles_end) / 2; + GPR_ASSERT(process_epoch_cycles != 0); + if (!g_process_epoch_seconds.compare_exchange_strong( + expected, process_epoch_seconds, std::memory_order_relaxed, + std::memory_order_relaxed)) { + process_epoch_seconds = expected; + do { + process_epoch_cycles = + g_process_epoch_cycles.load(std::memory_order_relaxed); + } while (process_epoch_cycles == 0); + } else { + g_process_epoch_cycles.store(process_epoch_cycles, + std::memory_order_relaxed); + } + return std::make_pair(process_epoch_seconds, process_epoch_cycles); +} + +gpr_timespec StartTime() { + int64_t sec = g_process_epoch_seconds.load(std::memory_order_relaxed); + if (GPR_UNLIKELY(sec == 0)) sec = InitTime().first; + return {sec, 0, GPR_CLOCK_MONOTONIC}; +} + +gpr_cycle_counter StartCycleCounter() { + gpr_cycle_counter cycles = + g_process_epoch_cycles.load(std::memory_order_relaxed); + if (GPR_UNLIKELY(cycles == 0)) cycles = InitTime().second; + return cycles; +} + +gpr_timespec MillisecondsAsTimespec(int64_t millis, gpr_clock_type clock_type) { + // special-case infinities as Timestamp can be 32bit on some + // platforms while gpr_time_from_millis always takes an int64_t. + if (millis == std::numeric_limits<int64_t>::max()) { + return gpr_inf_future(clock_type); + } + if (millis == std::numeric_limits<int64_t>::min()) { + return gpr_inf_past(clock_type); + } + + if (clock_type == GPR_TIMESPAN) { + return gpr_time_from_millis(millis, GPR_TIMESPAN); + } + return gpr_time_add(gpr_convert_clock_type(StartTime(), clock_type), + gpr_time_from_millis(millis, GPR_TIMESPAN)); +} + +int64_t TimespanToMillisRoundUp(gpr_timespec ts) { + GPR_ASSERT(ts.clock_type == GPR_TIMESPAN); + double x = GPR_MS_PER_SEC * static_cast<double>(ts.tv_sec) + + static_cast<double>(ts.tv_nsec) / GPR_NS_PER_MS + + static_cast<double>(GPR_NS_PER_SEC - 1) / + static_cast<double>(GPR_NS_PER_SEC); + if (x <= static_cast<double>(std::numeric_limits<int64_t>::min())) { + return std::numeric_limits<int64_t>::min(); + } + if (x >= static_cast<double>(std::numeric_limits<int64_t>::max())) { + return std::numeric_limits<int64_t>::max(); + } + return static_cast<int64_t>(x); +} + +int64_t TimespanToMillisRoundDown(gpr_timespec ts) { + GPR_ASSERT(ts.clock_type == GPR_TIMESPAN); + double x = GPR_MS_PER_SEC * static_cast<double>(ts.tv_sec) + + static_cast<double>(ts.tv_nsec) / GPR_NS_PER_MS; + if (x <= static_cast<double>(std::numeric_limits<int64_t>::min())) { + return std::numeric_limits<int64_t>::min(); + } + if (x >= static_cast<double>(std::numeric_limits<int64_t>::max())) { + return std::numeric_limits<int64_t>::max(); + } + return static_cast<int64_t>(x); +} + +} // namespace + +Timestamp Timestamp::FromTimespecRoundUp(gpr_timespec ts) { + return FromMillisecondsAfterProcessEpoch(TimespanToMillisRoundUp(gpr_time_sub( + gpr_convert_clock_type(ts, GPR_CLOCK_MONOTONIC), StartTime()))); +} + +Timestamp Timestamp::FromTimespecRoundDown(gpr_timespec ts) { + return FromMillisecondsAfterProcessEpoch( + TimespanToMillisRoundDown(gpr_time_sub( + gpr_convert_clock_type(ts, GPR_CLOCK_MONOTONIC), StartTime()))); +} + +Timestamp Timestamp::FromCycleCounterRoundUp(gpr_cycle_counter c) { + return Timestamp::FromMillisecondsAfterProcessEpoch( + TimespanToMillisRoundUp(gpr_cycle_counter_sub(c, StartCycleCounter()))); +} + +Timestamp Timestamp::FromCycleCounterRoundDown(gpr_cycle_counter c) { + return Timestamp::FromMillisecondsAfterProcessEpoch( 
+ TimespanToMillisRoundDown(gpr_cycle_counter_sub(c, StartCycleCounter()))); +} + +gpr_timespec Timestamp::as_timespec(gpr_clock_type clock_type) const { + return MillisecondsAsTimespec(millis_, clock_type); +} + +std::string Timestamp::ToString() const { + return "@" + std::to_string(millis_) + "ms"; +} + +gpr_timespec Duration::as_timespec() const { + return MillisecondsAsTimespec(millis_, GPR_TIMESPAN); +} + +Duration Duration::FromTimespec(gpr_timespec t) { + return Duration::Milliseconds(TimespanToMillisRoundUp(t)); +} + +std::string Duration::ToString() const { + return std::to_string(millis_) + "ms"; +} + +void TestOnlySetProcessEpoch(gpr_timespec epoch) { + g_process_epoch_seconds.store( + gpr_convert_clock_type(epoch, GPR_CLOCK_MONOTONIC).tv_sec); +} + +std::ostream& operator<<(std::ostream& out, Timestamp timestamp) { + return out << timestamp.ToString(); +} + +std::ostream& operator<<(std::ostream& out, Duration duration) { + return out << duration.ToString(); +} + +} // namespace grpc_core diff --git a/src/core/lib/gprpp/time.h b/src/core/lib/gprpp/time.h new file mode 100644 index 00000000000..5751f94644e --- /dev/null +++ b/src/core/lib/gprpp/time.h @@ -0,0 +1,292 @@ +// Copyright 2021 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef GRPC_CORE_LIB_GPRPP_TIME_H +#define GRPC_CORE_LIB_GPRPP_TIME_H + +#include <grpc/support/port_platform.h> + +#include <stdint.h> + +#include <cstdint> +#include <limits> +#include <ostream> +#include <string> + +#include <grpc/support/time.h> + +#include "src/core/lib/gpr/time_precise.h" +#include "src/core/lib/gpr/useful.h" + +namespace grpc_core { + +namespace time_detail { + +inline int64_t MillisAdd(int64_t a, int64_t b) { + if (a == std::numeric_limits<int64_t>::max() || + b == std::numeric_limits<int64_t>::max()) { + return std::numeric_limits<int64_t>::max(); + } + if (a == std::numeric_limits<int64_t>::min() || + b == std::numeric_limits<int64_t>::min()) { + return std::numeric_limits<int64_t>::min(); + } + return SaturatingAdd(a, b); +} + +constexpr inline int64_t MillisMul(int64_t millis, int64_t mul) { + return millis >= std::numeric_limits<int64_t>::max() / mul + ? std::numeric_limits<int64_t>::max() + : millis <= std::numeric_limits<int64_t>::min() / mul + ? std::numeric_limits<int64_t>::min() + : millis * mul; +} + +} // namespace time_detail + +class Duration; + +// Timestamp represents a discrete point in time. +class Timestamp { + public: + constexpr Timestamp() = default; + // Constructs a Timestamp from a gpr_timespec. + static Timestamp FromTimespecRoundDown(gpr_timespec t); + static Timestamp FromTimespecRoundUp(gpr_timespec t); + + // Construct a Timestamp from a gpr_cycle_counter. 
+ static Timestamp FromCycleCounterRoundUp(gpr_cycle_counter c); + static Timestamp FromCycleCounterRoundDown(gpr_cycle_counter c); + + static constexpr Timestamp FromMillisecondsAfterProcessEpoch(int64_t millis) { + return Timestamp(millis); + } + + static constexpr Timestamp ProcessEpoch() { return Timestamp(0); } + + static constexpr Timestamp InfFuture() { + return Timestamp(std::numeric_limits<int64_t>::max()); + } + + static constexpr Timestamp InfPast() { + return Timestamp(std::numeric_limits<int64_t>::min()); + } + + constexpr bool operator==(Timestamp other) const { + return millis_ == other.millis_; + } + constexpr bool operator!=(Timestamp other) const { + return millis_ != other.millis_; + } + constexpr bool operator<(Timestamp other) const { + return millis_ < other.millis_; + } + constexpr bool operator<=(Timestamp other) const { + return millis_ <= other.millis_; + } + constexpr bool operator>(Timestamp other) const { + return millis_ > other.millis_; + } + constexpr bool operator>=(Timestamp other) const { + return millis_ >= other.millis_; + } + Timestamp& operator+=(Duration duration); + + bool is_process_epoch() const { return millis_ == 0; } + + uint64_t milliseconds_after_process_epoch() const { return millis_; } + + gpr_timespec as_timespec(gpr_clock_type type) const; + + std::string ToString() const; + + private: + explicit constexpr Timestamp(int64_t millis) : millis_(millis) {} + + int64_t millis_ = 0; +}; + +// Duration represents a span of time. +class Duration { + public: + constexpr Duration() : millis_(0) {} + + static Duration FromTimespec(gpr_timespec t); + static Duration FromSecondsAndNanoseconds(int64_t seconds, int32_t nanos); + static Duration FromSecondsAsDouble(double seconds); + + static constexpr Duration Zero() { return Duration(0); } + + // Smallest representable positive duration.
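A companion sketch for Duration, whose Epsilon() declaration follows this aside: the representation ticks in whole milliseconds, the unit factories chain through MillisMul and therefore pin to Infinity() rather than overflow, and the RoundUp/RoundDown variants round in the direction their names promise. Standalone and illustrative only:

```cpp
#include <cassert>
#include <cstdint>
#include <limits>

#include "src/core/lib/gprpp/time.h"

int main() {
  using grpc_core::Duration;
  // Epsilon() is one millisecond, the tick size of the representation.
  assert(Duration::Epsilon() == Duration::Milliseconds(1));
  // Hours() -> Minutes() -> Seconds() -> Milliseconds() saturates on the way.
  assert(Duration::Hours(std::numeric_limits<int64_t>::max() / 60) ==
         Duration::Infinity());
  // Sub-millisecond inputs round as advertised.
  assert(Duration::NanosecondsRoundUp(1) == Duration::Epsilon());
  assert(Duration::NanosecondsRoundDown(999999) == Duration::Zero());
}
```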
+ static constexpr Duration Epsilon() { return Duration(1); } + + static constexpr Duration NegativeInfinity() { + return Duration(std::numeric_limits<int64_t>::min()); + } + + static constexpr Duration Infinity() { + return Duration(std::numeric_limits<int64_t>::max()); + } + + static constexpr Duration Hours(int64_t hours) { + return Minutes(time_detail::MillisMul(hours, 60)); + } + + static constexpr Duration Minutes(int64_t minutes) { + return Seconds(time_detail::MillisMul(minutes, 60)); + } + + static constexpr Duration Seconds(int64_t seconds) { + return Milliseconds(time_detail::MillisMul(seconds, GPR_MS_PER_SEC)); + } + + static constexpr Duration Milliseconds(int64_t millis) { + return Duration(millis); + } + + static constexpr Duration MicrosecondsRoundDown(int64_t micros) { + return Duration(micros / GPR_US_PER_MS); + } + + static constexpr Duration NanosecondsRoundDown(int64_t nanos) { + return Duration(nanos / GPR_NS_PER_MS); + } + + static constexpr Duration MicrosecondsRoundUp(int64_t micros) { + return Duration(micros / GPR_US_PER_MS + (micros % GPR_US_PER_MS != 0)); + } + + static constexpr Duration NanosecondsRoundUp(int64_t nanos) { + return Duration(nanos / GPR_NS_PER_MS + (nanos % GPR_NS_PER_MS != 0)); + } + + constexpr bool operator==(Duration other) const { + return millis_ == other.millis_; + } + constexpr bool operator!=(Duration other) const { + return millis_ != other.millis_; + } + constexpr bool operator<(Duration other) const { + return millis_ < other.millis_; + } + constexpr bool operator<=(Duration other) const { + return millis_ <= other.millis_; + } + constexpr bool operator>(Duration other) const { + return millis_ > other.millis_; + } + constexpr bool operator>=(Duration other) const { + return millis_ >= other.millis_; + } + Duration& operator/=(int64_t divisor) { + if (millis_ == std::numeric_limits<int64_t>::max()) { + *this = divisor < 0 ? NegativeInfinity() : Infinity(); + } else if (millis_ == std::numeric_limits<int64_t>::min()) { + *this = divisor < 0 ? 
Infinity() : NegativeInfinity(); + } else { + millis_ /= divisor; + } + return *this; + } + Duration& operator+=(Duration other) { + millis_ += other.millis_; + return *this; + } + + constexpr int64_t millis() const { return millis_; } + double seconds() const { return static_cast<double>(millis_) / 1000.0; } + + gpr_timespec as_timespec() const; + + std::string ToString() const; + + private: + explicit constexpr Duration(int64_t millis) : millis_(millis) {} + + int64_t millis_; +}; + +inline Duration operator+(Duration lhs, Duration rhs) { + return Duration::Milliseconds( + time_detail::MillisAdd(lhs.millis(), rhs.millis())); +} + +inline Duration operator-(Duration lhs, Duration rhs) { + return Duration::Milliseconds( + time_detail::MillisAdd(lhs.millis(), -rhs.millis())); +} + +inline Timestamp operator+(Timestamp lhs, Duration rhs) { + return Timestamp::FromMillisecondsAfterProcessEpoch(time_detail::MillisAdd( + lhs.milliseconds_after_process_epoch(), rhs.millis())); +} + +inline Timestamp operator-(Timestamp lhs, Duration rhs) { + return Timestamp::FromMillisecondsAfterProcessEpoch(time_detail::MillisAdd( + lhs.milliseconds_after_process_epoch(), -rhs.millis())); +} + +inline Timestamp operator+(Duration lhs, Timestamp rhs) { return rhs + lhs; } + +inline Duration operator-(Timestamp lhs, Timestamp rhs) { + return Duration::Milliseconds( + time_detail::MillisAdd(lhs.milliseconds_after_process_epoch(), + -rhs.milliseconds_after_process_epoch())); +} + +inline Duration operator*(Duration lhs, double rhs) { + if (lhs == Duration::Infinity()) { + return rhs < 0 ? Duration::NegativeInfinity() : Duration::Infinity(); + } + if (lhs == Duration::NegativeInfinity()) { + return rhs < 0 ? Duration::Infinity() : Duration::NegativeInfinity(); + } + return Duration::FromSecondsAsDouble(lhs.millis() * rhs / 1000.0); +} + +inline Duration operator*(double lhs, Duration rhs) { return rhs * lhs; } + +inline Duration operator/(Duration lhs, int64_t rhs) { + lhs /= rhs; + return lhs; +} + +inline Duration Duration::FromSecondsAndNanoseconds(int64_t seconds, + int32_t nanos) { + return Seconds(seconds) + NanosecondsRoundDown(nanos); +} + +inline Duration Duration::FromSecondsAsDouble(double seconds) { + double millis = seconds * 1000.0; + if (millis >= static_cast<double>(std::numeric_limits<int64_t>::max())) { + return Infinity(); + } + if (millis <= static_cast<double>(std::numeric_limits<int64_t>::min())) { + return NegativeInfinity(); + } + return Milliseconds(static_cast<int64_t>(millis)); +} + +inline Timestamp& Timestamp::operator+=(Duration duration) { + return *this = (*this + duration); +} + +void TestOnlySetProcessEpoch(gpr_timespec epoch); + +std::ostream& operator<<(std::ostream& out, Timestamp timestamp); +std::ostream& operator<<(std::ostream& out, Duration duration); + +} // namespace grpc_core + +#endif // GRPC_CORE_LIB_GPRPP_TIME_H diff --git a/src/core/lib/http/httpcli.cc b/src/core/lib/http/httpcli.cc index 0ab638b274b..45dd0e7d3b9 100644 --- a/src/core/lib/http/httpcli.cc +++ b/src/core/lib/http/httpcli.cc @@ -59,7 +59,7 @@ grpc_httpcli_post_override g_post_override; OrphanablePtr<HttpRequest> HttpRequest::Get( URI uri, const grpc_channel_args* channel_args, grpc_polling_entity* pollent, const grpc_http_request* request, - grpc_millis deadline, grpc_closure* on_done, grpc_http_response* response, + Timestamp deadline, grpc_closure* on_done, grpc_http_response* response, RefCountedPtr<grpc_channel_credentials> channel_creds) { absl::optional<std::function<void()>> 
test_only_generate_response; if (g_get_override != nullptr) { @@ -85,7 +85,7 @@ OrphanablePtr<HttpRequest> HttpRequest::Get( OrphanablePtr<HttpRequest> HttpRequest::Post( URI uri, const grpc_channel_args* channel_args, grpc_polling_entity* pollent, const grpc_http_request* request, - grpc_millis deadline, grpc_closure* on_done, grpc_http_response* response, + Timestamp deadline, grpc_closure* on_done, grpc_http_response* response, RefCountedPtr<grpc_channel_credentials> channel_creds) { absl::optional<std::function<void()>> test_only_generate_response; if (g_post_override != nullptr) { @@ -114,7 +114,7 @@ void HttpRequest::SetOverride(grpc_httpcli_get_override get, HttpRequest::HttpRequest( URI uri, const grpc_slice& request_text, grpc_http_response* response, - grpc_millis deadline, const grpc_channel_args* channel_args, + Timestamp deadline, const grpc_channel_args* channel_args, grpc_closure* on_done, grpc_polling_entity* pollent, const char* name, absl::optional<std::function<void()>> test_only_generate_response, RefCountedPtr<grpc_channel_credentials> channel_creds) diff --git a/src/core/lib/http/httpcli.h b/src/core/lib/http/httpcli.h index dc20afa0d09..a198a9f3c8a 100644 --- a/src/core/lib/http/httpcli.h +++ b/src/core/lib/http/httpcli.h @@ -43,12 +43,12 @@ /* override functions return 1 if they handled the request, 0 otherwise */ typedef int (*grpc_httpcli_get_override)(const grpc_http_request* request, const char* host, const char* path, - grpc_millis deadline, + grpc_core::Timestamp deadline, grpc_closure* on_complete, grpc_http_response* response); typedef int (*grpc_httpcli_post_override)( const grpc_http_request* request, const char* host, const char* path, - const char* body_bytes, size_t body_size, grpc_millis deadline, + const char* body_bytes, size_t body_size, grpc_core::Timestamp deadline, grpc_closure* on_complete, grpc_http_response* response); namespace grpc_core { @@ -81,7 +81,7 @@ class HttpRequest : public InternallyRefCounted<HttpRequest> { // are removed. static OrphanablePtr<HttpRequest> Get( URI uri, const grpc_channel_args* args, grpc_polling_entity* pollent, - const grpc_http_request* request, grpc_millis deadline, + const grpc_http_request* request, Timestamp deadline, grpc_closure* on_done, grpc_http_response* response, RefCountedPtr<grpc_channel_credentials> channel_creds) GRPC_MUST_USE_RESULT; @@ -107,13 +107,13 @@ class HttpRequest : public InternallyRefCounted<HttpRequest> { // Does not support ?var1=val1&var2=val2 in the path. 
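Since the deadline parameter here is now a Timestamp (Post() is declared immediately below), call sites stop doing raw grpc_millis arithmetic and instead offset the ExecCtx clock by a Duration, the same pattern tcp_posix.cc adopts later in this diff. A hedged sketch of the calling convention; the helper name and the 10-second budget are illustrative, and an ExecCtx is assumed to be in scope:

```cpp
#include "src/core/lib/gprpp/time.h"
#include "src/core/lib/iomgr/exec_ctx.h"

// Hypothetical helper: only the deadline expression is the point.
grpc_core::Timestamp HttpDeadlineFromNow() {
  // Requires a grpc_core::ExecCtx on this thread.
  return grpc_core::ExecCtx::Get()->Now() + grpc_core::Duration::Seconds(10);
}
```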
static OrphanablePtr<HttpRequest> Post( URI uri, const grpc_channel_args* args, grpc_polling_entity* pollent, - const grpc_http_request* request, grpc_millis deadline, + const grpc_http_request* request, Timestamp deadline, grpc_closure* on_done, grpc_http_response* response, RefCountedPtr<grpc_channel_credentials> channel_creds) GRPC_MUST_USE_RESULT; HttpRequest(URI uri, const grpc_slice& request_text, - grpc_http_response* response, grpc_millis deadline, + grpc_http_response* response, Timestamp deadline, const grpc_channel_args* channel_args, grpc_closure* on_done, grpc_polling_entity* pollent, const char* name, absl::optional<std::function<void()>> test_only_generate_response, @@ -185,7 +185,7 @@ class HttpRequest : public InternallyRefCounted<HttpRequest> { const URI uri_; const grpc_slice request_text_; - const grpc_millis deadline_; + const Timestamp deadline_; const grpc_channel_args* channel_args_; RefCountedPtr<grpc_channel_credentials> channel_creds_; grpc_closure on_read_; diff --git a/src/core/lib/iomgr/buffer_list.h b/src/core/lib/iomgr/buffer_list.h index 29c54dee4ac..bfa5b193427 100644 --- a/src/core/lib/iomgr/buffer_list.h +++ b/src/core/lib/iomgr/buffer_list.h @@ -82,16 +82,16 @@ struct ConnectionMetrics { absl::optional<uint64_t> sndbuf_limited_usec; }; -struct Timestamp { +struct BufferTimestamp { gpr_timespec time; ConnectionMetrics metrics; /* Metrics collected with this timestamp */ }; struct Timestamps { - Timestamp sendmsg_time; - Timestamp scheduled_time; - Timestamp sent_time; - Timestamp acked_time; + BufferTimestamp sendmsg_time; + BufferTimestamp scheduled_time; + BufferTimestamp sent_time; + BufferTimestamp acked_time; uint32_t byte_offset; /* byte offset relative to the start of the RPC */ diff --git a/src/core/lib/iomgr/ev_apple.cc b/src/core/lib/iomgr/ev_apple.cc index e0a34b5ae8d..1270183ad09 100644 --- a/src/core/lib/iomgr/ev_apple.cc +++ b/src/core/lib/iomgr/ev_apple.cc @@ -221,9 +221,10 @@ static void pollset_global_shutdown(void) { /// these events will eventually trigger the kick. 
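Every pollset below follows the same mechanical rewrite (pollset_work comes next): the free helper grpc_millis_to_timespec(deadline, clock) disappears in favor of a method on the deadline itself. A minimal sketch of the before/after shape; the wrapper function is a placeholder, not part of the patch:

```cpp
#include "src/core/lib/gprpp/time.h"

// Before: gpr_timespec ts = grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME);
// After: the Timestamp renders itself into the requested clock domain.
gpr_timespec DeadlineAsRealtime(grpc_core::Timestamp deadline) {
  return deadline.as_timespec(GPR_CLOCK_REALTIME);
}
```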
static grpc_error_handle pollset_work(grpc_pollset* pollset, grpc_pollset_worker** worker, - grpc_millis deadline) { + grpc_core::Timestamp deadline) { GRPC_POLLING_TRACE("pollset work: %p, worker: %p, deadline: %" PRIu64, - pollset, worker, deadline); + pollset, worker, + deadline.milliseconds_after_process_epoch()); GrpcApplePollset* apple_pollset = reinterpret_cast<GrpcApplePollset*>(pollset); GrpcAppleWorker actual_worker; @@ -241,8 +242,8 @@ static grpc_error_handle pollset_work(grpc_pollset* pollset, while (!actual_worker.kicked && !apple_pollset->is_shutdown) { if (actual_worker.cv.WaitWithDeadline( - &apple_pollset->mu, grpc_core::ToAbslTime(grpc_millis_to_timespec( - deadline, GPR_CLOCK_REALTIME)))) { + &apple_pollset->mu, grpc_core::ToAbslTime(deadline.as_timespec( + GPR_CLOCK_REALTIME)))) { // timed out break; } diff --git a/src/core/lib/iomgr/ev_epoll1_linux.cc b/src/core/lib/iomgr/ev_epoll1_linux.cc index cecf15c6b58..0009d140ca8 100644 --- a/src/core/lib/iomgr/ev_epoll1_linux.cc +++ b/src/core/lib/iomgr/ev_epoll1_linux.cc @@ -639,9 +639,9 @@ static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) { pollset_maybe_finish_shutdown(pollset); } -static int poll_deadline_to_millis_timeout(grpc_millis millis) { - if (millis == GRPC_MILLIS_INF_FUTURE) return -1; - grpc_millis delta = millis - grpc_core::ExecCtx::Get()->Now(); +static int poll_deadline_to_millis_timeout(grpc_core::Timestamp millis) { + if (millis == grpc_core::Timestamp::InfFuture()) return -1; + int64_t delta = (millis - grpc_core::ExecCtx::Get()->Now()).millis(); if (delta > INT_MAX) { return INT_MAX; } else if (delta < 0) { @@ -711,7 +711,8 @@ static grpc_error_handle process_epoll_events(grpc_pollset* /*pollset*/) { NOTE ON SYNCHRONIZATION: At any point of time, only the g_active_poller (i.e the designated poller thread) will be calling this function. 
So there is no need for any synchronization when accessing fields in g_epoll_set */ -static grpc_error_handle do_epoll_wait(grpc_pollset* ps, grpc_millis deadline) { +static grpc_error_handle do_epoll_wait(grpc_pollset* ps, + grpc_core::Timestamp deadline) { GPR_TIMER_SCOPE("do_epoll_wait", 0); int r; @@ -744,7 +745,7 @@ static grpc_error_handle do_epoll_wait(grpc_pollset* ps, grpc_millis deadline) { static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker, grpc_pollset_worker** worker_hdl, - grpc_millis deadline) { + grpc_core::Timestamp deadline) { GPR_TIMER_SCOPE("begin_worker", 0); if (worker_hdl != nullptr) *worker_hdl = worker; worker->initialized_cv = false; @@ -831,7 +832,7 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker, } if (gpr_cv_wait(&worker->cv, &pollset->mu, - grpc_millis_to_timespec(deadline, GPR_CLOCK_MONOTONIC)) && + deadline.as_timespec(GPR_CLOCK_MONOTONIC)) && worker->state == UNKICKED) { /* If gpr_cv_wait returns true (i.e. a timeout), pretend that the worker received a kick */ @@ -1012,7 +1013,7 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker, ensure that it is held by the time the function returns */ static grpc_error_handle pollset_work(grpc_pollset* ps, grpc_pollset_worker** worker_hdl, - grpc_millis deadline) { + grpc_core::Timestamp deadline) { GPR_TIMER_SCOPE("pollset_work", 0); grpc_pollset_worker worker; grpc_error_handle error = GRPC_ERROR_NONE; diff --git a/src/core/lib/iomgr/ev_epollex_linux.cc b/src/core/lib/iomgr/ev_epollex_linux.cc index 38e79bc60f6..630daf133a7 100644 --- a/src/core/lib/iomgr/ev_epollex_linux.cc +++ b/src/core/lib/iomgr/ev_epollex_linux.cc @@ -798,9 +798,9 @@ static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) { *mu = &pollset->mu; } -static int poll_deadline_to_millis_timeout(grpc_millis millis) { - if (millis == GRPC_MILLIS_INF_FUTURE) return -1; - grpc_millis delta = millis - grpc_core::ExecCtx::Get()->Now(); +static int poll_deadline_to_millis_timeout(grpc_core::Timestamp millis) { + if (millis == grpc_core::Timestamp::InfFuture()) return -1; + int64_t delta = (millis - grpc_core::ExecCtx::Get()->Now()).millis(); if (delta > INT_MAX) { return INT_MAX; } else if (delta < 0) { @@ -926,7 +926,8 @@ static void pollset_destroy(grpc_pollset* pollset) { gpr_mu_destroy(&pollset->mu); } -static grpc_error_handle pollable_epoll(pollable* p, grpc_millis deadline) { +static grpc_error_handle pollable_epoll(pollable* p, + grpc_core::Timestamp deadline) { GPR_TIMER_SCOPE("pollable_epoll", 0); int timeout = poll_deadline_to_millis_timeout(deadline); @@ -1001,7 +1002,7 @@ static worker_remove_result worker_remove(grpc_pollset_worker** root_worker, /* Return true if this thread should poll */ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker, grpc_pollset_worker** worker_hdl, - grpc_millis deadline) { + grpc_core::Timestamp deadline) { GPR_TIMER_SCOPE("begin_worker", 0); bool do_poll = (pollset->shutdown_closure == nullptr && !pollset->already_shutdown); @@ -1027,7 +1028,7 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker, } while (do_poll && worker->pollable_obj->root_worker != worker) { if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->mu, - grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) { + deadline.as_timespec(GPR_CLOCK_REALTIME))) { if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) { gpr_log(GPR_INFO, "PS:%p timeout_wait %p w=%p", pollset, worker->pollable_obj, worker); @@ -1099,7 +1100,7 @@
static long sys_gettid(void) { return syscall(__NR_gettid); } ensure that it is held by the time the function returns */ static grpc_error_handle pollset_work(grpc_pollset* pollset, grpc_pollset_worker** worker_hdl, - grpc_millis deadline) { + grpc_core::Timestamp deadline) { GPR_TIMER_SCOPE("pollset_work", 0); #ifdef GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP grpc_pollset_worker* worker = @@ -1116,8 +1117,10 @@ static grpc_error_handle pollset_work(grpc_pollset* pollset, gpr_log(GPR_INFO, "PS:%p work hdl=%p worker=%p now=%" PRId64 " deadline=%" PRId64 " kwp=%d pollable=%p", - pollset, worker_hdl, WORKER_PTR, grpc_core::ExecCtx::Get()->Now(), - deadline, pollset->kicked_without_poller, pollset->active_pollable); + pollset, worker_hdl, WORKER_PTR, + grpc_core::ExecCtx::Get()->Now().milliseconds_after_process_epoch(), + deadline.milliseconds_after_process_epoch(), + pollset->kicked_without_poller, pollset->active_pollable); } static const char* err_desc = "pollset_work"; grpc_error_handle error = GRPC_ERROR_NONE; diff --git a/src/core/lib/iomgr/ev_poll_posix.cc b/src/core/lib/iomgr/ev_poll_posix.cc index 03dec3f5c58..6df4bfbc10f 100644 --- a/src/core/lib/iomgr/ev_poll_posix.cc +++ b/src/core/lib/iomgr/ev_poll_posix.cc @@ -221,7 +221,7 @@ static void pollset_set_add_fd(grpc_pollset_set* pollset_set, grpc_fd* fd); - longer than a millisecond polls are rounded up to the next nearest millisecond to avoid spinning - infinite timeouts are converted to -1 */ -static int poll_deadline_to_millis_timeout(grpc_millis deadline); +static int poll_deadline_to_millis_timeout(grpc_core::Timestamp deadline); /* Allow kick to wakeup the currently polling worker */ #define GRPC_POLLSET_CAN_KICK_SELF 1 @@ -913,7 +913,7 @@ static void work_combine_error(grpc_error_handle* composite, static grpc_error_handle pollset_work(grpc_pollset* pollset, grpc_pollset_worker** worker_hdl, - grpc_millis deadline) { + grpc_core::Timestamp deadline) { GPR_TIMER_SCOPE("pollset_work", 0); grpc_pollset_worker worker; if (worker_hdl) *worker_hdl = &worker; @@ -1106,7 +1106,7 @@ static grpc_error_handle pollset_work(grpc_pollset* pollset, if (queued_work || worker.kicked_specifically) { /* If there's queued work on the list, then set the deadline to be immediate so we get back out of the polling loop quickly */ - deadline = 0; + deadline = grpc_core::Timestamp(); } keep_polling = 1; } @@ -1151,10 +1151,10 @@ static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) { } } -static int poll_deadline_to_millis_timeout(grpc_millis deadline) { - if (deadline == GRPC_MILLIS_INF_FUTURE) return -1; - if (deadline == 0) return 0; - grpc_millis n = deadline - grpc_core::ExecCtx::Get()->Now(); +static int poll_deadline_to_millis_timeout(grpc_core::Timestamp deadline) { + if (deadline == grpc_core::Timestamp::InfFuture()) return -1; + if (deadline.is_process_epoch()) return 0; + int64_t n = (deadline - grpc_core::ExecCtx::Get()->Now()).millis(); if (n < 0) return 0; if (n > INT_MAX) return -1; return static_cast<int>(n); diff --git a/src/core/lib/iomgr/ev_posix.cc b/src/core/lib/iomgr/ev_posix.cc index bae0f15ce03..88b0d695ca4 100644 --- a/src/core/lib/iomgr/ev_posix.cc +++ b/src/core/lib/iomgr/ev_posix.cc @@ -316,13 +316,13 @@ static void pollset_destroy(grpc_pollset* pollset) { static grpc_error_handle pollset_work(grpc_pollset* pollset, grpc_pollset_worker** worker, - grpc_millis deadline) { + grpc_core::Timestamp deadline) { GRPC_POLLING_API_TRACE("pollset_work(%p, %" PRId64 ") begin", pollset, - deadline); + 
deadline.milliseconds_after_process_epoch()); grpc_error_handle err = g_event_engine->pollset_work(pollset, worker, deadline); GRPC_POLLING_API_TRACE("pollset_work(%p, %" PRId64 ") end", pollset, - deadline); + deadline.milliseconds_after_process_epoch()); return err; } diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h index 479921ae169..823f68266b0 100644 --- a/src/core/lib/iomgr/ev_posix.h +++ b/src/core/lib/iomgr/ev_posix.h @@ -66,7 +66,7 @@ typedef struct grpc_event_engine_vtable { void (*pollset_destroy)(grpc_pollset* pollset); grpc_error_handle (*pollset_work)(grpc_pollset* pollset, grpc_pollset_worker** worker, - grpc_millis deadline); + grpc_core::Timestamp deadline); grpc_error_handle (*pollset_kick)(grpc_pollset* pollset, grpc_pollset_worker* specific_worker); void (*pollset_add_fd)(grpc_pollset* pollset, struct grpc_fd* fd); diff --git a/src/core/lib/iomgr/event_engine/pollset.cc b/src/core/lib/iomgr/event_engine/pollset.cc index b8e08d06950..8dac8ef942e 100644 --- a/src/core/lib/iomgr/event_engine/pollset.cc +++ b/src/core/lib/iomgr/event_engine/pollset.cc @@ -42,10 +42,9 @@ void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) { void pollset_destroy(grpc_pollset* pollset) {} grpc_error_handle pollset_work(grpc_pollset* pollset, grpc_pollset_worker** worker, - grpc_millis deadline) { + grpc_core::Timestamp deadline) { (void)worker; - gpr_cv_wait(&g_cv, &g_mu, - grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME)); + gpr_cv_wait(&g_cv, &g_mu, deadline.as_timespec(GPR_CLOCK_REALTIME)); return GRPC_ERROR_NONE; } grpc_error_handle pollset_kick(grpc_pollset* pollset, diff --git a/src/core/lib/iomgr/event_engine/tcp.cc b/src/core/lib/iomgr/event_engine/tcp.cc index 52840ab936a..e9d6e9d8bcc 100644 --- a/src/core/lib/iomgr/event_engine/tcp.cc +++ b/src/core/lib/iomgr/event_engine/tcp.cc @@ -139,7 +139,8 @@ void tcp_connect(grpc_closure* on_connect, grpc_endpoint** endpoint, grpc_slice_allocator* slice_allocator, grpc_pollset_set* /* interested_parties */, const grpc_channel_args* channel_args, - const grpc_resolved_address* addr, grpc_millis deadline) { + const grpc_resolved_address* addr, + grpc_core::Timestamp deadline) { grpc_event_engine_endpoint* ee_endpoint = reinterpret_cast<grpc_event_engine_endpoint*>( grpc_tcp_create(channel_args, grpc_sockaddr_to_uri(addr))); @@ -150,8 +151,8 @@ void tcp_connect(grpc_closure* on_connect, grpc_endpoint** endpoint, absl::make_unique<WrappedInternalSliceAllocator>(slice_allocator); EventEngine::ResolvedAddress ra(reinterpret_cast<const sockaddr*>(addr->addr), addr->len); - absl::Time ee_deadline = grpc_core::ToAbslTime( - grpc_millis_to_timespec(deadline, GPR_CLOCK_MONOTONIC)); + absl::Time ee_deadline = + grpc_core::ToAbslTime(deadline.as_timespec(GPR_CLOCK_MONOTONIC)); ChannelArgsEndpointConfig endpoint_config(channel_args); absl::Status connected = GetDefaultEventEngine()->Connect( ee_on_connect, ra, endpoint_config, std::move(ee_slice_allocator), diff --git a/src/core/lib/iomgr/event_engine/timer.cc b/src/core/lib/iomgr/event_engine/timer.cc index 6ac49f11c93..7b139816bfc 100644 --- a/src/core/lib/iomgr/event_engine/timer.cc +++ b/src/core/lib/iomgr/event_engine/timer.cc @@ -29,11 +29,10 @@ using ::grpc_event_engine::experimental::EventEngine; using ::grpc_event_engine::experimental::GetDefaultEventEngine; using ::grpc_event_engine::experimental::GrpcClosureToCallback; -void timer_init(grpc_timer* timer, grpc_millis deadline, +void timer_init(grpc_timer* timer, grpc_core::Timestamp deadline, 
grpc_closure* closure) { timer->ee_task_handle = GetDefaultEventEngine()->RunAt( - grpc_core::ToAbslTime( - grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME)), + grpc_core::ToAbslTime(deadline.as_timespec(GPR_CLOCK_REALTIME)), GrpcClosureToCallback(closure)); timer->closure = closure; } @@ -47,7 +46,7 @@ void timer_cancel(grpc_timer* timer) { } /* Internal API */ -grpc_timer_check_result timer_check(grpc_millis* /* next */) { +grpc_timer_check_result timer_check(grpc_core::Timestamp* /* next */) { return GRPC_TIMERS_NOT_CHECKED; } void timer_list_init() {} diff --git a/src/core/lib/iomgr/exec_ctx.cc b/src/core/lib/iomgr/exec_ctx.cc index ef4248112e0..8bee36e5ee0 100644 --- a/src/core/lib/iomgr/exec_ctx.cc +++ b/src/core/lib/iomgr/exec_ctx.cc @@ -60,97 +60,12 @@ static void exec_ctx_sched(grpc_closure* closure) { grpc_closure_list_append(grpc_core::ExecCtx::Get()->closure_list(), closure); } -static gpr_timespec g_start_time; -static gpr_cycle_counter g_start_cycle; - -static grpc_millis timespan_to_millis_round_down(gpr_timespec ts) { - double x = GPR_MS_PER_SEC * static_cast<double>(ts.tv_sec) + - static_cast<double>(ts.tv_nsec) / GPR_NS_PER_MS; - if (x < 0) return 0; - if (x > static_cast<double>(GRPC_MILLIS_INF_FUTURE)) { - return GRPC_MILLIS_INF_FUTURE; - } - return static_cast<grpc_millis>(x); -} - -static grpc_millis timespec_to_millis_round_down(gpr_timespec ts) { - return timespan_to_millis_round_down(gpr_time_sub(ts, g_start_time)); -} - -static grpc_millis timespan_to_millis_round_up(gpr_timespec ts) { - double x = GPR_MS_PER_SEC * static_cast<double>(ts.tv_sec) + - static_cast<double>(ts.tv_nsec) / GPR_NS_PER_MS + - static_cast<double>(GPR_NS_PER_SEC - 1) / - static_cast<double>(GPR_NS_PER_SEC); - if (x < 0) return 0; - if (x > static_cast<double>(GRPC_MILLIS_INF_FUTURE)) { - return GRPC_MILLIS_INF_FUTURE; - } - return static_cast<grpc_millis>(x); -} - -static grpc_millis timespec_to_millis_round_up(gpr_timespec ts) { - return timespan_to_millis_round_up(gpr_time_sub(ts, g_start_time)); -} - -gpr_timespec grpc_millis_to_timespec(grpc_millis millis, - gpr_clock_type clock_type) { - // special-case infinities as grpc_millis can be 32bit on some platforms - // while gpr_time_from_millis always takes an int64_t. - if (millis == GRPC_MILLIS_INF_FUTURE) { - return gpr_inf_future(clock_type); - } - if (millis == GRPC_MILLIS_INF_PAST) { - return gpr_inf_past(clock_type); - } - - if (clock_type == GPR_TIMESPAN) { - return gpr_time_from_millis(millis, GPR_TIMESPAN); - } - return gpr_time_add(gpr_convert_clock_type(g_start_time, clock_type), - gpr_time_from_millis(millis, GPR_TIMESPAN)); -} - -grpc_millis grpc_timespec_to_millis_round_down(gpr_timespec ts) { - return timespec_to_millis_round_down( - gpr_convert_clock_type(ts, g_start_time.clock_type)); -} - -grpc_millis grpc_timespec_to_millis_round_up(gpr_timespec ts) { - return timespec_to_millis_round_up( - gpr_convert_clock_type(ts, g_start_time.clock_type)); -} - -grpc_millis grpc_cycle_counter_to_millis_round_down(gpr_cycle_counter cycles) { - return timespan_to_millis_round_down( - gpr_cycle_counter_sub(cycles, g_start_cycle)); -} - -grpc_millis grpc_cycle_counter_to_millis_round_up(gpr_cycle_counter cycles) { - return timespan_to_millis_round_up( - gpr_cycle_counter_sub(cycles, g_start_cycle)); -} - namespace grpc_core { + GPR_THREAD_LOCAL(ExecCtx*) ExecCtx::exec_ctx_; GPR_THREAD_LOCAL(ApplicationCallbackExecCtx*) ApplicationCallbackExecCtx::callback_exec_ctx_; -// WARNING: for testing purposes only! 
-void ExecCtx::TestOnlyGlobalInit(gpr_timespec new_val) { - g_start_time = new_val; -} - -void ExecCtx::GlobalInit(void) { - // gpr_now(GPR_CLOCK_MONOTONIC) incurs a syscall. We don't actually know the - // exact cycle the time was captured, so we use the average of cycles before - // and after the syscall as the starting cycle. - const gpr_cycle_counter cycle_before = gpr_get_cycle_counter(); - g_start_time = gpr_now(GPR_CLOCK_MONOTONIC); - const gpr_cycle_counter cycle_after = gpr_get_cycle_counter(); - g_start_cycle = (cycle_before + cycle_after) / 2; -} - bool ExecCtx::Flush() { bool did_something = false; GPR_TIMER_SCOPE("grpc_exec_ctx_flush", 0); @@ -172,9 +87,9 @@ bool ExecCtx::Flush() { return did_something; } -grpc_millis ExecCtx::Now() { +Timestamp ExecCtx::Now() { if (!now_is_valid_) { - now_ = timespec_to_millis_round_down(gpr_now(GPR_CLOCK_MONOTONIC)); + now_ = Timestamp::FromTimespecRoundDown(gpr_now(GPR_CLOCK_MONOTONIC)); now_is_valid_ = true; } return now_; diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h index a427100ef84..2c9e2147543 100644 --- a/src/core/lib/iomgr/exec_ctx.h +++ b/src/core/lib/iomgr/exec_ctx.h @@ -23,6 +23,7 @@ #include <limits> +#include <grpc/impl/codegen/gpr_types.h> #include <grpc/impl/codegen/grpc_types.h> #include <grpc/support/atm.h> #include <grpc/support/cpu.h> @@ -32,13 +33,9 @@ #include "src/core/lib/gpr/tls.h" #include "src/core/lib/gprpp/debug_location.h" #include "src/core/lib/gprpp/fork.h" +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/iomgr/closure.h" -typedef int64_t grpc_millis; - -#define GRPC_MILLIS_INF_FUTURE INT64_MAX -#define GRPC_MILLIS_INF_PAST INT64_MIN - /** A combiner represents a list of work to be executed later. Forward declared here to avoid a circular dependency with combiner.h. */ typedef struct grpc_combiner grpc_combiner; @@ -57,12 +54,6 @@ typedef struct grpc_combiner grpc_combiner; should not be counted by fork handlers */ #define GRPC_APP_CALLBACK_EXEC_CTX_FLAG_IS_INTERNAL_THREAD 1 -gpr_timespec grpc_millis_to_timespec(grpc_millis millis, gpr_clock_type clock); -grpc_millis grpc_timespec_to_millis_round_down(gpr_timespec ts); -grpc_millis grpc_timespec_to_millis_round_up(gpr_timespec ts); -grpc_millis grpc_cycle_counter_to_millis_round_down(gpr_cycle_counter cycles); -grpc_millis grpc_cycle_counter_to_millis_round_up(gpr_cycle_counter cycles); - namespace grpc_core { class Combiner; /** Execution context. @@ -189,7 +180,7 @@ class ExecCtx { * otherwise refreshes the stored time, sets it valid and returns the new * value. */ - grpc_millis Now(); + Timestamp Now(); /** Invalidates the stored time value. A new time value will be set on calling * Now(). @@ -198,26 +189,18 @@ class ExecCtx { /** To be used only by shutdown code in iomgr */ void SetNowIomgrShutdown() { - now_ = GRPC_MILLIS_INF_FUTURE; + now_ = Timestamp::InfFuture(); now_is_valid_ = true; } /** To be used only for testing. * Sets the now value. */ - void TestOnlySetNow(grpc_millis new_val) { + void TestOnlySetNow(Timestamp new_val) { now_ = new_val; now_is_valid_ = true; } - static void TestOnlyGlobalInit(gpr_timespec new_val); - - /** Global initialization for ExecCtx. Called by iomgr. */ - static void GlobalInit(void); - - /** Global shutdown for ExecCtx. Called by iomgr. */ - static void GlobalShutdown(void) {} - /** Gets pointer to current exec_ctx. 
*/ static ExecCtx* Get() { return exec_ctx_; } @@ -245,7 +228,7 @@ class ExecCtx { unsigned starting_cpu_ = std::numeric_limits<unsigned>::max(); bool now_is_valid_ = false; - grpc_millis now_ = 0; + Timestamp now_; static GPR_THREAD_LOCAL(ExecCtx*) exec_ctx_; ExecCtx* last_exec_ctx_ = Get(); @@ -370,6 +353,7 @@ class ApplicationCallbackExecCtx { grpc_completion_queue_functor* tail_{nullptr}; static GPR_THREAD_LOCAL(ApplicationCallbackExecCtx*) callback_exec_ctx_; }; + } // namespace grpc_core #endif /* GRPC_CORE_LIB_IOMGR_EXEC_CTX_H */ diff --git a/src/core/lib/iomgr/iocp_windows.cc b/src/core/lib/iomgr/iocp_windows.cc index d4c76eca8a2..b5c34907f49 100644 --- a/src/core/lib/iomgr/iocp_windows.cc +++ b/src/core/lib/iomgr/iocp_windows.cc @@ -44,18 +44,18 @@ static gpr_atm g_custom_events = 0; static HANDLE g_iocp; -static DWORD deadline_to_millis_timeout(grpc_millis deadline) { - if (deadline == GRPC_MILLIS_INF_FUTURE) { +static DWORD deadline_to_millis_timeout(grpc_core::Timestamp deadline) { + if (deadline == grpc_core::Timestamp::InfFuture()) { return INFINITE; } - grpc_millis now = grpc_core::ExecCtx::Get()->Now(); + grpc_core::Timestamp now = grpc_core::ExecCtx::Get()->Now(); if (deadline < now) return 0; - grpc_millis timeout = deadline - now; - if (timeout > std::numeric_limits<DWORD>::max()) return INFINITE; - return static_cast<DWORD>(deadline - now); + grpc_core::Duration timeout = deadline - now; + if (timeout.millis() > std::numeric_limits<DWORD>::max()) return INFINITE; + return static_cast<DWORD>(timeout.millis()); } -grpc_iocp_work_status grpc_iocp_work(grpc_millis deadline) { +grpc_iocp_work_status grpc_iocp_work(grpc_core::Timestamp deadline) { BOOL success; DWORD bytes = 0; DWORD flags = 0; @@ -124,7 +124,7 @@ void grpc_iocp_flush(void) { grpc_iocp_work_status work_status; do { - work_status = grpc_iocp_work(GRPC_MILLIS_INF_PAST); + work_status = grpc_iocp_work(grpc_core::Timestamp::InfPast()); } while (work_status == GRPC_IOCP_WORK_KICK || grpc_core::ExecCtx::Get()->Flush()); } @@ -132,7 +132,7 @@ void grpc_iocp_flush(void) { void grpc_iocp_shutdown(void) { grpc_core::ExecCtx exec_ctx; while (gpr_atm_acq_load(&g_custom_events)) { - grpc_iocp_work(GRPC_MILLIS_INF_FUTURE); + grpc_iocp_work(grpc_core::Timestamp::InfFuture()); grpc_core::ExecCtx::Get()->Flush(); } diff --git a/src/core/lib/iomgr/iocp_windows.h b/src/core/lib/iomgr/iocp_windows.h index 68d9de61582..b8e3fb81a46 100644 --- a/src/core/lib/iomgr/iocp_windows.h +++ b/src/core/lib/iomgr/iocp_windows.h @@ -36,7 +36,7 @@ typedef enum { GRPC_IOCP_WORK_KICK } grpc_iocp_work_status; -grpc_iocp_work_status grpc_iocp_work(grpc_millis deadline); +grpc_iocp_work_status grpc_iocp_work(grpc_core::Timestamp deadline); void grpc_iocp_init(void); void grpc_iocp_kick(void); void grpc_iocp_flush(void); diff --git a/src/core/lib/iomgr/pollset.cc b/src/core/lib/iomgr/pollset.cc index ba2a58d85ae..ae9ab6cf6e6 100644 --- a/src/core/lib/iomgr/pollset.cc +++ b/src/core/lib/iomgr/pollset.cc @@ -44,7 +44,7 @@ void grpc_pollset_destroy(grpc_pollset* pollset) { grpc_error_handle grpc_pollset_work(grpc_pollset* pollset, grpc_pollset_worker** worker, - grpc_millis deadline) { + grpc_core::Timestamp deadline) { return grpc_pollset_impl->work(pollset, worker, deadline); } diff --git a/src/core/lib/iomgr/pollset.h b/src/core/lib/iomgr/pollset.h index 7c87a827cdf..b1f1138f8e8 100644 --- a/src/core/lib/iomgr/pollset.h +++ b/src/core/lib/iomgr/pollset.h @@ -45,7 +45,7 @@ typedef struct grpc_pollset_vtable { void (*shutdown)(grpc_pollset* pollset, 
grpc_closure* closure); void (*destroy)(grpc_pollset* pollset); grpc_error_handle (*work)(grpc_pollset* pollset, grpc_pollset_worker** worker, - grpc_millis deadline); + grpc_core::Timestamp deadline); grpc_error_handle (*kick)(grpc_pollset* pollset, grpc_pollset_worker* specific_worker); size_t (*pollset_size)(void); @@ -86,9 +86,9 @@ void grpc_pollset_destroy(grpc_pollset* pollset); May call grpc_closure_list_run on grpc_closure_list, without holding the pollset lock */ -grpc_error_handle grpc_pollset_work(grpc_pollset* pollset, - grpc_pollset_worker** worker, - grpc_millis deadline) GRPC_MUST_USE_RESULT; +grpc_error_handle grpc_pollset_work( + grpc_pollset* pollset, grpc_pollset_worker** worker, + grpc_core::Timestamp deadline) GRPC_MUST_USE_RESULT; /* Break one polling thread out of polling work for this pollset. If specific_worker is non-NULL, then kick that worker. */ diff --git a/src/core/lib/iomgr/pollset_windows.cc b/src/core/lib/iomgr/pollset_windows.cc index f8758a3f73e..ccc71c3617b 100644 --- a/src/core/lib/iomgr/pollset_windows.cc +++ b/src/core/lib/iomgr/pollset_windows.cc @@ -108,7 +108,7 @@ static void pollset_destroy(grpc_pollset* pollset) {} static grpc_error_handle pollset_work(grpc_pollset* pollset, grpc_pollset_worker** worker_hdl, - grpc_millis deadline) { + grpc_core::Timestamp deadline) { grpc_pollset_worker worker; if (worker_hdl) *worker_hdl = &worker; @@ -159,7 +159,7 @@ static grpc_error_handle pollset_work(grpc_pollset* pollset, added_worker = 1; while (!worker.kicked) { if (gpr_cv_wait(&worker.cv, &grpc_polling_mu, - grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) { + deadline.as_timespec(GPR_CLOCK_REALTIME))) { grpc_core::ExecCtx::Get()->InvalidateNow(); break; } diff --git a/src/core/lib/iomgr/tcp_client.cc b/src/core/lib/iomgr/tcp_client.cc index 3441a63c829..f252f207ebe 100644 --- a/src/core/lib/iomgr/tcp_client.cc +++ b/src/core/lib/iomgr/tcp_client.cc @@ -26,7 +26,7 @@ void grpc_tcp_client_connect(grpc_closure* on_connect, grpc_endpoint** endpoint, grpc_pollset_set* interested_parties, const grpc_channel_args* channel_args, const grpc_resolved_address* addr, - grpc_millis deadline) { + grpc_core::Timestamp deadline) { grpc_tcp_client_impl->connect(on_connect, endpoint, interested_parties, channel_args, addr, deadline); } diff --git a/src/core/lib/iomgr/tcp_client.h b/src/core/lib/iomgr/tcp_client.h index 3e785182ea4..1dfb8d25740 100644 --- a/src/core/lib/iomgr/tcp_client.h +++ b/src/core/lib/iomgr/tcp_client.h @@ -33,7 +33,8 @@ typedef struct grpc_tcp_client_vtable { void (*connect)(grpc_closure* on_connect, grpc_endpoint** endpoint, grpc_pollset_set* interested_parties, const grpc_channel_args* channel_args, - const grpc_resolved_address* addr, grpc_millis deadline); + const grpc_resolved_address* addr, + grpc_core::Timestamp deadline); } grpc_tcp_client_vtable; /* Asynchronously connect to an address (specified as (addr, len)), and call @@ -45,7 +46,7 @@ void grpc_tcp_client_connect(grpc_closure* on_connect, grpc_endpoint** endpoint, grpc_pollset_set* interested_parties, const grpc_channel_args* channel_args, const grpc_resolved_address* addr, - grpc_millis deadline); + grpc_core::Timestamp deadline); void grpc_tcp_client_global_init(); diff --git a/src/core/lib/iomgr/tcp_client_cfstream.cc b/src/core/lib/iomgr/tcp_client_cfstream.cc index 7d3f5d63393..b6e89420137 100644 --- a/src/core/lib/iomgr/tcp_client_cfstream.cc +++ b/src/core/lib/iomgr/tcp_client_cfstream.cc @@ -153,7 +153,7 @@ static void CFStreamClientConnect(grpc_closure* closure, 
grpc_endpoint** ep, grpc_pollset_set* interested_parties, const grpc_channel_args* channel_args, const grpc_resolved_address* resolved_addr, - grpc_millis deadline) { + grpc_core::Timestamp deadline) { CFStreamConnect* connect = new CFStreamConnect(); connect->closure = closure; connect->endpoint = ep; diff --git a/src/core/lib/iomgr/tcp_client_posix.cc b/src/core/lib/iomgr/tcp_client_posix.cc index c14233183d6..f146fdec7f7 100644 --- a/src/core/lib/iomgr/tcp_client_posix.cc +++ b/src/core/lib/iomgr/tcp_client_posix.cc @@ -270,7 +270,7 @@ grpc_error_handle grpc_tcp_client_prepare_fd( void grpc_tcp_client_create_from_prepared_fd( grpc_pollset_set* interested_parties, grpc_closure* closure, const int fd, const grpc_channel_args* channel_args, const grpc_resolved_address* addr, - grpc_millis deadline, grpc_endpoint** ep) { + grpc_core::Timestamp deadline, grpc_endpoint** ep) { int err; do { err = connect(fd, reinterpret_cast<const grpc_sockaddr*>(addr->addr), @@ -325,7 +325,7 @@ static void tcp_connect(grpc_closure* closure, grpc_endpoint** ep, grpc_pollset_set* interested_parties, const grpc_channel_args* channel_args, const grpc_resolved_address* addr, - grpc_millis deadline) { + grpc_core::Timestamp deadline) { grpc_resolved_address mapped_addr; int fd = -1; grpc_error_handle error; diff --git a/src/core/lib/iomgr/tcp_client_posix.h b/src/core/lib/iomgr/tcp_client_posix.h index 101e5cb6ba2..eb87b9fb79e 100644 --- a/src/core/lib/iomgr/tcp_client_posix.h +++ b/src/core/lib/iomgr/tcp_client_posix.h @@ -64,6 +64,6 @@ grpc_error_handle grpc_tcp_client_prepare_fd( void grpc_tcp_client_create_from_prepared_fd( grpc_pollset_set* interested_parties, grpc_closure* closure, const int fd, const grpc_channel_args* channel_args, const grpc_resolved_address* addr, - grpc_millis deadline, grpc_endpoint** ep); + grpc_core::Timestamp deadline, grpc_endpoint** ep); #endif /* GRPC_CORE_LIB_IOMGR_TCP_CLIENT_POSIX_H */ diff --git a/src/core/lib/iomgr/tcp_client_windows.cc b/src/core/lib/iomgr/tcp_client_windows.cc index 223688b6de7..2c0b28eec6e 100644 --- a/src/core/lib/iomgr/tcp_client_windows.cc +++ b/src/core/lib/iomgr/tcp_client_windows.cc @@ -125,7 +125,7 @@ static void tcp_connect(grpc_closure* on_done, grpc_endpoint** endpoint, grpc_pollset_set* interested_parties, const grpc_channel_args* channel_args, const grpc_resolved_address* addr, - grpc_millis deadline) { + grpc_core::Timestamp deadline) { SOCKET sock = INVALID_SOCKET; BOOL success; int status; diff --git a/src/core/lib/iomgr/tcp_posix.cc b/src/core/lib/iomgr/tcp_posix.cc index 3c943b0e53d..cb84bbc6854 100644 --- a/src/core/lib/iomgr/tcp_posix.cc +++ b/src/core/lib/iomgr/tcp_posix.cc @@ -461,7 +461,8 @@ static void run_poller(void* bp, grpc_error_handle /*error_ignored*/) { gpr_log(GPR_INFO, "BACKUP_POLLER:%p run", p); } gpr_mu_lock(p->pollset_mu); - grpc_millis deadline = grpc_core::ExecCtx::Get()->Now() + 10 * GPR_MS_PER_SEC; + grpc_core::Timestamp deadline = + grpc_core::ExecCtx::Get()->Now() + grpc_core::Duration::Seconds(10); GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(); GRPC_LOG_IF_ERROR( "backup_poller:pollset_work", diff --git a/src/core/lib/iomgr/timer.cc b/src/core/lib/iomgr/timer.cc index 6506d302abc..39af4198b48 100644 --- a/src/core/lib/iomgr/timer.cc +++ b/src/core/lib/iomgr/timer.cc @@ -28,14 +28,14 @@ void grpc_set_timer_impl(grpc_timer_vtable* vtable) { grpc_timer_impl = vtable; } -void grpc_timer_init(grpc_timer* timer, grpc_millis deadline, +void grpc_timer_init(grpc_timer* timer, grpc_core::Timestamp deadline, grpc_closure* 
closure) { grpc_timer_impl->init(timer, deadline, closure); } void grpc_timer_cancel(grpc_timer* timer) { grpc_timer_impl->cancel(timer); } -grpc_timer_check_result grpc_timer_check(grpc_millis* next) { +grpc_timer_check_result grpc_timer_check(grpc_core::Timestamp* next) { return grpc_timer_impl->check(next); } diff --git a/src/core/lib/iomgr/timer.h b/src/core/lib/iomgr/timer.h index 492d2a5e76e..675df66a149 100644 --- a/src/core/lib/iomgr/timer.h +++ b/src/core/lib/iomgr/timer.h @@ -21,6 +21,8 @@ #include <grpc/support/port_platform.h> +#include <cstdint> + #include <grpc/event_engine/event_engine.h> #include <grpc/support/time.h> @@ -29,7 +31,7 @@ #include "src/core/lib/iomgr/port.h" typedef struct grpc_timer { - grpc_millis deadline; + int64_t deadline; // Uninitialized if not using heap, or INVALID_HEAP_INDEX if not in heap. uint32_t heap_index; bool pending; @@ -47,6 +49,9 @@ typedef struct grpc_timer { }; } grpc_timer; +static_assert(std::is_trivial<grpc_timer>::value, + "grpc_timer is expected to be a trivial type"); + typedef enum { GRPC_TIMERS_NOT_CHECKED, GRPC_TIMERS_CHECKED_AND_EMPTY, @@ -54,11 +59,11 @@ typedef enum { } grpc_timer_check_result; typedef struct grpc_timer_vtable { - void (*init)(grpc_timer* timer, grpc_millis, grpc_closure* closure); + void (*init)(grpc_timer* timer, grpc_core::Timestamp, grpc_closure* closure); void (*cancel)(grpc_timer* timer); /* Internal API */ - grpc_timer_check_result (*check)(grpc_millis* next); + grpc_timer_check_result (*check)(grpc_core::Timestamp* next); void (*list_init)(); void (*list_shutdown)(void); void (*consume_kick)(void); @@ -70,8 +75,8 @@ typedef struct grpc_timer_vtable { application code should check the error to determine how it was invoked. The application callback is also responsible for maintaining information about when to free up any user-level state. Behavior is undefined for a deadline of - GRPC_MILLIS_INF_FUTURE. */ -void grpc_timer_init(grpc_timer* timer, grpc_millis deadline, + grpc_core::Timestamp::InfFuture(). */ +void grpc_timer_init(grpc_timer* timer, grpc_core::Timestamp deadline, grpc_closure* closure); /* Initialize *timer without setting it. This can later be passed through @@ -115,7 +120,7 @@ void grpc_timer_cancel(grpc_timer* timer); *next is never guaranteed to be updated on any given execution; however, with high probability at least one thread in the system will see an update at any time slice. */ -grpc_timer_check_result grpc_timer_check(grpc_millis* next); +grpc_timer_check_result grpc_timer_check(grpc_core::Timestamp* next); void grpc_timer_list_init(); void grpc_timer_list_shutdown(); diff --git a/src/core/lib/iomgr/timer_generic.cc b/src/core/lib/iomgr/timer_generic.cc index 74761e24647..e4e0e6abb44 100644 --- a/src/core/lib/iomgr/timer_generic.cc +++ b/src/core/lib/iomgr/timer_generic.cc @@ -33,6 +33,7 @@ #include "src/core/lib/gpr/spinlock.h" #include "src/core/lib/gpr/tls.h" #include "src/core/lib/gpr/useful.h" +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/iomgr/port.h" #include "src/core/lib/iomgr/time_averaged_stats.h" @@ -61,9 +62,9 @@ struct timer_shard { gpr_mu mu; grpc_time_averaged_stats stats; /* All and only timers with deadlines < this will be in the heap. */ - grpc_millis queue_deadline_cap; + grpc_core::Timestamp queue_deadline_cap; /* The deadline of the next timer due in this shard. */ - grpc_millis min_deadline; + grpc_core::Timestamp min_deadline; /* Index of this timer_shard in the g_shard_queue. 
*/ uint32_t shard_queue_index; /* This holds all timers with deadlines < queue_deadline_cap. Timers in this @@ -214,11 +215,11 @@ static void validate_non_pending_timer(grpc_timer* t) { * has last-seen. This is an optimization to prevent the thread from checking * shared_mutables.min_timer (which requires acquiring shared_mutables.mu lock, * an expensive operation) */ -static GPR_THREAD_LOCAL(grpc_millis) g_last_seen_min_timer; +static GPR_THREAD_LOCAL(int64_t) g_last_seen_min_timer; struct shared_mutables { /* The deadline of the next timer due across all timer shards */ - grpc_millis min_timer; + grpc_core::Timestamp min_timer; /* Allow only one run_some_expired_timers at once */ gpr_spinlock checker_mu; bool initialized; @@ -228,21 +229,15 @@ struct shared_mutables { static struct shared_mutables g_shared_mutables; -static grpc_millis saturating_add(grpc_millis a, grpc_millis b) { - if (a > GRPC_MILLIS_INF_FUTURE - b) { - return GRPC_MILLIS_INF_FUTURE; - } - return a + b; -} - -static grpc_timer_check_result run_some_expired_timers(grpc_millis now, - grpc_millis* next, - grpc_error_handle error); +static grpc_timer_check_result run_some_expired_timers( + grpc_core::Timestamp now, grpc_core::Timestamp* next, + grpc_error_handle error); -static grpc_millis compute_min_deadline(timer_shard* shard) { +static grpc_core::Timestamp compute_min_deadline(timer_shard* shard) { return grpc_timer_heap_is_empty(&shard->heap) - ? saturating_add(shard->queue_deadline_cap, 1) - : grpc_timer_heap_top(&shard->heap)->deadline; + ? shard->queue_deadline_cap + grpc_core::Duration::Epsilon() + : grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch( + grpc_timer_heap_top(&shard->heap)->deadline); } static void timer_list_init() { @@ -280,7 +275,7 @@ static void timer_list_init() { static void timer_list_shutdown() { size_t i; run_some_expired_timers( - GRPC_MILLIS_INF_FUTURE, nullptr, + grpc_core::Timestamp::InfFuture(), nullptr, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Timer list shutdown")); for (i = 0; i < g_num_shards; i++) { timer_shard* shard = &g_shards[i]; @@ -334,12 +329,12 @@ static void note_deadline_change(timer_shard* shard) { void grpc_timer_init_unset(grpc_timer* timer) { timer->pending = false; } -static void timer_init(grpc_timer* timer, grpc_millis deadline, +static void timer_init(grpc_timer* timer, grpc_core::Timestamp deadline, grpc_closure* closure) { int is_first_timer = 0; timer_shard* shard = &g_shards[grpc_core::HashPointer(timer, g_num_shards)]; timer->closure = closure; - timer->deadline = deadline; + timer->deadline = deadline.milliseconds_after_process_epoch(); #ifndef NDEBUG timer->hash_table_next = nullptr; @@ -347,8 +342,9 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline, if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_trace)) { gpr_log(GPR_INFO, "TIMER %p: SET %" PRId64 " now %" PRId64 " call %p[%p]", - timer, deadline, grpc_core::ExecCtx::Get()->Now(), closure, - closure->cb); + timer, deadline.milliseconds_after_process_epoch(), + grpc_core::ExecCtx::Get()->Now().milliseconds_after_process_epoch(), + closure, closure->cb); } if (!g_shared_mutables.initialized) { @@ -362,7 +358,7 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline, gpr_mu_lock(&shard->mu); timer->pending = true; - grpc_millis now = grpc_core::ExecCtx::Get()->Now(); + grpc_core::Timestamp now = grpc_core::ExecCtx::Get()->Now(); if (deadline <= now) { timer->pending = false; grpc_core::ExecCtx::Run(DEBUG_LOCATION, timer->closure, GRPC_ERROR_NONE); @@ -371,8 +367,8 @@ static void 
timer_init(grpc_timer* timer, grpc_millis deadline, return; } - grpc_time_averaged_stats_add_sample( - &shard->stats, static_cast<double>(deadline - now) / 1000.0); + grpc_time_averaged_stats_add_sample(&shard->stats, + (deadline - now).millis() / 1000.0); ADD_TO_HASH_TABLE(timer); @@ -386,7 +382,8 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline, gpr_log(GPR_INFO, " .. add to shard %d with queue_deadline_cap=%" PRId64 " => is_first_timer=%s", - static_cast<int>(shard - g_shards), shard->queue_deadline_cap, + static_cast<int>(shard - g_shards), + shard->queue_deadline_cap.milliseconds_after_process_epoch(), is_first_timer ? "true" : "false"); } gpr_mu_unlock(&shard->mu); @@ -406,10 +403,10 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline, gpr_mu_lock(&g_shared_mutables.mu); if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_trace)) { gpr_log(GPR_INFO, " .. old shard min_deadline=%" PRId64, - shard->min_deadline); + shard->min_deadline.milliseconds_after_process_epoch()); } if (deadline < shard->min_deadline) { - grpc_millis old_min_deadline = g_shard_queue[0]->min_deadline; + grpc_core::Timestamp old_min_deadline = g_shard_queue[0]->min_deadline; shard->min_deadline = deadline; note_deadline_change(shard); if (shard->shard_queue_index == 0 && deadline < old_min_deadline) { @@ -419,10 +416,10 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline, // (&g_shared_mutables.min_timer) is a (long long *). The cast should be // safe since we know that both are pointer types and 64-bit wide. gpr_atm_no_barrier_store((gpr_atm*)(&g_shared_mutables.min_timer), - deadline); + deadline.milliseconds_after_process_epoch()); #else // On 32-bit systems, gpr_atm_no_barrier_store does not work on 64-bit - // types (like grpc_millis). So all reads and writes to + // types (like grpc_core::Timestamp). So all reads and writes to // g_shared_mutables.min_timer varialbe under g_shared_mutables.mu g_shared_mutables.min_timer = deadline; #endif @@ -473,7 +470,7 @@ static void timer_cancel(grpc_timer* timer) { 'queue_deadline_cap') into into shard->heap. Returns 'true' if shard->heap has at least ONE element REQUIRES: shard->mu locked */ -static bool refill_heap(timer_shard* shard, grpc_millis now) { +static bool refill_heap(timer_shard* shard, grpc_core::Timestamp now) { /* Compute the new queue window width and bound by the limits: */ double computed_deadline_delta = grpc_time_averaged_stats_update_average(&shard->stats) * @@ -485,20 +482,24 @@ static bool refill_heap(timer_shard* shard, grpc_millis now) { /* Compute the new cap and put all timers under it into the queue: */ shard->queue_deadline_cap = - saturating_add(std::max(now, shard->queue_deadline_cap), - static_cast<grpc_millis>(deadline_delta * 1000.0)); + std::max(now, shard->queue_deadline_cap) + + grpc_core::Duration::FromSecondsAsDouble(deadline_delta); if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) { gpr_log(GPR_INFO, " .. shard[%d]->queue_deadline_cap --> %" PRId64, - static_cast<int>(shard - g_shards), shard->queue_deadline_cap); + static_cast<int>(shard - g_shards), + shard->queue_deadline_cap.milliseconds_after_process_epoch()); } for (timer = shard->list.next; timer != &shard->list; timer = next) { next = timer->next; + auto timer_deadline = + grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch( + timer->deadline); - if (timer->deadline < shard->queue_deadline_cap) { + if (timer_deadline < shard->queue_deadline_cap) { if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) { gpr_log(GPR_INFO, " .. 
add timer with deadline %" PRId64 " to heap", - timer->deadline); + timer_deadline.milliseconds_after_process_epoch()); } list_remove(timer); grpc_timer_heap_add(&shard->heap, timer); @@ -510,7 +511,7 @@ static bool refill_heap(timer_shard* shard, grpc_millis now) { /* This pops the next non-cancelled timer with deadline <= now from the queue, or returns NULL if there isn't one. REQUIRES: shard->mu locked */ -static grpc_timer* pop_one(timer_shard* shard, grpc_millis now) { +static grpc_timer* pop_one(timer_shard* shard, grpc_core::Timestamp now) { grpc_timer* timer; for (;;) { if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) { @@ -523,15 +524,19 @@ static grpc_timer* pop_one(timer_shard* shard, grpc_millis now) { if (!refill_heap(shard, now)) return nullptr; } timer = grpc_timer_heap_top(&shard->heap); + auto timer_deadline = + grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch( + timer->deadline); if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) { gpr_log(GPR_INFO, " .. check top timer deadline=%" PRId64 " now=%" PRId64, - timer->deadline, now); + timer_deadline.milliseconds_after_process_epoch(), + now.milliseconds_after_process_epoch()); } - if (timer->deadline > now) return nullptr; + if (timer_deadline > now) return nullptr; if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_trace)) { gpr_log(GPR_INFO, "TIMER %p: FIRE %" PRId64 "ms late", timer, - now - timer->deadline); + (now - timer_deadline).millis()); } timer->pending = false; grpc_timer_heap_pop(&shard->heap); @@ -540,8 +545,8 @@ static grpc_timer* pop_one(timer_shard* shard, grpc_millis now) { } /* REQUIRES: shard->mu unlocked */ -static size_t pop_timers(timer_shard* shard, grpc_millis now, - grpc_millis* new_min_deadline, +static size_t pop_timers(timer_shard* shard, grpc_core::Timestamp now, + grpc_core::Timestamp* new_min_deadline, grpc_error_handle error) { size_t n = 0; grpc_timer* timer; @@ -562,7 +567,8 @@ static size_t pop_timers(timer_shard* shard, grpc_millis now, } static grpc_timer_check_result run_some_expired_timers( - grpc_millis now, grpc_millis* next, grpc_error_handle error) { + grpc_core::Timestamp now, grpc_core::Timestamp* next, + grpc_error_handle error) { grpc_timer_check_result result = GRPC_TIMERS_NOT_CHECKED; #if GPR_ARCH_64 @@ -570,17 +576,18 @@ static grpc_timer_check_result run_some_expired_timers( // mac platforms complaining that gpr_atm* is (long *) while // (&g_shared_mutables.min_timer) is a (long long *). The cast should be // safe since we know that both are pointer types and 64-bit wide - grpc_millis min_timer = static_cast<grpc_millis>( - gpr_atm_no_barrier_load((gpr_atm*)(&g_shared_mutables.min_timer))); + grpc_core::Timestamp min_timer = + grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch( + gpr_atm_no_barrier_load((gpr_atm*)(&g_shared_mutables.min_timer))); #else // On 32-bit systems, gpr_atm_no_barrier_load does not work on 64-bit types - // (like grpc_millis). So all reads and writes to g_shared_mutables.min_timer - // are done under g_shared_mutables.mu + // (like grpc_core::Timestamp). 
So all reads and writes to + // g_shared_mutables.min_timer are done under g_shared_mutables.mu gpr_mu_lock(&g_shared_mutables.mu); - grpc_millis min_timer = g_shared_mutables.min_timer; + grpc_core::Timestamp min_timer = g_shared_mutables.min_timer; gpr_mu_unlock(&g_shared_mutables.mu); #endif - g_last_seen_min_timer = min_timer; + g_last_seen_min_timer = min_timer.milliseconds_after_process_epoch(); if (now < min_timer) { if (next != nullptr) *next = std::min(*next, min_timer); @@ -592,15 +599,16 @@ static grpc_timer_check_result run_some_expired_timers( result = GRPC_TIMERS_CHECKED_AND_EMPTY; if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) { - gpr_log(GPR_INFO, " .. shard[%d]->min_deadline = %" PRId64, - static_cast<int>(g_shard_queue[0] - g_shards), - g_shard_queue[0]->min_deadline); + gpr_log( + GPR_INFO, " .. shard[%d]->min_deadline = %" PRId64, + static_cast<int>(g_shard_queue[0] - g_shards), + g_shard_queue[0]->min_deadline.milliseconds_after_process_epoch()); } while (g_shard_queue[0]->min_deadline < now || - (now != GRPC_MILLIS_INF_FUTURE && + (now != grpc_core::Timestamp::InfFuture() && g_shard_queue[0]->min_deadline == now)) { - grpc_millis new_min_deadline; + grpc_core::Timestamp new_min_deadline; /* For efficiency, we pop as many available timers as we can from the shard. This may violate perfect timer deadline ordering, but that @@ -610,12 +618,15 @@ static grpc_timer_check_result run_some_expired_timers( } if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) { - gpr_log(GPR_INFO, - " .. result --> %d" - ", shard[%d]->min_deadline %" PRId64 " --> %" PRId64 - ", now=%" PRId64, - result, static_cast<int>(g_shard_queue[0] - g_shards), - g_shard_queue[0]->min_deadline, new_min_deadline, now); + gpr_log( + GPR_INFO, + " .. result --> %d" + ", shard[%d]->min_deadline %" PRId64 " --> %" PRId64 + ", now=%" PRId64, + result, static_cast<int>(g_shard_queue[0] - g_shards), + g_shard_queue[0]->min_deadline.milliseconds_after_process_epoch(), + new_min_deadline.milliseconds_after_process_epoch(), + now.milliseconds_after_process_epoch()); } /* An grpc_timer_init() on the shard could intervene here, adding a new @@ -636,11 +647,12 @@ static grpc_timer_check_result run_some_expired_timers( // mac platforms complaining that gpr_atm* is (long *) while // (&g_shared_mutables.min_timer) is a (long long *). The cast should be // safe since we know that both are pointer types and 64-bit wide - gpr_atm_no_barrier_store((gpr_atm*)(&g_shared_mutables.min_timer), - g_shard_queue[0]->min_deadline); + gpr_atm_no_barrier_store( + (gpr_atm*)(&g_shared_mutables.min_timer), + g_shard_queue[0]->min_deadline.milliseconds_after_process_epoch()); #else // On 32-bit systems, gpr_atm_no_barrier_store does not work on 64-bit - // types (like grpc_millis). So all reads and writes to + // types (like grpc_core::Timestamp). 
So all reads and writes to // g_shared_mutables.min_timer are done under g_shared_mutables.mu g_shared_mutables.min_timer = g_shard_queue[0]->min_deadline; #endif @@ -653,13 +665,15 @@ static grpc_timer_check_result run_some_expired_timers( return result; } -static grpc_timer_check_result timer_check(grpc_millis* next) { +static grpc_timer_check_result timer_check(grpc_core::Timestamp* next) { // prelude - grpc_millis now = grpc_core::ExecCtx::Get()->Now(); + grpc_core::Timestamp now = grpc_core::ExecCtx::Get()->Now(); /* fetch from a thread-local first: this avoids contention on a globally mutable cacheline in the common case */ - grpc_millis min_timer = g_last_seen_min_timer; + grpc_core::Timestamp min_timer = + grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch( + g_last_seen_min_timer); if (now < min_timer) { if (next != nullptr) { @@ -667,13 +681,14 @@ static grpc_timer_check_result timer_check(grpc_millis* next) { } if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) { gpr_log(GPR_INFO, "TIMER CHECK SKIP: now=%" PRId64 " min_timer=%" PRId64, - now, min_timer); + now.milliseconds_after_process_epoch(), + min_timer.milliseconds_after_process_epoch()); } return GRPC_TIMERS_CHECKED_AND_EMPTY; } grpc_error_handle shutdown_error = - now != GRPC_MILLIS_INF_FUTURE + now != grpc_core::Timestamp::InfFuture() ? GRPC_ERROR_NONE : GRPC_ERROR_CREATE_FROM_STATIC_STRING("Shutting down timer system"); @@ -683,18 +698,22 @@ static grpc_timer_check_result timer_check(grpc_millis* next) { if (next == nullptr) { next_str = "NULL"; } else { - next_str = absl::StrCat(*next); + next_str = absl::StrCat(next->milliseconds_after_process_epoch()); } #if GPR_ARCH_64 - gpr_log(GPR_INFO, - "TIMER CHECK BEGIN: now=%" PRId64 " next=%s tls_min=%" PRId64 - " glob_min=%" PRId64, - now, next_str.c_str(), min_timer, - static_cast<grpc_millis>(gpr_atm_no_barrier_load( - (gpr_atm*)(&g_shared_mutables.min_timer)))); + gpr_log( + GPR_INFO, + "TIMER CHECK BEGIN: now=%" PRId64 " next=%s tls_min=%" PRId64 + " glob_min=%" PRId64, + now.milliseconds_after_process_epoch(), next_str.c_str(), + min_timer.milliseconds_after_process_epoch(), + grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch( + gpr_atm_no_barrier_load((gpr_atm*)(&g_shared_mutables.min_timer))) + .milliseconds_after_process_epoch()); #else gpr_log(GPR_INFO, "TIMER CHECK BEGIN: now=%" PRId64 " next=%s min=%" PRId64, - now, next_str.c_str(), min_timer); + now.milliseconds_after_process_epoch(), next_str.c_str(), + min_timer.milliseconds_after_process_epoch()); #endif } // actual code @@ -706,7 +725,7 @@ static grpc_timer_check_result timer_check(grpc_millis* next) { if (next == nullptr) { next_str = "NULL"; } else { - next_str = absl::StrCat(*next); + next_str = absl::StrCat(next->milliseconds_after_process_epoch()); } gpr_log(GPR_INFO, "TIMER CHECK END: r=%d; next=%s", r, next_str.c_str()); } diff --git a/src/core/lib/iomgr/timer_manager.cc b/src/core/lib/iomgr/timer_manager.cc index 317e46344c1..5cb236c913f 100644 --- a/src/core/lib/iomgr/timer_manager.cc +++ b/src/core/lib/iomgr/timer_manager.cc @@ -56,7 +56,7 @@ static bool g_kicked; static bool g_has_timed_waiter; // the deadline of the current timed waiter thread (only relevant if // g_has_timed_waiter is true) -static grpc_millis g_timed_waiter_deadline; +static grpc_core::Timestamp g_timed_waiter_deadline; // generation counter to track which thread is waiting for the next timer static uint64_t g_timed_waiter_generation; // number of timer wakeups @@ -143,7 +143,7 @@ static void run_some_timers() { 
// wait until 'next' (or forever if there is already a timed waiter in the pool) // returns true if the thread should continue executing (false if it should // shutdown) -static bool wait_until(grpc_millis next) { +static bool wait_until(grpc_core::Timestamp next) { gpr_mu_lock(&g_mu); // if we're not threaded anymore, leave if (!g_threaded) { @@ -177,28 +177,29 @@ static bool wait_until(grpc_millis next) { unless their 'next' is earlier than the current timed-waiter's deadline (in which case the thread with earlier 'next' takes over as the new timed waiter) */ - if (next != GRPC_MILLIS_INF_FUTURE) { + if (next != grpc_core::Timestamp::InfFuture()) { if (!g_has_timed_waiter || (next < g_timed_waiter_deadline)) { my_timed_waiter_generation = ++g_timed_waiter_generation; g_has_timed_waiter = true; g_timed_waiter_deadline = next; if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) { - grpc_millis wait_time = next - grpc_core::ExecCtx::Get()->Now(); - gpr_log(GPR_INFO, "sleep for a %" PRId64 " milliseconds", wait_time); + grpc_core::Duration wait_time = + next - grpc_core::ExecCtx::Get()->Now(); + gpr_log(GPR_INFO, "sleep for %" PRId64 " milliseconds", + wait_time.millis()); } } else { // g_timed_waiter == true && next >= g_timed_waiter_deadline - next = GRPC_MILLIS_INF_FUTURE; + next = grpc_core::Timestamp::InfFuture(); } } if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace) && - next == GRPC_MILLIS_INF_FUTURE) { + next == grpc_core::Timestamp::InfFuture()) { gpr_log(GPR_INFO, "sleep until kicked"); } - gpr_cv_wait(&g_cv_wait, &g_mu, - grpc_millis_to_timespec(next, GPR_CLOCK_MONOTONIC)); + gpr_cv_wait(&g_cv_wait, &g_mu, next.as_timespec(GPR_CLOCK_MONOTONIC)); if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) { gpr_log(GPR_INFO, "wait ended: was_timed:%d kicked:%d", @@ -211,7 +212,7 @@ static bool wait_until(grpc_millis next) { if (my_timed_waiter_generation == g_timed_waiter_generation) { ++g_wakeups; g_has_timed_waiter = false; - g_timed_waiter_deadline = GRPC_MILLIS_INF_FUTURE; + g_timed_waiter_deadline = grpc_core::Timestamp::InfFuture(); } } @@ -228,7 +229,7 @@ static bool wait_until(grpc_millis next) { static void timer_main_loop() { for (;;) { - grpc_millis next = GRPC_MILLIS_INF_FUTURE; + grpc_core::Timestamp next = grpc_core::Timestamp::InfFuture(); grpc_core::ExecCtx::Get()->InvalidateNow(); // check timer state, updates next to the next time to run a check @@ -249,7 +250,7 @@ static void timer_main_loop() { if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) { gpr_log(GPR_INFO, "timers not checked: expect another thread to"); } - next = GRPC_MILLIS_INF_FUTURE; + next = grpc_core::Timestamp::InfFuture(); ABSL_FALLTHROUGH_INTENDED; case GRPC_TIMERS_CHECKED_AND_EMPTY: if (!wait_until(next)) { @@ -306,7 +307,7 @@ void grpc_timer_manager_init(void) { g_completed_threads = nullptr; g_has_timed_waiter = false; - g_timed_waiter_deadline = GRPC_MILLIS_INF_FUTURE; + g_timed_waiter_deadline = grpc_core::Timestamp::InfFuture(); start_threads(); } @@ -354,7 +355,7 @@ void grpc_kick_poller(void) { gpr_mu_lock(&g_mu); g_kicked = true; g_has_timed_waiter = false; - g_timed_waiter_deadline = GRPC_MILLIS_INF_FUTURE; + g_timed_waiter_deadline = grpc_core::Timestamp::InfFuture(); ++g_timed_waiter_generation; gpr_cv_signal(&g_cv_wait); gpr_mu_unlock(&g_mu); diff --git a/src/core/lib/json/json_util.cc b/src/core/lib/json/json_util.cc index 3aff1824051..168070bd08a 100644 --- a/src/core/lib/json/json_util.cc +++ b/src/core/lib/json/json_util.cc @@ -26,7 +26,7 @@ namespace grpc_core { -bool
ParseDurationFromJson(const Json& field, grpc_millis* duration) { +bool ParseDurationFromJson(const Json& field, Duration* duration) { if (field.type() != Json::Type::STRING) return false; size_t len = field.string_value().size(); if (field.string_value()[len - 1] != 's') return false; @@ -51,7 +51,7 @@ bool ParseDurationFromJson(const Json& field, grpc_millis* duration) { int seconds = decimal_point == buf.get() ? 0 : gpr_parse_nonnegative_int(buf.get()); if (seconds == -1) return false; - *duration = seconds * GPR_MS_PER_SEC + nanos / GPR_NS_PER_MS; + *duration = Duration::FromSecondsAndNanoseconds(seconds, nanos); return true; } @@ -99,7 +99,7 @@ bool ExtractJsonObject(const Json& json, absl::string_view field_name, bool ParseJsonObjectFieldAsDuration(const Json::Object& object, absl::string_view field_name, - grpc_millis* output, + Duration* output, std::vector<grpc_error_handle>* error_list, bool required) { // TODO(roth): Once we can use C++14 heterogenous lookups, stop @@ -113,7 +113,7 @@ bool ParseJsonObjectFieldAsDuration(const Json::Object& object, return false; } if (!ParseDurationFromJson(it->second, output)) { - *output = GRPC_MILLIS_INF_PAST; + *output = Duration::NegativeInfinity(); error_list->push_back(GRPC_ERROR_CREATE_FROM_CPP_STRING( absl::StrCat("field:", field_name, " error:type should be STRING of the form given by " diff --git a/src/core/lib/json/json_util.h b/src/core/lib/json/json_util.h index 7b9fdddd79d..29de52d8173 100644 --- a/src/core/lib/json/json_util.h +++ b/src/core/lib/json/json_util.h @@ -33,7 +33,7 @@ namespace grpc_core { // proto message, as per: // https://developers.google.com/protocol-buffers/docs/proto3#json // Returns true on success, false otherwise. -bool ParseDurationFromJson(const Json& field, grpc_millis* duration); +bool ParseDurationFromJson(const Json& field, Duration* duration); // // Helper functions for extracting types from JSON. @@ -145,7 +145,7 @@ bool ParseJsonObjectField(const Json::Object& object, // Alternative to ParseJsonObjectField() for duration-value fields. bool ParseJsonObjectFieldAsDuration(const Json::Object& object, absl::string_view field_name, - grpc_millis* output, + Duration* output, std::vector<grpc_error_handle>* error_list, bool required = true); diff --git a/src/core/lib/promise/sleep.cc b/src/core/lib/promise/sleep.cc index df58d83f435..c28f2df04e3 100644 --- a/src/core/lib/promise/sleep.cc +++ b/src/core/lib/promise/sleep.cc @@ -18,7 +18,7 @@ namespace grpc_core { -Sleep::Sleep(grpc_millis deadline) : state_(new State(deadline)) { +Sleep::Sleep(Timestamp deadline) : state_(new State(deadline)) { GRPC_CLOSURE_INIT(&state_->on_timer, &OnTimer, state_, nullptr); } diff --git a/src/core/lib/promise/sleep.h b/src/core/lib/promise/sleep.h index 8cf9b15fe94..0af4915c8c8 100644 --- a/src/core/lib/promise/sleep.h +++ b/src/core/lib/promise/sleep.h @@ -26,7 +26,7 @@ namespace grpc_core { // Promise that sleeps until a deadline and then finishes. 
class Sleep { public: - explicit Sleep(grpc_millis deadline); + explicit Sleep(Timestamp deadline); ~Sleep(); Sleep(const Sleep&) = delete; @@ -46,9 +46,9 @@ class Sleep { enum class Stage { kInitial, kStarted, kDone }; struct State { - explicit State(grpc_millis deadline) : deadline(deadline) {} + explicit State(Timestamp deadline) : deadline(deadline) {} RefCount refs{2}; - const grpc_millis deadline; + const Timestamp deadline; grpc_timer timer; grpc_closure on_timer; Mutex mu; diff --git a/src/core/lib/security/credentials/external/external_account_credentials.cc b/src/core/lib/security/credentials/external/external_account_credentials.cc index 0ff86bb7ad8..cc5165ebbc2 100644 --- a/src/core/lib/security/credentials/external/external_account_credentials.cc +++ b/src/core/lib/security/credentials/external/external_account_credentials.cc @@ -239,7 +239,7 @@ std::string ExternalAccountCredentials::debug_string() { void ExternalAccountCredentials::fetch_oauth2( grpc_credentials_metadata_request* metadata_req, grpc_polling_entity* pollent, grpc_iomgr_cb_func response_cb, - grpc_millis deadline) { + Timestamp deadline) { GPR_ASSERT(ctx_ == nullptr); ctx_ = new HTTPRequestContext(pollent, deadline); metadata_req_ = metadata_req; diff --git a/src/core/lib/security/credentials/external/external_account_credentials.h b/src/core/lib/security/credentials/external/external_account_credentials.h index 21f3c6747a7..c31294d97f3 100644 --- a/src/core/lib/security/credentials/external/external_account_credentials.h +++ b/src/core/lib/security/credentials/external/external_account_credentials.h @@ -61,14 +61,14 @@ class ExternalAccountCredentials // This is a helper struct to pass information between multiple callback based // asynchronous calls. struct HTTPRequestContext { - HTTPRequestContext(grpc_polling_entity* pollent, grpc_millis deadline) + HTTPRequestContext(grpc_polling_entity* pollent, Timestamp deadline) : pollent(pollent), deadline(deadline) {} ~HTTPRequestContext() { grpc_http_response_destroy(&response); } // Contextual parameters passed from // grpc_oauth2_token_fetcher_credentials::fetch_oauth2(). grpc_polling_entity* pollent; - grpc_millis deadline; + Timestamp deadline; // Reusable token fetch http response and closure. grpc_closure closure; @@ -89,7 +89,7 @@ class ExternalAccountCredentials // when grpc_oauth2_token_fetcher_credentials request a new access token. void fetch_oauth2(grpc_credentials_metadata_request* req, grpc_polling_entity* pollent, grpc_iomgr_cb_func cb, - grpc_millis deadline) override; + Timestamp deadline) override; void OnRetrieveSubjectTokenInternal(absl::string_view subject_token, grpc_error_handle error); diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.cc b/src/core/lib/security/credentials/google_default/google_default_credentials.cc index c8c47a531f5..b4830f88f90 100644 --- a/src/core/lib/security/credentials/google_default/google_default_credentials.cc +++ b/src/core/lib/security/credentials/google_default/google_default_credentials.cc @@ -175,7 +175,7 @@ static int is_metadata_server_reachable() { grpc_closure destroy_closure; /* The http call is local. If it takes more than one sec, it is for sure not on compute engine. 
*/ - grpc_millis max_detection_delay = GPR_MS_PER_SEC; + const auto max_detection_delay = grpc_core::Duration::Seconds(1); grpc_pollset* pollset = static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size())); grpc_pollset_init(pollset, &g_polling_mu); @@ -205,7 +205,7 @@ static int is_metadata_server_reachable() { if (!GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(grpc_polling_entity_pollset(&detector.pollent), - &worker, GRPC_MILLIS_INF_FUTURE))) { + &worker, grpc_core::Timestamp::InfFuture()))) { detector.is_done = 1; detector.success = 0; } diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.cc b/src/core/lib/security/credentials/jwt/jwt_verifier.cc index bc29111d3a3..f4bf0055b1c 100644 --- a/src/core/lib/security/credentials/jwt/jwt_verifier.cc +++ b/src/core/lib/security/credentials/jwt/jwt_verifier.cc @@ -387,7 +387,8 @@ void verifier_cb_ctx_destroy(verifier_cb_ctx* ctx) { gpr_timespec grpc_jwt_verifier_clock_skew = {60, 0, GPR_TIMESPAN}; /* Max delay defaults to one minute. */ -grpc_millis grpc_jwt_verifier_max_delay = 60 * GPR_MS_PER_SEC; +grpc_core::Duration grpc_jwt_verifier_max_delay = + grpc_core::Duration::Minutes(1); struct email_key_mapping { char* email_domain; diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.h b/src/core/lib/security/credentials/jwt/jwt_verifier.h index 2d1d3606af6..85883db9b4d 100644 --- a/src/core/lib/security/credentials/jwt/jwt_verifier.h +++ b/src/core/lib/security/credentials/jwt/jwt_verifier.h @@ -82,7 +82,7 @@ struct grpc_jwt_verifier_email_domain_key_url_mapping { }; /* Globals to control the verifier. Not thread-safe. */ extern gpr_timespec grpc_jwt_verifier_clock_skew; -extern grpc_millis grpc_jwt_verifier_max_delay; +extern grpc_core::Duration grpc_jwt_verifier_max_delay; /* The verifier can be created with some custom mappings to help with key discovery in the case where the issuer is an email address. diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc b/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc index d712866dc3a..1faaa56d3a0 100644 --- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc @@ -136,7 +136,7 @@ grpc_credentials_status grpc_oauth2_token_fetcher_credentials_parse_server_response( const grpc_http_response* response, absl::optional<grpc_core::Slice>* token_value, - grpc_millis* token_lifetime) { + grpc_core::Duration* token_lifetime) { char* null_terminated_body = nullptr; grpc_credentials_status status = GRPC_CREDENTIALS_OK; Json json; @@ -203,7 +203,8 @@ grpc_oauth2_token_fetcher_credentials_parse_server_response( goto end; } expires_in = it->second.string_value().c_str(); - *token_lifetime = strtol(expires_in, nullptr, 10) * GPR_MS_PER_SEC; + *token_lifetime = + grpc_core::Duration::Seconds(strtol(expires_in, nullptr, 10)); *token_value = grpc_core::Slice::FromCopiedString( absl::StrCat(token_type, " ", access_token)); status = GRPC_CREDENTIALS_OK; @@ -228,7 +229,7 @@ static void on_oauth2_token_fetcher_http_response(void* user_data, void grpc_oauth2_token_fetcher_credentials::on_http_response( grpc_credentials_metadata_request* r, grpc_error_handle error) { absl::optional<grpc_core::Slice> access_token_value; - grpc_millis token_lifetime = 0; + grpc_core::Duration token_lifetime; grpc_credentials_status status = error == GRPC_ERROR_NONE ? 
grpc_oauth2_token_fetcher_credentials_parse_server_response( @@ -242,11 +243,10 @@ void grpc_oauth2_token_fetcher_credentials::on_http_response( } else { access_token_value_ = absl::nullopt; } - token_expiration_ = - status == GRPC_CREDENTIALS_OK - ? gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), - gpr_time_from_millis(token_lifetime, GPR_TIMESPAN)) - : gpr_inf_past(GPR_CLOCK_MONOTONIC); + token_expiration_ = status == GRPC_CREDENTIALS_OK + ? gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), + token_lifetime.as_timespec()) + : gpr_inf_past(GPR_CLOCK_MONOTONIC); grpc_oauth2_pending_get_request_metadata* pending_request = pending_requests_; pending_requests_ = nullptr; gpr_mu_unlock(&mu_); @@ -278,8 +278,8 @@ bool grpc_oauth2_token_fetcher_credentials::get_request_metadata( grpc_core::CredentialsMetadataArray* md_array, grpc_closure* on_request_metadata, grpc_error_handle* /*error*/) { // Check if we can use the cached token. - grpc_millis refresh_threshold = - GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS * GPR_MS_PER_SEC; + grpc_core::Duration refresh_threshold = + grpc_core::Duration::Seconds(GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS); absl::optional<grpc_core::Slice> cached_access_token_value; gpr_mu_lock(&mu_); if (access_token_value_.has_value() && @@ -378,7 +378,7 @@ class grpc_compute_engine_token_fetcher_credentials void fetch_oauth2(grpc_credentials_metadata_request* metadata_req, grpc_polling_entity* pollent, grpc_iomgr_cb_func response_cb, - grpc_millis deadline) override { + grpc_core::Timestamp deadline) override { grpc_http_header header = {const_cast<char*>("Metadata-Flavor"), const_cast<char*>("Google")}; grpc_http_request request; @@ -438,7 +438,7 @@ grpc_google_refresh_token_credentials:: void grpc_google_refresh_token_credentials::fetch_oauth2( grpc_credentials_metadata_request* metadata_req, grpc_polling_entity* pollent, grpc_iomgr_cb_func response_cb, - grpc_millis deadline) { + grpc_core::Timestamp deadline) { grpc_http_header header = { const_cast<char*>("Content-Type"), const_cast<char*>("application/x-www-form-urlencoded")}; @@ -563,7 +563,7 @@ class StsTokenFetcherCredentials void fetch_oauth2(grpc_credentials_metadata_request* metadata_req, grpc_polling_entity* pollent, grpc_iomgr_cb_func response_cb, - grpc_millis deadline) override { + Timestamp deadline) override { grpc_http_request request; memset(&request, 0, sizeof(grpc_http_request)); grpc_error_handle err = FillBody(&request.body, &request.body_length); diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.h b/src/core/lib/security/credentials/oauth2/oauth2_credentials.h index 1c75130b37a..5faf26b6a84 100644 --- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.h +++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.h @@ -107,7 +107,7 @@ class grpc_oauth2_token_fetcher_credentials : public grpc_call_credentials { protected: virtual void fetch_oauth2(grpc_credentials_metadata_request* req, grpc_polling_entity* pollent, grpc_iomgr_cb_func cb, - grpc_millis deadline) = 0; + grpc_core::Timestamp deadline) = 0; private: int cmp_impl(const grpc_call_credentials* other) const override { @@ -141,7 +141,7 @@ class grpc_google_refresh_token_credentials final protected: void fetch_oauth2(grpc_credentials_metadata_request* req, grpc_polling_entity* pollent, grpc_iomgr_cb_func cb, - grpc_millis deadline) override; + grpc_core::Timestamp deadline) override; private: grpc_auth_refresh_token refresh_token_; @@ -186,7 +186,8 @@ grpc_refresh_token_credentials_create_from_auth_refresh_token( 
grpc_credentials_status grpc_oauth2_token_fetcher_credentials_parse_server_response( const struct grpc_http_response* response, - absl::optional<grpc_core::Slice>* token_value, grpc_millis* token_lifetime); + absl::optional<grpc_core::Slice>* token_value, + grpc_core::Duration* token_lifetime); namespace grpc_core { // Exposed for testing only. This function validates the options, ensuring that diff --git a/src/core/lib/surface/call.cc b/src/core/lib/surface/call.cc index 3a9908bf536..8698d400797 100644 --- a/src/core/lib/surface/call.cc +++ b/src/core/lib/surface/call.cc @@ -208,7 +208,7 @@ struct grpc_call { /* Contexts for various subsystems (security, tracing, ...). */ grpc_call_context_element context[GRPC_CONTEXT_COUNT] = {}; - grpc_millis send_deadline; + grpc_core::Timestamp send_deadline; grpc_core::ManualConstructor<grpc_core::SliceBufferByteStream> sending_stream; @@ -371,7 +371,7 @@ grpc_error_handle grpc_call_create(grpc_call_create_args* args, call->final_op.server.core_server = args->server; } - grpc_millis send_deadline = args->send_deadline; + grpc_core::Timestamp send_deadline = args->send_deadline; bool immediately_cancel = false; if (args->parent != nullptr) { @@ -831,8 +831,9 @@ class PublishToAppEncoder { Append(grpc_core::GrpcPreviousRpcAttemptsMetadata::key(), count); } - void Encode(grpc_core::GrpcRetryPushbackMsMetadata, grpc_millis count) { - Append(grpc_core::GrpcRetryPushbackMsMetadata::key(), count); + void Encode(grpc_core::GrpcRetryPushbackMsMetadata, + grpc_core::Duration count) { + Append(grpc_core::GrpcRetryPushbackMsMetadata::key(), count.millis()); } void Encode(grpc_core::LbTokenMetadata, const grpc_core::Slice& slice) { @@ -1258,7 +1259,7 @@ static void receiving_initial_metadata_ready(void* bctlp, GPR_TIMER_SCOPE("validate_filtered_metadata", 0); validate_filtered_metadata(bctl); - absl::optional<grpc_millis> deadline = + absl::optional<grpc_core::Timestamp> deadline = md->get(grpc_core::GrpcTimeoutMetadata()); if (deadline.has_value() && !call->is_client) { call->send_deadline = *deadline; @@ -1444,7 +1445,8 @@ static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops, // Ignore any te metadata key value pairs specified. call->send_initial_metadata.Remove(grpc_core::TeMetadata()); /* TODO(ctiller): just make these the same variable? */ - if (call->is_client && call->send_deadline != GRPC_MILLIS_INF_FUTURE) { + if (call->is_client && + call->send_deadline != grpc_core::Timestamp::InfFuture()) { call->send_initial_metadata.Set(grpc_core::GrpcTimeoutMetadata(), call->send_deadline); } diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h index 155ce16479e..3f9c5ad7471 100644 --- a/src/core/lib/surface/call.h +++ b/src/core/lib/surface/call.h @@ -49,7 +49,7 @@ typedef struct grpc_call_create_args { absl::optional<grpc_core::Slice> path; absl::optional<grpc_core::Slice> authority; - grpc_millis send_deadline; + grpc_core::Timestamp send_deadline; } grpc_call_create_args; /* Create a new call based on \a args. 
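
Note on the new time types: the surface/call hunks above replace grpc_millis send_deadline with grpc_core::Timestamp and compare against Timestamp::InfFuture() instead of the old GRPC_MILLIS_INF_FUTURE sentinel. The type split is the point of the whole patch: a Timestamp is an absolute instant (milliseconds after the process epoch), a Duration is a relative span, and subtracting two Timestamps yields a Duration. Below is a minimal standalone sketch of that discipline; FakeTimestamp and FakeDuration are hypothetical stand-ins written for illustration, not the real classes from src/core/lib/gprpp/time.h.

#include <cstdint>
#include <cstdio>
#include <limits>

// Stand-in for grpc_core::Duration: a relative span of time.
class FakeDuration {
 public:
  static FakeDuration Milliseconds(int64_t ms) { return FakeDuration(ms); }
  int64_t millis() const { return ms_; }
 private:
  explicit FakeDuration(int64_t ms) : ms_(ms) {}
  int64_t ms_;
};

// Stand-in for grpc_core::Timestamp: an absolute instant, measured in
// milliseconds after the process epoch.
class FakeTimestamp {
 public:
  static FakeTimestamp FromMillisecondsAfterProcessEpoch(int64_t ms) {
    return FakeTimestamp(ms);
  }
  // Typed sentinel replacing GRPC_MILLIS_INF_FUTURE.
  static FakeTimestamp InfFuture() {
    return FakeTimestamp(std::numeric_limits<int64_t>::max());
  }
  bool operator!=(FakeTimestamp other) const { return ms_ != other.ms_; }
  // Timestamp - Timestamp = Duration: the arithmetic used in
  // `next - grpc_core::ExecCtx::Get()->Now()` earlier in this patch.
  FakeDuration operator-(FakeTimestamp other) const {
    return FakeDuration::Milliseconds(ms_ - other.ms_);
  }
 private:
  explicit FakeTimestamp(int64_t ms) : ms_(ms) {}
  int64_t ms_;
};

int main() {
  FakeTimestamp now = FakeTimestamp::FromMillisecondsAfterProcessEpoch(1000);
  FakeTimestamp deadline =
      FakeTimestamp::FromMillisecondsAfterProcessEpoch(6000);
  // Mirrors call_start_batch() above: grpc-timeout is only attached when
  // the client actually has a deadline.
  if (deadline != FakeTimestamp::InfFuture()) {
    std::printf("grpc-timeout would encode %lld ms\n",
                static_cast<long long>((deadline - now).millis()));
  }
  return 0;
}

Under the old API both quantities were plain int64 grpc_millis values, so nothing stopped code from, say, adding two deadlines together; the new types turn that into a compile error while keeping the same millisecond representation underneath.
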
diff --git a/src/core/lib/surface/channel.cc b/src/core/lib/surface/channel.cc index 3ed9c542dfe..a9ef675212e 100644 --- a/src/core/lib/surface/channel.cc +++ b/src/core/lib/surface/channel.cc @@ -340,7 +340,7 @@ static grpc_call* grpc_channel_create_call_internal( grpc_channel* channel, grpc_call* parent_call, uint32_t propagation_mask, grpc_completion_queue* cq, grpc_pollset_set* pollset_set_alternative, grpc_core::Slice path, absl::optional<grpc_core::Slice> authority, - grpc_millis deadline) { + grpc_core::Timestamp deadline) { GPR_ASSERT(channel->is_client); GPR_ASSERT(!(cq != nullptr && pollset_set_alternative != nullptr)); @@ -376,7 +376,7 @@ grpc_call* grpc_channel_create_call(grpc_channel* channel, host != nullptr ? absl::optional<grpc_core::Slice>(grpc_slice_ref_internal(*host)) : absl::nullopt, - grpc_timespec_to_millis_round_up(deadline)); + grpc_core::Timestamp::FromTimespecRoundUp(deadline)); return call; } @@ -384,7 +384,7 @@ grpc_call* grpc_channel_create_call(grpc_channel* channel, grpc_call* grpc_channel_create_pollset_set_call( grpc_channel* channel, grpc_call* parent_call, uint32_t propagation_mask, grpc_pollset_set* pollset_set, const grpc_slice& method, - const grpc_slice* host, grpc_millis deadline, void* reserved) { + const grpc_slice* host, grpc_core::Timestamp deadline, void* reserved) { GPR_ASSERT(!reserved); return grpc_channel_create_call_internal( channel, parent_call, propagation_mask, nullptr, pollset_set, @@ -463,7 +463,7 @@ grpc_call* grpc_channel_create_registered_call( rc->authority.has_value() ? absl::optional<grpc_core::Slice>(rc->authority->Ref()) : absl::nullopt, - grpc_timespec_to_millis_round_up(deadline)); + grpc_core::Timestamp::FromTimespecRoundUp(deadline)); return call; } diff --git a/src/core/lib/surface/channel.h b/src/core/lib/surface/channel.h index 15bf81770f8..c079de8eb79 100644 --- a/src/core/lib/surface/channel.h +++ b/src/core/lib/surface/channel.h @@ -58,7 +58,7 @@ grpc_channel* grpc_channel_create_with_builder( grpc_call* grpc_channel_create_pollset_set_call( grpc_channel* channel, grpc_call* parent_call, uint32_t propagation_mask, grpc_pollset_set* pollset_set, const grpc_slice& method, - const grpc_slice* host, grpc_millis deadline, void* reserved); + const grpc_slice* host, grpc_core::Timestamp deadline, void* reserved); /** Get a (borrowed) pointer to this channels underlying channel stack */ grpc_channel_stack* grpc_channel_get_channel_stack(grpc_channel* channel); diff --git a/src/core/lib/surface/completion_queue.cc b/src/core/lib/surface/completion_queue.cc index 2e44d71fb67..7170787aa92 100644 --- a/src/core/lib/surface/completion_queue.cc +++ b/src/core/lib/surface/completion_queue.cc @@ -73,7 +73,7 @@ struct cq_poller_vtable { grpc_error_handle (*kick)(grpc_pollset* pollset, grpc_pollset_worker* specific_worker); grpc_error_handle (*work)(grpc_pollset* pollset, grpc_pollset_worker** worker, - grpc_millis deadline); + grpc_core::Timestamp deadline); void (*shutdown)(grpc_pollset* pollset, grpc_closure* closure); void (*destroy)(grpc_pollset* pollset); }; @@ -105,7 +105,7 @@ void non_polling_poller_destroy(grpc_pollset* pollset) { grpc_error_handle non_polling_poller_work(grpc_pollset* pollset, grpc_pollset_worker** worker, - grpc_millis deadline) { + grpc_core::Timestamp deadline) { non_polling_poller* npp = reinterpret_cast<non_polling_poller*>(pollset); if (npp->shutdown) return GRPC_ERROR_NONE; if (npp->kicked_without_poller) { @@ -123,8 +123,7 @@ grpc_error_handle non_polling_poller_work(grpc_pollset* pollset, 
w.next->prev = w.prev->next = &w; } w.kicked = false; - gpr_timespec deadline_ts = - grpc_millis_to_timespec(deadline, GPR_CLOCK_MONOTONIC); + gpr_timespec deadline_ts = deadline.as_timespec(GPR_CLOCK_MONOTONIC); while (!npp->shutdown && !w.kicked && !gpr_cv_wait(&w.cv, &npp->mu, deadline_ts)) { } @@ -894,7 +893,7 @@ void grpc_cq_end_op(grpc_completion_queue* cq, void* tag, struct cq_is_finished_arg { gpr_atm last_seen_things_queued_ever; grpc_completion_queue* cq; - grpc_millis deadline; + grpc_core::Timestamp deadline; grpc_cq_completion* stolen_completion; void* tag; /* for pluck */ bool first_loop; @@ -974,7 +973,8 @@ static grpc_event cq_next(grpc_completion_queue* cq, gpr_timespec deadline, GRPC_CQ_INTERNAL_REF(cq, "next"); - grpc_millis deadline_millis = grpc_timespec_to_millis_round_up(deadline); + grpc_core::Timestamp deadline_millis = + grpc_core::Timestamp::FromTimespecRoundUp(deadline); cq_is_finished_arg is_finished_arg = { cqd->things_queued_ever.load(std::memory_order_relaxed), cq, @@ -984,7 +984,7 @@ static grpc_event cq_next(grpc_completion_queue* cq, gpr_timespec deadline, true}; ExecCtxNext exec_ctx(&is_finished_arg); for (;;) { - grpc_millis iteration_deadline = deadline_millis; + grpc_core::Timestamp iteration_deadline = deadline_millis; if (is_finished_arg.stolen_completion != nullptr) { grpc_cq_completion* c = is_finished_arg.stolen_completion; @@ -1011,7 +1011,7 @@ static grpc_event cq_next(grpc_completion_queue* cq, gpr_timespec deadline, attempt at popping. Not doing this can potentially deadlock this thread forever (if the deadline is infinity) */ if (cqd->queue.num_items() > 0) { - iteration_deadline = 0; + iteration_deadline = grpc_core::Timestamp::ProcessEpoch(); } } @@ -1223,7 +1223,8 @@ static grpc_event cq_pluck(grpc_completion_queue* cq, void* tag, GRPC_CQ_INTERNAL_REF(cq, "pluck"); gpr_mu_lock(cq->mu); - grpc_millis deadline_millis = grpc_timespec_to_millis_round_up(deadline); + grpc_core::Timestamp deadline_millis = + grpc_core::Timestamp::FromTimespecRoundUp(deadline); cq_is_finished_arg is_finished_arg = { cqd->things_queued_ever.load(std::memory_order_relaxed), cq, diff --git a/src/core/lib/surface/init.cc b/src/core/lib/surface/init.cc index c979b9836fd..47420d16107 100644 --- a/src/core/lib/surface/init.cc +++ b/src/core/lib/surface/init.cc @@ -182,7 +182,6 @@ void grpc_init(void) { grpc_stats_init(); grpc_core::channelz::ChannelzRegistry::Init(); grpc_core::ApplicationCallbackExecCtx::GlobalInit(); - grpc_core::ExecCtx::GlobalInit(); grpc_iomgr_init(); gpr_timers_global_init(); for (int i = 0; i < g_number_of_plugins; i++) { @@ -218,7 +217,6 @@ void grpc_shutdown_internal_locked(void) grpc_stats_shutdown(); grpc_core::Fork::GlobalShutdown(); } - grpc_core::ExecCtx::GlobalShutdown(); grpc_core::ApplicationCallbackExecCtx::GlobalShutdown(); g_shutting_down = false; g_shutting_down_cv->SignalAll(); diff --git a/src/core/lib/surface/server.cc b/src/core/lib/surface/server.cc index c1b00101024..baff98fc305 100644 --- a/src/core/lib/surface/server.cc +++ b/src/core/lib/surface/server.cc @@ -1110,7 +1110,7 @@ void Server::ChannelData::AcceptStream(void* arg, grpc_transport* /*transport*/, args.cq = nullptr; args.pollset_set_alternative = nullptr; args.server_transport_data = transport_server_data; - args.send_deadline = GRPC_MILLIS_INF_FUTURE; + args.send_deadline = Timestamp::InfFuture(); grpc_call* call; grpc_error_handle error = grpc_call_create(&args, &call); grpc_call_element* elem = @@ -1240,12 +1240,12 @@ void Server::CallData::Publish(size_t 
cq_idx, RequestedCall* rc) { rc->data.batch.details->method = grpc_slice_ref_internal(path_->c_slice()); rc->data.batch.details->deadline = - grpc_millis_to_timespec(deadline_, GPR_CLOCK_MONOTONIC); + deadline_.as_timespec(GPR_CLOCK_MONOTONIC); rc->data.batch.details->flags = recv_initial_metadata_flags_; break; case RequestedCall::Type::REGISTERED_CALL: *rc->data.registered.deadline = - grpc_millis_to_timespec(deadline_, GPR_CLOCK_MONOTONIC); + deadline_.as_timespec(GPR_CLOCK_MONOTONIC); if (rc->data.registered.optional_payload != nullptr) { *rc->data.registered.optional_payload = payload_; payload_ = nullptr; diff --git a/src/core/lib/surface/server.h b/src/core/lib/surface/server.h index 276d917fc06..76dcb288576 100644 --- a/src/core/lib/surface/server.h +++ b/src/core/lib/surface/server.h @@ -295,7 +295,7 @@ class Server : public InternallyRefCounted<Server>, absl::optional<Slice> path_; absl::optional<Slice> host_; - grpc_millis deadline_ = GRPC_MILLIS_INF_FUTURE; + Timestamp deadline_ = Timestamp::InfFuture(); grpc_completion_queue* cq_new_ = nullptr; diff --git a/src/core/lib/transport/bdp_estimator.cc b/src/core/lib/transport/bdp_estimator.cc index 6af45afc01a..de4f329265a 100644 --- a/src/core/lib/transport/bdp_estimator.cc +++ b/src/core/lib/transport/bdp_estimator.cc @@ -34,18 +34,18 @@ BdpEstimator::BdpEstimator(const char* name) accumulator_(0), estimate_(65536), ping_start_time_(gpr_time_0(GPR_CLOCK_MONOTONIC)), - inter_ping_delay_(100), // start at 100ms + inter_ping_delay_(Duration::Milliseconds(100)), // start at 100ms stable_estimate_count_(0), bw_est_(0), name_(name) {} -grpc_millis BdpEstimator::CompletePing() { +Timestamp BdpEstimator::CompletePing() { gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); gpr_timespec dt_ts = gpr_time_sub(now, ping_start_time_); double dt = static_cast<double>(dt_ts.tv_sec) + 1e-9 * static_cast<double>(dt_ts.tv_nsec); double bw = dt > 0 ? 
(static_cast<double>(accumulator_) / dt) : 0; - int start_inter_ping_delay = inter_ping_delay_; + Duration start_inter_ping_delay = inter_ping_delay_; if (GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace)) { gpr_log(GPR_INFO, "bdp[%s]:complete acc=%" PRId64 " est=%" PRId64 @@ -63,20 +63,19 @@ grpc_millis BdpEstimator::CompletePing() { } inter_ping_delay_ /= 2; // if the ping estimate changes, // exponentially get faster at probing - } else if (inter_ping_delay_ < 10000) { + } else if (inter_ping_delay_ < Duration::Seconds(10)) { stable_estimate_count_++; if (stable_estimate_count_ >= 2) { - inter_ping_delay_ += - 100 + static_cast<int>(rand() * 100.0 / - RAND_MAX); // if the ping estimate is steady, - // slowly ramp down the probe time + // if the ping estimate is steady, slowly ramp down the probe time + inter_ping_delay_ += Duration::Milliseconds( + 100 + static_cast<int>(rand() * 100.0 / RAND_MAX)); } } if (start_inter_ping_delay != inter_ping_delay_) { stable_estimate_count_ = 0; if (GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace)) { - gpr_log(GPR_INFO, "bdp[%s]:update_inter_time to %dms", name_, - inter_ping_delay_); + gpr_log(GPR_INFO, "bdp[%s]:update_inter_time to %" PRId64 "ms", name_, + inter_ping_delay_.millis()); } } ping_state_ = PingState::UNSCHEDULED; diff --git a/src/core/lib/transport/bdp_estimator.h b/src/core/lib/transport/bdp_estimator.h index 7eb336d8bb4..5e4a61a875c 100644 --- a/src/core/lib/transport/bdp_estimator.h +++ b/src/core/lib/transport/bdp_estimator.h @@ -72,7 +72,7 @@ class BdpEstimator { } // Completes a previously started ping, returns when to schedule the next one - grpc_millis CompletePing(); + Timestamp CompletePing(); int64_t accumulator() { return accumulator_; } @@ -84,7 +84,7 @@ class BdpEstimator { int64_t estimate_; // when was the current ping started? gpr_timespec ping_start_time_; - int inter_ping_delay_; + Duration inter_ping_delay_; int stable_estimate_count_; double bw_est_; const char* name_; diff --git a/src/core/lib/transport/error_utils.cc b/src/core/lib/transport/error_utils.cc index 2d2b67e393b..e8cb09d41d4 100644 --- a/src/core/lib/transport/error_utils.cc +++ b/src/core/lib/transport/error_utils.cc @@ -56,7 +56,8 @@ static grpc_error_handle recursively_find_error_with_field( return GRPC_ERROR_NONE; } -void grpc_error_get_status(grpc_error_handle error, grpc_millis deadline, +void grpc_error_get_status(grpc_error_handle error, + grpc_core::Timestamp deadline, grpc_status_code* code, std::string* message, grpc_http2_error_code* http_error, const char** error_string) { @@ -149,8 +150,9 @@ absl::Status grpc_error_to_absl_status(grpc_error_handle error) { // TODO(yashykt): This should be updated once we decide on how to use the // absl::Status payload to capture all the contents of grpc_error. std::string message; - grpc_error_get_status(error, GRPC_MILLIS_INF_FUTURE, &status, &message, - nullptr /* http_error */, nullptr /* error_string */); + grpc_error_get_status(error, grpc_core::Timestamp::InfFuture(), &status, + &message, nullptr /* http_error */, + nullptr /* error_string */); return absl::Status(static_cast<absl::StatusCode>(status), message); } diff --git a/src/core/lib/transport/error_utils.h b/src/core/lib/transport/error_utils.h index 4cc2db68f9c..4b500675d58 100644 --- a/src/core/lib/transport/error_utils.h +++ b/src/core/lib/transport/error_utils.h @@ -34,7 +34,8 @@ /// be populated with the entire error string. 
If any of the attributes (code, /// msg, http_status, error_string) are unneeded, they can be passed as /// NULL. -void grpc_error_get_status(grpc_error_handle error, grpc_millis deadline, +void grpc_error_get_status(grpc_error_handle error, + grpc_core::Timestamp deadline, grpc_status_code* code, std::string* message, grpc_http2_error_code* http_error, const char** error_string); diff --git a/src/core/lib/transport/metadata_batch.h b/src/core/lib/transport/metadata_batch.h index 8b6904f2c4d..fc031a8dac3 100644 --- a/src/core/lib/transport/metadata_batch.h +++ b/src/core/lib/transport/metadata_batch.h @@ -23,6 +23,7 @@ #include <stdbool.h> +#include <cstdint> #include <limits> #include "absl/strings/escaping.h" @@ -47,7 +48,7 @@ namespace grpc_core { // grpc-timeout metadata trait. -// ValueType is defined as grpc_millis - an absolute timestamp (i.e. a +// ValueType is defined as Timestamp - an absolute timestamp (i.e. a // deadline!), that is converted to a duration by transports before being // sent. // TODO(ctiller): Move this elsewhere. During the transition we need to be able @@ -55,27 +56,27 @@ namespace grpc_core { // should not need to. struct GrpcTimeoutMetadata { static constexpr bool kRepeatable = false; - using ValueType = grpc_millis; - using MementoType = grpc_millis; + using ValueType = Timestamp; + using MementoType = Duration; static absl::string_view key() { return "grpc-timeout"; } static MementoType ParseMemento(Slice value, MetadataParseErrorFn on_error) { auto timeout = ParseTimeout(value); if (!timeout.has_value()) { on_error("invalid value", value); - return GRPC_MILLIS_INF_FUTURE; + return Duration::Infinity(); } return *timeout; } static ValueType MementoToValue(MementoType timeout) { - if (timeout == GRPC_MILLIS_INF_FUTURE) { - return GRPC_MILLIS_INF_FUTURE; + if (timeout == Duration::Infinity()) { + return Timestamp::InfFuture(); } return ExecCtx::Get()->Now() + timeout; } static Slice Encode(ValueType x) { return Timeout::FromDuration(x - ExecCtx::Get()->Now()).Encode(); } - static MementoType DisplayValue(MementoType x) { return x; } + static std::string DisplayValue(MementoType x) { return x.ToString(); } }; // TE metadata trait. @@ -153,7 +154,7 @@ struct ContentTypeMetadata { case kApplicationGrpc: return StaticSlice::FromStaticString("application/grpc"); case kInvalid: - abort(); + return StaticSlice::FromStaticString("application/grpc+unknown"); } GPR_UNREACHABLE_CODE( return StaticSlice::FromStaticString("unrepresentable value")); @@ -432,10 +433,22 @@ struct GrpcPreviousRpcAttemptsMetadata }; // grpc-retry-pushback-ms metadata trait. -struct GrpcRetryPushbackMsMetadata - : public SimpleIntBasedMetadata<grpc_millis, GRPC_MILLIS_INF_PAST> { +struct GrpcRetryPushbackMsMetadata { static constexpr bool kRepeatable = false; static absl::string_view key() { return "grpc-retry-pushback-ms"; } + using ValueType = Duration; + using MementoType = Duration; + static ValueType MementoToValue(MementoType x) { return x; } + static Slice Encode(Duration x) { return Slice::FromInt64(x.millis()); } + static int64_t DisplayValue(Duration x) { return x.millis(); } + static Duration ParseMemento(Slice value, MetadataParseErrorFn on_error) { + int64_t out; + if (!absl::SimpleAtoi(value.as_string_view(), &out)) { + on_error("not an integer", value); + return Duration::NegativeInfinity(); + } + return Duration::Milliseconds(out); + } }; // :status metadata trait. 
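
Worth noting about the GrpcTimeoutMetadata hunk above: the trait now distinguishes the wire form from the in-memory form. ParseMemento turns the grpc-timeout header into a relative Duration, MementoToValue anchors that Duration at ExecCtx::Get()->Now() to produce an absolute Timestamp deadline, and Encode converts back to a relative timeout at send time. A small numeric sketch of that round trip follows; ParseTimeoutMs and the bare millisecond arithmetic are illustrative stand-ins, not the real ParseTimeout() or ExecCtx API.

#include <cstdint>
#include <cstdio>
#include <string>

static const int64_t kInfMs = INT64_MAX;  // stand-in for an infinite timeout

// Hypothetical reduction of ParseTimeout(): "5S" -> 5000, "30m" -> 30, etc.
static int64_t ParseTimeoutMs(const std::string& s) {
  const int64_t x = std::stoll(s.substr(0, s.size() - 1));
  switch (s.back()) {
    case 'm': return x;             // milliseconds
    case 'S': return x * 1000;      // seconds
    case 'M': return x * 60000;     // minutes
    case 'H': return x * 3600000;   // hours
    default: return kInfMs;  // unparseable -> infinite, as in ParseMemento
  }
}

int main() {
  const int64_t now_ms = 1000;  // pretend ExecCtx::Get()->Now(), in ms
  // ParseMemento: the header carries a *relative* Duration...
  const int64_t memento_ms = ParseTimeoutMs("5S");
  // MementoToValue: ...anchored at Now() to become an absolute Timestamp.
  const int64_t deadline_ms =
      memento_ms == kInfMs ? kInfMs : now_ms + memento_ms;
  // Encode: converted back to a relative timeout when re-sent.
  std::printf("deadline=%lld ms after epoch, re-encoded timeout=%lld ms\n",
              static_cast<long long>(deadline_ms),
              static_cast<long long>(deadline_ms - now_ms));
  return 0;
}

Splitting ValueType from MementoType this way means a timeout parsed off the wire is not tied to any clock until the transport decides which Now() to anchor it at, which is exactly the property the TODO above the trait alludes to.
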
@@ -745,10 +758,29 @@ class GetStringValueHelper { // Sink for key value logs using LogFn = absl::FunctionRef<void(absl::string_view, absl::string_view)>; +template <typename T> +struct AdaptDisplayValueToLog { + static std::string ToString(const T& value) { return absl::StrCat(value); } +}; + +template <> +struct AdaptDisplayValueToLog<Slice> { + static std::string ToString(Slice value) { + return std::string(value.as_string_view()); + } +}; + +template <> +struct AdaptDisplayValueToLog<StaticSlice> { + static absl::string_view ToString(StaticSlice value) { + return value.as_string_view(); + } +}; + template <typename T, typename U, typename V> GPR_ATTRIBUTE_NOINLINE void LogKeyValueTo(absl::string_view key, const T& value, V (*display_value)(U), LogFn log_fn) { - log_fn(key, absl::StrCat(display_value(value))); + log_fn(key, AdaptDisplayValueToLog<V>::ToString(display_value(value))); } // Generate a strong type for metadata values per trait. @@ -775,7 +807,7 @@ struct Value<Which, absl::enable_if_t<Which::kRepeatable == false && encoder->Encode(Which(), value); } void LogTo(LogFn log_fn) const { - LogKeyValueTo(Which::key(), value, Which::DisplayValue, log_fn); + LogKeyValueTo(Which::key(), value, Which::Encode, log_fn); } using StorageType = typename Which::ValueType; GPR_NO_UNIQUE_ADDRESS StorageType value; @@ -831,7 +863,7 @@ struct Value<Which, absl::enable_if_t<Which::kRepeatable == true && } void LogTo(LogFn log_fn) const { for (const auto& v : value) { - LogKeyValueTo(Which::key(), v, Which::DisplayValue, log_fn); + LogKeyValueTo(Which::key(), v, Which::Encode, log_fn); } } using StorageType = absl::InlinedVector<typename Which::ValueType, 1>; diff --git a/src/core/lib/transport/parsed_metadata.h b/src/core/lib/transport/parsed_metadata.h index 1a293477cd9..a0b69944040 100644 --- a/src/core/lib/transport/parsed_metadata.h +++ b/src/core/lib/transport/parsed_metadata.h @@ -23,6 +23,7 @@ #include "absl/meta/type_traits.h" #include "absl/strings/match.h" +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/iomgr/error.h" #include "src/core/lib/slice/slice.h" #include "src/core/lib/surface/validate_metadata.h" @@ -39,13 +40,14 @@ namespace metadata_detail { template <typename Which> struct HasSimpleMemento { static constexpr bool value = - std::is_trivial<typename Which::MementoType>::value && - sizeof(typename Which::MementoType) <= sizeof(grpc_slice); + (std::is_trivial<typename Which::MementoType>::value && + sizeof(typename Which::MementoType) <= sizeof(grpc_slice)) || + std::is_same<typename Which::MementoType, Duration>::value; }; // Storage type for a single metadata entry. union Buffer { - char trivial[sizeof(grpc_slice)]; + uint8_t trivial[sizeof(grpc_slice)]; void* pointer; grpc_slice slice; }; @@ -72,9 +74,9 @@ GPR_ATTRIBUTE_NOINLINE std::string MakeDebugStringPipeline( // Extract a trivial field value from a Buffer - for MakeDebugStringPipeline. template <typename Field> Field FieldFromTrivial(const Buffer& value) { - Field x; - memcpy(&x, value.trivial, sizeof(x)); - return x; + Field field; + memcpy(&field, value.trivial, sizeof(Field)); + return field; } // Extract a pointer field value from a Buffer - for MakeDebugStringPipeline. 
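
One more annotation before the next file: the parsed_metadata.h hunk above widens HasSimpleMemento with an explicit carve-out for Duration, which presumably fails std::is_trivial because it has user-provided constructors, yet is still small and trivially copyable, so storing it by value in Buffer::trivial and recovering it with memcpy (as FieldFromTrivial does) stays well-defined. A self-contained sketch of that storage trick, with DurationLike and a fixed 16-byte array as stand-ins (the real union sizes the array to sizeof(grpc_slice)):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Stand-in for grpc_core::Duration: small, fixed-size, trivially copyable.
struct DurationLike {
  int64_t ms;
};

// Reduced version of the union in parsed_metadata.h.
union Buffer {
  uint8_t trivial[16];
  void* pointer;
};

// Mirrors FieldFromTrivial(): recover a by-value field from raw bytes.
template <typename Field>
Field FieldFromTrivial(const Buffer& value) {
  Field field;
  std::memcpy(&field, value.trivial, sizeof(Field));
  return field;
}

int main() {
  static_assert(sizeof(DurationLike) <= sizeof(Buffer::trivial),
                "memento must fit in the trivial storage");
  Buffer buffer;
  const DurationLike d{5000};
  std::memcpy(buffer.trivial, &d, sizeof(d));
  std::printf("round-tripped millis: %lld\n",
              static_cast<long long>(
                  FieldFromTrivial<DurationLike>(buffer).ms));
  return 0;
}

Going through memcpy on the byte array, rather than a reinterpret_cast of the buffer, keeps the round trip free of strict-aliasing problems, which is also why the hunk switches the array element type without changing the access pattern.
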
diff --git a/src/core/lib/transport/status_conversion.cc b/src/core/lib/transport/status_conversion.cc index 46c6cd8c3b0..acfd5c5eee3 100644 --- a/src/core/lib/transport/status_conversion.cc +++ b/src/core/lib/transport/status_conversion.cc @@ -39,8 +39,8 @@ grpc_http2_error_code grpc_status_to_http2_error(grpc_status_code status) { } } -grpc_status_code grpc_http2_error_to_grpc_status(grpc_http2_error_code error, - grpc_millis deadline) { +grpc_status_code grpc_http2_error_to_grpc_status( + grpc_http2_error_code error, grpc_core::Timestamp deadline) { switch (error) { case GRPC_HTTP2_NO_ERROR: /* should never be received */ diff --git a/src/core/lib/transport/status_conversion.h b/src/core/lib/transport/status_conversion.h index 487f00c08b1..73a58fb96c9 100644 --- a/src/core/lib/transport/status_conversion.h +++ b/src/core/lib/transport/status_conversion.h @@ -29,7 +29,7 @@ /* Conversion of grpc status codes to http2 error codes (for RST_STREAM) */ grpc_http2_error_code grpc_status_to_http2_error(grpc_status_code status); grpc_status_code grpc_http2_error_to_grpc_status(grpc_http2_error_code error, - grpc_millis deadline); + grpc_core::Timestamp deadline); /* Conversion of HTTP status codes (:status) to grpc status codes */ grpc_status_code grpc_http2_status_to_grpc_status(int status); diff --git a/src/core/lib/transport/timeout_encoding.cc b/src/core/lib/transport/timeout_encoding.cc index 307a970d7bb..8af89f5e221 100644 --- a/src/core/lib/transport/timeout_encoding.cc +++ b/src/core/lib/transport/timeout_encoding.cc @@ -23,6 +23,8 @@ #include <stdio.h> #include <string.h> +#include <cstdint> + #include <grpc/support/log.h> #include "src/core/lib/gpr/string.h" @@ -37,7 +39,6 @@ int64_t DivideRoundingUp(int64_t dividend, int64_t divisor) { constexpr int64_t kSecondsPerMinute = 60; constexpr int64_t kMinutesPerHour = 60; -constexpr int64_t kSecondsPerHour = kSecondsPerMinute * kMinutesPerHour; constexpr int64_t kMaxHours = 27000; bool IsAllSpace(const uint8_t* p, const uint8_t* end) { @@ -47,13 +48,13 @@ bool IsAllSpace(const uint8_t* p, const uint8_t* end) { } // namespace -Timeout Timeout::FromDuration(grpc_millis duration) { - return Timeout::FromMillis(duration); +Timeout Timeout::FromDuration(Duration duration) { + return Timeout::FromMillis(duration.millis()); } double Timeout::RatioVersus(Timeout other) const { - double a = AsDuration(); - double b = other.AsDuration(); + double a = AsDuration().millis(); + double b = other.AsDuration().millis(); if (b == 0) { if (a > 0) return 100; if (a < 0) return -100; @@ -62,33 +63,33 @@ double Timeout::RatioVersus(Timeout other) const { return 100 * (a / b - 1); } -grpc_millis Timeout::AsDuration() const { - grpc_millis value = value_; +Duration Timeout::AsDuration() const { + int64_t value = value_; switch (unit_) { case Unit::kNanoseconds: - return 0; + return Duration::Zero(); case Unit::kMilliseconds: - return value; + return Duration::Milliseconds(value); case Unit::kTenMilliseconds: - return value * 10; + return Duration::Milliseconds(value * 10); case Unit::kHundredMilliseconds: - return value * 100; + return Duration::Milliseconds(value * 100); case Unit::kSeconds: - return value * 1000; + return Duration::Seconds(value); case Unit::kTenSeconds: - return value * 10000; + return Duration::Seconds(value * 10); case Unit::kHundredSeconds: - return value * 100000; + return Duration::Seconds(value * 100); case Unit::kMinutes: - return value * 1000 * kSecondsPerMinute; + return Duration::Minutes(value); case Unit::kTenMinutes: - return value 
* 10000 * kSecondsPerMinute; + return Duration::Minutes(value * 10); case Unit::kHundredMinutes: - return value * 100000 * kSecondsPerMinute; + return Duration::Minutes(value * 100); case Unit::kHours: - return value * 1000 * kSecondsPerHour; + return Duration::Hours(value); } - GPR_UNREACHABLE_CODE(return -1); + GPR_UNREACHABLE_CODE(return Duration::NegativeInfinity()); } Slice Timeout::Encode() const { @@ -228,8 +229,8 @@ Timeout Timeout::FromHours(int64_t hours) { return Timeout(kMaxHours, Unit::kHours); } -absl::optional<grpc_millis> ParseTimeout(const Slice& text) { - grpc_millis x = 0; +absl::optional<Duration> ParseTimeout(const Slice& text) { + int32_t x = 0; const uint8_t* p = text.begin(); const uint8_t* end = text.end(); int have_digit = 0; @@ -243,7 +244,7 @@ absl::optional<grpc_millis> ParseTimeout(const Slice& text) { /* spec allows max. 8 digits, but we allow values up to 1,000,000,000 */ if (x >= (100 * 1000 * 1000)) { if (x != (100 * 1000 * 1000) || digit != 0) { - return GRPC_MILLIS_INF_FUTURE; + return Duration::Infinity(); } } x = x * 10 + digit; @@ -254,25 +255,27 @@ absl::optional<grpc_millis> ParseTimeout(const Slice& text) { } if (p == end) return absl::nullopt; /* decode unit specifier */ - int64_t timeout; + Duration timeout; switch (*p) { case 'n': - timeout = x / GPR_NS_PER_MS + (x % GPR_NS_PER_MS != 0); + timeout = + Duration::Milliseconds(x / GPR_NS_PER_MS + (x % GPR_NS_PER_MS != 0)); break; case 'u': - timeout = x / GPR_US_PER_MS + (x % GPR_US_PER_MS != 0); + timeout = + Duration::Milliseconds(x / GPR_US_PER_MS + (x % GPR_US_PER_MS != 0)); break; case 'm': - timeout = x; + timeout = Duration::Milliseconds(x); break; case 'S': - timeout = x * GPR_MS_PER_SEC; + timeout = Duration::Seconds(x); break; case 'M': - timeout = x * 60 * GPR_MS_PER_SEC; + timeout = Duration::Minutes(x); break; case 'H': - timeout = x * 60 * 60 * GPR_MS_PER_SEC; + timeout = Duration::Hours(x); break; default: return absl::nullopt; diff --git a/src/core/lib/transport/timeout_encoding.h b/src/core/lib/transport/timeout_encoding.h index 0ffd2e82194..4d8cd736217 100644 --- a/src/core/lib/transport/timeout_encoding.h +++ b/src/core/lib/transport/timeout_encoding.h @@ -28,12 +28,12 @@ namespace grpc_core { class Timeout { public: - static Timeout FromDuration(grpc_millis duration); + static Timeout FromDuration(Duration duration); // Computes: 100 * ((this - other) / other) double RatioVersus(Timeout other) const; Slice Encode() const; - grpc_millis AsDuration() const; + Duration AsDuration() const; private: enum class Unit : uint8_t { @@ -61,7 +61,7 @@ class Timeout { Unit unit_; }; -absl::optional<grpc_millis> ParseTimeout(const Slice& text); +absl::optional<Duration> ParseTimeout(const Slice& text); } // namespace grpc_core diff --git a/src/core/tsi/alts/handshaker/alts_handshaker_client.cc b/src/core/tsi/alts/handshaker/alts_handshaker_client.cc index ae52efbb40b..da21201d7f2 100644 --- a/src/core/tsi/alts/handshaker/alts_handshaker_client.cc +++ b/src/core/tsi/alts/handshaker/alts_handshaker_client.cc @@ -721,7 +721,7 @@ alts_handshaker_client* alts_grpc_handshaker_client_create( : grpc_channel_create_pollset_set_call( channel, nullptr, GRPC_PROPAGATE_DEFAULTS, interested_parties, grpc_slice_from_static_string(ALTS_SERVICE_METHOD), &slice, - GRPC_MILLIS_INF_FUTURE, nullptr); + grpc_core::Timestamp::InfFuture(), nullptr); grpc_slice_unref_internal(slice); GRPC_CLOSURE_INIT(&client->on_handshaker_service_resp_recv, grpc_cb, client, grpc_schedule_on_exec_ctx); diff --git 
a/src/cpp/common/alarm.cc b/src/cpp/common/alarm.cc index be0d1f798c7..913c0c7317c 100644 --- a/src/cpp/common/alarm.cc +++ b/src/cpp/common/alarm.cc @@ -70,7 +70,8 @@ class AlarmImpl : public grpc::internal::CompletionQueueTag { GRPC_CQ_INTERNAL_UNREF(cq, "alarm"); }, this, grpc_schedule_on_exec_ctx); - grpc_timer_init(&timer_, grpc_timespec_to_millis_round_up(deadline), + grpc_timer_init(&timer_, + grpc_core::Timestamp::FromTimespecRoundUp(deadline), &on_alarm_); } void Set(gpr_timespec deadline, std::function<void(bool)> f) { @@ -93,7 +94,8 @@ class AlarmImpl : public grpc::internal::CompletionQueueTag { error); }, this, grpc_schedule_on_exec_ctx); - grpc_timer_init(&timer_, grpc_timespec_to_millis_round_up(deadline), + grpc_timer_init(&timer_, + grpc_core::Timestamp::FromTimespecRoundUp(deadline), &on_alarm_); } void Cancel() { diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py index e23ed8316dc..4d7a1ae0cdd 100644 --- a/src/python/grpcio/grpc_core_dependencies.py +++ b/src/python/grpcio/grpc_core_dependencies.py @@ -477,6 +477,7 @@ CORE_SOURCE_FILES = [ 'src/core/lib/gprpp/status_helper.cc', 'src/core/lib/gprpp/thd_posix.cc', 'src/core/lib/gprpp/thd_windows.cc', + 'src/core/lib/gprpp/time.cc', 'src/core/lib/gprpp/time_util.cc', 'src/core/lib/http/format_request.cc', 'src/core/lib/http/httpcli.cc', diff --git a/test/core/backoff/backoff_test.cc b/test/core/backoff/backoff_test.cc index 86042c599f4..5270b486f44 100644 --- a/test/core/backoff/backoff_test.cc +++ b/test/core/backoff/backoff_test.cc @@ -25,6 +25,7 @@ #include <grpc/grpc.h> #include <grpc/support/log.h> +#include "src/core/lib/gprpp/time.h" #include "test/core/util/test_config.h" namespace grpc { @@ -34,10 +35,10 @@ namespace { using grpc_core::BackOff; TEST(BackOffTest, ConstantBackOff) { - const grpc_millis initial_backoff = 200; + const auto initial_backoff = grpc_core::Duration::Milliseconds(200); const double multiplier = 1.0; const double jitter = 0.0; - const grpc_millis max_backoff = 1000; + const auto max_backoff = grpc_core::Duration::Seconds(1); grpc_core::ExecCtx exec_ctx; BackOff::Options options; options.set_initial_backoff(initial_backoff) @@ -46,7 +47,7 @@ TEST(BackOffTest, ConstantBackOff) { .set_max_backoff(max_backoff); BackOff backoff(options); - grpc_millis next_attempt_start_time = backoff.NextAttemptTime(); + grpc_core::Timestamp next_attempt_start_time = backoff.NextAttemptTime(); EXPECT_EQ(next_attempt_start_time - grpc_core::ExecCtx::Get()->Now(), initial_backoff); for (int i = 0; i < 10000; i++) { @@ -57,10 +58,10 @@ TEST(BackOffTest, ConstantBackOff) { } TEST(BackOffTest, MinConnect) { - const grpc_millis initial_backoff = 100; + const auto initial_backoff = grpc_core::Duration::Milliseconds(100); const double multiplier = 1.0; const double jitter = 0.0; - const grpc_millis max_backoff = 1000; + const auto max_backoff = grpc_core::Duration::Seconds(1); grpc_core::ExecCtx exec_ctx; BackOff::Options options; options.set_initial_backoff(initial_backoff) @@ -68,15 +69,15 @@ TEST(BackOffTest, MinConnect) { .set_jitter(jitter) .set_max_backoff(max_backoff); BackOff backoff(options); - grpc_millis next = backoff.NextAttemptTime(); + grpc_core::Timestamp next = backoff.NextAttemptTime(); EXPECT_EQ(next - grpc_core::ExecCtx::Get()->Now(), initial_backoff); } TEST(BackOffTest, NoJitterBackOff) { - const grpc_millis initial_backoff = 2; + const auto initial_backoff = grpc_core::Duration::Milliseconds(2); const double multiplier = 2.0; const double jitter = 
0.0; - const grpc_millis max_backoff = 513; + const auto max_backoff = grpc_core::Duration::Milliseconds(513); BackOff::Options options; options.set_initial_backoff(initial_backoff) .set_multiplier(multiplier) @@ -86,50 +87,55 @@ TEST(BackOffTest, NoJitterBackOff) { // x_1 = 2 // x_n = 2**i + x_{i-1} ( = 2**(n+1) - 2 ) grpc_core::ExecCtx exec_ctx; - grpc_core::ExecCtx::Get()->TestOnlySetNow(0); - grpc_millis next = backoff.NextAttemptTime(); - EXPECT_EQ(next, 2); + grpc_core::ExecCtx::Get()->TestOnlySetNow( + grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(0)); + grpc_core::Timestamp next = backoff.NextAttemptTime(); + EXPECT_EQ(next, grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(2)); grpc_core::ExecCtx::Get()->TestOnlySetNow(next); next = backoff.NextAttemptTime(); - EXPECT_EQ(next, 6); + EXPECT_EQ(next, grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(6)); grpc_core::ExecCtx::Get()->TestOnlySetNow(next); next = backoff.NextAttemptTime(); - EXPECT_EQ(next, 14); + EXPECT_EQ(next, grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(14)); grpc_core::ExecCtx::Get()->TestOnlySetNow(next); next = backoff.NextAttemptTime(); - EXPECT_EQ(next, 30); + EXPECT_EQ(next, grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(30)); grpc_core::ExecCtx::Get()->TestOnlySetNow(next); next = backoff.NextAttemptTime(); - EXPECT_EQ(next, 62); + EXPECT_EQ(next, grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(62)); grpc_core::ExecCtx::Get()->TestOnlySetNow(next); next = backoff.NextAttemptTime(); - EXPECT_EQ(next, 126); + EXPECT_EQ(next, grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(126)); grpc_core::ExecCtx::Get()->TestOnlySetNow(next); next = backoff.NextAttemptTime(); - EXPECT_EQ(next, 254); + EXPECT_EQ(next, grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(254)); grpc_core::ExecCtx::Get()->TestOnlySetNow(next); next = backoff.NextAttemptTime(); - EXPECT_EQ(next, 510); + EXPECT_EQ(next, grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(510)); grpc_core::ExecCtx::Get()->TestOnlySetNow(next); next = backoff.NextAttemptTime(); - EXPECT_EQ(next, 1022); + EXPECT_EQ(next, + grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(1022)); grpc_core::ExecCtx::Get()->TestOnlySetNow(next); next = backoff.NextAttemptTime(); // Hit the maximum timeout. From this point onwards, retries will increase // only by max timeout. 
- EXPECT_EQ(next, 1535); + EXPECT_EQ(next, + grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(1535)); grpc_core::ExecCtx::Get()->TestOnlySetNow(next); next = backoff.NextAttemptTime(); - EXPECT_EQ(next, 2048); + EXPECT_EQ(next, + grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(2048)); grpc_core::ExecCtx::Get()->TestOnlySetNow(next); next = backoff.NextAttemptTime(); - EXPECT_EQ(next, 2561); + EXPECT_EQ(next, + grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(2561)); } TEST(BackOffTest, JitterBackOff) { - const grpc_millis initial_backoff = 500; - grpc_millis current_backoff = initial_backoff; - const grpc_millis max_backoff = 1000; + const auto initial_backoff = grpc_core::Duration::Milliseconds(500); + auto current_backoff = initial_backoff; + const auto max_backoff = grpc_core::Duration::Seconds(1); const double multiplier = 1.0; const double jitter = 0.1; BackOff::Options options; @@ -140,29 +146,30 @@ TEST(BackOffTest, JitterBackOff) { BackOff backoff(options); grpc_core::ExecCtx exec_ctx; - grpc_millis next = backoff.NextAttemptTime(); + grpc_core::Timestamp next = backoff.NextAttemptTime(); EXPECT_EQ(next - grpc_core::ExecCtx::Get()->Now(), initial_backoff); - grpc_millis expected_next_lower_bound = static_cast<grpc_millis>( - static_cast<double>(current_backoff) * (1 - jitter)); - grpc_millis expected_next_upper_bound = static_cast<grpc_millis>( - static_cast<double>(current_backoff) * (1 + jitter)); + auto expected_next_lower_bound = grpc_core::Duration::Milliseconds( + static_cast<double>(current_backoff.millis()) * (1 - jitter)); + auto expected_next_upper_bound = grpc_core::Duration::Milliseconds( + static_cast<double>(current_backoff.millis()) * (1 + jitter)); for (int i = 0; i < 10000; i++) { next = backoff.NextAttemptTime(); // next-now must be within (jitter*100)% of the current backoff (which // increases by * multiplier up to max_backoff). 
- const grpc_millis timeout_millis = next - grpc_core::ExecCtx::Get()->Now(); + const grpc_core::Duration timeout_millis = + next - grpc_core::ExecCtx::Get()->Now(); EXPECT_GE(timeout_millis, expected_next_lower_bound); EXPECT_LE(timeout_millis, expected_next_upper_bound); - current_backoff = - std::min(static_cast<grpc_millis>(static_cast<double>(current_backoff) * - multiplier), - max_backoff); - expected_next_lower_bound = static_cast<grpc_millis>( - static_cast<double>(current_backoff) * (1 - jitter)); - expected_next_upper_bound = static_cast<grpc_millis>( - static_cast<double>(current_backoff) * (1 + jitter)); + current_backoff = std::min( + grpc_core::Duration::Milliseconds( + static_cast<double>(current_backoff.millis()) * multiplier), + max_backoff); + expected_next_lower_bound = grpc_core::Duration::Milliseconds( + static_cast<double>(current_backoff.millis()) * (1 - jitter)); + expected_next_upper_bound = grpc_core::Duration::Milliseconds( + static_cast<double>(current_backoff.millis()) * (1 + jitter)); } } diff --git a/test/core/channel/channel_stack_test.cc b/test/core/channel/channel_stack_test.cc index af3d69dc0c6..97a1e49d518 100644 --- a/test/core/channel/channel_stack_test.cc +++ b/test/core/channel/channel_stack_test.cc @@ -121,14 +121,14 @@ static void test_create_channel_stack(void) { call_stack = static_cast<grpc_call_stack*>(gpr_malloc(channel_stack->call_stack_size)); const grpc_call_element_args args = { - call_stack, /* call_stack */ - nullptr, /* server_transport_data */ - nullptr, /* context */ - path, /* path */ - gpr_get_cycle_counter(), /* start_time */ - GRPC_MILLIS_INF_FUTURE, /* deadline */ - nullptr, /* arena */ - nullptr, /* call_combiner */ + call_stack, /* call_stack */ + nullptr, /* server_transport_data */ + nullptr, /* context */ + path, /* path */ + gpr_get_cycle_counter(), /* start_time */ + grpc_core::Timestamp::InfFuture(), /* deadline */ + nullptr, /* arena */ + nullptr, /* call_combiner */ }; grpc_error_handle error = grpc_call_stack_init(channel_stack, 1, free_call, call_stack, &args); diff --git a/test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc b/test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc index a610ca9c734..baa5e42efe7 100644 --- a/test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc +++ b/test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc @@ -20,6 +20,7 @@ #include <functional> #include <grpc/grpc.h> +#include <grpc/impl/codegen/gpr_types.h> #include <grpc/support/log.h> #include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" @@ -27,6 +28,7 @@ #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/config/core_configuration.h" #include "src/core/lib/gprpp/memory.h" +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/iomgr/resolve_address.h" #include "src/core/lib/iomgr/work_serializer.h" #include "src/core/lib/resolver/resolver_registry.h" @@ -72,14 +74,16 @@ class TestDNSResolver : public grpc_core::DNSResolver { auto result = g_default_dns_resolver->ResolveName( name, default_port, interested_parties, std::move(on_done)); ++g_resolution_count; - static grpc_millis last_resolution_time = 0; - if (last_resolution_time == 0) { - last_resolution_time = - grpc_timespec_to_millis_round_up(gpr_now(GPR_CLOCK_MONOTONIC)); + static grpc_core::Timestamp last_resolution_time = + grpc_core::Timestamp::ProcessEpoch(); + if (last_resolution_time == grpc_core::Timestamp::ProcessEpoch()) { + last_resolution_time = 
grpc_core::Timestamp::FromTimespecRoundUp( + gpr_now(GPR_CLOCK_MONOTONIC)); } else { - grpc_millis now = - grpc_timespec_to_millis_round_up(gpr_now(GPR_CLOCK_MONOTONIC)); - GPR_ASSERT(now - last_resolution_time >= kMinResolutionPeriodMs); + auto now = grpc_core::Timestamp::FromTimespecRoundUp( + gpr_now(GPR_CLOCK_MONOTONIC)); + GPR_ASSERT(now - last_resolution_time >= + grpc_core::Duration::Milliseconds(kMinResolutionPeriodMs)); last_resolution_time = now; } // For correct time diff comparisons, make sure that any subsequent calls @@ -110,20 +114,19 @@ static grpc_ares_request* test_dns_lookup_ares( dns_server, name, default_port, g_iomgr_args.pollset_set, on_done, addresses, balancer_addresses, service_config_json, query_timeout_ms); ++g_resolution_count; - static grpc_millis last_resolution_time = 0; - grpc_millis now = - grpc_timespec_to_millis_round_up(gpr_now(GPR_CLOCK_MONOTONIC)); + static auto last_resolution_time = grpc_core::Timestamp::ProcessEpoch(); + auto now = + grpc_core::Timestamp::FromTimespecRoundUp(gpr_now(GPR_CLOCK_MONOTONIC)); gpr_log(GPR_DEBUG, "last_resolution_time:%" PRId64 " now:%" PRId64 " min_time_between:%d", - last_resolution_time, now, kMinResolutionPeriodMs); - if (last_resolution_time == 0) { - last_resolution_time = - grpc_timespec_to_millis_round_up(gpr_now(GPR_CLOCK_MONOTONIC)); - } else { - GPR_ASSERT(now - last_resolution_time >= kMinResolutionPeriodMs); - last_resolution_time = now; + last_resolution_time.milliseconds_after_process_epoch(), + now.milliseconds_after_process_epoch(), kMinResolutionPeriodMs); + if (last_resolution_time != grpc_core::Timestamp::ProcessEpoch()) { + GPR_ASSERT(now - last_resolution_time >= + grpc_core::Duration::Milliseconds(kMinResolutionPeriodMs)); } + last_resolution_time = now; // For correct time diff comparisons, make sure that any subsequent calls // to grpc_core::ExecCtx::Get()->Now() on this thread don't return a time // which is earlier than that returned by the call(s) to @@ -165,22 +168,22 @@ static void iomgr_args_finish(iomgr_args* args) { gpr_free(args->pollset); } -static grpc_millis n_sec_deadline(int seconds) { - return grpc_timespec_to_millis_round_up( +static grpc_core::Timestamp n_sec_deadline(int seconds) { + return grpc_core::Timestamp::FromTimespecRoundUp( grpc_timeout_seconds_to_deadline(seconds)); } static void poll_pollset_until_request_done(iomgr_args* args) { grpc_core::ExecCtx exec_ctx; - grpc_millis deadline = n_sec_deadline(10); + grpc_core::Timestamp deadline = n_sec_deadline(10); while (true) { bool done = gpr_atm_acq_load(&args->done_atm) != 0; if (done) { break; } - grpc_millis time_left = deadline - grpc_core::ExecCtx::Get()->Now(); - gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRId64, done, time_left); - GPR_ASSERT(time_left >= 0); + grpc_core::Duration time_left = deadline - grpc_core::ExecCtx::Get()->Now(); + gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRId64, done, time_left.millis()); + GPR_ASSERT(time_left >= grpc_core::Duration::Zero()); grpc_pollset_worker* worker = nullptr; gpr_mu_lock(args->mu); GRPC_LOG_IF_ERROR("pollset_work", grpc_pollset_work(args->pollset, &worker, diff --git a/test/core/client_channel/service_config_test.cc b/test/core/client_channel/service_config_test.cc index 746f43f250b..038840b2291 100644 --- a/test/core/client_channel/service_config_test.cc +++ b/test/core/client_channel/service_config_test.cc @@ -29,6 +29,7 @@ #include "src/core/ext/filters/client_channel/retry_service_config.h" #include "src/core/ext/filters/message_size/message_size_filter.h" #include 
"src/core/lib/gpr/string.h" +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/service_config/service_config_impl.h" #include "src/core/lib/service_config/service_config_parser.h" #include "test/core/util/port.h" @@ -690,7 +691,7 @@ TEST_F(ClientChannelParserTest, ValidTimeout) { EXPECT_EQ( (static_cast<internal::ClientChannelMethodParsedConfig*>(parsed_config)) ->timeout(), - 5000); + Duration::Seconds(5)); } TEST_F(ClientChannelParserTest, InvalidTimeout) { @@ -921,8 +922,8 @@ TEST_F(RetryParserTest, ValidRetryPolicy) { static_cast<internal::RetryMethodConfig*>(((*vector_ptr)[0]).get()); ASSERT_NE(parsed_config, nullptr); EXPECT_EQ(parsed_config->max_attempts(), 3); - EXPECT_EQ(parsed_config->initial_backoff(), 1000); - EXPECT_EQ(parsed_config->max_backoff(), 120000); + EXPECT_EQ(parsed_config->initial_backoff(), Duration::Seconds(1)); + EXPECT_EQ(parsed_config->max_backoff(), Duration::Minutes(2)); EXPECT_EQ(parsed_config->backoff_multiplier(), 1.6f); EXPECT_EQ(parsed_config->per_attempt_recv_timeout(), absl::nullopt); EXPECT_TRUE( @@ -1306,10 +1307,10 @@ TEST_F(RetryParserTest, ValidRetryPolicyWithPerAttemptRecvTimeout) { static_cast<internal::RetryMethodConfig*>(((*vector_ptr)[0]).get()); ASSERT_NE(parsed_config, nullptr); EXPECT_EQ(parsed_config->max_attempts(), 2); - EXPECT_EQ(parsed_config->initial_backoff(), 1000); - EXPECT_EQ(parsed_config->max_backoff(), 120000); + EXPECT_EQ(parsed_config->initial_backoff(), Duration::Seconds(1)); + EXPECT_EQ(parsed_config->max_backoff(), Duration::Minutes(2)); EXPECT_EQ(parsed_config->backoff_multiplier(), 1.6f); - EXPECT_EQ(parsed_config->per_attempt_recv_timeout(), 1000); + EXPECT_EQ(parsed_config->per_attempt_recv_timeout(), Duration::Seconds(1)); EXPECT_TRUE( parsed_config->retryable_status_codes().Contains(GRPC_STATUS_ABORTED)); } @@ -1342,8 +1343,8 @@ TEST_F(RetryParserTest, static_cast<internal::RetryMethodConfig*>(((*vector_ptr)[0]).get()); ASSERT_NE(parsed_config, nullptr); EXPECT_EQ(parsed_config->max_attempts(), 2); - EXPECT_EQ(parsed_config->initial_backoff(), 1000); - EXPECT_EQ(parsed_config->max_backoff(), 120000); + EXPECT_EQ(parsed_config->initial_backoff(), Duration::Seconds(1)); + EXPECT_EQ(parsed_config->max_backoff(), Duration::Minutes(2)); EXPECT_EQ(parsed_config->backoff_multiplier(), 1.6f); EXPECT_EQ(parsed_config->per_attempt_recv_timeout(), absl::nullopt); EXPECT_TRUE( @@ -1380,10 +1381,10 @@ TEST_F(RetryParserTest, static_cast<internal::RetryMethodConfig*>(((*vector_ptr)[0]).get()); ASSERT_NE(parsed_config, nullptr); EXPECT_EQ(parsed_config->max_attempts(), 2); - EXPECT_EQ(parsed_config->initial_backoff(), 1000); - EXPECT_EQ(parsed_config->max_backoff(), 120000); + EXPECT_EQ(parsed_config->initial_backoff(), Duration::Seconds(1)); + EXPECT_EQ(parsed_config->max_backoff(), Duration::Minutes(2)); EXPECT_EQ(parsed_config->backoff_multiplier(), 1.6f); - EXPECT_EQ(parsed_config->per_attempt_recv_timeout(), 1000); + EXPECT_EQ(parsed_config->per_attempt_recv_timeout(), Duration::Seconds(1)); EXPECT_TRUE(parsed_config->retryable_status_codes().Empty()); } diff --git a/test/core/end2end/bad_server_response_test.cc b/test/core/end2end/bad_server_response_test.cc index 22c4ad7b8e8..418d1cf8829 100644 --- a/test/core/end2end/bad_server_response_test.cc +++ b/test/core/end2end/bad_server_response_test.cc @@ -342,7 +342,6 @@ static void run_test(bool http2_response, bool send_settings, int main(int argc, char** argv) { grpc::testing::TestEnvironment env(argc, argv); grpc_init(); - /* status defined in hpack static table */ 
run_test(true, true, HTTP2_RESP(204), sizeof(HTTP2_RESP(204)) - 1, GRPC_STATUS_UNKNOWN, HTTP2_DETAIL_MSG(204)); @@ -372,7 +371,8 @@ int main(int argc, char** argv) { GRPC_STATUS_UNAVAILABLE, HTTP2_DETAIL_MSG(503)); run_test(true, true, HTTP2_RESP(504), sizeof(HTTP2_RESP(504)) - 1, GRPC_STATUS_UNAVAILABLE, HTTP2_DETAIL_MSG(504)); - /* unparseable response. RPC should fail immediately due to a connect failure. + /* unparseable response. RPC should fail immediately due to a connect + * failure. */ run_test(false, false, UNPARSEABLE_RESP, sizeof(UNPARSEABLE_RESP) - 1, GRPC_STATUS_UNAVAILABLE, nullptr); diff --git a/test/core/end2end/cq_verifier.cc b/test/core/end2end/cq_verifier.cc index 720c8950366..df6e9a080eb 100644 --- a/test/core/end2end/cq_verifier.cc +++ b/test/core/end2end/cq_verifier.cc @@ -285,7 +285,7 @@ void cq_verify(cq_verifier* v, int timeout_sec) { void cq_verify_empty_timeout(cq_verifier* v, int timeout_sec) { gpr_timespec deadline = - gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(timeout_sec, GPR_TIMESPAN)); grpc_event ev; diff --git a/test/core/end2end/fixtures/http_proxy_fixture.cc b/test/core/end2end/fixtures/http_proxy_fixture.cc index 647cbc9e09a..6444aa6ae03 100644 --- a/test/core/end2end/fixtures/http_proxy_fixture.cc +++ b/test/core/end2end/fixtures/http_proxy_fixture.cc @@ -531,8 +531,8 @@ static void on_read_request_done_locked(void* arg, grpc_error_handle error) { GPR_ASSERT(!addresses_or->empty()); // Connect to requested address. // The connection callback inherits our reference to conn. - const grpc_millis deadline = - grpc_core::ExecCtx::Get()->Now() + 10 * GPR_MS_PER_SEC; + const grpc_core::Timestamp deadline = + grpc_core::ExecCtx::Get()->Now() + grpc_core::Duration::Seconds(10); GRPC_CLOSURE_INIT(&conn->on_server_connect_done, on_server_connect_done, conn, grpc_schedule_on_exec_ctx); const grpc_channel_args* args = grpc_core::CoreConfiguration::Get() @@ -593,10 +593,10 @@ static void thread_main(void* arg) { gpr_ref(&proxy->users); grpc_pollset_worker* worker = nullptr; gpr_mu_lock(proxy->mu); - GRPC_LOG_IF_ERROR( - "grpc_pollset_work", - grpc_pollset_work(proxy->pollset[0], &worker, - grpc_core::ExecCtx::Get()->Now() + GPR_MS_PER_SEC)); + GRPC_LOG_IF_ERROR("grpc_pollset_work", + grpc_pollset_work(proxy->pollset[0], &worker, + grpc_core::ExecCtx::Get()->Now() + + grpc_core::Duration::Seconds(1))); gpr_mu_unlock(proxy->mu); grpc_core::ExecCtx::Get()->Flush(); } while (!gpr_unref(&proxy->users)); diff --git a/test/core/end2end/fuzzers/BUILD b/test/core/end2end/fuzzers/BUILD index b8ed88ad996..3146ec8fa95 100644 --- a/test/core/end2end/fuzzers/BUILD +++ b/test/core/end2end/fuzzers/BUILD @@ -27,6 +27,7 @@ grpc_proto_fuzzer( language = "C++", proto = "api_fuzzer.proto", tags = ["no_windows"], + uses_polling = False, deps = [ "//:gpr", "//:grpc", @@ -41,6 +42,7 @@ grpc_fuzzer( corpus = "client_fuzzer_corpus", language = "C++", tags = ["no_windows"], + uses_polling = False, deps = [ "//:gpr", "//:grpc", @@ -54,6 +56,7 @@ grpc_fuzzer( corpus = "server_fuzzer_corpus", language = "C++", tags = ["no_windows"], + uses_polling = False, deps = [ "//:gpr", "//:grpc", diff --git a/test/core/end2end/fuzzers/api_fuzzer.cc b/test/core/end2end/fuzzers/api_fuzzer.cc index c3ddd23f4c4..b778e854833 100644 --- a/test/core/end2end/fuzzers/api_fuzzer.cc +++ b/test/core/end2end/fuzzers/api_fuzzer.cc @@ -124,7 +124,8 @@ class FuzzerDNSResolver : public grpc_core::DNSResolver { void Start() override { Ref().release(); // ref 
held by timer callback grpc_timer_init( - &timer_, GPR_MS_PER_SEC + grpc_core::ExecCtx::Get()->Now(), + &timer_, + grpc_core::Duration::Seconds(1) + grpc_core::ExecCtx::Get()->Now(), GRPC_CLOSURE_CREATE(FinishResolve, this, grpc_schedule_on_exec_ctx)); } @@ -188,7 +189,8 @@ grpc_ares_request* my_dns_lookup_ares( r->on_done = on_done; r->addresses = addresses; grpc_timer_init( - &r->timer, GPR_MS_PER_SEC + grpc_core::ExecCtx::Get()->Now(), + &r->timer, + grpc_core::Duration::Seconds(1) + grpc_core::ExecCtx::Get()->Now(), GRPC_CLOSURE_CREATE(finish_resolve, r, grpc_schedule_on_exec_ctx)); return nullptr; } @@ -253,7 +255,8 @@ static void sched_connect(grpc_closure* closure, grpc_endpoint** ep, fc->ep = ep; fc->deadline = deadline; grpc_timer_init( - &fc->timer, GPR_MS_PER_SEC + grpc_core::ExecCtx::Get()->Now(), + &fc->timer, + grpc_core::Duration::Seconds(1) + grpc_core::ExecCtx::Get()->Now(), GRPC_CLOSURE_CREATE(do_connect, fc, grpc_schedule_on_exec_ctx)); } @@ -261,9 +264,8 @@ static void my_tcp_client_connect(grpc_closure* closure, grpc_endpoint** ep, grpc_pollset_set* /*interested_parties*/, const grpc_channel_args* /*channel_args*/, const grpc_resolved_address* /*addr*/, - grpc_millis deadline) { - sched_connect(closure, ep, - grpc_millis_to_timespec(deadline, GPR_CLOCK_MONOTONIC)); + grpc_core::Timestamp deadline) { + sched_connect(closure, ep, deadline.as_timespec(GPR_CLOCK_MONOTONIC)); } grpc_tcp_client_vtable fuzz_tcp_client_vtable = {my_tcp_client_connect}; @@ -273,7 +275,7 @@ grpc_tcp_client_vtable fuzz_tcp_client_vtable = {my_tcp_client_connect}; class Validator { public: - explicit Validator(std::function<void(bool)> impl) : impl_(impl) {} + explicit Validator(std::function<void(bool)> impl) : impl_(std::move(impl)) {} virtual ~Validator() {} void Run(bool success) { @@ -304,7 +306,8 @@ static Validator* ValidateConnectivityWatch(gpr_timespec deadline, int* counter) { return MakeValidator([deadline, counter](bool success) { if (!success) { - GPR_ASSERT(gpr_time_cmp(gpr_now(deadline.clock_type), deadline) >= 0); + auto now = gpr_now(deadline.clock_type); + GPR_ASSERT(gpr_time_cmp(now, deadline) >= 0); } --*counter; }); @@ -755,6 +758,8 @@ DEFINE_PROTO_FUZZER(const api_fuzzer::Msg& msg) { if (squelch && grpc_trace_fuzzer == nullptr) gpr_set_log_function(dont_log); gpr_free(grpc_trace_fuzzer); grpc_set_tcp_client_impl(&fuzz_tcp_client_vtable); + g_now = {1, 0, GPR_CLOCK_MONOTONIC}; + grpc_core::TestOnlySetProcessEpoch(g_now); gpr_now_impl = now_impl; grpc_init(); grpc_timer_manager_set_threading(false); diff --git a/test/core/end2end/fuzzers/api_fuzzer_corpus/min_time_rounding b/test/core/end2end/fuzzers/api_fuzzer_corpus/min_time_rounding new file mode 100644 index 00000000000..7a14cd77e85 --- /dev/null +++ b/test/core/end2end/fuzzers/api_fuzzer_corpus/min_time_rounding @@ -0,0 +1,15 @@ +actions { + create_channel { + channel_actions {} + } +} +actions { + advance_time : 2 +} +actions { + watch_connectivity : 1 +} +actions { + poll_cq {} +} + diff --git a/test/core/end2end/fuzzers/server_fuzzer.cc b/test/core/end2end/fuzzers/server_fuzzer.cc index 30fc573820e..afcdc91f51e 100644 --- a/test/core/end2end/fuzzers/server_fuzzer.cc +++ b/test/core/end2end/fuzzers/server_fuzzer.cc @@ -19,6 +19,7 @@ #include <grpc/grpc.h> #include "src/core/ext/transport/chttp2/transport/chttp2_transport.h" +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/iomgr/executor.h" #include "src/core/lib/resource_quota/api.h" #include "src/core/lib/slice/slice_internal.h" @@ -102,7 +103,8 @@ extern 
"C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_metadata_array_destroy(&request_metadata1); grpc_server_shutdown_and_notify(server, cq, tag(0xdead)); grpc_server_cancel_all_calls(server); - grpc_millis deadline = grpc_core::ExecCtx::Get()->Now() + 5000; + grpc_core::Timestamp deadline = + grpc_core::ExecCtx::Get()->Now() + grpc_core::Duration::Seconds(5); for (int i = 0; i <= requested_calls; i++) { // A single grpc_completion_queue_next might not be sufficient for getting // the tag from shutdown, because we might potentially get blocked by diff --git a/test/core/gpr/useful_test.cc b/test/core/gpr/useful_test.cc index fb75d7aac81..bf86cd6d1fc 100644 --- a/test/core/gpr/useful_test.cc +++ b/test/core/gpr/useful_test.cc @@ -62,6 +62,20 @@ TEST(UsefulTest, BitOps) { EXPECT_EQ(BitCount(std::numeric_limits<uint64_t>::max()), 64); } +TEST(UsefulTest, SaturatingAdd) { + EXPECT_EQ(SaturatingAdd(0, 0), 0); + EXPECT_EQ(SaturatingAdd(0, 1), 1); + EXPECT_EQ(SaturatingAdd(1, 0), 1); + EXPECT_EQ(SaturatingAdd(1, 1), 2); + EXPECT_EQ(SaturatingAdd(std::numeric_limits<int64_t>::max(), 1), + std::numeric_limits<int64_t>::max()); + EXPECT_EQ(SaturatingAdd(std::numeric_limits<int64_t>::max(), + std::numeric_limits<int64_t>::max()), + std::numeric_limits<int64_t>::max()); + EXPECT_EQ(SaturatingAdd(std::numeric_limits<int64_t>::min(), -1), + std::numeric_limits<int64_t>::min()); +} + } // namespace grpc_core int main(int argc, char** argv) { diff --git a/test/core/gprpp/BUILD b/test/core/gprpp/BUILD index 99dcbec8110..501a37914c9 100644 --- a/test/core/gprpp/BUILD +++ b/test/core/gprpp/BUILD @@ -325,3 +325,15 @@ grpc_proto_fuzzer( "//test/core/util:grpc_test_util", ], ) + +grpc_cc_test( + name = "time_test", + srcs = ["time_test.cc"], + external_deps = ["gtest"], + language = "c++", + uses_polling = False, + deps = [ + "//:time", + "//test/core/util:grpc_suppressions", + ], +) diff --git a/test/core/gprpp/time_test.cc b/test/core/gprpp/time_test.cc new file mode 100644 index 00000000000..1e8ccb3866c --- /dev/null +++ b/test/core/gprpp/time_test.cc @@ -0,0 +1,87 @@ +// Copyright 2021 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "src/core/lib/gprpp/time.h" + +#include <limits> + +#include <gtest/gtest.h> + +namespace grpc_core { +namespace testing { + +TEST(TimestampTest, Empty) { + EXPECT_EQ(Timestamp(), Timestamp::ProcessEpoch()); +} + +TEST(TimestampTest, Infinities) { + EXPECT_EQ(Timestamp::InfFuture() - Duration::Milliseconds(1), + Timestamp::InfFuture()); + EXPECT_EQ(Timestamp::InfPast() + Duration::Milliseconds(1), + Timestamp::InfPast()); +} + +TEST(TimestampTest, ToString) { + EXPECT_EQ(Timestamp::FromMillisecondsAfterProcessEpoch(42).ToString(), + "@42ms"); +} + +TEST(DurationTest, Empty) { EXPECT_EQ(Duration(), Duration::Zero()); } + +TEST(DurationTest, Scales) { + EXPECT_EQ(Duration::Milliseconds(1000), Duration::Seconds(1)); + EXPECT_EQ(Duration::Seconds(60), Duration::Minutes(1)); + EXPECT_EQ(Duration::Minutes(60), Duration::Hours(1)); + EXPECT_EQ(Duration::FromSecondsAsDouble(1.2), Duration::Milliseconds(1200)); + EXPECT_EQ(Duration::FromSecondsAndNanoseconds(1, 300000000), + Duration::Milliseconds(1300)); +} + +TEST(DurationTest, Epsilon) { + EXPECT_LE(Duration::Epsilon(), Duration::Milliseconds(1)); +} + +TEST(DurationTest, Infinities) { + EXPECT_EQ(Duration::Infinity() - Duration::Milliseconds(1), + Duration::Infinity()); + EXPECT_EQ(Duration::Infinity() + Duration::Milliseconds(1), + Duration::Infinity()); + EXPECT_EQ(Duration::Infinity() * 2, Duration::Infinity()); + EXPECT_EQ(Duration::Infinity() * -1, Duration::NegativeInfinity()); + EXPECT_EQ(Duration::Infinity() / 3, Duration::Infinity()); + EXPECT_EQ(Duration::NegativeInfinity() / -3, Duration::Infinity()); + EXPECT_EQ(Duration::NegativeInfinity() + Duration::Milliseconds(1), + Duration::NegativeInfinity()); + EXPECT_EQ(Duration::NegativeInfinity() - Duration::Milliseconds(1), + Duration::NegativeInfinity()); + EXPECT_EQ(Duration::NegativeInfinity() / 3, Duration::NegativeInfinity()); + EXPECT_EQ(Duration::Hours(std::numeric_limits<int64_t>::max()), + Duration::Infinity()); + EXPECT_EQ(Duration::FromSecondsAsDouble(1e100), Duration::Infinity()); + EXPECT_EQ(Duration::FromSecondsAsDouble(-1e100), + Duration::NegativeInfinity()); +} + +TEST(DurationTest, FromTimespan) { + EXPECT_EQ(Duration::FromTimespec(gpr_time_from_millis(1234, GPR_TIMESPAN)), + Duration::Milliseconds(1234)); +} + +} // namespace testing +} // namespace grpc_core + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/test/core/http/httpcli_test.cc b/test/core/http/httpcli_test.cc index 714605ff363..f9cd63d0e3d 100644 --- a/test/core/http/httpcli_test.cc +++ b/test/core/http/httpcli_test.cc @@ -30,6 +30,7 @@ #include <grpc/support/sync.h> #include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/iomgr/iomgr.h" #include "test/core/http/httpcli_test_util.h" #include "test/core/util/fake_udp_and_tcp_server.h" @@ -39,8 +40,8 @@ namespace { -grpc_millis NSecondsTime(int seconds) { - return grpc_timespec_to_millis_round_up( +grpc_core::Timestamp NSecondsTime(int seconds) { + return grpc_core::Timestamp::FromTimespecRoundUp( grpc_timeout_seconds_to_deadline(seconds)); } diff --git a/test/core/http/httpscli_test.cc b/test/core/http/httpscli_test.cc index f2d3b353a36..b2887ad1e0c 100644 --- a/test/core/http/httpscli_test.cc +++ b/test/core/http/httpscli_test.cc @@ -28,6 +28,8 @@ #include <grpc/support/sync.h> #include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" +#include 
"src/core/lib/gpr/env.h" +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/http/httpcli.h" #include "src/core/lib/http/httpcli_ssl_credentials.h" #include "src/core/lib/iomgr/iomgr.h" @@ -39,8 +41,8 @@ namespace { -grpc_millis NSecondsTime(int seconds) { - return grpc_timespec_to_millis_round_up( +grpc_core::Timestamp NSecondsTime(int seconds) { + return grpc_core::Timestamp::FromTimespecRoundUp( grpc_timeout_seconds_to_deadline(seconds)); } diff --git a/test/core/iomgr/endpoint_tests.cc b/test/core/iomgr/endpoint_tests.cc index 2a0e5a18d86..060ce816d65 100644 --- a/test/core/iomgr/endpoint_tests.cc +++ b/test/core/iomgr/endpoint_tests.cc @@ -27,6 +27,7 @@ #include <grpc/support/time.h> #include "src/core/lib/gpr/useful.h" +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/slice/slice_internal.h" #include "test/core/util/test_config.h" @@ -200,8 +201,8 @@ static void read_and_write_test(grpc_endpoint_test_config config, grpc_endpoint_test_fixture f = begin_test(config, "read_and_write_test", slice_size); grpc_core::ExecCtx exec_ctx; - grpc_millis deadline = - grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(20)); + auto deadline = grpc_core::Timestamp::FromTimespecRoundUp( + grpc_timeout_seconds_to_deadline(20)); gpr_log(GPR_DEBUG, "num_bytes=%" PRIuPTR " write_size=%" PRIuPTR " slice_size=%" PRIuPTR " shutdown=%d", @@ -284,8 +285,8 @@ static void inc_on_failure(void* arg, grpc_error_handle error) { static void wait_for_fail_count(int* fail_count, int want_fail_count) { grpc_core::ExecCtx::Get()->Flush(); gpr_mu_lock(g_mu); - grpc_millis deadline = - grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(10)); + grpc_core::Timestamp deadline = grpc_core::Timestamp::FromTimespecRoundUp( + grpc_timeout_seconds_to_deadline(10)); while (grpc_core::ExecCtx::Get()->Now() < deadline && *fail_count < want_fail_count) { grpc_pollset_worker* worker = nullptr; diff --git a/test/core/iomgr/fd_posix_test.cc b/test/core/iomgr/fd_posix_test.cc index 7c2ec737656..b6d7318dccd 100644 --- a/test/core/iomgr/fd_posix_test.cc +++ b/test/core/iomgr/fd_posix_test.cc @@ -248,8 +248,8 @@ static void server_wait_and_shutdown(server* sv) { grpc_core::ExecCtx exec_ctx; grpc_pollset_worker* worker = nullptr; GPR_ASSERT(GRPC_LOG_IF_ERROR( - "pollset_work", - grpc_pollset_work(g_pollset, &worker, GRPC_MILLIS_INF_FUTURE))); + "pollset_work", grpc_pollset_work(g_pollset, &worker, + grpc_core::Timestamp::InfFuture()))); gpr_mu_unlock(g_mu); gpr_mu_lock(g_mu); @@ -364,8 +364,8 @@ static void client_wait_and_shutdown(client* cl) { grpc_pollset_worker* worker = nullptr; grpc_core::ExecCtx exec_ctx; GPR_ASSERT(GRPC_LOG_IF_ERROR( - "pollset_work", - grpc_pollset_work(g_pollset, &worker, GRPC_MILLIS_INF_FUTURE))); + "pollset_work", grpc_pollset_work(g_pollset, &worker, + grpc_core::Timestamp::InfFuture()))); gpr_mu_unlock(g_mu); gpr_mu_lock(g_mu); @@ -466,8 +466,8 @@ static void test_grpc_fd_change(void) { while (a.cb_that_ran == nullptr) { grpc_pollset_worker* worker = nullptr; GPR_ASSERT(GRPC_LOG_IF_ERROR( - "pollset_work", - grpc_pollset_work(g_pollset, &worker, GRPC_MILLIS_INF_FUTURE))); + "pollset_work", grpc_pollset_work(g_pollset, &worker, + grpc_core::Timestamp::InfFuture()))); gpr_mu_unlock(g_mu); gpr_mu_lock(g_mu); @@ -490,8 +490,8 @@ static void test_grpc_fd_change(void) { while (b.cb_that_ran == nullptr) { grpc_pollset_worker* worker = nullptr; GPR_ASSERT(GRPC_LOG_IF_ERROR( - "pollset_work", - grpc_pollset_work(g_pollset, &worker, GRPC_MILLIS_INF_FUTURE))); + 
"pollset_work", grpc_pollset_work(g_pollset, &worker, + grpc_core::Timestamp::InfFuture()))); gpr_mu_unlock(g_mu); gpr_mu_lock(g_mu); diff --git a/test/core/iomgr/ios/CFStreamTests/CFStreamClientTests.mm b/test/core/iomgr/ios/CFStreamTests/CFStreamClientTests.mm index f99ae304aad..0880eb14111 100644 --- a/test/core/iomgr/ios/CFStreamTests/CFStreamClientTests.mm +++ b/test/core/iomgr/ios/CFStreamTests/CFStreamClientTests.mm @@ -107,7 +107,7 @@ static void must_fail(void* arg, grpc_error_handle error) { grpc_core::CoreConfiguration::Get().channel_args_preconditioning().PreconditionChannelArgs( nullptr); grpc_tcp_client_connect(&done, &g_connecting, nullptr, args, &resolved_addr, - GRPC_MILLIS_INF_FUTURE); + grpc_core::Timestamp::InfFuture()); grpc_channel_args_destroy(args); /* await the connection */ @@ -164,7 +164,7 @@ static void must_fail(void* arg, grpc_error_handle error) { grpc_core::CoreConfiguration::Get().channel_args_preconditioning().PreconditionChannelArgs( nullptr); grpc_tcp_client_connect(&done, &g_connecting, nullptr, args, &resolved_addr, - GRPC_MILLIS_INF_FUTURE); + grpc_core::Timestamp::InfFuture()); grpc_channel_args_destroy(args); grpc_core::ExecCtx::Get()->Flush(); diff --git a/test/core/iomgr/pollset_windows_starvation_test.cc b/test/core/iomgr/pollset_windows_starvation_test.cc index 3357e3b5bf9..cad5fe0006f 100644 --- a/test/core/iomgr/pollset_windows_starvation_test.cc +++ b/test/core/iomgr/pollset_windows_starvation_test.cc @@ -92,7 +92,8 @@ int main(int argc, char** argv) { // Queue for work and once we're done, make sure to kick the remaining // threads. grpc_error_handle error; - error = grpc_pollset_work(&pollset, NULL, GRPC_MILLIS_INF_FUTURE); + error = grpc_pollset_work(&pollset, NULL, + grpc_core::Timestamp::InfFuture()); error = grpc_pollset_kick(&pollset, NULL); gpr_mu_unlock(mu); diff --git a/test/core/iomgr/resolve_address_posix_test.cc b/test/core/iomgr/resolve_address_posix_test.cc index a79dedb2eaf..28d06257462 100644 --- a/test/core/iomgr/resolve_address_posix_test.cc +++ b/test/core/iomgr/resolve_address_posix_test.cc @@ -35,6 +35,7 @@ #include "src/core/lib/gpr/string.h" #include "src/core/lib/gpr/useful.h" #include "src/core/lib/gprpp/thd.h" +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/iomgr/executor.h" #include "src/core/lib/iomgr/iomgr.h" #include "src/core/lib/iomgr/resolve_address.h" @@ -82,14 +83,14 @@ void args_finish(args_struct* args) { gpr_free(args->pollset); } -static grpc_millis n_sec_deadline(int seconds) { - return grpc_timespec_to_millis_round_up( +static grpc_core::Timestamp n_sec_deadline(int seconds) { + return grpc_core::Timestamp::FromTimespecRoundUp( grpc_timeout_seconds_to_deadline(seconds)); } static void actually_poll(void* argsp) { args_struct* args = static_cast<args_struct*>(argsp); - grpc_millis deadline = n_sec_deadline(10); + grpc_core::Timestamp deadline = n_sec_deadline(10); while (true) { grpc_core::ExecCtx exec_ctx; { @@ -97,9 +98,11 @@ static void actually_poll(void* argsp) { if (args->done) { break; } - grpc_millis time_left = deadline - grpc_core::ExecCtx::Get()->Now(); - gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRId64, args->done, time_left); - GPR_ASSERT(time_left >= 0); + grpc_core::Duration time_left = + deadline - grpc_core::ExecCtx::Get()->Now(); + gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRId64, args->done, + time_left.millis()); + GPR_ASSERT(time_left >= grpc_core::Duration::Zero()); grpc_pollset_worker* worker = nullptr; GRPC_LOG_IF_ERROR( "pollset_work", diff --git 
a/test/core/iomgr/resolve_address_test.cc b/test/core/iomgr/resolve_address_test.cc index f86b7e42ea9..f1e477ffba0 100644 --- a/test/core/iomgr/resolve_address_test.cc +++ b/test/core/iomgr/resolve_address_test.cc @@ -38,6 +38,7 @@ #include "src/core/lib/event_engine/sockaddr.h" #include "src/core/lib/gpr/string.h" #include "src/core/lib/gprpp/sync.h" +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/iomgr/executor.h" #include "src/core/lib/iomgr/iomgr.h" #include "test/core/util/cmdline.h" @@ -47,8 +48,8 @@ namespace { -grpc_millis NSecDeadline(int seconds) { - return grpc_timespec_to_millis_round_up( +grpc_core::Timestamp NSecDeadline(int seconds) { + return grpc_core::Timestamp::FromTimespecRoundUp( grpc_timeout_seconds_to_deadline(seconds)); } @@ -90,7 +91,7 @@ class ResolveAddressTest : public ::testing::Test { void PollPollsetUntilRequestDone() { // Try to give enough time for c-ares to run through its retries // a few times if needed. - grpc_millis deadline = NSecDeadline(90); + grpc_core::Timestamp deadline = NSecDeadline(90); while (true) { grpc_core::ExecCtx exec_ctx; { @@ -98,9 +99,11 @@ class ResolveAddressTest : public ::testing::Test { if (done_) { break; } - grpc_millis time_left = deadline - grpc_core::ExecCtx::Get()->Now(); - gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRId64, done_, time_left); - ASSERT_GE(time_left, 0); + grpc_core::Duration time_left = + deadline - grpc_core::ExecCtx::Get()->Now(); + gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRId64, done_, + time_left.millis()); + ASSERT_GE(time_left, grpc_core::Duration::Zero()); grpc_pollset_worker* worker = nullptr; GRPC_LOG_IF_ERROR("pollset_work", grpc_pollset_work(pollset_, &worker, NSecDeadline(1))); diff --git a/test/core/iomgr/tcp_client_posix_test.cc b/test/core/iomgr/tcp_client_posix_test.cc index e74d605ac37..d0092e97b22 100644 --- a/test/core/iomgr/tcp_client_posix_test.cc +++ b/test/core/iomgr/tcp_client_posix_test.cc @@ -16,6 +16,7 @@ * */ +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/iomgr/port.h" // This test won't work except with posix sockets enabled @@ -46,8 +47,9 @@ static grpc_pollset* g_pollset; static int g_connections_complete = 0; static grpc_endpoint* g_connecting = nullptr; -static grpc_millis test_deadline(void) { - return grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(10)); +static grpc_core::Timestamp test_deadline(void) { + return grpc_core::Timestamp::FromTimespecRoundUp( + grpc_timeout_seconds_to_deadline(10)); } static void finish_connection() { @@ -110,7 +112,7 @@ void test_succeeds(void) { .channel_args_preconditioning() .PreconditionChannelArgs(nullptr); grpc_tcp_client_connect(&done, &g_connecting, g_pollset_set, args, - &resolved_addr, GRPC_MILLIS_INF_FUTURE); + &resolved_addr, grpc_core::Timestamp::InfFuture()); grpc_channel_args_destroy(args); /* await the connection */ do { @@ -128,7 +130,7 @@ void test_succeeds(void) { GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(g_pollset, &worker, - grpc_timespec_to_millis_round_up( + grpc_core::Timestamp::FromTimespecRoundUp( grpc_timeout_seconds_to_deadline(5))))); gpr_mu_unlock(g_mu); grpc_core::ExecCtx::Get()->Flush(); @@ -159,18 +161,18 @@ void test_fails(void) { /* connect to a broken address */ GRPC_CLOSURE_INIT(&done, must_fail, nullptr, grpc_schedule_on_exec_ctx); grpc_tcp_client_connect(&done, &g_connecting, g_pollset_set, nullptr, - &resolved_addr, GRPC_MILLIS_INF_FUTURE); + &resolved_addr, grpc_core::Timestamp::InfFuture()); gpr_mu_lock(g_mu); /* wait for the 
connection callback to finish */ while (g_connections_complete == connections_complete_before) { grpc_pollset_worker* worker = nullptr; - grpc_millis polling_deadline = test_deadline(); + grpc_core::Timestamp polling_deadline = test_deadline(); switch (grpc_timer_check(&polling_deadline)) { case GRPC_TIMERS_FIRED: break; case GRPC_TIMERS_NOT_CHECKED: - polling_deadline = 0; + polling_deadline = grpc_core::Timestamp::ProcessEpoch(); ABSL_FALLTHROUGH_INTENDED; case GRPC_TIMERS_CHECKED_AND_EMPTY: GPR_ASSERT(GRPC_LOG_IF_ERROR( @@ -205,16 +207,16 @@ void test_fails_bad_addr_no_leak(void) { // connect to an invalid address. GRPC_CLOSURE_INIT(&done, must_fail, nullptr, grpc_schedule_on_exec_ctx); grpc_tcp_client_connect(&done, &g_connecting, g_pollset_set, nullptr, - &resolved_addr, GRPC_MILLIS_INF_FUTURE); + &resolved_addr, grpc_core::Timestamp::InfFuture()); gpr_mu_lock(g_mu); while (g_connections_complete == connections_complete_before) { grpc_pollset_worker* worker = nullptr; - grpc_millis polling_deadline = test_deadline(); + grpc_core::Timestamp polling_deadline = test_deadline(); switch (grpc_timer_check(&polling_deadline)) { case GRPC_TIMERS_FIRED: break; case GRPC_TIMERS_NOT_CHECKED: - polling_deadline = 0; + polling_deadline = grpc_core::Timestamp::ProcessEpoch(); ABSL_FALLTHROUGH_INTENDED; case GRPC_TIMERS_CHECKED_AND_EMPTY: GPR_ASSERT(GRPC_LOG_IF_ERROR( diff --git a/test/core/iomgr/tcp_posix_test.cc b/test/core/iomgr/tcp_posix_test.cc index 507e0045449..5bac0debe4d 100644 --- a/test/core/iomgr/tcp_posix_test.cc +++ b/test/core/iomgr/tcp_posix_test.cc @@ -16,6 +16,7 @@ * */ +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/iomgr/port.h" // This test won't work except with posix sockets enabled @@ -203,8 +204,8 @@ static void read_test(size_t num_bytes, size_t slice_size) { grpc_endpoint* ep; struct read_socket_state state; size_t written_bytes; - grpc_millis deadline = - grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(20)); + grpc_core::Timestamp deadline = grpc_core::Timestamp::FromTimespecRoundUp( + grpc_timeout_seconds_to_deadline(20)); grpc_core::ExecCtx exec_ctx; gpr_log(GPR_INFO, "Read test of size %" PRIuPTR ", slice size %" PRIuPTR, @@ -261,8 +262,8 @@ static void large_read_test(size_t slice_size) { grpc_endpoint* ep; struct read_socket_state state; ssize_t written_bytes; - grpc_millis deadline = - grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(20)); + grpc_core::Timestamp deadline = grpc_core::Timestamp::FromTimespecRoundUp( + grpc_timeout_seconds_to_deadline(20)); grpc_core::ExecCtx exec_ctx; gpr_log(GPR_INFO, "Start large read test, slice size %" PRIuPTR, slice_size); @@ -370,7 +371,7 @@ void drain_socket_blocking(int fd, size_t num_bytes, size_t read_size) { GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(g_pollset, &worker, - grpc_timespec_to_millis_round_up( + grpc_core::Timestamp::FromTimespecRoundUp( grpc_timeout_milliseconds_to_deadline(10))))); gpr_mu_unlock(g_mu); @@ -418,8 +419,8 @@ static void write_test(size_t num_bytes, size_t slice_size, uint8_t current_data = 0; grpc_slice_buffer outgoing; grpc_closure write_done_closure; - grpc_millis deadline = - grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(20)); + grpc_core::Timestamp deadline = grpc_core::Timestamp::FromTimespecRoundUp( + grpc_timeout_seconds_to_deadline(20)); grpc_core::ExecCtx exec_ctx; if (collect_timestamps && !grpc_event_engine_can_track_errors()) { @@ -505,8 +506,8 @@ static void release_fd_test(size_t num_bytes, 
size_t slice_size) { struct read_socket_state state; size_t written_bytes; int fd; - grpc_millis deadline = - grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(20)); + grpc_core::Timestamp deadline = grpc_core::Timestamp::FromTimespecRoundUp( + grpc_timeout_seconds_to_deadline(20)); grpc_core::ExecCtx exec_ctx; grpc_closure fd_released_cb; int fd_released_done = 0; diff --git a/test/core/iomgr/tcp_server_posix_test.cc b/test/core/iomgr/tcp_server_posix_test.cc index 476925c05d5..8cf44ca44ef 100644 --- a/test/core/iomgr/tcp_server_posix_test.cc +++ b/test/core/iomgr/tcp_server_posix_test.cc @@ -16,6 +16,7 @@ * */ +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/iomgr/port.h" // This test won't work except with posix sockets enabled @@ -239,8 +240,8 @@ static void test_no_op_with_port_and_start(void) { static grpc_error_handle tcp_connect(const test_addr* remote, on_connect_result* result) { - grpc_millis deadline = - grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(10)); + grpc_core::Timestamp deadline = grpc_core::Timestamp::FromTimespecRoundUp( + grpc_timeout_seconds_to_deadline(10)); int clifd; int nconnects_before; const struct sockaddr* remote_addr = diff --git a/test/core/iomgr/timer_heap_test.cc b/test/core/iomgr/timer_heap_test.cc index 82da474dc8e..d24b13b8b18 100644 --- a/test/core/iomgr/timer_heap_test.cc +++ b/test/core/iomgr/timer_heap_test.cc @@ -203,7 +203,7 @@ static void test2(void) { } if (num_inserted) { - grpc_millis* min_deadline = nullptr; + int64_t* min_deadline = nullptr; for (size_t i = 0; i < elems_size; i++) { if (elems[i].inserted) { if (min_deadline == nullptr) { diff --git a/test/core/iomgr/timer_list_test.cc b/test/core/iomgr/timer_list_test.cc index 0f3a6d213af..e256b821e21 100644 --- a/test/core/iomgr/timer_list_test.cc +++ b/test/core/iomgr/timer_list_test.cc @@ -18,10 +18,14 @@ #include <string.h> +#include <cstdint> +#include <limits> + #include <grpc/grpc.h> #include <grpc/support/log.h> #include "src/core/lib/debug/trace.h" +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/iomgr/iomgr_internal.h" #include "src/core/lib/iomgr/port.h" #include "src/core/lib/iomgr/timer.h" @@ -34,8 +38,9 @@ extern grpc_core::TraceFlag grpc_timer_trace; extern grpc_core::TraceFlag grpc_timer_check_trace; static int cb_called[MAX_CB][2]; -static const int64_t kMillisIn25Days = 2160000000; -static const int64_t kHoursIn25Days = 600; +static const int64_t kHoursIn25Days = 25 * 24; +static const grpc_core::Duration k25Days = + grpc_core::Duration::Hours(kHoursIn25Days); static void cb(void* arg, grpc_error_handle error) { cb_called[reinterpret_cast<intptr_t>(arg)][error == GRPC_ERROR_NONE]++; @@ -53,24 +58,25 @@ static void add_test(void) { grpc_core::testing::grpc_tracer_enable_flag(&grpc_timer_check_trace); memset(cb_called, 0, sizeof(cb_called)); - grpc_millis start = grpc_core::ExecCtx::Get()->Now(); + grpc_core::Timestamp start = grpc_core::ExecCtx::Get()->Now(); /* 10 ms timers. will expire in the current epoch */ for (i = 0; i < 10; i++) { grpc_timer_init( - &timers[i], start + 10, + &timers[i], start + grpc_core::Duration::Milliseconds(10), GRPC_CLOSURE_CREATE(cb, (void*)(intptr_t)i, grpc_schedule_on_exec_ctx)); } /* 1010 ms timers. will expire in the next epoch */ for (i = 10; i < 20; i++) { grpc_timer_init( - &timers[i], start + 1010, + &timers[i], start + grpc_core::Duration::Milliseconds(1010), GRPC_CLOSURE_CREATE(cb, (void*)(intptr_t)i, grpc_schedule_on_exec_ctx)); } /* collect timers. 
Only the first batch should be ready. */ - grpc_core::ExecCtx::Get()->TestOnlySetNow(start + 500); + grpc_core::ExecCtx::Get()->TestOnlySetNow( + start + grpc_core::Duration::Milliseconds(500)); GPR_ASSERT(grpc_timer_check(nullptr) == GRPC_TIMERS_FIRED); grpc_core::ExecCtx::Get()->Flush(); for (i = 0; i < 20; i++) { @@ -78,7 +84,8 @@ static void add_test(void) { GPR_ASSERT(cb_called[i][0] == 0); } - grpc_core::ExecCtx::Get()->TestOnlySetNow(start + 600); + grpc_core::ExecCtx::Get()->TestOnlySetNow( + start + grpc_core::Duration::Milliseconds(600)); GPR_ASSERT(grpc_timer_check(nullptr) == GRPC_TIMERS_CHECKED_AND_EMPTY); grpc_core::ExecCtx::Get()->Flush(); for (i = 0; i < 30; i++) { @@ -87,7 +94,8 @@ static void add_test(void) { } /* collect the rest of the timers */ - grpc_core::ExecCtx::Get()->TestOnlySetNow(start + 1500); + grpc_core::ExecCtx::Get()->TestOnlySetNow( + start + grpc_core::Duration::Milliseconds(1500)); GPR_ASSERT(grpc_timer_check(nullptr) == GRPC_TIMERS_FIRED); grpc_core::ExecCtx::Get()->Flush(); for (i = 0; i < 30; i++) { @@ -95,7 +103,8 @@ static void add_test(void) { GPR_ASSERT(cb_called[i][0] == 0); } - grpc_core::ExecCtx::Get()->TestOnlySetNow(start + 1600); + grpc_core::ExecCtx::Get()->TestOnlySetNow( + start + grpc_core::Duration::Milliseconds(1600)); GPR_ASSERT(grpc_timer_check(nullptr) == GRPC_TIMERS_CHECKED_AND_EMPTY); for (i = 0; i < 30; i++) { GPR_ASSERT(cb_called[i][1] == (i < 20)); @@ -112,28 +121,30 @@ void destruction_test(void) { gpr_log(GPR_INFO, "destruction_test"); - grpc_core::ExecCtx::Get()->TestOnlySetNow(0); + grpc_core::ExecCtx::Get()->TestOnlySetNow( + grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(0)); grpc_timer_list_init(); grpc_core::testing::grpc_tracer_enable_flag(&grpc_timer_trace); grpc_core::testing::grpc_tracer_enable_flag(&grpc_timer_check_trace); memset(cb_called, 0, sizeof(cb_called)); grpc_timer_init( - &timers[0], 100, + &timers[0], grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(100), GRPC_CLOSURE_CREATE(cb, (void*)(intptr_t)0, grpc_schedule_on_exec_ctx)); grpc_timer_init( - &timers[1], 3, + &timers[1], grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(3), GRPC_CLOSURE_CREATE(cb, (void*)(intptr_t)1, grpc_schedule_on_exec_ctx)); grpc_timer_init( - &timers[2], 100, + &timers[2], grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(100), GRPC_CLOSURE_CREATE(cb, (void*)(intptr_t)2, grpc_schedule_on_exec_ctx)); grpc_timer_init( - &timers[3], 3, + &timers[3], grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(3), GRPC_CLOSURE_CREATE(cb, (void*)(intptr_t)3, grpc_schedule_on_exec_ctx)); grpc_timer_init( - &timers[4], 1, + &timers[4], grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(1), GRPC_CLOSURE_CREATE(cb, (void*)(intptr_t)4, grpc_schedule_on_exec_ctx)); - grpc_core::ExecCtx::Get()->TestOnlySetNow(2); + grpc_core::ExecCtx::Get()->TestOnlySetNow( + grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(2)); GPR_ASSERT(grpc_timer_check(nullptr) == GRPC_TIMERS_FIRED); grpc_core::ExecCtx::Get()->Flush(); GPR_ASSERT(1 == cb_called[4][1]); @@ -167,33 +178,36 @@ void long_running_service_cleanup_test(void) { gpr_log(GPR_INFO, "long_running_service_cleanup_test"); - grpc_millis now = grpc_core::ExecCtx::Get()->Now(); - GPR_ASSERT(now >= kMillisIn25Days); + grpc_core::Timestamp now = grpc_core::ExecCtx::Get()->Now(); + GPR_ASSERT(now.milliseconds_after_process_epoch() >= k25Days.millis()); grpc_timer_list_init(); grpc_core::testing::grpc_tracer_enable_flag(&grpc_timer_trace); 
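// Note the idiom used throughout these timer tests: pinning the ExecCtx clock
// with TestOnlySetNow(start + Duration::Milliseconds(n)) makes
// grpc_timer_check() deterministic -- at start+500ms only the ten 10ms timers
// above are due (GRPC_TIMERS_FIRED), at start+600ms nothing new is due
// (GRPC_TIMERS_CHECKED_AND_EMPTY), and the 1010ms batch fires only once the
// clock is moved to start+1500ms.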
grpc_core::testing::grpc_tracer_enable_flag(&grpc_timer_check_trace); memset(cb_called, 0, sizeof(cb_called)); grpc_timer_init( - &timers[0], now + kMillisIn25Days, + &timers[0], now + k25Days, GRPC_CLOSURE_CREATE(cb, (void*)(intptr_t)0, grpc_schedule_on_exec_ctx)); grpc_timer_init( - &timers[1], now + 3, + &timers[1], now + grpc_core::Duration::Milliseconds(3), GRPC_CLOSURE_CREATE(cb, (void*)(intptr_t)1, grpc_schedule_on_exec_ctx)); grpc_timer_init( - &timers[2], GRPC_MILLIS_INF_FUTURE - 1, + &timers[2], + grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch( + std::numeric_limits<int64_t>::max() - 1), GRPC_CLOSURE_CREATE(cb, (void*)(intptr_t)2, grpc_schedule_on_exec_ctx)); - gpr_timespec deadline_spec = grpc_millis_to_timespec( - now + kMillisIn25Days, gpr_clock_type::GPR_CLOCK_MONOTONIC); + gpr_timespec deadline_spec = + (now + k25Days).as_timespec(gpr_clock_type::GPR_CLOCK_MONOTONIC); /* grpc_timespec_to_millis_round_up is how users usually compute a millisecond input value into grpc_timer_init, so we mimic that behavior here */ grpc_timer_init( - &timers[3], grpc_timespec_to_millis_round_up(deadline_spec), + &timers[3], grpc_core::Timestamp::FromTimespecRoundUp(deadline_spec), GRPC_CLOSURE_CREATE(cb, (void*)(intptr_t)3, grpc_schedule_on_exec_ctx)); - grpc_core::ExecCtx::Get()->TestOnlySetNow(now + 4); + grpc_core::ExecCtx::Get()->TestOnlySetNow( + now + grpc_core::Duration::Milliseconds(4)); GPR_ASSERT(grpc_timer_check(nullptr) == GRPC_TIMERS_FIRED); grpc_core::ExecCtx::Get()->Flush(); GPR_ASSERT(0 == cb_called[0][0]); // Timer 0 not called @@ -215,10 +229,11 @@ void long_running_service_cleanup_test(void) { } int main(int argc, char** argv) { + gpr_time_init(); + /* Tests with default g_start_time */ { grpc::testing::TestEnvironment env(argc, argv); - grpc_core::ExecCtx::GlobalInit(); grpc_core::ExecCtx exec_ctx; grpc_set_default_iomgr_platform(); grpc_iomgr_platform_init(); @@ -227,7 +242,6 @@ int main(int argc, char** argv) { destruction_test(); grpc_iomgr_platform_shutdown(); } - grpc_core::ExecCtx::GlobalShutdown(); /* Begin long running service tests */ { @@ -235,11 +249,10 @@ int main(int argc, char** argv) { /* Set g_start_time back 25 days. */ /* We set g_start_time here in case there are any initialization dependencies that use g_start_time. 
*/ - gpr_timespec new_start = - gpr_time_sub(gpr_now(gpr_clock_type::GPR_CLOCK_MONOTONIC), - gpr_time_from_hours(kHoursIn25Days, - gpr_clock_type::GPR_CLOCK_MONOTONIC)); - grpc_core::ExecCtx::TestOnlyGlobalInit(new_start); + grpc_core::TestOnlySetProcessEpoch(gpr_time_sub( + gpr_now(gpr_clock_type::GPR_CLOCK_MONOTONIC), + gpr_time_add(gpr_time_from_hours(kHoursIn25Days, GPR_TIMESPAN), + gpr_time_from_seconds(10, GPR_TIMESPAN)))); grpc_core::ExecCtx exec_ctx; grpc_set_default_iomgr_platform(); grpc_iomgr_platform_init(); @@ -249,7 +262,6 @@ int main(int argc, char** argv) { destruction_test(); grpc_iomgr_platform_shutdown(); } - grpc_core::ExecCtx::GlobalShutdown(); return 0; } diff --git a/test/core/promise/exec_ctx_wakeup_scheduler_test.cc b/test/core/promise/exec_ctx_wakeup_scheduler_test.cc index ea3568c3833..cd14b194840 100644 --- a/test/core/promise/exec_ctx_wakeup_scheduler_test.cc +++ b/test/core/promise/exec_ctx_wakeup_scheduler_test.cc @@ -59,8 +59,5 @@ TEST(ExecCtxWakeupSchedulerTest, Works) { int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); - grpc_core::ExecCtx::GlobalInit(); - int r = RUN_ALL_TESTS(); - grpc_core::ExecCtx::GlobalShutdown(); - return r; + return RUN_ALL_TESTS(); } diff --git a/test/core/promise/sleep_test.cc b/test/core/promise/sleep_test.cc index 21e8e82d54a..b694f0a7402 100644 --- a/test/core/promise/sleep_test.cc +++ b/test/core/promise/sleep_test.cc @@ -33,7 +33,7 @@ namespace { TEST(Sleep, Zzzz) { ExecCtx exec_ctx; absl::Notification done; - grpc_millis done_time = ExecCtx::Get()->Now() + 1000; + Timestamp done_time = ExecCtx::Get()->Now() + Duration::Seconds(1); // Sleep for one second then set done to true. auto activity = MakeActivity(Sleep(done_time), InlineWakeupScheduler(), [&done](absl::Status r) { @@ -48,7 +48,7 @@ TEST(Sleep, Zzzz) { TEST(Sleep, AlreadyDone) { ExecCtx exec_ctx; absl::Notification done; - grpc_millis done_time = ExecCtx::Get()->Now() - 1000; + Timestamp done_time = ExecCtx::Get()->Now() - Duration::Seconds(1); // Sleep for no time at all then set done to true. 
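// With the Timestamp-based Sleep, a deadline already in the past is simply
// Now() - Duration::Seconds(1), so the promise resolves on its first poll.
// Cancellation composes the same way; the Cancel test below sketches the
// pattern of racing the timer against an immediately-ready promise:
//   Race(Sleep(done_time), [] { return absl::CancelledError(); })
// which completes with CancelledError without waiting out the sleep.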
auto activity = MakeActivity(Sleep(done_time), InlineWakeupScheduler(), [&done](absl::Status r) { @@ -61,7 +61,7 @@ TEST(Sleep, AlreadyDone) { TEST(Sleep, Cancel) { ExecCtx exec_ctx; absl::Notification done; - grpc_millis done_time = ExecCtx::Get()->Now() + 1000; + Timestamp done_time = ExecCtx::Get()->Now() + Duration::Seconds(1); // Sleep for one second but race it to complete immediately auto activity = MakeActivity( Race(Sleep(done_time), [] { return absl::CancelledError(); }), diff --git a/test/core/security/credentials_test.cc b/test/core/security/credentials_test.cc index 2990a359222..fc1afbe73eb 100644 --- a/test/core/security/credentials_test.cc +++ b/test/core/security/credentials_test.cc @@ -44,6 +44,7 @@ #include "src/core/lib/gpr/string.h" #include "src/core/lib/gpr/tmpfile.h" #include "src/core/lib/gprpp/host_port.h" +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/http/httpcli.h" #include "src/core/lib/http/httpcli_ssl_credentials.h" #include "src/core/lib/iomgr/error.h" @@ -314,12 +315,12 @@ static grpc_http_response http_response(int status, const char* body) { TEST(CredentialsTest, TestOauth2TokenFetcherCredsParsingOk) { grpc_core::ExecCtx exec_ctx; absl::optional<grpc_core::Slice> token_value; - grpc_millis token_lifetime; + grpc_core::Duration token_lifetime; grpc_http_response response = http_response(200, valid_oauth2_json_response); GPR_ASSERT(grpc_oauth2_token_fetcher_credentials_parse_server_response( &response, &token_value, &token_lifetime) == GRPC_CREDENTIALS_OK); - GPR_ASSERT(token_lifetime == 3599 * GPR_MS_PER_SEC); + GPR_ASSERT(token_lifetime == grpc_core::Duration::Seconds(3599)); GPR_ASSERT(token_value->as_string_view() == "Bearer ya29.AHES6ZRN3-HlhAPya30GnW_bHSb_"); grpc_http_response_destroy(&response); @@ -328,7 +329,7 @@ TEST(CredentialsTest, TestOauth2TokenFetcherCredsParsingOk) { TEST(CredentialsTest, TestOauth2TokenFetcherCredsParsingBadHttpStatus) { grpc_core::ExecCtx exec_ctx; absl::optional<grpc_core::Slice> token_value; - grpc_millis token_lifetime; + grpc_core::Duration token_lifetime; grpc_http_response response = http_response(401, valid_oauth2_json_response); GPR_ASSERT(grpc_oauth2_token_fetcher_credentials_parse_server_response( &response, &token_value, &token_lifetime) == @@ -339,7 +340,7 @@ TEST(CredentialsTest, TestOauth2TokenFetcherCredsParsingBadHttpStatus) { TEST(CredentialsTest, TestOauth2TokenFetcherCredsParsingEmptyHttpBody) { grpc_core::ExecCtx exec_ctx; absl::optional<grpc_core::Slice> token_value; - grpc_millis token_lifetime; + grpc_core::Duration token_lifetime; grpc_http_response response = http_response(200, ""); GPR_ASSERT(grpc_oauth2_token_fetcher_credentials_parse_server_response( &response, &token_value, &token_lifetime) == @@ -350,7 +351,7 @@ TEST(CredentialsTest, TestOauth2TokenFetcherCredsParsingEmptyHttpBody) { TEST(CredentialsTest, TestOauth2TokenFetcherCredsParsingInvalidJson) { grpc_core::ExecCtx exec_ctx; absl::optional<grpc_core::Slice> token_value; - grpc_millis token_lifetime; + grpc_core::Duration token_lifetime; grpc_http_response response = http_response(200, "{\"access_token\":\"ya29.AHES6ZRN3-HlhAPya30GnW_bHSb_\"," @@ -365,7 +366,7 @@ TEST(CredentialsTest, TestOauth2TokenFetcherCredsParsingInvalidJson) { TEST(CredentialsTest, TestOauth2TokenFetcherCredsParsingMissingToken) { grpc_core::ExecCtx exec_ctx; absl::optional<grpc_core::Slice> token_value; - grpc_millis token_lifetime; + grpc_core::Duration token_lifetime; grpc_http_response response = http_response(200, "{" " \"expires_in\":3599, " @@ 
-379,7 +380,7 @@ TEST(CredentialsTest, TestOauth2TokenFetcherCredsParsingMissingToken) { TEST(CredentialsTest, TestOauth2TokenFetcherCredsParsingMissingTokenType) { grpc_core::ExecCtx exec_ctx; absl::optional<grpc_core::Slice> token_value; - grpc_millis token_lifetime; + grpc_core::Duration token_lifetime; grpc_http_response response = http_response(200, "{\"access_token\":\"ya29.AHES6ZRN3-HlhAPya30GnW_bHSb_\"," @@ -394,7 +395,7 @@ TEST(CredentialsTest, TestOauth2TokenFetcherCredsParsingMissingTokenType) { TEST(CredentialsTest, TestOauth2TokenFetcherCredsParsingMissingTokenLifetime) { grpc_core::ExecCtx exec_ctx; absl::optional<grpc_core::Slice> token_value; - grpc_millis token_lifetime; + grpc_core::Duration token_lifetime; grpc_http_response response = http_response(200, "{\"access_token\":\"ya29.AHES6ZRN3-HlhAPya30GnW_bHSb_\"," @@ -704,7 +705,7 @@ static void validate_compute_engine_http_request( static int compute_engine_httpcli_get_success_override( const grpc_http_request* request, const char* host, const char* path, - grpc_millis /*deadline*/, grpc_closure* on_done, + grpc_core::Timestamp /*deadline*/, grpc_closure* on_done, grpc_http_response* response) { validate_compute_engine_http_request(request, host, path); *response = http_response(200, valid_oauth2_json_response); @@ -714,7 +715,7 @@ static int compute_engine_httpcli_get_success_override( static int compute_engine_httpcli_get_failure_override( const grpc_http_request* request, const char* host, const char* path, - grpc_millis /*deadline*/, grpc_closure* on_done, + grpc_core::Timestamp /*deadline*/, grpc_closure* on_done, grpc_http_response* response) { validate_compute_engine_http_request(request, host, path); *response = http_response(403, "Not Authorized."); @@ -725,7 +726,7 @@ static int compute_engine_httpcli_get_failure_override( static int httpcli_post_should_not_be_called( const grpc_http_request* /*request*/, const char* /*host*/, const char* /*path*/, const char* /*body_bytes*/, size_t /*body_size*/, - grpc_millis /*deadline*/, grpc_closure* /*on_done*/, + grpc_core::Timestamp /*deadline*/, grpc_closure* /*on_done*/, grpc_http_response* /*response*/) { GPR_ASSERT("HTTP POST should not be called" == nullptr); return 1; @@ -733,8 +734,8 @@ static int httpcli_post_should_not_be_called( static int httpcli_get_should_not_be_called( const grpc_http_request* /*request*/, const char* /*host*/, - const char* /*path*/, grpc_millis /*deadline*/, grpc_closure* /*on_done*/, - grpc_http_response* /*response*/) { + const char* /*path*/, grpc_core::Timestamp /*deadline*/, + grpc_closure* /*on_done*/, grpc_http_response* /*response*/) { GPR_ASSERT("HTTP GET should not be called" == nullptr); return 1; } @@ -820,7 +821,7 @@ static void validate_refresh_token_http_request( static int refresh_token_httpcli_post_success( const grpc_http_request* request, const char* host, const char* path, - const char* body, size_t body_size, grpc_millis /*deadline*/, + const char* body, size_t body_size, grpc_core::Timestamp /*deadline*/, grpc_closure* on_done, grpc_http_response* response) { validate_refresh_token_http_request(request, host, path, body, body_size); *response = http_response(200, valid_oauth2_json_response); @@ -831,7 +832,7 @@ static int refresh_token_httpcli_post_success( static int token_httpcli_post_failure( const grpc_http_request* /*request*/, const char* /*host*/, const char* /*path*/, const char* /*body*/, size_t /*body_size*/, - grpc_millis /*deadline*/, grpc_closure* on_done, + grpc_core::Timestamp /*deadline*/, 
grpc_closure* on_done, grpc_http_response* response) { *response = http_response(403, "Not Authorized."); grpc_core::ExecCtx::Run(DEBUG_LOCATION, on_done, GRPC_ERROR_NONE); @@ -1053,7 +1054,7 @@ static void validate_sts_token_http_request(const grpc_http_request* request, static int sts_token_httpcli_post_success(const grpc_http_request* request, const char* host, const char* path, const char* body, size_t body_size, - grpc_millis /*deadline*/, + grpc_core::Timestamp /*deadline*/, grpc_closure* on_done, grpc_http_response* response) { validate_sts_token_http_request(request, host, path, body, body_size, true); @@ -1064,7 +1065,7 @@ static int sts_token_httpcli_post_success(const grpc_http_request* request, static int sts_token_httpcli_post_success_no_actor_token( const grpc_http_request* request, const char* host, const char* path, - const char* body, size_t body_size, grpc_millis /*deadline*/, + const char* body, size_t body_size, grpc_core::Timestamp /*deadline*/, grpc_closure* on_done, grpc_http_response* response) { validate_sts_token_http_request(request, host, path, body, body_size, false); *response = http_response(200, valid_sts_json_response); @@ -1622,7 +1623,7 @@ TEST(CredentialsTest, static int default_creds_metadata_server_detection_httpcli_get_success_override( const grpc_http_request* /*request*/, const char* host, const char* path, - grpc_millis /*deadline*/, grpc_closure* on_done, + grpc_core::Timestamp /*deadline*/, grpc_closure* on_done, grpc_http_response* response) { *response = http_response(200, ""); grpc_http_header* headers = @@ -1714,7 +1715,7 @@ TEST(CredentialsTest, TestGoogleDefaultCredsNonGce) { static int default_creds_gce_detection_httpcli_get_failure_override( const grpc_http_request* /*request*/, const char* host, const char* path, - grpc_millis /*deadline*/, grpc_closure* on_done, + grpc_core::Timestamp /*deadline*/, grpc_closure* on_done, grpc_http_response* response) { /* No magic header. 
*/ GPR_ASSERT(strcmp(path, "/") == 0); @@ -2191,7 +2192,7 @@ validate_external_account_creds_service_account_impersonation_request( static int external_account_creds_httpcli_post_success( const grpc_http_request* request, const char* host, const char* path, - const char* body, size_t body_size, grpc_millis /*deadline*/, + const char* body, size_t body_size, grpc_core::Timestamp /*deadline*/, grpc_closure* on_done, grpc_http_response* response) { if (strcmp(path, "/token") == 0) { validate_external_account_creds_token_exchage_request( @@ -2218,7 +2219,7 @@ static int external_account_creds_httpcli_post_failure_token_exchange_response_missing_access_token( const grpc_http_request* /*request*/, const char* /*host*/, const char* path, const char* /*body*/, size_t /*body_size*/, - grpc_millis /*deadline*/, grpc_closure* on_done, + grpc_core::Timestamp /*deadline*/, grpc_closure* on_done, grpc_http_response* response) { if (strcmp(path, "/token") == 0) { *response = http_response(200, @@ -2236,7 +2237,7 @@ external_account_creds_httpcli_post_failure_token_exchange_response_missing_acce static int url_external_account_creds_httpcli_get_success( const grpc_http_request* /*request*/, const char* /*host*/, - const char* path, grpc_millis /*deadline*/, grpc_closure* on_done, + const char* path, grpc_core::Timestamp /*deadline*/, grpc_closure* on_done, grpc_http_response* response) { if (strcmp(path, "/generate_subject_token_format_text") == 0) { *response = http_response( @@ -2291,7 +2292,7 @@ static void validate_aws_external_account_creds_token_exchage_request( static int aws_external_account_creds_httpcli_get_success( const grpc_http_request* /*request*/, const char* /*host*/, - const char* path, grpc_millis /*deadline*/, grpc_closure* on_done, + const char* path, grpc_core::Timestamp /*deadline*/, grpc_closure* on_done, grpc_http_response* response) { if (strcmp(path, "/region_url") == 0) { *response = http_response(200, "test_regionz"); @@ -2309,7 +2310,7 @@ static int aws_external_account_creds_httpcli_get_success( static int aws_external_account_creds_httpcli_post_success( const grpc_http_request* request, const char* host, const char* path, - const char* body, size_t body_size, grpc_millis /*deadline*/, + const char* body, size_t body_size, grpc_core::Timestamp /*deadline*/, grpc_closure* on_done, grpc_http_response* response) { if (strcmp(path, "/token") == 0) { validate_aws_external_account_creds_token_exchage_request( diff --git a/test/core/security/jwt_verifier_test.cc b/test/core/security/jwt_verifier_test.cc index 24846cb8eaf..892bf3c958f 100644 --- a/test/core/security/jwt_verifier_test.cc +++ b/test/core/security/jwt_verifier_test.cc @@ -342,7 +342,7 @@ static grpc_http_response http_response(int status, char* body) { static int httpcli_post_should_not_be_called( const grpc_http_request* /*request*/, const char* /*host*/, const char* /*path*/, const char* /*body_bytes*/, size_t /*body_size*/, - grpc_millis /*deadline*/, grpc_closure* /*on_done*/, + grpc_core::Timestamp /*deadline*/, grpc_closure* /*on_done*/, grpc_http_response* /*response*/) { GPR_ASSERT("HTTP POST should not be called" == nullptr); return 1; @@ -350,7 +350,7 @@ static int httpcli_post_should_not_be_called( static int httpcli_get_google_keys_for_email( const grpc_http_request* /*request*/, const char* host, const char* path, - grpc_millis /*deadline*/, grpc_closure* on_done, + grpc_core::Timestamp /*deadline*/, grpc_closure* on_done, grpc_http_response* response) { *response = http_response(200, 
good_google_email_keys()); GPR_ASSERT(strcmp(host, "www.googleapis.com") == 0); @@ -397,7 +397,7 @@ static void test_jwt_verifier_google_email_issuer_success(void) { static int httpcli_get_custom_keys_for_email( const grpc_http_request* /*request*/, const char* host, const char* path, - grpc_millis /*deadline*/, grpc_closure* on_done, + grpc_core::Timestamp /*deadline*/, grpc_closure* on_done, grpc_http_response* response) { *response = http_response(200, gpr_strdup(good_jwk_set)); GPR_ASSERT(strcmp(host, "keys.bar.com") == 0); @@ -431,7 +431,8 @@ static void test_jwt_verifier_custom_email_issuer_success(void) { static int httpcli_get_jwk_set(const grpc_http_request* /*request*/, const char* host, const char* path, - grpc_millis /*deadline*/, grpc_closure* on_done, + grpc_core::Timestamp /*deadline*/, + grpc_closure* on_done, grpc_http_response* response) { *response = http_response(200, gpr_strdup(good_jwk_set)); GPR_ASSERT(strcmp(host, "www.googleapis.com") == 0); @@ -442,7 +443,7 @@ static int httpcli_get_jwk_set(const grpc_http_request* /*request*/, static int httpcli_get_openid_config(const grpc_http_request* /*request*/, const char* host, const char* path, - grpc_millis /*deadline*/, + grpc_core::Timestamp /*deadline*/, grpc_closure* on_done, grpc_http_response* response) { *response = http_response(200, gpr_strdup(good_openid_config)); @@ -487,7 +488,8 @@ static void on_verification_key_retrieval_error(void* user_data, static int httpcli_get_bad_json(const grpc_http_request* /* request */, const char* /*host*/, const char* /*path*/, - grpc_millis /*deadline*/, grpc_closure* on_done, + grpc_core::Timestamp /*deadline*/, + grpc_closure* on_done, grpc_http_response* response) { *response = http_response(200, gpr_strdup("{\"bad\": \"stuff\"}")); grpc_core::ExecCtx::Run(DEBUG_LOCATION, on_done, GRPC_ERROR_NONE); @@ -594,8 +596,8 @@ static void test_jwt_verifier_bad_signature(void) { static int httpcli_get_should_not_be_called( const grpc_http_request* /*request*/, const char* /*host*/, - const char* /*path*/, grpc_millis /*deadline*/, grpc_closure* /*on_done*/, - grpc_http_response* /*response*/) { + const char* /*path*/, grpc_core::Timestamp /*deadline*/, + grpc_closure* /*on_done*/, grpc_http_response* /*response*/) { GPR_ASSERT(0); return 1; } diff --git a/test/core/security/oauth2_utils.cc b/test/core/security/oauth2_utils.cc index 46b4a3ce101..08e47142be2 100644 --- a/test/core/security/oauth2_utils.cc +++ b/test/core/security/oauth2_utils.cc @@ -99,7 +99,7 @@ char* grpc_test_fetch_oauth2_token_with_credentials( if (!GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(grpc_polling_entity_pollset(&request.pops), - &worker, GRPC_MILLIS_INF_FUTURE))) { + &worker, grpc_core::Timestamp::InfFuture()))) { request.is_done = true; } } diff --git a/test/core/security/print_google_default_creds_token.cc b/test/core/security/print_google_default_creds_token.cc index 3bf3d3e5a47..c2ced88b53b 100644 --- a/test/core/security/print_google_default_creds_token.cc +++ b/test/core/security/print_google_default_creds_token.cc @@ -112,7 +112,7 @@ int main(int argc, char** argv) { if (!GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(grpc_polling_entity_pollset(&sync.pops), &worker, - GRPC_MILLIS_INF_FUTURE))) + grpc_core::Timestamp::InfFuture()))) sync.is_done = true; gpr_mu_unlock(sync.mu); grpc_core::ExecCtx::Get()->Flush(); diff --git a/test/core/security/ssl_server_fuzzer.cc b/test/core/security/ssl_server_fuzzer.cc index 6dc8c75fb0b..db783109165 100644 --- 
a/test/core/security/ssl_server_fuzzer.cc +++ b/test/core/security/ssl_server_fuzzer.cc @@ -89,7 +89,8 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_core::RefCountedPtr<grpc_server_security_connector> sc = creds->create_security_connector(nullptr); GPR_ASSERT(sc != nullptr); - grpc_millis deadline = GPR_MS_PER_SEC + grpc_core::ExecCtx::Get()->Now(); + grpc_core::Timestamp deadline = + grpc_core::Duration::Seconds(1) + grpc_core::ExecCtx::Get()->Now(); struct handshake_state state; state.done_callback_called = false; diff --git a/test/core/security/verify_jwt.cc b/test/core/security/verify_jwt.cc index bf951b47cf9..8dd7e65e030 100644 --- a/test/core/security/verify_jwt.cc +++ b/test/core/security/verify_jwt.cc @@ -101,7 +101,8 @@ int main(int argc, char** argv) { grpc_pollset_worker* worker = nullptr; if (!GRPC_LOG_IF_ERROR( "pollset_work", - grpc_pollset_work(sync.pollset, &worker, GRPC_MILLIS_INF_FUTURE))) { + grpc_pollset_work(sync.pollset, &worker, + grpc_core::Timestamp::InfFuture()))) { sync.is_done = true; } gpr_mu_unlock(sync.mu); diff --git a/test/core/surface/concurrent_connectivity_test.cc b/test/core/surface/concurrent_connectivity_test.cc index ad1d5c37f74..9244f7c6ac4 100644 --- a/test/core/surface/concurrent_connectivity_test.cc +++ b/test/core/surface/concurrent_connectivity_test.cc @@ -31,6 +31,7 @@ #include "src/core/lib/address_utils/sockaddr_utils.h" #include "src/core/lib/gprpp/thd.h" +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/iomgr/iomgr.h" #include "src/core/lib/iomgr/resolve_address.h" @@ -153,7 +154,8 @@ void bad_server_thread(void* vargs) { gpr_mu_lock(args->mu); while (!args->stop.load(std::memory_order_acquire)) { - grpc_millis deadline = grpc_core::ExecCtx::Get()->Now() + 100; + grpc_core::Timestamp deadline = grpc_core::ExecCtx::Get()->Now() + + grpc_core::Duration::Milliseconds(100); grpc_pollset_worker* worker = nullptr; if (!GRPC_LOG_IF_ERROR( diff --git a/test/core/transport/bdp_estimator_test.cc b/test/core/transport/bdp_estimator_test.cc index 47d6d6d9b9f..6fabb5876c0 100644 --- a/test/core/transport/bdp_estimator_test.cc +++ b/test/core/transport/bdp_estimator_test.cc @@ -37,7 +37,7 @@ extern gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type); namespace grpc_core { namespace testing { namespace { -int g_clock = 0; +int g_clock = 123; gpr_timespec fake_gpr_now(gpr_clock_type clock_type) { gpr_timespec ts; diff --git a/test/core/transport/binder/end2end/fuzzers/server_fuzzer.cc b/test/core/transport/binder/end2end/fuzzers/server_fuzzer.cc index b89ec3bb240..f241519c204 100644 --- a/test/core/transport/binder/end2end/fuzzers/server_fuzzer.cc +++ b/test/core/transport/binder/end2end/fuzzers/server_fuzzer.cc @@ -95,7 +95,8 @@ DEFINE_PROTO_FUZZER(const binder_transport_fuzzer::Input& input) { grpc_metadata_array_destroy(&request_metadata1); grpc_server_shutdown_and_notify(server, cq, tag(0xdead)); grpc_server_cancel_all_calls(server); - grpc_millis deadline = grpc_core::ExecCtx::Get()->Now() + 5000; + grpc_core::Timestamp deadline = + grpc_core::ExecCtx::Get()->Now() + grpc_core::Duration::Seconds(5); for (int i = 0; i <= requested_calls; i++) { // A single grpc_completion_queue_next might not be sufficient for getting // the tag from shutdown, because we might potentially get blocked by diff --git a/test/core/transport/chttp2/settings_timeout_test.cc b/test/core/transport/chttp2/settings_timeout_test.cc index 6a80a90e3e8..bff846d5f7c 100644 --- 
a/test/core/transport/chttp2/settings_timeout_test.cc +++ b/test/core/transport/chttp2/settings_timeout_test.cc @@ -127,11 +127,10 @@ class Client { .channel_args_preconditioning() .PreconditionChannelArgs(nullptr); grpc_tcp_client_connect(state.closure(), &endpoint_, pollset_set, args, - addresses_or->data(), ExecCtx::Get()->Now() + 1000); + addresses_or->data(), + ExecCtx::Get()->Now() + Duration::Seconds(1)); grpc_channel_args_destroy(args); - ASSERT_TRUE(PollUntilDone( - &state, - grpc_timespec_to_millis_round_up(gpr_inf_future(GPR_CLOCK_MONOTONIC)))); + ASSERT_TRUE(PollUntilDone(&state, Timestamp::InfFuture())); ASSERT_EQ(GRPC_ERROR_NONE, state.error()); grpc_pollset_set_destroy(pollset_set); grpc_endpoint_add_to_pollset(endpoint_, pollset_); @@ -146,7 +145,7 @@ class Client { bool retval = true; // Use a deadline of 3 seconds, which is a lot more than we should // need for a 1-second timeout, but this helps avoid flakes. - grpc_millis deadline = ExecCtx::Get()->Now() + 3000; + Timestamp deadline = ExecCtx::Get()->Now() + Duration::Seconds(3); while (true) { EventState state; grpc_endpoint_read(endpoint_, &read_buffer, state.closure(), @@ -206,13 +205,14 @@ class Client { }; // Returns true if done, or false if deadline exceeded. - bool PollUntilDone(EventState* state, grpc_millis deadline) { + bool PollUntilDone(EventState* state, Timestamp deadline) { while (true) { grpc_pollset_worker* worker = nullptr; gpr_mu_lock(mu_); - GRPC_LOG_IF_ERROR( - "grpc_pollset_work", - grpc_pollset_work(pollset_, &worker, ExecCtx::Get()->Now() + 100)); + GRPC_LOG_IF_ERROR("grpc_pollset_work", + grpc_pollset_work(pollset_, &worker, + ExecCtx::Get()->Now() + + Duration::Milliseconds(100))); // Flushes any work scheduled before or during polling. ExecCtx::Get()->Flush(); gpr_mu_unlock(mu_); diff --git a/test/core/transport/metadata_map_test.cc b/test/core/transport/metadata_map_test.cc index fc0cd2f8a55..b4a59f22a49 100644 --- a/test/core/transport/metadata_map_test.cc +++ b/test/core/transport/metadata_map_test.cc @@ -58,10 +58,13 @@ TEST(MetadataMapTest, SimpleOps) { TimeoutOnlyMetadataMap map(arena.get()); EXPECT_EQ(map.get_pointer(GrpcTimeoutMetadata()), nullptr); EXPECT_EQ(map.get(GrpcTimeoutMetadata()), absl::nullopt); - map.Set(GrpcTimeoutMetadata(), 1234); + map.Set(GrpcTimeoutMetadata(), + Timestamp::FromMillisecondsAfterProcessEpoch(1234)); EXPECT_NE(map.get_pointer(GrpcTimeoutMetadata()), nullptr); - EXPECT_EQ(*map.get_pointer(GrpcTimeoutMetadata()), 1234); - EXPECT_EQ(map.get(GrpcTimeoutMetadata()), 1234); + EXPECT_EQ(*map.get_pointer(GrpcTimeoutMetadata()), + Timestamp::FromMillisecondsAfterProcessEpoch(1234)); + EXPECT_EQ(map.get(GrpcTimeoutMetadata()), + Timestamp::FromMillisecondsAfterProcessEpoch(1234)); map.Remove(GrpcTimeoutMetadata()); EXPECT_EQ(map.get_pointer(GrpcTimeoutMetadata()), nullptr); EXPECT_EQ(map.get(GrpcTimeoutMetadata()), absl::nullopt); @@ -79,8 +82,9 @@ class FakeEncoder { " value=", value.as_string_view(), "\n"); } - void Encode(GrpcTimeoutMetadata, grpc_millis deadline) { - output_ += absl::StrCat("grpc-timeout: deadline=", deadline, "\n"); + void Encode(GrpcTimeoutMetadata, Timestamp deadline) { + output_ += absl::StrCat("grpc-timeout: deadline=", + deadline.milliseconds_after_process_epoch(), "\n"); } private: @@ -99,7 +103,8 @@ TEST(MetadataMapTest, TimeoutEncodeTest) { FakeEncoder encoder; auto arena = MakeScopedArena(1024, g_memory_allocator); TimeoutOnlyMetadataMap map(arena.get()); - map.Set(GrpcTimeoutMetadata(), 1234); + map.Set(GrpcTimeoutMetadata(), + 
Timestamp::FromMillisecondsAfterProcessEpoch(1234)); map.Encode(&encoder); EXPECT_EQ(encoder.output(), "grpc-timeout: deadline=1234\n"); } diff --git a/test/core/transport/status_conversion_test.cc b/test/core/transport/status_conversion_test.cc index ef7e61703da..cd3fde54c67 100644 --- a/test/core/transport/status_conversion_test.cc +++ b/test/core/transport/status_conversion_test.cc @@ -85,7 +85,8 @@ static void test_grpc_status_to_http2_status() { } static void test_http2_error_to_grpc_status() { - const grpc_millis before_deadline = GRPC_MILLIS_INF_FUTURE; + const grpc_core::Timestamp before_deadline = + grpc_core::Timestamp::InfFuture(); HTTP2_ERROR_TO_GRPC_STATUS(GRPC_HTTP2_NO_ERROR, before_deadline, GRPC_STATUS_INTERNAL); HTTP2_ERROR_TO_GRPC_STATUS(GRPC_HTTP2_PROTOCOL_ERROR, before_deadline, @@ -113,7 +114,7 @@ static void test_http2_error_to_grpc_status() { HTTP2_ERROR_TO_GRPC_STATUS(GRPC_HTTP2_INADEQUATE_SECURITY, before_deadline, GRPC_STATUS_PERMISSION_DENIED); - const grpc_millis after_deadline = 0; + const grpc_core::Timestamp after_deadline; HTTP2_ERROR_TO_GRPC_STATUS(GRPC_HTTP2_NO_ERROR, after_deadline, GRPC_STATUS_INTERNAL); HTTP2_ERROR_TO_GRPC_STATUS(GRPC_HTTP2_PROTOCOL_ERROR, after_deadline, diff --git a/test/core/transport/timeout_encoding_test.cc b/test/core/transport/timeout_encoding_test.cc index 1e1a7e9a1c6..4ff577569bb 100644 --- a/test/core/transport/timeout_encoding_test.cc +++ b/test/core/transport/timeout_encoding_test.cc @@ -33,49 +33,50 @@ #include "src/core/lib/gpr/murmur_hash.h" #include "src/core/lib/gpr/string.h" #include "src/core/lib/gpr/useful.h" +#include "src/core/lib/gprpp/time.h" #include "test/core/util/test_config.h" namespace grpc_core { namespace { -void assert_encodes_as(grpc_millis ts, const char* s) { +void assert_encodes_as(Duration ts, const char* s) { EXPECT_EQ(absl::string_view(s), Timeout::FromDuration(ts).Encode().as_string_view()) - << " ts=" << ts; + << " ts=" << ts.ToString(); } TEST(TimeoutTest, Encoding) { - assert_encodes_as(-1, "1n"); - assert_encodes_as(-10, "1n"); - assert_encodes_as(1, "1m"); - assert_encodes_as(10, "10m"); - assert_encodes_as(100, "100m"); - assert_encodes_as(890, "890m"); - assert_encodes_as(900, "900m"); - assert_encodes_as(901, "901m"); - assert_encodes_as(1000, "1S"); - assert_encodes_as(2000, "2S"); - assert_encodes_as(2500, "2500m"); - assert_encodes_as(59900, "59900m"); - assert_encodes_as(50000, "50S"); - assert_encodes_as(59000, "59S"); - assert_encodes_as(60000, "1M"); - assert_encodes_as(80000, "80S"); - assert_encodes_as(90000, "90S"); - assert_encodes_as(120000, "2M"); - assert_encodes_as(20 * 60 * GPR_MS_PER_SEC, "20M"); - assert_encodes_as(60 * 60 * GPR_MS_PER_SEC, "1H"); - assert_encodes_as(10 * 60 * 60 * GPR_MS_PER_SEC, "10H"); - assert_encodes_as(60 * 60 * GPR_MS_PER_SEC - 100, "1H"); - assert_encodes_as(100 * 60 * 60 * GPR_MS_PER_SEC, "100H"); - assert_encodes_as(100000000000, "27000H"); + assert_encodes_as(Duration::Milliseconds(-1), "1n"); + assert_encodes_as(Duration::Milliseconds(-10), "1n"); + assert_encodes_as(Duration::Milliseconds(1), "1m"); + assert_encodes_as(Duration::Milliseconds(10), "10m"); + assert_encodes_as(Duration::Milliseconds(100), "100m"); + assert_encodes_as(Duration::Milliseconds(890), "890m"); + assert_encodes_as(Duration::Milliseconds(900), "900m"); + assert_encodes_as(Duration::Milliseconds(901), "901m"); + assert_encodes_as(Duration::Milliseconds(1000), "1S"); + assert_encodes_as(Duration::Milliseconds(2000), "2S"); + 
assert_encodes_as(Duration::Milliseconds(2500), "2500m"); + assert_encodes_as(Duration::Milliseconds(59900), "59900m"); + assert_encodes_as(Duration::Seconds(50), "50S"); + assert_encodes_as(Duration::Seconds(59), "59S"); + assert_encodes_as(Duration::Seconds(60), "1M"); + assert_encodes_as(Duration::Seconds(80), "80S"); + assert_encodes_as(Duration::Seconds(90), "90S"); + assert_encodes_as(Duration::Seconds(120), "2M"); + assert_encodes_as(Duration::Minutes(20), "20M"); + assert_encodes_as(Duration::Hours(1), "1H"); + assert_encodes_as(Duration::Hours(10), "10H"); + assert_encodes_as(Duration::Hours(1) - Duration::Milliseconds(100), "1H"); + assert_encodes_as(Duration::Hours(100), "100H"); + assert_encodes_as(Duration::Hours(100000), "27000H"); } -void assert_decodes_as(const char* buffer, grpc_millis expected) { +void assert_decodes_as(const char* buffer, Duration expected) { EXPECT_EQ(expected, ParseTimeout(Slice::FromCopiedString(buffer))); } -void decode_suite(char ext, grpc_millis (*answer)(int64_t x)) { +void decode_suite(char ext, Duration (*answer)(int64_t x)) { long test_vals[] = {1, 12, 123, 1234, 12345, 123456, 1234567, 12345678, 123456789, 98765432, 9876543, 987654, 98765, 9876, 987, 98, 9}; @@ -94,37 +95,18 @@ void decode_suite(char ext, grpc_millis (*answer)(int64_t x)) { } } -grpc_millis millis_from_nanos(int64_t x) { - return static_cast<grpc_millis>(x / GPR_NS_PER_MS + (x % GPR_NS_PER_MS != 0)); -} -grpc_millis millis_from_micros(int64_t x) { - return static_cast<grpc_millis>(x / GPR_US_PER_MS + (x % GPR_US_PER_MS != 0)); -} -grpc_millis millis_from_millis(int64_t x) { - return static_cast<grpc_millis>(x); -} -grpc_millis millis_from_seconds(int64_t x) { - return static_cast<grpc_millis>(x * GPR_MS_PER_SEC); -} -grpc_millis millis_from_minutes(int64_t x) { - return static_cast<grpc_millis>(x * 60 * GPR_MS_PER_SEC); -} -grpc_millis millis_from_hours(int64_t x) { - return static_cast<grpc_millis>(x * 3600 * GPR_MS_PER_SEC); -} - TEST(TimeoutTest, DecodingSucceeds) { - decode_suite('n', millis_from_nanos); - decode_suite('u', millis_from_micros); - decode_suite('m', millis_from_millis); - decode_suite('S', millis_from_seconds); - decode_suite('M', millis_from_minutes); - decode_suite('H', millis_from_hours); - assert_decodes_as("1000000000S", millis_from_seconds(1000 * 1000 * 1000)); - assert_decodes_as("1000000000000000000000u", GRPC_MILLIS_INF_FUTURE); - assert_decodes_as("1000000001S", GRPC_MILLIS_INF_FUTURE); - assert_decodes_as("2000000001S", GRPC_MILLIS_INF_FUTURE); - assert_decodes_as("9999999999S", GRPC_MILLIS_INF_FUTURE); + decode_suite('n', Duration::NanosecondsRoundUp); + decode_suite('u', Duration::MicrosecondsRoundUp); + decode_suite('m', Duration::Milliseconds); + decode_suite('S', Duration::Seconds); + decode_suite('M', Duration::Minutes); + decode_suite('H', Duration::Hours); + assert_decodes_as("1000000000S", Duration::Seconds(1000 * 1000 * 1000)); + assert_decodes_as("1000000000000000000000u", Duration::Infinity()); + assert_decodes_as("1000000001S", Duration::Infinity()); + assert_decodes_as("2000000001S", Duration::Infinity()); + assert_decodes_as("9999999999S", Duration::Infinity()); } void assert_decoding_fails(const char* s) { diff --git a/test/core/util/fuzzer_corpus_test.cc b/test/core/util/fuzzer_corpus_test.cc index 6433543bce6..aa489d19d20 100644 --- a/test/core/util/fuzzer_corpus_test.cc +++ b/test/core/util/fuzzer_corpus_test.cc @@ -47,7 +47,6 @@ TEST_P(FuzzerCorpusTest, RunOneExample) { // down before calling LLVMFuzzerTestOneInput(), because 
most // implementations of that function will initialize and shutdown gRPC // internally. - grpc_init(); gpr_log(GPR_INFO, "Example file: %s", GetParam().c_str()); grpc_slice buffer; squelch = false; @@ -59,7 +58,6 @@ TEST_P(FuzzerCorpusTest, RunOneExample) { memcpy(data, GPR_SLICE_START_PTR(buffer), length); } grpc_slice_unref(buffer); - grpc_shutdown(); LLVMFuzzerTestOneInput(static_cast<uint8_t*>(data), length); gpr_free(data); } @@ -109,6 +107,9 @@ class ExampleGenerator // Make sure we don't succeed without doing anything, which caused // us to be blind to our fuzzers not running for 9 months. GPR_ASSERT(!examples_.empty()); + // Get a consistent ordering of examples so problems don't just show up on + // CI + std::sort(examples_.begin(), examples_.end()); } mutable std::vector<std::string> examples_; diff --git a/test/core/util/passthru_endpoint.cc b/test/core/util/passthru_endpoint.cc index 66297fc598e..c124decd17e 100644 --- a/test/core/util/passthru_endpoint.cc +++ b/test/core/util/passthru_endpoint.cc @@ -28,6 +28,7 @@ #include <grpc/support/alloc.h> #include <grpc/support/string_util.h> +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/iomgr/sockaddr.h" #include "src/core/lib/iomgr/timer.h" #include "src/core/lib/slice/slice_internal.h" @@ -483,7 +484,8 @@ static void sched_next_channel_action_locked(half* m) { return; } grpc_timer_init(&m->parent->channel_effects->timer, - m->parent->channel_effects->actions[0].wait_ms + + grpc_core::Duration::Milliseconds( + m->parent->channel_effects->actions[0].wait_ms) + grpc_core::ExecCtx::Get()->Now(), GRPC_CLOSURE_CREATE(do_next_sched_channel_action, m, grpc_schedule_on_exec_ctx)); diff --git a/test/core/util/port_server_client.cc b/test/core/util/port_server_client.cc index 385f9c2685a..f468d95599b 100644 --- a/test/core/util/port_server_client.cc +++ b/test/core/util/port_server_client.cc @@ -86,7 +86,7 @@ void grpc_free_port_using_server(int port) { GPR_ASSERT(uri.ok()); auto http_request = grpc_core::HttpRequest::Get( std::move(*uri), nullptr /* channel args */, &pr.pops, &req, - grpc_core::ExecCtx::Get()->Now() + 30 * GPR_MS_PER_SEC, + grpc_core::ExecCtx::Get()->Now() + grpc_core::Duration::Seconds(30), GRPC_CLOSURE_CREATE(freed_port_from_server, &pr, grpc_schedule_on_exec_ctx), &rsp, @@ -99,9 +99,9 @@ void grpc_free_port_using_server(int port) { grpc_pollset_worker* worker = nullptr; if (!GRPC_LOG_IF_ERROR( "pollset_work", - grpc_pollset_work( - grpc_polling_entity_pollset(&pr.pops), &worker, - grpc_core::ExecCtx::Get()->Now() + GPR_MS_PER_SEC))) { + grpc_pollset_work(grpc_polling_entity_pollset(&pr.pops), &worker, + grpc_core::ExecCtx::Get()->Now() + + grpc_core::Duration::Seconds(1)))) { pr.done = 1; } } @@ -170,7 +170,7 @@ static void got_port_from_server(void* arg, grpc_error_handle error) { GPR_ASSERT(uri.ok()); pr->http_request = grpc_core::HttpRequest::Get( std::move(*uri), nullptr /* channel args */, &pr->pops, &req, - grpc_core::ExecCtx::Get()->Now() + 30 * GPR_MS_PER_SEC, + grpc_core::ExecCtx::Get()->Now() + grpc_core::Duration::Seconds(30), GRPC_CLOSURE_CREATE(got_port_from_server, pr, grpc_schedule_on_exec_ctx), &pr->response, @@ -217,7 +217,7 @@ int grpc_pick_port_using_server(void) { GPR_ASSERT(uri.ok()); auto http_request = grpc_core::HttpRequest::Get( std::move(*uri), nullptr /* channel args */, &pr.pops, &req, - grpc_core::ExecCtx::Get()->Now() + 30 * GPR_MS_PER_SEC, + grpc_core::ExecCtx::Get()->Now() + grpc_core::Duration::Seconds(30), GRPC_CLOSURE_CREATE(got_port_from_server, &pr, 
grpc_schedule_on_exec_ctx), &pr.response, @@ -230,9 +230,9 @@ int grpc_pick_port_using_server(void) { grpc_pollset_worker* worker = nullptr; if (!GRPC_LOG_IF_ERROR( "pollset_work", - grpc_pollset_work( - grpc_polling_entity_pollset(&pr.pops), &worker, - grpc_core::ExecCtx::Get()->Now() + GPR_MS_PER_SEC))) { + grpc_pollset_work(grpc_polling_entity_pollset(&pr.pops), &worker, + grpc_core::ExecCtx::Get()->Now() + + grpc_core::Duration::Seconds(1)))) { pr.port = 0; } } diff --git a/test/core/util/test_tcp_server.cc b/test/core/util/test_tcp_server.cc index 57d6c213747..44b1974f0c6 100644 --- a/test/core/util/test_tcp_server.cc +++ b/test/core/util/test_tcp_server.cc @@ -86,7 +86,7 @@ void test_tcp_server_start(test_tcp_server* server, int port) { void test_tcp_server_poll(test_tcp_server* server, int milliseconds) { grpc_pollset_worker* worker = nullptr; grpc_core::ExecCtx exec_ctx; - grpc_millis deadline = grpc_timespec_to_millis_round_up( + grpc_core::Timestamp deadline = grpc_core::Timestamp::FromTimespecRoundUp( grpc_timeout_milliseconds_to_deadline(milliseconds)); gpr_mu_lock(server->mu); GRPC_LOG_IF_ERROR("pollset_work", diff --git a/test/core/xds/file_watcher_certificate_provider_factory_test.cc b/test/core/xds/file_watcher_certificate_provider_factory_test.cc index bba2f3749bd..f2e21fcd2e0 100644 --- a/test/core/xds/file_watcher_certificate_provider_factory_test.cc +++ b/test/core/xds/file_watcher_certificate_provider_factory_test.cc @@ -54,7 +54,7 @@ TEST(FileWatcherConfigTest, Basic) { EXPECT_EQ(config->identity_cert_file(), kIdentityCertFile); EXPECT_EQ(config->private_key_file(), kPrivateKeyFile); EXPECT_EQ(config->root_cert_file(), kRootCertFile); - EXPECT_EQ(config->refresh_interval_ms(), kRefreshInterval * 1000); + EXPECT_EQ(config->refresh_interval(), Duration::Seconds(kRefreshInterval)); } TEST(FileWatcherConfigTest, DefaultRefreshInterval) { @@ -74,7 +74,7 @@ TEST(FileWatcherConfigTest, DefaultRefreshInterval) { EXPECT_EQ(config->identity_cert_file(), kIdentityCertFile); EXPECT_EQ(config->private_key_file(), kPrivateKeyFile); EXPECT_EQ(config->root_cert_file(), kRootCertFile); - EXPECT_EQ(config->refresh_interval_ms(), 600 * 1000); + EXPECT_EQ(config->refresh_interval(), Duration::Seconds(600)); } TEST(FileWatcherConfigTest, OnlyRootCertificatesFileProvided) { @@ -92,7 +92,7 @@ TEST(FileWatcherConfigTest, OnlyRootCertificatesFileProvided) { EXPECT_TRUE(config->identity_cert_file().empty()); EXPECT_TRUE(config->private_key_file().empty()); EXPECT_EQ(config->root_cert_file(), kRootCertFile); - EXPECT_EQ(config->refresh_interval_ms(), 600 * 1000); + EXPECT_EQ(config->refresh_interval(), Duration::Seconds(600)); } TEST(FileWatcherConfigTest, OnlyIdenityCertificatesAndPrivateKeyProvided) { @@ -111,7 +111,7 @@ TEST(FileWatcherConfigTest, OnlyIdenityCertificatesAndPrivateKeyProvided) { EXPECT_EQ(config->identity_cert_file(), kIdentityCertFile); EXPECT_EQ(config->private_key_file(), kPrivateKeyFile); EXPECT_TRUE(config->root_cert_file().empty()); - EXPECT_EQ(config->refresh_interval_ms(), 600 * 1000); + EXPECT_EQ(config->refresh_interval(), Duration::Seconds(600)); } TEST(FileWatcherConfigTest, WrongTypes) { diff --git a/test/core/xds/google_mesh_ca_certificate_provider_factory_test.cc b/test/core/xds/google_mesh_ca_certificate_provider_factory_test.cc index 8c6af3e1f64..b5ee4fbbb63 100644 --- a/test/core/xds/google_mesh_ca_certificate_provider_factory_test.cc +++ b/test/core/xds/google_mesh_ca_certificate_provider_factory_test.cc @@ -23,6 +23,7 @@ #include <grpc/grpc.h> +#include 
"src/core/lib/gprpp/time.h" #include "test/core/util/test_config.h" namespace grpc_core { @@ -89,9 +90,9 @@ TEST(GoogleMeshCaConfigTest, Basic) { EXPECT_EQ(config->sts_config().actor_token_path, "/etc/secret/sajwt.token"); EXPECT_EQ(config->sts_config().actor_token_type, "urn:ietf:params:oauth:token-type:jwt"); - EXPECT_EQ(config->timeout(), 20 * 1000); - EXPECT_EQ(config->certificate_lifetime(), 400 * 1000); - EXPECT_EQ(config->renewal_grace_period(), 100 * 1000); + EXPECT_EQ(config->timeout(), Duration::Seconds(20)); + EXPECT_EQ(config->certificate_lifetime(), Duration::Seconds(400)); + EXPECT_EQ(config->renewal_grace_period(), Duration::Seconds(100)); EXPECT_EQ(config->key_size(), 1024); EXPECT_EQ(config->location(), "https://container.googleapis.com/v1/project/test-project1/" @@ -140,9 +141,9 @@ TEST(GoogleMeshCaConfigTest, Defaults) { "urn:ietf:params:oauth:token-type:jwt"); EXPECT_EQ(config->sts_config().actor_token_path, ""); EXPECT_EQ(config->sts_config().actor_token_type, ""); - EXPECT_EQ(config->timeout(), 10 * 1000); - EXPECT_EQ(config->certificate_lifetime(), 24 * 60 * 60 * 1000); - EXPECT_EQ(config->renewal_grace_period(), 12 * 60 * 60 * 1000); + EXPECT_EQ(config->timeout(), Duration::Seconds(10)); + EXPECT_EQ(config->certificate_lifetime(), Duration::Hours(24)); + EXPECT_EQ(config->renewal_grace_period(), Duration::Hours(12)); EXPECT_EQ(config->key_size(), 2048); EXPECT_EQ(config->location(), "https://container.googleapis.com/v1/project/test-project1/" diff --git a/test/cpp/common/time_jump_test.cc b/test/cpp/common/time_jump_test.cc index 91643a7501b..67296fdc596 100644 --- a/test/cpp/common/time_jump_test.cc +++ b/test/cpp/common/time_jump_test.cc @@ -90,12 +90,14 @@ INSTANTIATE_TEST_SUITE_P(TimeJump, TimeJumpTest, TEST_P(TimeJumpTest, TimerRunning) { grpc_core::ExecCtx exec_ctx; grpc_timer timer; - grpc_timer_init(&timer, grpc_core::ExecCtx::Get()->Now() + 3000, - GRPC_CLOSURE_CREATE( - [](void*, grpc_error_handle error) { - GPR_ASSERT(error == GRPC_ERROR_CANCELLED); - }, - nullptr, grpc_schedule_on_exec_ctx)); + grpc_timer_init( + &timer, + grpc_core::ExecCtx::Get()->Now() + grpc_core::Duration::Seconds(3), + GRPC_CLOSURE_CREATE( + [](void*, grpc_error_handle error) { + GPR_ASSERT(error == GRPC_ERROR_CANCELLED); + }, + nullptr, grpc_schedule_on_exec_ctx)); gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(100)); std::ostringstream cmd; cmd << "sudo date `date -v" << GetParam() << " \"+%m%d%H%M%y\"`"; diff --git a/test/cpp/common/timer_test.cc b/test/cpp/common/timer_test.cc index 2a802461010..53e9face137 100644 --- a/test/cpp/common/timer_test.cc +++ b/test/cpp/common/timer_test.cc @@ -23,6 +23,7 @@ #include <grpc/grpc.h> #include <grpc/support/log.h> +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/iomgr/closure.h" #include "src/core/lib/iomgr/error.h" #include "src/core/lib/iomgr/exec_ctx.h" @@ -84,13 +85,15 @@ TEST_F(TimerTest, OneTimerExpires) { grpc_core::ExecCtx exec_ctx; grpc_timer timer; int timer_fired = 0; - grpc_timer_init(&timer, grpc_core::ExecCtx::Get()->Now() + 500, - GRPC_CLOSURE_CREATE( - [](void* arg, grpc_error_handle) { - int* timer_fired = static_cast<int*>(arg); - ++*timer_fired; - }, - &timer_fired, grpc_schedule_on_exec_ctx)); + grpc_timer_init( + &timer, + grpc_core::ExecCtx::Get()->Now() + grpc_core::Duration::Milliseconds(500), + GRPC_CLOSURE_CREATE( + [](void* arg, grpc_error_handle) { + int* timer_fired = static_cast<int*>(arg); + ++*timer_fired; + }, + &timer_fired, grpc_schedule_on_exec_ctx)); 
gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(1500)); GPR_ASSERT(1 == timer_fired); @@ -109,7 +112,10 @@ TEST_F(TimerTest, MultipleTimersExpire) { grpc_timer timers[kNumTimers]; int timer_fired = 0; for (int i = 0; i < kNumTimers; ++i) { - grpc_timer_init(&timers[i], grpc_core::ExecCtx::Get()->Now() + 500 + i, + grpc_timer_init(&timers[i], + grpc_core::ExecCtx::Get()->Now() + + grpc_core::Duration::Milliseconds(500) + + grpc_core::Duration::Milliseconds(i), GRPC_CLOSURE_CREATE( [](void* arg, grpc_error_handle) { int* timer_fired = static_cast<int*>(arg); @@ -136,7 +142,10 @@ TEST_F(TimerTest, CancelSomeTimers) { grpc_timer timers[kNumTimers]; int timer_fired = 0; for (int i = 0; i < kNumTimers; ++i) { - grpc_timer_init(&timers[i], grpc_core::ExecCtx::Get()->Now() + 500 + i, + grpc_timer_init(&timers[i], + grpc_core::ExecCtx::Get()->Now() + + grpc_core::Duration::Milliseconds(500) + + grpc_core::Duration::Milliseconds(i), GRPC_CLOSURE_CREATE( [](void* arg, grpc_error_handle error) { if (error == GRPC_ERROR_CANCELLED) { @@ -167,9 +176,11 @@ TEST_F(TimerTest, CancelSomeTimers) { TEST_F(TimerTest, DISABLED_TimerNotCanceled) { grpc_core::ExecCtx exec_ctx; grpc_timer timer; - grpc_timer_init(&timer, grpc_core::ExecCtx::Get()->Now() + 10000, - GRPC_CLOSURE_CREATE([](void*, grpc_error_handle) {}, nullptr, - grpc_schedule_on_exec_ctx)); + grpc_timer_init( + &timer, + grpc_core::ExecCtx::Get()->Now() + grpc_core::Duration::Seconds(10), + GRPC_CLOSURE_CREATE([](void*, grpc_error_handle) {}, nullptr, + grpc_schedule_on_exec_ctx)); } // Enable the following test after @@ -181,7 +192,9 @@ TEST_F(TimerTest, DISABLED_CancelRace) { grpc_timer timers[kNumTimers]; for (int i = 0; i < kNumTimers; ++i) { grpc_timer* arg = (i != 0) ? &timers[i - 1] : nullptr; - grpc_timer_init(&timers[i], grpc_core::ExecCtx::Get()->Now() + 100, + grpc_timer_init(&timers[i], + grpc_core::ExecCtx::Get()->Now() + + grpc_core::Duration::Milliseconds(100), GRPC_CLOSURE_CREATE( [](void* arg, grpc_error_handle /*error*/) { grpc_timer* timer = static_cast<grpc_timer*>(arg); @@ -211,7 +224,9 @@ TEST_F(TimerTest, DISABLED_CancelNextTimer) { if (i < kNumTimers - 1) { arg = &timers[i + 1]; } - grpc_timer_init(&timers[i], grpc_core::ExecCtx::Get()->Now() + 100, + grpc_timer_init(&timers[i], + grpc_core::ExecCtx::Get()->Now() + + grpc_core::Duration::Milliseconds(100), GRPC_CLOSURE_CREATE( [](void* arg, grpc_error_handle /*error*/) { grpc_timer* timer = static_cast<grpc_timer*>(arg); diff --git a/test/cpp/end2end/client_lb_end2end_test.cc b/test/cpp/end2end/client_lb_end2end_test.cc index 8b2085f7785..f76980c25ad 100644 --- a/test/cpp/end2end/client_lb_end2end_test.cc +++ b/test/cpp/end2end/client_lb_end2end_test.cc @@ -50,6 +50,7 @@ #include "src/core/lib/gpr/env.h" #include "src/core/lib/gprpp/debug_location.h" #include "src/core/lib/gprpp/ref_counted_ptr.h" +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/iomgr/tcp_client.h" #include "src/core/lib/resolver/server_address.h" #include "src/core/lib/security/credentials/fake/fake_credentials.h" @@ -83,13 +84,14 @@ void tcp_client_connect_with_delay(grpc_closure* closure, grpc_endpoint** ep, grpc_pollset_set* interested_parties, const grpc_channel_args* channel_args, const grpc_resolved_address* addr, - grpc_millis deadline) { + grpc_core::Timestamp deadline) { const int delay_ms = gpr_atm_acq_load(&g_connection_delay_ms); if (delay_ms > 0) { gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(delay_ms)); } - default_client_impl->connect(closure, ep, interested_parties, 
channel_args, - addr, deadline + delay_ms); + default_client_impl->connect( + closure, ep, interested_parties, channel_args, addr, + deadline + grpc_core::Duration::Milliseconds(delay_ms)); } grpc_tcp_client_vtable delayed_connect = {tcp_client_connect_with_delay}; @@ -600,11 +602,12 @@ TEST_F(ClientLbEnd2endTest, PickFirstBackOffInitialReconnect) { ASSERT_TRUE(channel->WaitForConnected( grpc_timeout_milliseconds_to_deadline(kInitialBackOffMs * 2))); const gpr_timespec t1 = gpr_now(GPR_CLOCK_MONOTONIC); - const grpc_millis waited_ms = gpr_time_to_millis(gpr_time_sub(t1, t0)); - gpr_log(GPR_DEBUG, "Waited %" PRId64 " milliseconds", waited_ms); + const grpc_core::Duration waited = + grpc_core::Duration::FromTimespec(gpr_time_sub(t1, t0)); + gpr_log(GPR_DEBUG, "Waited %" PRId64 " milliseconds", waited.millis()); // We should have waited at least kInitialBackOffMs. We substract one to // account for test and precision accuracy drift. - EXPECT_GE(waited_ms, kInitialBackOffMs - 1); + EXPECT_GE(waited.millis(), kInitialBackOffMs - 1); // But not much more. EXPECT_GT( gpr_time_cmp( @@ -630,11 +633,12 @@ TEST_F(ClientLbEnd2endTest, PickFirstBackOffMinReconnect) { channel->WaitForConnected( grpc_timeout_milliseconds_to_deadline(kMinReconnectBackOffMs * 2)); const gpr_timespec t1 = gpr_now(GPR_CLOCK_MONOTONIC); - const grpc_millis waited_ms = gpr_time_to_millis(gpr_time_sub(t1, t0)); - gpr_log(GPR_DEBUG, "Waited %" PRId64 " ms", waited_ms); + const grpc_core::Duration waited = + grpc_core::Duration::FromTimespec(gpr_time_sub(t1, t0)); + gpr_log(GPR_DEBUG, "Waited %" PRId64 " milliseconds", waited.millis()); // We should have waited at least kMinReconnectBackOffMs. We substract one to // account for test and precision accuracy drift. - EXPECT_GE(waited_ms, kMinReconnectBackOffMs - 1); + EXPECT_GE(waited.millis(), kMinReconnectBackOffMs - 1); gpr_atm_rel_store(&g_connection_delay_ms, 0); } @@ -665,10 +669,11 @@ TEST_F(ClientLbEnd2endTest, PickFirstResetConnectionBackoff) { EXPECT_TRUE( channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(20))); const gpr_timespec t1 = gpr_now(GPR_CLOCK_MONOTONIC); - const grpc_millis waited_ms = gpr_time_to_millis(gpr_time_sub(t1, t0)); - gpr_log(GPR_DEBUG, "Waited %" PRId64 " milliseconds", waited_ms); + const grpc_core::Duration waited = + grpc_core::Duration::FromTimespec(gpr_time_sub(t1, t0)); + gpr_log(GPR_DEBUG, "Waited %" PRId64 " milliseconds", waited.millis()); // We should have waited less than kInitialBackOffMs. - EXPECT_LT(waited_ms, kInitialBackOffMs); + EXPECT_LT(waited.millis(), kInitialBackOffMs); } TEST_F(ClientLbEnd2endTest, @@ -712,9 +717,11 @@ TEST_F(ClientLbEnd2endTest, EXPECT_TRUE(channel->WaitForConnected( grpc_timeout_milliseconds_to_deadline(kWaitMs))); const gpr_timespec t1 = gpr_now(GPR_CLOCK_MONOTONIC); - const grpc_millis waited_ms = gpr_time_to_millis(gpr_time_sub(t1, t0)); - gpr_log(GPR_DEBUG, "Waited %" PRId64 " milliseconds", waited_ms); - EXPECT_LT(waited_ms, kWaitMs); + const grpc_core::Duration waited = + grpc_core::Duration::FromTimespec(gpr_time_sub(t1, t0)); + gpr_log(GPR_DEBUG, "Waited %" PRId64 " milliseconds", waited.millis()); + // We should have waited less than kInitialBackOffMs. 
+ EXPECT_LT(waited.millis(), kWaitMs); } TEST_F(ClientLbEnd2endTest, PickFirstUpdates) { diff --git a/test/cpp/end2end/rls_end2end_test.cc b/test/cpp/end2end/rls_end2end_test.cc index 367e8d08cf4..a83938d32ed 100644 --- a/test/cpp/end2end/rls_end2end_test.cc +++ b/test/cpp/end2end/rls_end2end_test.cc @@ -45,6 +45,7 @@ #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/gpr/env.h" #include "src/core/lib/gprpp/host_port.h" +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/iomgr/sockaddr.h" #include "src/core/lib/security/credentials/fake/fake_credentials.h" #include "src/core/lib/service_config/service_config_impl.h" @@ -115,9 +116,9 @@ class RlsServiceImpl : public RlsService { res = it->second; } // Configured response found, so use it. - if (res.response_delay > 0) { + if (res.response_delay > grpc_core::Duration::Zero()) { gpr_sleep_until( - grpc_timeout_milliseconds_to_deadline(res.response_delay)); + grpc_timeout_milliseconds_to_deadline(res.response_delay.millis())); } IncreaseResponseCount(); *response = res.response; @@ -131,7 +132,7 @@ class RlsServiceImpl : public RlsService { void Shutdown() {} void SetResponse(RouteLookupRequest request, RouteLookupResponse response, - grpc_millis response_delay = 0) { + grpc_core::Duration response_delay = grpc_core::Duration()) { grpc::internal::MutexLock lock(&mu_); responses_[std::move(request)] = {std::move(response), response_delay}; } @@ -164,7 +165,7 @@ class RlsServiceImpl : public RlsService { struct ResponseData { RouteLookupResponse response; - grpc_millis response_delay; + grpc_core::Duration response_delay; }; grpc::internal::Mutex mu_; @@ -404,7 +405,8 @@ class RlsEnd2endTest : public ::testing::Test { explicit ServiceConfigBuilder(int rls_server_port) : rls_server_port_(rls_server_port) {} - ServiceConfigBuilder& set_lookup_service_timeout(grpc_millis timeout) { + ServiceConfigBuilder& set_lookup_service_timeout( + grpc_core::Duration timeout) { lookup_service_timeout_ = timeout * grpc_test_slowdown_factor(); return *this; } @@ -414,12 +416,12 @@ class RlsEnd2endTest : public ::testing::Test { return *this; } - ServiceConfigBuilder& set_max_age(grpc_millis max_age) { + ServiceConfigBuilder& set_max_age(grpc_core::Duration max_age) { max_age_ = max_age * grpc_test_slowdown_factor(); return *this; } - ServiceConfigBuilder& set_stale_age(grpc_millis stale_age) { + ServiceConfigBuilder& set_stale_age(grpc_core::Duration stale_age) { stale_age_ = stale_age * grpc_test_slowdown_factor(); return *this; } @@ -439,10 +441,10 @@ class RlsEnd2endTest : public ::testing::Test { std::vector<std::string> route_lookup_config_parts; route_lookup_config_parts.push_back(absl::StrFormat( " \"lookupService\":\"localhost:%d\"", rls_server_port_)); - if (lookup_service_timeout_ > 0) { - route_lookup_config_parts.push_back(absl::StrFormat( - " \"lookupServiceTimeout\":\"%d.%09ds\"", - lookup_service_timeout_ / 1000, lookup_service_timeout_ % 1000)); + if (lookup_service_timeout_ > grpc_core::Duration::Zero()) { + route_lookup_config_parts.push_back( + absl::StrFormat(" \"lookupServiceTimeout\":\"%fs\"", + lookup_service_timeout_.seconds())); } if (!default_target_.empty()) { route_lookup_config_parts.push_back(absl::StrFormat( @@ -450,15 +452,13 @@ class RlsEnd2endTest : public ::testing::Test { } route_lookup_config_parts.push_back(absl::StrFormat( " \"cacheSizeBytes\":%" PRId64, cache_size_bytes_)); - if (max_age_ > 0) { + if (max_age_ > grpc_core::Duration::Zero()) { route_lookup_config_parts.push_back( - 
absl::StrFormat(" \"maxAge\":\"%d.%09ds\"", max_age_ / 1000, - max_age_ % 1000)); + absl::StrFormat(" \"maxAge\":\"%fs\"", max_age_.seconds())); } - if (stale_age_ > 0) { - route_lookup_config_parts.push_back( - absl::StrFormat(" \"staleAge\":\"%d.%09ds\"", - stale_age_ / 1000, stale_age_ % 1000)); + if (stale_age_ > grpc_core::Duration::Zero()) { + route_lookup_config_parts.push_back(absl::StrFormat( + " \"staleAge\":\"%fs\"", stale_age_.seconds())); } if (!key_builders_.empty()) { route_lookup_config_parts.push_back( @@ -490,10 +490,10 @@ class RlsEnd2endTest : public ::testing::Test { private: int rls_server_port_; - grpc_millis lookup_service_timeout_ = 0; + grpc_core::Duration lookup_service_timeout_; std::string default_target_; - grpc_millis max_age_ = 0; - grpc_millis stale_age_ = 0; + grpc_core::Duration max_age_; + grpc_core::Duration stale_age_; int64_t cache_size_bytes_ = 10485760; std::vector<std::string> key_builders_; }; @@ -1015,13 +1015,13 @@ TEST_F(RlsEnd2endTest, RlsRequestTimeout) { "]", kServiceValue, kMethodValue, kTestKey)) .set_default_target(TargetStringForPort(backends_[1]->port_)) - .set_lookup_service_timeout(2000) + .set_lookup_service_timeout(grpc_core::Duration::Seconds(2)) .Build()); // RLS server will send a response, but it's longer than the timeout. rls_server_->service_.SetResponse( BuildRlsRequest({{kTestKey, kTestValue}}), BuildRlsResponse({TargetStringForPort(backends_[0]->port_)}), - /*response_delay=*/3000); + /*response_delay=*/grpc_core::Duration::Seconds(3)); // The data plane RPC should be sent to the default target. CheckRpcSendOk(DEBUG_LOCATION, RpcOptions().set_timeout_ms(4000).set_metadata( {{"key1", kTestValue}})); @@ -1129,8 +1129,8 @@ TEST_F(RlsEnd2endTest, StaleCacheEntry) { " }" "]", kServiceValue, kMethodValue, kTestKey)) - .set_max_age(5000) - .set_stale_age(1000) + .set_max_age(grpc_core::Duration::Seconds(5)) + .set_stale_age(grpc_core::Duration::Seconds(1)) .Build()); rls_server_->service_.SetResponse( BuildRlsRequest({{kTestKey, kTestValue}}), @@ -1179,8 +1179,8 @@ TEST_F(RlsEnd2endTest, StaleCacheEntryWithHeaderData) { " }" "]", kServiceValue, kMethodValue, kTestKey)) - .set_max_age(5000) - .set_stale_age(1000) + .set_max_age(grpc_core::Duration::Seconds(5)) + .set_stale_age(grpc_core::Duration::Seconds(1)) .Build()); rls_server_->service_.SetResponse( BuildRlsRequest({{kTestKey, kTestValue}}), @@ -1230,8 +1230,8 @@ TEST_F(RlsEnd2endTest, ExpiredCacheEntry) { " }" "]", kServiceValue, kMethodValue, kTestKey)) - .set_max_age(1000) - .set_lookup_service_timeout(1000) + .set_max_age(grpc_core::Duration::Seconds(1)) + .set_lookup_service_timeout(grpc_core::Duration::Seconds(1)) .Build()); rls_server_->service_.SetResponse( BuildRlsRequest({{kTestKey, kTestValue}}), diff --git a/test/cpp/end2end/time_change_test.cc b/test/cpp/end2end/time_change_test.cc index 0ea080e6c05..159fbc20a20 100644 --- a/test/cpp/end2end/time_change_test.cc +++ b/test/cpp/end2end/time_change_test.cc @@ -122,7 +122,9 @@ TEST(TimespecTest, GprTimeSubNegativeNs) { // Add negative milliseconds to gpr_timespec TEST(TimespecTest, GrpcNegativeMillisToTimespec) { // -1500 milliseconds converts to timespec (-2 secs, 5 * 10^8 nsec) - gpr_timespec ts = grpc_millis_to_timespec(-1500, GPR_CLOCK_MONOTONIC); + gpr_timespec ts = + grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(-1500) + .as_timespec(GPR_CLOCK_MONOTONIC); GPR_ASSERT(ts.tv_sec = -2); GPR_ASSERT(ts.tv_nsec = 5e8); GPR_ASSERT(ts.clock_type == GPR_CLOCK_MONOTONIC); diff --git 
a/test/cpp/end2end/xds/xds_end2end_test.cc b/test/cpp/end2end/xds/xds_end2end_test.cc index 76e690366d2..c34b360a1c9 100644 --- a/test/cpp/end2end/xds/xds_end2end_test.cc +++ b/test/cpp/end2end/xds/xds_end2end_test.cc @@ -72,6 +72,7 @@ #include "src/core/lib/gpr/tmpfile.h" #include "src/core/lib/gprpp/ref_counted_ptr.h" #include "src/core/lib/gprpp/sync.h" +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/gprpp/time_util.h" #include "src/core/lib/iomgr/load_file.h" #include "src/core/lib/iomgr/sockaddr.h" @@ -517,8 +518,9 @@ class NoOpHttpFilter : public grpc_core::XdsHttpFilterImpl { // clock API. It's unclear if they are using the same syscall, but we do know // GPR round the number at millisecond-level. This creates a 1ms difference, // which could cause flake. -grpc_millis NowFromCycleCounter() { - return grpc_timespec_to_millis_round_down(gpr_now(GPR_CLOCK_MONOTONIC)); +grpc_core::Timestamp NowFromCycleCounter() { + return grpc_core::Timestamp::FromTimespecRoundDown( + gpr_now(GPR_CLOCK_MONOTONIC)); } // Returns the number of RPCs needed to pass error_tolerance at 99.99994% @@ -1887,7 +1889,7 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> { struct ConcurrentRpc { ClientContext context; Status status; - grpc_millis elapsed_time; + grpc_core::Duration elapsed_time; EchoResponse response; }; @@ -1905,7 +1907,7 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> { for (size_t i = 0; i < num_rpcs; i++) { ConcurrentRpc* rpc = &rpcs[i]; rpc_options.SetupRpc(&rpc->context, &request); - grpc_millis t0 = NowFromCycleCounter(); + grpc_core::Timestamp t0 = NowFromCycleCounter(); stub->async()->Echo(&rpc->context, &request, &rpc->response, [rpc, &mu, &completed, &cv, num_rpcs, t0](Status s) { rpc->status = s; @@ -3450,10 +3452,11 @@ using LdsRdsTest = BasicTest; MATCHER_P2(AdjustedClockInRange, t1, t2, "equals time") { gpr_cycle_counter cycle_now = gpr_get_cycle_counter(); - grpc_millis cycle_time = grpc_cycle_counter_to_millis_round_down(cycle_now); - grpc_millis time_spec = - grpc_timespec_to_millis_round_down(gpr_now(GPR_CLOCK_MONOTONIC)); - grpc_millis now = arg + time_spec - cycle_time; + grpc_core::Timestamp cycle_time = + grpc_core::Timestamp::FromCycleCounterRoundDown(cycle_now); + grpc_core::Timestamp time_spec = + grpc_core::Timestamp::FromTimespecRoundDown(gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_core::Timestamp now = arg + (time_spec - cycle_time); bool ok = true; ok &= ::testing::ExplainMatchResult(::testing::Ge(t1), now, result_listener); ok &= ::testing::ExplainMatchResult(::testing::Lt(t2), now, result_listener); @@ -4828,42 +4831,51 @@ TEST_P(LdsRdsTest, XdsRoutingApplyXdsTimeout) { SetListenerAndRouteConfiguration(balancer_.get(), std::move(listener), new_route_config); // Test grpc_timeout_header_max of 1.5 seconds applied - grpc_millis t0 = NowFromCycleCounter(); - grpc_millis t1 = - t0 + kTimeoutGrpcTimeoutHeaderMaxSecond * 1000 + kTimeoutMillis; - grpc_millis t2 = t0 + kTimeoutMaxStreamDurationSecond * 1000 + kTimeoutMillis; + grpc_core::Timestamp t0 = NowFromCycleCounter(); + grpc_core::Timestamp t1 = + t0 + grpc_core::Duration::Seconds(kTimeoutGrpcTimeoutHeaderMaxSecond) + + grpc_core::Duration::Milliseconds(kTimeoutMillis); + grpc_core::Timestamp t2 = + t0 + grpc_core::Duration::Seconds(kTimeoutMaxStreamDurationSecond) + + grpc_core::Duration::Milliseconds(kTimeoutMillis); CheckRpcSendFailure( CheckRpcSendFailureOptions() - .set_rpc_options( - RpcOptions() - .set_rpc_service(SERVICE_ECHO1) - .set_rpc_method(METHOD_ECHO1) - 
.set_wait_for_ready(true) - .set_timeout_ms(kTimeoutApplicationSecond * 1000)) + .set_rpc_options(RpcOptions() + .set_rpc_service(SERVICE_ECHO1) + .set_rpc_method(METHOD_ECHO1) + .set_wait_for_ready(true) + .set_timeout_ms(grpc_core::Duration::Seconds( + kTimeoutApplicationSecond) + .millis())) .set_expected_error_code(StatusCode::DEADLINE_EXCEEDED)); EXPECT_THAT(NowFromCycleCounter(), AdjustedClockInRange(t1, t2)); // Test max_stream_duration of 2.5 seconds applied t0 = NowFromCycleCounter(); - t1 = t0 + kTimeoutMaxStreamDurationSecond * 1000 + kTimeoutMillis; - t2 = t0 + kTimeoutHttpMaxStreamDurationSecond * 1000 + kTimeoutMillis; + t1 = t0 + grpc_core::Duration::Seconds(kTimeoutMaxStreamDurationSecond) + + grpc_core::Duration::Milliseconds(kTimeoutMillis); + t2 = t0 + grpc_core::Duration::Seconds(kTimeoutHttpMaxStreamDurationSecond) + + grpc_core::Duration::Milliseconds(kTimeoutMillis); CheckRpcSendFailure( CheckRpcSendFailureOptions() - .set_rpc_options( - RpcOptions() - .set_rpc_service(SERVICE_ECHO2) - .set_rpc_method(METHOD_ECHO2) - .set_wait_for_ready(true) - .set_timeout_ms(kTimeoutApplicationSecond * 1000)) + .set_rpc_options(RpcOptions() + .set_rpc_service(SERVICE_ECHO2) + .set_rpc_method(METHOD_ECHO2) + .set_wait_for_ready(true) + .set_timeout_ms(grpc_core::Duration::Seconds( + kTimeoutApplicationSecond) + .millis())) .set_expected_error_code(StatusCode::DEADLINE_EXCEEDED)); EXPECT_THAT(NowFromCycleCounter(), AdjustedClockInRange(t1, t2)); // Test http_stream_duration of 3.5 seconds applied t0 = NowFromCycleCounter(); - t1 = t0 + kTimeoutHttpMaxStreamDurationSecond * 1000 + kTimeoutMillis; - t2 = t0 + kTimeoutApplicationSecond * 1000 + kTimeoutMillis; + t1 = t0 + grpc_core::Duration::Seconds(kTimeoutHttpMaxStreamDurationSecond) + + grpc_core::Duration::Milliseconds(kTimeoutMillis); + t2 = t0 + grpc_core::Duration::Seconds(kTimeoutApplicationSecond) + + grpc_core::Duration::Milliseconds(kTimeoutMillis); CheckRpcSendFailure( CheckRpcSendFailureOptions() .set_rpc_options(RpcOptions().set_wait_for_ready(true).set_timeout_ms( - kTimeoutApplicationSecond * 1000)) + grpc_core::Duration::Seconds(kTimeoutApplicationSecond).millis())) .set_expected_error_code(StatusCode::DEADLINE_EXCEEDED)); EXPECT_THAT(NowFromCycleCounter(), AdjustedClockInRange(t1, t2)); } @@ -4994,7 +5006,7 @@ TEST_P(LdsRdsTest, XdsRoutingApplyApplicationTimeoutWhenHttpTimeoutExplicit0) { CheckRpcSendFailure( CheckRpcSendFailureOptions() .set_rpc_options(RpcOptions().set_wait_for_ready(true).set_timeout_ms( - kTimeoutApplicationSecond * 1000)) + grpc_core::Duration::Seconds(kTimeoutApplicationSecond).millis())) .set_expected_error_code(StatusCode::DEADLINE_EXCEEDED)); auto ellapsed_nano_seconds = std::chrono::duration_cast<std::chrono::nanoseconds>(system_clock::now() - @@ -5014,7 +5026,7 @@ TEST_P(LdsRdsTest, XdsRoutingWithOnlyApplicationTimeout) { CheckRpcSendFailure( CheckRpcSendFailureOptions() .set_rpc_options(RpcOptions().set_wait_for_ready(true).set_timeout_ms( - kTimeoutApplicationSecond * 1000)) + grpc_core::Duration::Seconds(kTimeoutApplicationSecond).millis())) .set_expected_error_code(StatusCode::DEADLINE_EXCEEDED)); auto ellapsed_nano_seconds = std::chrono::duration_cast<std::chrono::nanoseconds>(system_clock::now() - @@ -9422,7 +9434,7 @@ TEST_P(XdsEnabledServerStatusNotificationTest, streaming_rpcs[i].stream->Read(&response); EXPECT_EQ(request.message(), response.message()); } - grpc_millis update_time = NowFromCycleCounter(); + grpc_core::Timestamp update_time = NowFromCycleCounter(); // Update the 
resource. SetLdsUpdate("", "", "fake_plugin1", "", false); // Wait for the updated resource to take effect. @@ -9433,7 +9445,8 @@ TEST_P(XdsEnabledServerStatusNotificationTest, // Wait for the drain grace time to expire EXPECT_FALSE(streaming_rpcs[i].stream->Read(&response)); // Make sure that the drain grace interval is honored. - EXPECT_GE(NowFromCycleCounter() - update_time, kDrainGraceTimeMs); + EXPECT_GE(NowFromCycleCounter() - update_time, + grpc_core::Duration::Milliseconds(kDrainGraceTimeMs)); auto status = streaming_rpcs[i].stream->Finish(); EXPECT_EQ(status.error_code(), grpc::StatusCode::UNAVAILABLE) << status.error_code() << ", " << status.error_message() << ", " @@ -12678,7 +12691,8 @@ TEST_P(FaultInjectionTest, XdsFaultInjectionAlwaysDelayPercentageAbort) { std::vector<ConcurrentRpc> rpcs = SendConcurrentRpcs(stub_.get(), kNumRpcs, rpc_options); for (auto& rpc : rpcs) { - EXPECT_GE(rpc.elapsed_time, kFixedDelaySeconds * 1000); + EXPECT_GE(rpc.elapsed_time, + grpc_core::Duration::Seconds(kFixedDelaySeconds)); if (rpc.status.error_code() == StatusCode::OK) continue; EXPECT_EQ("Fault injected", rpc.status.error_message()); ++num_aborted; @@ -12739,7 +12753,8 @@ TEST_P(FaultInjectionTest, std::vector<ConcurrentRpc> rpcs = SendConcurrentRpcs(stub_.get(), kNumRpcs, rpc_options); for (auto& rpc : rpcs) { - EXPECT_GE(rpc.elapsed_time, kFixedDelaySeconds * 1000); + EXPECT_GE(rpc.elapsed_time, + grpc_core::Duration::Seconds(kFixedDelaySeconds)); if (rpc.status.error_code() == StatusCode::OK) continue; EXPECT_EQ("Fault injected", rpc.status.error_message()); ++num_aborted; diff --git a/test/cpp/grpclb/grpclb_api_test.cc b/test/cpp/grpclb/grpclb_api_test.cc index 6e2d19f3cf9..ff853bc97eb 100644 --- a/test/cpp/grpclb/grpclb_api_test.cc +++ b/test/cpp/grpclb/grpclb_api_test.cc @@ -95,7 +95,8 @@ TEST_F(GrpclbTest, ParseInitialResponse) { grpc_core::GrpcLbResponseParse(encoded_slice, arena.ptr(), &resp)); grpc_slice_unref(encoded_slice); EXPECT_EQ(resp.type, resp.INITIAL); - EXPECT_EQ(resp.client_stats_report_interval, 123456); + EXPECT_EQ(resp.client_stats_report_interval, + grpc_core::Duration::Milliseconds(123456)); EXPECT_EQ(resp.serverlist.size(), 0); } diff --git a/test/cpp/microbenchmarks/BUILD b/test/cpp/microbenchmarks/BUILD index 22138bd6a14..fdecd486013 100644 --- a/test/cpp/microbenchmarks/BUILD +++ b/test/cpp/microbenchmarks/BUILD @@ -272,18 +272,6 @@ grpc_cc_test( ], ) -grpc_cc_test( - name = "bm_timer", - srcs = ["bm_timer.cc"], - args = grpc_benchmark_args(), - tags = [ - "no_mac", - "no_windows", - ], - uses_polling = False, - deps = [":helpers"], -) - grpc_cc_test( name = "bm_pollset", srcs = ["bm_pollset.cc"], diff --git a/test/cpp/microbenchmarks/bm_call_create.cc b/test/cpp/microbenchmarks/bm_call_create.cc index 09ae4e331f5..ed81152c597 100644 --- a/test/cpp/microbenchmarks/bm_call_create.cc +++ b/test/cpp/microbenchmarks/bm_call_create.cc @@ -555,7 +555,7 @@ static void BM_IsolatedFilter(benchmark::State& state) { grpc_core::ExecCtx::Get()->Flush(); grpc_call_stack* call_stack = static_cast<grpc_call_stack*>(gpr_zalloc(channel_stack->call_stack_size)); - grpc_millis deadline = GRPC_MILLIS_INF_FUTURE; + grpc_core::Timestamp deadline = grpc_core::Timestamp::InfFuture(); gpr_cycle_counter start_time = gpr_get_cycle_counter(); grpc_slice method = grpc_slice_from_static_string("/foo/bar"); grpc_call_final_info final_info; diff --git a/test/cpp/microbenchmarks/bm_chttp2_hpack.cc b/test/cpp/microbenchmarks/bm_chttp2_hpack.cc index 8b099883888..0b89404dbf9 100644 --- 
a/test/cpp/microbenchmarks/bm_chttp2_hpack.cc +++ b/test/cpp/microbenchmarks/bm_chttp2_hpack.cc @@ -31,6 +31,7 @@ #include "src/core/ext/transport/chttp2/transport/hpack_encoder.h" #include "src/core/ext/transport/chttp2/transport/hpack_parser.h" +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/resource_quota/resource_quota.h" #include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_string_helpers.h" @@ -72,11 +73,12 @@ BENCHMARK(BM_HpackEncoderInitDestroy); static void BM_HpackEncoderEncodeDeadline(benchmark::State& state) { TrackCounters track_counters; grpc_core::ExecCtx exec_ctx; - grpc_millis saved_now = grpc_core::ExecCtx::Get()->Now(); + grpc_core::Timestamp saved_now = grpc_core::ExecCtx::Get()->Now(); auto arena = grpc_core::MakeScopedArena(1024, g_memory_allocator); grpc_metadata_batch b(arena.get()); - b.Set(grpc_core::GrpcTimeoutMetadata(), saved_now + 30 * 1000); + b.Set(grpc_core::GrpcTimeoutMetadata(), + saved_now + grpc_core::Duration::Seconds(30)); grpc_core::HPackCompressor c; grpc_transport_one_way_stats stats; diff --git a/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc b/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc index d5e2381a3b7..78e6e3afb2f 100644 --- a/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc +++ b/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc @@ -26,6 +26,7 @@ #include <grpc/support/alloc.h> #include <grpc/support/log.h> +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/iomgr/ev_posix.h" #include "src/core/lib/iomgr/port.h" #include "src/core/lib/surface/completion_queue.h" @@ -72,8 +73,8 @@ static void cq_done_cb(void* /*done_arg*/, grpc_cq_completion* cq_completion) { * Does nothing if deadline is 0 (i.e gpr_time_0(GPR_CLOCK_MONOTONIC)) */ static grpc_error_handle pollset_work(grpc_pollset* ps, grpc_pollset_worker** /*worker*/, - grpc_millis deadline) { - if (deadline == 0) { + grpc_core::Timestamp deadline) { + if (deadline == grpc_core::Timestamp::ProcessEpoch()) { gpr_log(GPR_DEBUG, "no-op"); return GRPC_ERROR_NONE; } diff --git a/test/cpp/microbenchmarks/bm_pollset.cc b/test/cpp/microbenchmarks/bm_pollset.cc index cfcd976ef7f..0b86a308e37 100644 --- a/test/cpp/microbenchmarks/bm_pollset.cc +++ b/test/cpp/microbenchmarks/bm_pollset.cc @@ -27,6 +27,7 @@ #include <grpc/support/log.h> #include "src/core/lib/gpr/useful.h" +#include "src/core/lib/gprpp/time.h" #include "src/core/lib/iomgr/ev_posix.h" #include "src/core/lib/iomgr/pollset.h" #include "src/core/lib/iomgr/port.h" @@ -117,7 +118,8 @@ static void BM_PollEmptyPollset(benchmark::State& state) { grpc_core::ExecCtx exec_ctx; gpr_mu_lock(mu); for (auto _ : state) { - GRPC_ERROR_UNREF(grpc_pollset_work(ps, nullptr, 0)); + GRPC_ERROR_UNREF( + grpc_pollset_work(ps, nullptr, grpc_core::Timestamp::ProcessEpoch())); } grpc_closure shutdown_ps_closure; GRPC_CLOSURE_INIT(&shutdown_ps_closure, shutdown_ps, ps, @@ -235,7 +237,8 @@ static void BM_SingleThreadPollOneFd(benchmark::State& state) { grpc_fd_notify_on_read(wakeup, continue_closure); gpr_mu_lock(mu); while (!done) { - GRPC_ERROR_UNREF(grpc_pollset_work(ps, nullptr, GRPC_MILLIS_INF_FUTURE)); + GRPC_ERROR_UNREF( + grpc_pollset_work(ps, nullptr, grpc_core::Timestamp::InfFuture())); } grpc_fd_orphan(wakeup, nullptr, nullptr, "done"); wakeup_fd.read_fd = 0; diff --git a/test/cpp/microbenchmarks/bm_timer.cc b/test/cpp/microbenchmarks/bm_timer.cc deleted file mode 100644 index 6ff5c2424e8..00000000000 --- a/test/cpp/microbenchmarks/bm_timer.cc +++ /dev/null @@ -1,123 +0,0 @@ -/* - * - * 
diff --git a/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc b/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc
index d5e2381a3b7..78e6e3afb2f 100644
--- a/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc
+++ b/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc
@@ -26,6 +26,7 @@
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 
+#include "src/core/lib/gprpp/time.h"
 #include "src/core/lib/iomgr/ev_posix.h"
 #include "src/core/lib/iomgr/port.h"
 #include "src/core/lib/surface/completion_queue.h"
@@ -72,8 +73,8 @@ static void cq_done_cb(void* /*done_arg*/, grpc_cq_completion* cq_completion) {
  * Does nothing if deadline is 0 (i.e gpr_time_0(GPR_CLOCK_MONOTONIC)) */
 static grpc_error_handle pollset_work(grpc_pollset* ps,
                                       grpc_pollset_worker** /*worker*/,
-                                      grpc_millis deadline) {
-  if (deadline == 0) {
+                                      grpc_core::Timestamp deadline) {
+  if (deadline == grpc_core::Timestamp::ProcessEpoch()) {
     gpr_log(GPR_DEBUG, "no-op");
     return GRPC_ERROR_NONE;
   }
diff --git a/test/cpp/microbenchmarks/bm_pollset.cc b/test/cpp/microbenchmarks/bm_pollset.cc
index cfcd976ef7f..0b86a308e37 100644
--- a/test/cpp/microbenchmarks/bm_pollset.cc
+++ b/test/cpp/microbenchmarks/bm_pollset.cc
@@ -27,6 +27,7 @@
 #include <grpc/support/log.h>
 
 #include "src/core/lib/gpr/useful.h"
+#include "src/core/lib/gprpp/time.h"
 #include "src/core/lib/iomgr/ev_posix.h"
 #include "src/core/lib/iomgr/pollset.h"
 #include "src/core/lib/iomgr/port.h"
@@ -117,7 +118,8 @@ static void BM_PollEmptyPollset(benchmark::State& state) {
   grpc_core::ExecCtx exec_ctx;
   gpr_mu_lock(mu);
   for (auto _ : state) {
-    GRPC_ERROR_UNREF(grpc_pollset_work(ps, nullptr, 0));
+    GRPC_ERROR_UNREF(
+        grpc_pollset_work(ps, nullptr, grpc_core::Timestamp::ProcessEpoch()));
   }
   grpc_closure shutdown_ps_closure;
   GRPC_CLOSURE_INIT(&shutdown_ps_closure, shutdown_ps, ps,
@@ -235,7 +237,8 @@ static void BM_SingleThreadPollOneFd(benchmark::State& state) {
   grpc_fd_notify_on_read(wakeup, continue_closure);
   gpr_mu_lock(mu);
   while (!done) {
-    GRPC_ERROR_UNREF(grpc_pollset_work(ps, nullptr, GRPC_MILLIS_INF_FUTURE));
+    GRPC_ERROR_UNREF(
+        grpc_pollset_work(ps, nullptr, grpc_core::Timestamp::InfFuture()));
   }
   grpc_fd_orphan(wakeup, nullptr, nullptr, "done");
   wakeup_fd.read_fd = 0;
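The two pollset hunks above also show how the old grpc_millis sentinels map onto the new type: a literal 0 deadline becomes grpc_core::Timestamp::ProcessEpoch(), and GRPC_MILLIS_INF_FUTURE becomes grpc_core::Timestamp::InfFuture(). A sketch of the mapping exactly as these hunks use it (the helper name is hypothetical):

    #include "src/core/lib/gprpp/time.h"

    // Mirrors the pollset_work no-op check in bm_cq_multiple_threads.cc:
    // the process epoch plays the role the literal 0 deadline used to play.
    bool IsNoOpDeadline(grpc_core::Timestamp deadline) {
      return deadline == grpc_core::Timestamp::ProcessEpoch();
    }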
diff --git a/test/cpp/microbenchmarks/bm_timer.cc b/test/cpp/microbenchmarks/bm_timer.cc
deleted file mode 100644
index 6ff5c2424e8..00000000000
--- a/test/cpp/microbenchmarks/bm_timer.cc
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <string.h>
-
-#include <atomic>
-#include <vector>
-
-#include <benchmark/benchmark.h>
-
-#include <grpc/grpc.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-
-#include "src/core/lib/iomgr/timer.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/microbenchmarks/helpers.h"
-#include "test/cpp/util/test_config.h"
-
-namespace grpc {
-namespace testing {
-
-struct TimerClosure {
-  grpc_timer timer;
-  grpc_closure closure;
-};
-
-static void BM_InitCancelTimer(benchmark::State& state) {
-  constexpr int kTimerCount = 1024;
-  TrackCounters track_counters;
-  grpc_core::ExecCtx exec_ctx;
-  std::vector<TimerClosure> timer_closures(kTimerCount);
-  int i = 0;
-  for (auto _ : state) {
-    TimerClosure* timer_closure = &timer_closures[i++ % kTimerCount];
-    GRPC_CLOSURE_INIT(
-        &timer_closure->closure,
-        [](void* /*args*/, grpc_error_handle /*err*/) {}, nullptr,
-        grpc_schedule_on_exec_ctx);
-    grpc_timer_init(&timer_closure->timer, GRPC_MILLIS_INF_FUTURE,
-                    &timer_closure->closure);
-    grpc_timer_cancel(&timer_closure->timer);
-    exec_ctx.Flush();
-  }
-  track_counters.Finish(state);
-}
-BENCHMARK(BM_InitCancelTimer);
-
-static void BM_TimerBatch(benchmark::State& state) {
-  constexpr int kTimerCount = 1024;
-  const bool check = state.range(0);
-  const bool reverse = state.range(1);
-
-  const grpc_millis start =
-      reverse ? GRPC_MILLIS_INF_FUTURE : GRPC_MILLIS_INF_FUTURE - kTimerCount;
-  const grpc_millis end =
-      reverse ? GRPC_MILLIS_INF_FUTURE - kTimerCount : GRPC_MILLIS_INF_FUTURE;
-  const grpc_millis increment = reverse ? -1 : 1;
-
-  TrackCounters track_counters;
-  grpc_core::ExecCtx exec_ctx;
-  std::vector<TimerClosure> timer_closures(kTimerCount);
-  for (auto _ : state) {
-    for (grpc_millis deadline = start; deadline != end;
-         deadline += increment) {
-      TimerClosure* timer_closure = &timer_closures[deadline % kTimerCount];
-      GRPC_CLOSURE_INIT(
-          &timer_closure->closure,
-          [](void* /*args*/, grpc_error_handle /*err*/) {}, nullptr,
-          grpc_schedule_on_exec_ctx);
-
-      grpc_timer_init(&timer_closure->timer, deadline,
-                      &timer_closure->closure);
-    }
-    if (check) {
-      grpc_millis next = GRPC_MILLIS_INF_FUTURE;
-      grpc_timer_check(&next);
-    }
-    for (grpc_millis deadline = start; deadline != end;
-         deadline += increment) {
-      TimerClosure* timer_closure = &timer_closures[deadline % kTimerCount];
-      grpc_timer_cancel(&timer_closure->timer);
-    }
-    exec_ctx.Flush();
-  }
-  track_counters.Finish(state);
-}
-BENCHMARK(BM_TimerBatch)
-    ->Args({/*check=*/false, /*reverse=*/false})
-    ->Args({/*check=*/false, /*reverse=*/true})
-    ->Args({/*check=*/true, /*reverse=*/false})
-    ->Args({/*check=*/true, /*reverse=*/true})
-    ->ThreadRange(1, 128);
-
-}  // namespace testing
-}  // namespace grpc
-
-// Some distros have RunSpecifiedBenchmarks under the benchmark namespace,
-// and others do not. This allows us to support both modes.
-namespace benchmark {
-void RunTheBenchmarksNamespaced() { RunSpecifiedBenchmarks(); }
-}  // namespace benchmark
-
-int main(int argc, char** argv) {
-  grpc::testing::TestEnvironment env(argc, argv);
-  LibraryInitializer libInit;
-  ::benchmark::Initialize(&argc, argv);
-  grpc::testing::InitTest(&argc, &argv, false);
-  benchmark::RunTheBenchmarksNamespaced();
-  return 0;
-}
diff --git a/test/cpp/naming/cancel_ares_query_test.cc b/test/cpp/naming/cancel_ares_query_test.cc
index 3c96e29c14c..ad66c884634 100644
--- a/test/cpp/naming/cancel_ares_query_test.cc
+++ b/test/cpp/naming/cancel_ares_query_test.cc
@@ -127,11 +127,9 @@ void PollPollsetUntilRequestDone(ArgsStruct* args) {
     grpc_pollset_worker* worker = nullptr;
     grpc_core::ExecCtx exec_ctx;
    gpr_mu_lock(args->mu);
-    GRPC_LOG_IF_ERROR(
-        "pollset_work",
-        grpc_pollset_work(args->pollset, &worker,
-                          grpc_timespec_to_millis_round_up(
-                              gpr_inf_future(GPR_CLOCK_REALTIME))));
+    GRPC_LOG_IF_ERROR("pollset_work",
+                      grpc_pollset_work(args->pollset, &worker,
+                                        grpc_core::Timestamp::InfFuture()));
     gpr_mu_unlock(args->mu);
   }
 }
diff --git a/test/cpp/naming/resolver_component_test.cc b/test/cpp/naming/resolver_component_test.cc
index a3bc02ad952..02baed03d71 100644
--- a/test/cpp/naming/resolver_component_test.cc
+++ b/test/cpp/naming/resolver_component_test.cc
@@ -255,10 +255,11 @@ void PollPollsetUntilRequestDone(ArgsStruct* args) {
     grpc_pollset_worker* worker = nullptr;
     grpc_core::ExecCtx exec_ctx;
     gpr_mu_lock(args->mu);
-    GRPC_LOG_IF_ERROR("pollset_work",
-                      grpc_pollset_work(args->pollset, &worker,
-                                        grpc_timespec_to_millis_round_up(
-                                            NSecondDeadline(1))));
+    GRPC_LOG_IF_ERROR(
+        "pollset_work",
+        grpc_pollset_work(
+            args->pollset, &worker,
+            grpc_core::Timestamp::FromTimespecRoundUp(NSecondDeadline(1))));
     gpr_mu_unlock(args->mu);
   }
   gpr_event_set(&args->ev, reinterpret_cast<void*>(1));
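At gpr_timespec boundaries the two naming tests above convert explicitly: grpc_core::Timestamp::FromTimespecRoundUp replaces grpc_timespec_to_millis_round_up, and an infinite gpr_timespec deadline collapses directly to Timestamp::InfFuture() with no conversion at all. A sketch of the conversion, assuming FromTimespecRoundUp accepts a gpr_timespec as the resolver_component_test.cc hunk implies (the helper name is hypothetical):

    #include <grpc/support/time.h>

    #include "src/core/lib/gprpp/time.h"

    // Converts a wall-clock timespec into a Timestamp deadline, rounding up
    // so the poll cannot return before the requested time.
    grpc_core::Timestamp PollDeadline(gpr_timespec ts) {
      return grpc_core::Timestamp::FromTimespecRoundUp(ts);
    }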
"benchmark": false,