Merge pull request #14894 from sreecha/fix-time

Fix grpc_millis type (timers are broken on 32-bit systems otherwise)
Sree Kuchibhotla committed by GitHub
commit 4a7a39fab6
20 changed files:

  src/core/ext/filters/client_channel/client_channel.cc (2 lines changed)
  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (4 lines changed)
  src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc (6 lines changed)
  src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc (6 lines changed)
  src/core/ext/filters/client_channel/subchannel.cc (2 lines changed)
  src/core/lib/iomgr/ev_epollex_linux.cc (2 lines changed)
  src/core/lib/iomgr/ev_posix.cc (4 lines changed)
  src/core/lib/iomgr/exec_ctx.cc (18 lines changed)
  src/core/lib/iomgr/exec_ctx.h (6 lines changed)
  src/core/lib/iomgr/pollset_custom.cc (4 lines changed)
  src/core/lib/iomgr/timer.h (2 lines changed)
  src/core/lib/iomgr/timer_generic.cc (154 lines changed)
  src/core/lib/iomgr/timer_manager.cc (2 lines changed)
  src/core/lib/transport/transport_op_string.cc (2 lines changed)
  test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc (2 lines changed)
  test/core/iomgr/resolve_address_posix_test.cc (2 lines changed)
  test/core/iomgr/resolve_address_test.cc (2 lines changed)
  test/core/iomgr/timer_heap_test.cc (2 lines changed)
  test/core/transport/timeout_encoding_test.cc (2 lines changed)
  test/cpp/end2end/client_lb_end2end_test.cc (4 lines changed)
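
For context before the diffs: grpc_millis was previously a typedef of gpr_atm, which has pointer width, so on 32-bit systems every millisecond deadline was squeezed into 32 bits and GRPC_MILLIS_INF_FUTURE (then GPR_ATM_MAX) was far too small. A minimal standalone sketch of the truncation, not part of this patch and using stand-in names:

// Minimal sketch, not gRPC source: why a pointer-width grpc_millis breaks
// 32-bit builds. On a 32-bit target, intptr_t (the basis of gpr_atm) is 32
// bits, so a millisecond count saturates after INT32_MAX ms (~24.8 days).
#include <cstdint>
#include <cstdio>

typedef intptr_t fake_gpr_atm;     // pointer-width, as the old typedef was
typedef int64_t fake_grpc_millis;  // the fix: 64 bits everywhere

int main() {
  const int64_t thirty_days_ms = 30LL * 24 * 60 * 60 * 1000;  // 2,592,000,000
  // On a 32-bit build this conversion truncates (INT32_MAX is 2,147,483,647);
  // on a 64-bit build both values print the same.
  fake_gpr_atm old_style = static_cast<fake_gpr_atm>(thirty_days_ms);
  fake_grpc_millis new_style = thirty_days_ms;
  std::printf("pointer-width: %lld, int64_t: %lld\n",
              static_cast<long long>(old_style),
              static_cast<long long>(new_style));
  return 0;
}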

@@ -1421,7 +1421,7 @@ static void do_retry(grpc_call_element* elem,
   }
   if (grpc_client_channel_trace.enabled()) {
     gpr_log(GPR_INFO,
-            "chand=%p calld=%p: retrying failed call in %" PRIuPTR " ms", chand,
+            "chand=%p calld=%p: retrying failed call in %" PRId64 " ms", chand,
             calld, next_attempt_time - grpc_core::ExecCtx::Get()->Now());
   }
   // Schedule retry after computed delay.
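
The PRIuPTR/PRIdPTR to PRId64 substitutions like the one above recur throughout this patch: once grpc_millis is int64_t, the pointer-width specifiers no longer match the argument width on 32-bit targets, and a width mismatch in varargs formatting is undefined behavior. An illustrative snippet (assumed, not from the patch):

// PRIdPTR formats an intptr_t, which is 32 bits on 32-bit targets, so passing
// the now-64-bit grpc_millis through it mismatches; PRId64 always matches.
#include <cinttypes>
#include <cstdio>

int main() {
  int64_t retry_ms = 5000;  // stands in for a grpc_millis value
  std::printf("retrying failed call in %" PRId64 " ms\n", retry_ms);
  return 0;
}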

@@ -776,7 +776,7 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked(
     if (grpc_lb_glb_trace.enabled()) {
       gpr_log(GPR_INFO,
               "[grpclb %p] Received initial LB response message; "
-              "client load reporting interval = %" PRIdPTR " milliseconds",
+              "client load reporting interval = %" PRId64 " milliseconds",
               grpclb_policy, lb_calld->client_stats_report_interval_);
     }
   } else if (grpc_lb_glb_trace.enabled()) {
@@ -1417,7 +1417,7 @@ void GrpcLb::StartBalancerCallRetryTimerLocked() {
   gpr_log(GPR_INFO, "[grpclb %p] Connection to LB server lost...", this);
   grpc_millis timeout = next_try - ExecCtx::Get()->Now();
   if (timeout > 0) {
-    gpr_log(GPR_INFO, "[grpclb %p] ... retry_timer_active in %" PRIuPTR "ms.",
+    gpr_log(GPR_INFO, "[grpclb %p] ... retry_timer_active in %" PRId64 "ms.",
            this, timeout);
  } else {
    gpr_log(GPR_INFO, "[grpclb %p] ... retry_timer_active immediately.",

@@ -346,7 +346,7 @@ void AresDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
     RefCountedPtr<Resolver> self = r->Ref(DEBUG_LOCATION, "retry-timer");
     self.release();
     if (timeout > 0) {
-      gpr_log(GPR_DEBUG, "retrying in %" PRIdPTR " milliseconds", timeout);
+      gpr_log(GPR_DEBUG, "retrying in %" PRId64 " milliseconds", timeout);
     } else {
       gpr_log(GPR_DEBUG, "retrying immediately");
     }
@@ -381,8 +381,8 @@ void AresDnsResolver::MaybeStartResolvingLocked() {
     const grpc_millis last_resolution_ago =
         grpc_core::ExecCtx::Get()->Now() - last_resolution_timestamp_;
     gpr_log(GPR_DEBUG,
-            "In cooldown from last resolution (from %" PRIdPTR
-            " ms ago). Will resolve again in %" PRIdPTR " ms",
+            "In cooldown from last resolution (from %" PRId64
+            " ms ago). Will resolve again in %" PRId64 " ms",
             last_resolution_ago, ms_until_next_resolution);
     have_next_resolution_timer_ = true;
     // TODO(roth): We currently deal with this ref manually. Once the

@@ -218,7 +218,7 @@ void NativeDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
     r->Ref(DEBUG_LOCATION, "next_resolution_timer");
     self.release();
     if (timeout > 0) {
-      gpr_log(GPR_DEBUG, "retrying in %" PRIdPTR " milliseconds", timeout);
+      gpr_log(GPR_DEBUG, "retrying in %" PRId64 " milliseconds", timeout);
     } else {
       gpr_log(GPR_DEBUG, "retrying immediately");
     }
@@ -254,8 +254,8 @@ void NativeDnsResolver::MaybeStartResolvingLocked() {
     const grpc_millis last_resolution_ago =
         grpc_core::ExecCtx::Get()->Now() - last_resolution_timestamp_;
     gpr_log(GPR_DEBUG,
-            "In cooldown from last resolution (from %" PRIdPTR
-            " ms ago). Will resolve again in %" PRIdPTR " ms",
+            "In cooldown from last resolution (from %" PRId64
+            " ms ago). Will resolve again in %" PRId64 " ms",
            last_resolution_ago, ms_until_next_resolution);
    have_next_resolution_timer_ = true;
    // TODO(roth): We currently deal with this ref manually. Once the

@@ -467,7 +467,7 @@ static void maybe_start_connecting_locked(grpc_subchannel* c) {
    if (time_til_next <= 0) {
      gpr_log(GPR_INFO, "Subchannel %p: Retry immediately", c);
    } else {
-      gpr_log(GPR_INFO, "Subchannel %p: Retry in %" PRIdPTR " milliseconds", c,
+      gpr_log(GPR_INFO, "Subchannel %p: Retry in %" PRId64 " milliseconds", c,
              time_til_next);
    }
    GRPC_CLOSURE_INIT(&c->on_alarm, on_alarm, c, grpc_schedule_on_exec_ctx);

@@ -1066,7 +1066,7 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
 #endif
   if (grpc_polling_trace.enabled()) {
     gpr_log(GPR_INFO,
-            "PS:%p work hdl=%p worker=%p now=%" PRIdPTR " deadline=%" PRIdPTR
+            "PS:%p work hdl=%p worker=%p now=%" PRId64 " deadline=%" PRId64
            " kwp=%d pollable=%p",
            pollset, worker_hdl, WORKER_PTR, grpc_core::ExecCtx::Get()->Now(),
            deadline, pollset->kicked_without_poller, pollset->active_pollable);

@@ -251,10 +251,10 @@ static void pollset_destroy(grpc_pollset* pollset) {
 static grpc_error* pollset_work(grpc_pollset* pollset,
                                 grpc_pollset_worker** worker,
                                 grpc_millis deadline) {
-  GRPC_POLLING_API_TRACE("pollset_work(%p, %" PRIdPTR ") begin", pollset,
+  GRPC_POLLING_API_TRACE("pollset_work(%p, %" PRId64 ") begin", pollset,
                          deadline);
   grpc_error* err = g_event_engine->pollset_work(pollset, worker, deadline);
-  GRPC_POLLING_API_TRACE("pollset_work(%p, %" PRIdPTR ") end", pollset,
+  GRPC_POLLING_API_TRACE("pollset_work(%p, %" PRId64 ") end", pollset,
                          deadline);
   return err;
 }

@@ -53,24 +53,24 @@ static void exec_ctx_sched(grpc_closure* closure, grpc_error* error) {
 
 static gpr_timespec g_start_time;
 
-static gpr_atm timespec_to_atm_round_down(gpr_timespec ts) {
+static grpc_millis timespec_to_millis_round_down(gpr_timespec ts) {
   ts = gpr_time_sub(ts, g_start_time);
   double x = GPR_MS_PER_SEC * static_cast<double>(ts.tv_sec) +
              static_cast<double>(ts.tv_nsec) / GPR_NS_PER_MS;
   if (x < 0) return 0;
-  if (x > GPR_ATM_MAX) return GPR_ATM_MAX;
-  return static_cast<gpr_atm>(x);
+  if (x > GRPC_MILLIS_INF_FUTURE) return GRPC_MILLIS_INF_FUTURE;
+  return static_cast<grpc_millis>(x);
 }
 
-static gpr_atm timespec_to_atm_round_up(gpr_timespec ts) {
+static grpc_millis timespec_to_millis_round_up(gpr_timespec ts) {
   ts = gpr_time_sub(ts, g_start_time);
   double x = GPR_MS_PER_SEC * static_cast<double>(ts.tv_sec) +
              static_cast<double>(ts.tv_nsec) / GPR_NS_PER_MS +
              static_cast<double>(GPR_NS_PER_SEC - 1) /
                  static_cast<double>(GPR_NS_PER_SEC);
   if (x < 0) return 0;
-  if (x > GPR_ATM_MAX) return GPR_ATM_MAX;
-  return static_cast<gpr_atm>(x);
+  if (x > GRPC_MILLIS_INF_FUTURE) return GRPC_MILLIS_INF_FUTURE;
+  return static_cast<grpc_millis>(x);
 }
 
 gpr_timespec grpc_millis_to_timespec(grpc_millis millis,
@@ -92,12 +92,12 @@ gpr_timespec grpc_millis_to_timespec(grpc_millis millis,
 }
 
 grpc_millis grpc_timespec_to_millis_round_down(gpr_timespec ts) {
-  return timespec_to_atm_round_down(
+  return timespec_to_millis_round_down(
       gpr_convert_clock_type(ts, g_start_time.clock_type));
 }
 
 grpc_millis grpc_timespec_to_millis_round_up(gpr_timespec ts) {
-  return timespec_to_atm_round_up(
+  return timespec_to_millis_round_up(
       gpr_convert_clock_type(ts, g_start_time.clock_type));
 }
@@ -138,7 +138,7 @@ bool ExecCtx::Flush() {
 
 grpc_millis ExecCtx::Now() {
   if (!now_is_valid_) {
-    now_ = timespec_to_atm_round_down(gpr_now(GPR_CLOCK_MONOTONIC));
+    now_ = timespec_to_millis_round_down(gpr_now(GPR_CLOCK_MONOTONIC));
     now_is_valid_ = true;
   }
   return now_;
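
The two conversion helpers above keep their existing rounding math; only the types and names change. A self-contained re-derivation of the round-down/round-up behavior (stand-in names, assumed constant values), showing that round-up adds just under one millisecond before truncating, so any sub-millisecond remainder bumps the result up while exact values stay put:

#include <cstdint>
#include <cstdio>

// Local stand-ins for the constants used in exec_ctx.cc.
constexpr double kMsPerSec = 1000.0;
constexpr double kNsPerMs = 1e6;
constexpr double kNsPerSec = 1e9;

struct timespec_like { int64_t tv_sec; int32_t tv_nsec; };

static int64_t round_down(timespec_like ts) {
  double x = kMsPerSec * ts.tv_sec + ts.tv_nsec / kNsPerMs;
  return static_cast<int64_t>(x);  // truncates toward zero
}

static int64_t round_up(timespec_like ts) {
  double x = kMsPerSec * ts.tv_sec + ts.tv_nsec / kNsPerMs +
             (kNsPerSec - 1) / kNsPerSec;  // just under 1 ms
  return static_cast<int64_t>(x);
}

int main() {
  timespec_like t{0, 1500000};  // 1.5 ms
  std::printf("down=%lld up=%lld\n", (long long)round_down(t),
              (long long)round_up(t));  // down=1 up=2
  timespec_like exact{0, 2000000};  // exactly 2 ms
  std::printf("down=%lld up=%lld\n", (long long)round_down(exact),
              (long long)round_up(exact));  // down=2 up=2
  return 0;
}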

@@ -29,10 +29,10 @@
 #include "src/core/lib/gprpp/fork.h"
 #include "src/core/lib/iomgr/closure.h"
 
-typedef gpr_atm grpc_millis;
+typedef int64_t grpc_millis;
 
-#define GRPC_MILLIS_INF_FUTURE GPR_ATM_MAX
-#define GRPC_MILLIS_INF_PAST GPR_ATM_MIN
+#define GRPC_MILLIS_INF_FUTURE INT64_MAX
+#define GRPC_MILLIS_INF_PAST INT64_MIN
 
 /** A workqueue represents a list of work to be executed asynchronously.
     Forward declared here to avoid a circular dependency with workqueue.h. */
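
This typedef is the heart of the fix. A hedged sketch of a compile-time guard, not present in the patch, that would pin down the new invariant on every platform:

#include <cstdint>
#include <type_traits>

typedef int64_t grpc_millis;  // as in the new exec_ctx.h

static_assert(sizeof(grpc_millis) == 8,
              "grpc_millis must be 64-bit on all platforms");
static_assert(std::is_signed<grpc_millis>::value,
              "deadline arithmetic relies on signed values");

int main() { return 0; }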

@@ -69,7 +69,7 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
   GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
   gpr_mu_unlock(&pollset->mu);
   grpc_millis now = grpc_core::ExecCtx::Get()->Now();
-  size_t timeout = 0;
+  grpc_millis timeout = 0;
   if (deadline > now) {
     timeout = deadline - now;
   }
@@ -77,7 +77,7 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
   // control back to the application
   grpc_core::ExecCtx* curr = grpc_core::ExecCtx::Get();
   grpc_core::ExecCtx::Set(nullptr);
-  poller_vtable->poll(timeout);
+  poller_vtable->poll(static_cast<size_t>(timeout));
   grpc_core::ExecCtx::Set(curr);
   grpc_core::ExecCtx::Get()->InvalidateNow();
   if (grpc_core::ExecCtx::Get()->HasWork()) {
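
The pollset change keeps the deadline arithmetic in grpc_millis and narrows to size_t only at the poll() boundary. A sketch of the narrowing hazard this avoids (illustrative values only, not gRPC source):

#include <cstdint>
#include <cstdio>

int main() {
  const int64_t deadline = INT64_MAX;  // i.e. GRPC_MILLIS_INF_FUTURE
  const int64_t now = 1000;
  const int64_t timeout = deadline - now;  // stays 64-bit
  // What a 32-bit size_t would silently keep of that value:
  const uint32_t as_32bit = static_cast<uint32_t>(timeout);
  std::printf("timeout=%lld truncated=%u\n",
              static_cast<long long>(timeout), as_32bit);
  return 0;
}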

@@ -28,7 +28,7 @@
 #include "src/core/lib/iomgr/iomgr.h"
 
 typedef struct grpc_timer {
-  gpr_atm deadline;
+  grpc_millis deadline;
   uint32_t heap_index; /* INVALID_HEAP_INDEX if not in heap */
   bool pending;
   struct grpc_timer* next;

@@ -34,6 +34,7 @@
 #include "src/core/lib/gpr/spinlock.h"
 #include "src/core/lib/gpr/tls.h"
 #include "src/core/lib/gpr/useful.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
 #include "src/core/lib/iomgr/time_averaged_stats.h"
 #include "src/core/lib/iomgr/timer_heap.h"
@@ -59,9 +60,9 @@ typedef struct {
   gpr_mu mu;
   grpc_time_averaged_stats stats;
   /* All and only timers with deadlines <= this will be in the heap. */
-  gpr_atm queue_deadline_cap;
+  grpc_millis queue_deadline_cap;
   /* The deadline of the next timer due in this shard */
-  gpr_atm min_deadline;
+  grpc_millis min_deadline;
   /* Index of this timer_shard in the g_shard_queue */
   uint32_t shard_queue_index;
   /* This holds all timers with deadlines < queue_deadline_cap. Timers in this
@@ -209,15 +210,23 @@ static void validate_non_pending_timer(grpc_timer* t) {
 #endif
 
+#if GPR_ARCH_64
+/* NOTE: TODO(sreek) - Currently the thread local storage support in grpc is
+   for intptr_t which means on 32-bit machines it is not wide enough to hold
+   grpc_millis which is 64-bit. Adding thread local support for 64 bit values
+   is a lot of work for very little gain. So we are currently restricting this
+   optimization to only 64 bit machines */
+
 /* Thread local variable that stores the deadline of the next timer the thread
  * has last-seen. This is an optimization to prevent the thread from checking
  * shared_mutables.min_timer (which requires acquiring shared_mutables.mu lock,
  * an expensive operation) */
 GPR_TLS_DECL(g_last_seen_min_timer);
+#endif
 
 struct shared_mutables {
   /* The deadline of the next timer due across all timer shards */
-  gpr_atm min_timer;
+  grpc_millis min_timer;
   /* Allow only one run_some_expired_timers at once */
   gpr_spinlock checker_mu;
   bool initialized;
@@ -227,18 +236,18 @@ struct shared_mutables {
 
 static struct shared_mutables g_shared_mutables;
 
-static gpr_atm saturating_add(gpr_atm a, gpr_atm b) {
-  if (a > GPR_ATM_MAX - b) {
-    return GPR_ATM_MAX;
+static grpc_millis saturating_add(grpc_millis a, grpc_millis b) {
+  if (a > GRPC_MILLIS_INF_FUTURE - b) {
+    return GRPC_MILLIS_INF_FUTURE;
   }
   return a + b;
 }
 
-static grpc_timer_check_result run_some_expired_timers(gpr_atm now,
-                                                       gpr_atm* next,
+static grpc_timer_check_result run_some_expired_timers(grpc_millis now,
+                                                       grpc_millis* next,
                                                        grpc_error* error);
 
-static gpr_atm compute_min_deadline(timer_shard* shard) {
+static grpc_millis compute_min_deadline(timer_shard* shard) {
   return grpc_timer_heap_is_empty(&shard->heap)
              ? saturating_add(shard->queue_deadline_cap, 1)
             : grpc_timer_heap_top(&shard->heap)->deadline;
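
saturating_add keeps deadline arithmetic from wrapping past GRPC_MILLIS_INF_FUTURE. A standalone worked example of the function above, with INT64_MAX standing in for GRPC_MILLIS_INF_FUTURE:

#include <cstdint>
#include <cstdio>

static int64_t saturating_add(int64_t a, int64_t b) {
  if (a > INT64_MAX - b) return INT64_MAX;  // clamp instead of overflowing
  return a + b;
}

int main() {
  // Near the ceiling, the sum clamps to INT64_MAX rather than wrapping
  // around to a huge negative deadline:
  std::printf("%lld\n", (long long)saturating_add(INT64_MAX - 1, 5));
  std::printf("%lld\n", (long long)saturating_add(100, 5));  // 105
  return 0;
}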
@@ -257,8 +266,11 @@ static void timer_list_init() {
   g_shared_mutables.checker_mu = GPR_SPINLOCK_INITIALIZER;
   gpr_mu_init(&g_shared_mutables.mu);
   g_shared_mutables.min_timer = grpc_core::ExecCtx::Get()->Now();
+
+#if GPR_ARCH_64
   gpr_tls_init(&g_last_seen_min_timer);
   gpr_tls_set(&g_last_seen_min_timer, 0);
+#endif
 
   for (i = 0; i < g_num_shards; i++) {
     timer_shard* shard = &g_shards[i];
@@ -287,7 +299,11 @@ static void timer_list_shutdown() {
     grpc_timer_heap_destroy(&shard->heap);
   }
   gpr_mu_destroy(&g_shared_mutables.mu);
+
+#if GPR_ARCH_64
   gpr_tls_destroy(&g_last_seen_min_timer);
+#endif
+
   gpr_free(g_shards);
   gpr_free(g_shard_queue);
   g_shared_mutables.initialized = false;
@@ -346,7 +362,7 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline,
 #endif
 
   if (grpc_timer_trace.enabled()) {
-    gpr_log(GPR_INFO, "TIMER %p: SET %" PRIdPTR " now %" PRIdPTR " call %p[%p]",
+    gpr_log(GPR_INFO, "TIMER %p: SET %" PRId64 " now %" PRId64 " call %p[%p]",
             timer, deadline, grpc_core::ExecCtx::Get()->Now(), closure,
             closure->cb);
   }
@@ -383,7 +399,7 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline,
   }
   if (grpc_timer_trace.enabled()) {
     gpr_log(GPR_INFO,
-            " .. add to shard %d with queue_deadline_cap=%" PRIdPTR
+            " .. add to shard %d with queue_deadline_cap=%" PRId64
             " => is_first_timer=%s",
             static_cast<int>(shard - g_shards), shard->queue_deadline_cap,
             is_first_timer ? "true" : "false");
@@ -404,15 +420,27 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline,
   if (is_first_timer) {
     gpr_mu_lock(&g_shared_mutables.mu);
     if (grpc_timer_trace.enabled()) {
-      gpr_log(GPR_INFO, " .. old shard min_deadline=%" PRIdPTR,
+      gpr_log(GPR_INFO, " .. old shard min_deadline=%" PRId64,
              shard->min_deadline);
    }
    if (deadline < shard->min_deadline) {
-      gpr_atm old_min_deadline = g_shard_queue[0]->min_deadline;
+      grpc_millis old_min_deadline = g_shard_queue[0]->min_deadline;
      shard->min_deadline = deadline;
      note_deadline_change(shard);
      if (shard->shard_queue_index == 0 && deadline < old_min_deadline) {
-        gpr_atm_no_barrier_store(&g_shared_mutables.min_timer, deadline);
+#if GPR_ARCH_64
+        // TODO: sreek - Using c-style cast here. static_cast<> gives an error
+        // (on mac platforms complaining that gpr_atm* is (long *) while
+        // (&g_shared_mutables.min_timer) is a (long long *). The cast should be
+        // safe since we know that both are pointer types and 64-bit wide.
+        gpr_atm_no_barrier_store((gpr_atm*)(&g_shared_mutables.min_timer),
+                                 deadline);
+#else
+        // On 32-bit systems, gpr_atm_no_barrier_store does not work on 64-bit
+        // types (like grpc_millis). So all reads and writes to the
+        // g_shared_mutables.min_timer variable are done under
+        // g_shared_mutables.mu
+        g_shared_mutables.min_timer = deadline;
+#endif
        grpc_kick_poller();
      }
    }
@@ -421,8 +449,10 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline,
 }
 
 static void timer_consume_kick(void) {
-  /* force re-evaluation of last seeen min */
+#if GPR_ARCH_64
+  /* Force re-evaluation of last seen min */
   gpr_tls_set(&g_last_seen_min_timer, 0);
+#endif
 }
 
 static void timer_cancel(grpc_timer* timer) {
@@ -459,7 +489,7 @@ static void timer_cancel(grpc_timer* timer) {
    'queue_deadline_cap') into into shard->heap.
    Returns 'true' if shard->heap has atleast ONE element
    REQUIRES: shard->mu locked */
-static int refill_heap(timer_shard* shard, gpr_atm now) {
+static int refill_heap(timer_shard* shard, grpc_millis now) {
   /* Compute the new queue window width and bound by the limits: */
   double computed_deadline_delta =
       grpc_time_averaged_stats_update_average(&shard->stats) *
@@ -472,10 +502,10 @@ static int refill_heap(timer_shard* shard, grpc_millis now) {
   /* Compute the new cap and put all timers under it into the queue: */
   shard->queue_deadline_cap =
       saturating_add(GPR_MAX(now, shard->queue_deadline_cap),
-                     static_cast<gpr_atm>(deadline_delta * 1000.0));
+                     static_cast<grpc_millis>(deadline_delta * 1000.0));
 
   if (grpc_timer_check_trace.enabled()) {
-    gpr_log(GPR_INFO, " .. shard[%d]->queue_deadline_cap --> %" PRIdPTR,
+    gpr_log(GPR_INFO, " .. shard[%d]->queue_deadline_cap --> %" PRId64,
             static_cast<int>(shard - g_shards), shard->queue_deadline_cap);
   }
   for (timer = shard->list.next; timer != &shard->list; timer = next) {
@@ -483,7 +513,7 @@ static int refill_heap(timer_shard* shard, grpc_millis now) {
     if (timer->deadline < shard->queue_deadline_cap) {
       if (grpc_timer_check_trace.enabled()) {
-        gpr_log(GPR_INFO, " .. add timer with deadline %" PRIdPTR " to heap",
+        gpr_log(GPR_INFO, " .. add timer with deadline %" PRId64 " to heap",
                 timer->deadline);
       }
       list_remove(timer);
@@ -496,7 +526,7 @@ static int refill_heap(timer_shard* shard, grpc_millis now) {
 /* This pops the next non-cancelled timer with deadline <= now from the
    queue, or returns NULL if there isn't one.
    REQUIRES: shard->mu locked */
-static grpc_timer* pop_one(timer_shard* shard, gpr_atm now) {
+static grpc_timer* pop_one(timer_shard* shard, grpc_millis now) {
   grpc_timer* timer;
   for (;;) {
     if (grpc_timer_check_trace.enabled()) {
@@ -511,12 +541,12 @@ static grpc_timer* pop_one(timer_shard* shard, grpc_millis now) {
     timer = grpc_timer_heap_top(&shard->heap);
     if (grpc_timer_check_trace.enabled()) {
       gpr_log(GPR_INFO,
-              " .. check top timer deadline=%" PRIdPTR " now=%" PRIdPTR,
+              " .. check top timer deadline=%" PRId64 " now=%" PRId64,
               timer->deadline, now);
     }
     if (timer->deadline > now) return nullptr;
     if (grpc_timer_trace.enabled()) {
-      gpr_log(GPR_INFO, "TIMER %p: FIRE %" PRIdPTR "ms late via %s scheduler",
+      gpr_log(GPR_INFO, "TIMER %p: FIRE %" PRId64 "ms late via %s scheduler",
               timer, now - timer->deadline,
               timer->closure->scheduler->vtable->name);
     }
@@ -527,8 +557,8 @@ static grpc_timer* pop_one(timer_shard* shard, grpc_millis now) {
 }
 
 /* REQUIRES: shard->mu unlocked */
-static size_t pop_timers(timer_shard* shard, gpr_atm now,
-                         gpr_atm* new_min_deadline, grpc_error* error) {
+static size_t pop_timers(timer_shard* shard, grpc_millis now,
+                         grpc_millis* new_min_deadline, grpc_error* error) {
   size_t n = 0;
   grpc_timer* timer;
   gpr_mu_lock(&shard->mu);
@@ -546,13 +576,27 @@ static size_t pop_timers(timer_shard* shard, grpc_millis now,
   return n;
 }
 
-static grpc_timer_check_result run_some_expired_timers(gpr_atm now,
-                                                       gpr_atm* next,
+static grpc_timer_check_result run_some_expired_timers(grpc_millis now,
+                                                       grpc_millis* next,
                                                        grpc_error* error) {
   grpc_timer_check_result result = GRPC_TIMERS_NOT_CHECKED;
 
-  gpr_atm min_timer = gpr_atm_no_barrier_load(&g_shared_mutables.min_timer);
+#if GPR_ARCH_64
+  // TODO: sreek - Using c-style cast here. static_cast<> gives an error (on
+  // mac platforms complaining that gpr_atm* is (long *) while
+  // (&g_shared_mutables.min_timer) is a (long long *). The cast should be
+  // safe since we know that both are pointer types and 64-bit wide
+  grpc_millis min_timer = static_cast<grpc_millis>(
+      gpr_atm_no_barrier_load((gpr_atm*)(&g_shared_mutables.min_timer)));
   gpr_tls_set(&g_last_seen_min_timer, min_timer);
+#else
+  // On 32-bit systems, gpr_atm_no_barrier_load does not work on 64-bit types
+  // (like grpc_millis). So all reads and writes to g_shared_mutables.min_timer
+  // are done under g_shared_mutables.mu
+  gpr_mu_lock(&g_shared_mutables.mu);
+  grpc_millis min_timer = g_shared_mutables.min_timer;
+  gpr_mu_unlock(&g_shared_mutables.mu);
+#endif
+
   if (now < min_timer) {
     if (next != nullptr) *next = GPR_MIN(*next, min_timer);
     return GRPC_TIMERS_CHECKED_AND_EMPTY;
@@ -563,14 +607,15 @@ static grpc_timer_check_result run_some_expired_timers(grpc_millis now,
     result = GRPC_TIMERS_CHECKED_AND_EMPTY;
 
     if (grpc_timer_check_trace.enabled()) {
-      gpr_log(GPR_INFO, " .. shard[%d]->min_deadline = %" PRIdPTR,
+      gpr_log(GPR_INFO, " .. shard[%d]->min_deadline = %" PRId64,
               static_cast<int>(g_shard_queue[0] - g_shards),
               g_shard_queue[0]->min_deadline);
     }
 
     while (g_shard_queue[0]->min_deadline < now ||
-           (now != GPR_ATM_MAX && g_shard_queue[0]->min_deadline == now)) {
-      gpr_atm new_min_deadline;
+           (now != GRPC_MILLIS_INF_FUTURE &&
+            g_shard_queue[0]->min_deadline == now)) {
+      grpc_millis new_min_deadline;
 
       /* For efficiency, we pop as many available timers as we can from the
          shard. This may violate perfect timer deadline ordering, but that
@@ -582,8 +627,8 @@ static grpc_timer_check_result run_some_expired_timers(grpc_millis now,
       if (grpc_timer_check_trace.enabled()) {
         gpr_log(GPR_INFO,
                 " .. result --> %d"
-                ", shard[%d]->min_deadline %" PRIdPTR " --> %" PRIdPTR
-                ", now=%" PRIdPTR,
+                ", shard[%d]->min_deadline %" PRId64 " --> %" PRId64
+                ", now=%" PRId64,
                 result, static_cast<int>(g_shard_queue[0] - g_shards),
                 g_shard_queue[0]->min_deadline, new_min_deadline, now);
       }
@@ -601,8 +646,19 @@ static grpc_timer_check_result run_some_expired_timers(grpc_millis now,
       *next = GPR_MIN(*next, g_shard_queue[0]->min_deadline);
     }
 
-    gpr_atm_no_barrier_store(&g_shared_mutables.min_timer,
-                             g_shard_queue[0]->min_deadline);
+#if GPR_ARCH_64
+    // TODO: sreek - Using c-style cast here. static_cast<> gives an error (on
+    // mac platforms complaining that gpr_atm* is (long *) while
+    // (&g_shared_mutables.min_timer) is a (long long *). The cast should be
+    // safe since we know that both are pointer types and 64-bit wide
+    gpr_atm_no_barrier_store((gpr_atm*)(&g_shared_mutables.min_timer),
+                             g_shard_queue[0]->min_deadline);
+#else
+    // On 32-bit systems, gpr_atm_no_barrier_store does not work on 64-bit
+    // types (like grpc_millis). So all reads and writes to
+    // g_shared_mutables.min_timer are done under g_shared_mutables.mu
+    g_shared_mutables.min_timer = g_shard_queue[0]->min_deadline;
+#endif
     gpr_mu_unlock(&g_shared_mutables.mu);
     gpr_spinlock_unlock(&g_shared_mutables.checker_mu);
   }
@@ -616,17 +672,28 @@ static grpc_timer_check_result timer_check(grpc_millis* next) {
   // prelude
   grpc_millis now = grpc_core::ExecCtx::Get()->Now();
 
+#if GPR_ARCH_64
   /* fetch from a thread-local first: this avoids contention on a globally
      mutable cacheline in the common case */
   grpc_millis min_timer = gpr_tls_get(&g_last_seen_min_timer);
+#else
+  // On 32-bit systems, we currently do not have thread local support for
+  // 64-bit types. In this case, directly read from g_shared_mutables.min_timer.
+  // Also, note that on 32-bit systems, gpr_atm_no_barrier_store does not work
+  // on 64-bit types (like grpc_millis). So all reads and writes to
+  // g_shared_mutables.min_timer are done under g_shared_mutables.mu
+  gpr_mu_lock(&g_shared_mutables.mu);
+  grpc_millis min_timer = g_shared_mutables.min_timer;
+  gpr_mu_unlock(&g_shared_mutables.mu);
+#endif
 
   if (now < min_timer) {
     if (next != nullptr) {
       *next = GPR_MIN(*next, min_timer);
     }
     if (grpc_timer_check_trace.enabled()) {
-      gpr_log(GPR_INFO,
-              "TIMER CHECK SKIP: now=%" PRIdPTR " min_timer=%" PRIdPTR, now,
-              min_timer);
+      gpr_log(GPR_INFO, "TIMER CHECK SKIP: now=%" PRId64 " min_timer=%" PRId64,
+              now, min_timer);
     }
     return GRPC_TIMERS_CHECKED_AND_EMPTY;
   }
@@ -642,13 +709,18 @@ static grpc_timer_check_result timer_check(grpc_millis* next) {
     if (next == nullptr) {
       next_str = gpr_strdup("NULL");
     } else {
-      gpr_asprintf(&next_str, "%" PRIdPTR, *next);
+      gpr_asprintf(&next_str, "%" PRId64, *next);
     }
+#if GPR_ARCH_64
     gpr_log(GPR_INFO,
-            "TIMER CHECK BEGIN: now=%" PRIdPTR " next=%s tls_min=%" PRIdPTR
-            " glob_min=%" PRIdPTR,
-            now, next_str, gpr_tls_get(&g_last_seen_min_timer),
-            gpr_atm_no_barrier_load(&g_shared_mutables.min_timer));
+            "TIMER CHECK BEGIN: now=%" PRId64 " next=%s tls_min=%" PRId64
+            " glob_min=%" PRIdPTR,
+            now, next_str, min_timer,
+            gpr_atm_no_barrier_load((gpr_atm*)(&g_shared_mutables.min_timer)));
+#else
+    gpr_log(GPR_INFO, "TIMER CHECK BEGIN: now=%" PRId64 " next=%s min=%" PRId64,
+            now, next_str, min_timer);
+#endif
     gpr_free(next_str);
   }
   // actual code
@@ -660,7 +732,7 @@ static grpc_timer_check_result timer_check(grpc_millis* next) {
     if (next == nullptr) {
       next_str = gpr_strdup("NULL");
     } else {
-      gpr_asprintf(&next_str, "%" PRIdPTR, *next);
+      gpr_asprintf(&next_str, "%" PRId64, *next);
     }
     gpr_log(GPR_INFO, "TIMER CHECK END: r=%d; next=%s", r, next_str);
     gpr_free(next_str);
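
Taken together, the timer_generic.cc changes adopt one access discipline for the shared minimum deadline: relaxed atomic loads/stores where gpr_atm is 64 bits wide, and mutex-protected plain reads/writes on 32-bit platforms. A condensed C++11 sketch of that pattern, with std::atomic/std::mutex and an INTPTR_MAX check standing in for gpr_atm/gpr_mu and GPR_ARCH_64 (stand-ins, not gRPC source):

#include <cstdint>

#if INTPTR_MAX == INT64_MAX  // stand-in for GPR_ARCH_64
#include <atomic>
static std::atomic<int64_t> g_min_timer{0};
static int64_t load_min_timer() {
  return g_min_timer.load(std::memory_order_relaxed);  // lock-free on 64-bit
}
static void store_min_timer(int64_t v) {
  g_min_timer.store(v, std::memory_order_relaxed);
}
#else
#include <mutex>
// On 32-bit targets the atomic word is only 32 bits wide, so every access to
// the 64-bit minimum deadline is serialized through a mutex instead.
static std::mutex g_mu;
static int64_t g_min_timer = 0;
static int64_t load_min_timer() {
  std::lock_guard<std::mutex> lock(g_mu);
  return g_min_timer;
}
static void store_min_timer(int64_t v) {
  std::lock_guard<std::mutex> lock(g_mu);
  g_min_timer = v;
}
#endif

int main() {
  store_min_timer(42);
  return load_min_timer() == 42 ? 0 : 1;
}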

@@ -172,7 +172,7 @@ static bool wait_until(grpc_millis next) {
 
       if (grpc_timer_check_trace.enabled()) {
         grpc_millis wait_time = next - grpc_core::ExecCtx::Get()->Now();
-        gpr_log(GPR_INFO, "sleep for a %" PRIdPTR " milliseconds", wait_time);
+        gpr_log(GPR_INFO, "sleep for a %" PRId64 " milliseconds", wait_time);
       }
     } else {  // g_timed_waiter == true && next >= g_timed_waiter_deadline
       next = GRPC_MILLIS_INF_FUTURE;

@@ -52,7 +52,7 @@ static void put_metadata_list(gpr_strvec* b, grpc_metadata_batch md) {
   }
   if (md.deadline != GRPC_MILLIS_INF_FUTURE) {
     char* tmp;
-    gpr_asprintf(&tmp, " deadline=%" PRIdPTR, md.deadline);
+    gpr_asprintf(&tmp, " deadline=%" PRId64, md.deadline);
     gpr_strvec_add(b, tmp);
   }
 }

@@ -128,7 +128,7 @@ static void poll_pollset_until_request_done(iomgr_args* args) {
       break;
     }
     grpc_millis time_left = deadline - grpc_core::ExecCtx::Get()->Now();
-    gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRIdPTR, done, time_left);
+    gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRId64, done, time_left);
     GPR_ASSERT(time_left >= 0);
     grpc_pollset_worker* worker = nullptr;
     gpr_mu_lock(args->mu);

@@ -91,7 +91,7 @@ static void actually_poll(void* argsp) {
       break;
     }
     grpc_millis time_left = deadline - grpc_core::ExecCtx::Get()->Now();
-    gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRIdPTR, done, time_left);
+    gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRId64, done, time_left);
     GPR_ASSERT(time_left >= 0);
     grpc_pollset_worker* worker = nullptr;
     gpr_mu_lock(args->mu);

@@ -82,7 +82,7 @@ static void poll_pollset_until_request_done(args_struct* args) {
       break;
     }
     grpc_millis time_left = deadline - grpc_core::ExecCtx::Get()->Now();
-    gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRIdPTR, done, time_left);
+    gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRId64, done, time_left);
     GPR_ASSERT(time_left >= 0);
     grpc_pollset_worker* worker = nullptr;
     gpr_mu_lock(args->mu);

@@ -204,7 +204,7 @@ static void test2(void) {
     }
 
     if (num_inserted) {
-      gpr_atm* min_deadline = nullptr;
+      grpc_millis* min_deadline = nullptr;
       for (size_t i = 0; i < elems_size; i++) {
         if (elems[i].inserted) {
           if (min_deadline == nullptr) {

@@ -71,7 +71,7 @@ static void assert_decodes_as(const char* buffer, grpc_millis expected) {
   GPR_ASSERT(1 == grpc_http2_decode_timeout(
                       grpc_slice_from_static_string(buffer), &got));
   if (got != expected) {
-    gpr_log(GPR_ERROR, "got:'%" PRIdPTR "' != expected:'%" PRIdPTR "'", got,
+    gpr_log(GPR_ERROR, "got:'%" PRId64 "' != expected:'%" PRId64 "'", got,
             expected);
     abort();
   }

@@ -362,7 +362,7 @@ TEST_F(ClientLbEnd2endTest, PickFirstBackOffInitialReconnect) {
       grpc_timeout_milliseconds_to_deadline(kInitialBackOffMs * 2)));
   const gpr_timespec t1 = gpr_now(GPR_CLOCK_MONOTONIC);
   const grpc_millis waited_ms = gpr_time_to_millis(gpr_time_sub(t1, t0));
-  gpr_log(GPR_DEBUG, "Waited %ld milliseconds", waited_ms);
+  gpr_log(GPR_DEBUG, "Waited %" PRId64 " milliseconds", waited_ms);
   // We should have waited at least kInitialBackOffMs. We substract one to
   // account for test and precision accuracy drift.
   EXPECT_GE(waited_ms, kInitialBackOffMs - 1);
@@ -391,7 +391,7 @@ TEST_F(ClientLbEnd2endTest, PickFirstBackOffMinReconnect) {
       grpc_timeout_milliseconds_to_deadline(kMinReconnectBackOffMs * 2));
   const gpr_timespec t1 = gpr_now(GPR_CLOCK_MONOTONIC);
   const grpc_millis waited_ms = gpr_time_to_millis(gpr_time_sub(t1, t0));
-  gpr_log(GPR_DEBUG, "Waited %ld ms", waited_ms);
+  gpr_log(GPR_DEBUG, "Waited %" PRId64 " ms", waited_ms);
   // We should have waited at least kMinReconnectBackOffMs. We substract one to
   // account for test and precision accuracy drift.
   EXPECT_GE(waited_ms, kMinReconnectBackOffMs - 1);
