Merge github.com:grpc/grpc into sometimes-its-good-just-to-check-in-with-each-other

pull/2477/head
Craig Tiller 10 years ago
commit b5980be9a0
Files changed (number of changed lines in parentheses):
  1. BUILD (3)
  2. Makefile (82)
  3. build.json (29)
  4. include/grpc++/auth_context.h (7)
  5. include/grpc++/auth_property_iterator.h (77)
  6. include/grpc++/completion_queue.h (3)
  7. include/grpc++/fixed_size_thread_pool.h (8)
  8. include/grpc++/server_context.h (7)
  9. include/grpc++/thread_pool_interface.h (2)
  10. include/grpc/support/time.h (42)
  11. include/grpc/support/useful.h (20)
  12. src/compiler/objective_c_generator.cc (2)
  13. src/core/channel/client_channel.c (4)
  14. src/core/client_config/lb_policies/pick_first.h (2)
  15. src/core/client_config/subchannel.c (2)
  16. src/core/iomgr/alarm.c (4)
  17. src/core/iomgr/iocp_windows.c (2)
  18. src/core/iomgr/iomgr.c (21)
  19. src/core/iomgr/pollset_multipoller_with_epoll.c (17)
  20. src/core/iomgr/pollset_multipoller_with_poll_posix.c (15)
  21. src/core/iomgr/pollset_posix.c (51)
  22. src/core/iomgr/pollset_posix.h (6)
  23. src/core/iomgr/pollset_set.h (2)
  24. src/core/iomgr/tcp_server_windows.c (2)
  25. src/core/security/credentials.c (15)
  26. src/core/security/google_default_credentials.c (4)
  27. src/core/security/json_token.c (2)
  28. src/core/security/jwt_verifier.c (27)
  29. src/core/support/cancellable.c (5)
  30. src/core/support/stack_lockfree.c (8)
  31. src/core/support/stack_lockfree.h (3)
  32. src/core/support/sync_posix.c (2)
  33. src/core/support/sync_win32.c (2)
  34. src/core/support/time.c (92)
  35. src/core/support/time_posix.c (11)
  36. src/core/support/time_win32.c (1)
  37. src/core/surface/byte_buffer_queue.c (8)
  38. src/core/surface/byte_buffer_queue.h (2)
  39. src/core/surface/call.c (19)
  40. src/core/surface/completion_queue.c (11)
  41. src/core/surface/lame_client.c (2)
  42. src/core/surface/server.c (418)
  43. src/core/transport/chttp2/frame_window_update.c (4)
  44. src/core/transport/chttp2/incoming_metadata.c (4)
  45. src/core/transport/chttp2/internal.h (20)
  46. src/core/transport/chttp2/parsing.c (9)
  47. src/core/transport/chttp2/stream_encoder.c (3)
  48. src/core/transport/chttp2/stream_lists.c (7)
  49. src/core/transport/chttp2/timeout_encoding.c (14)
  50. src/core/transport/chttp2/writing.c (49)
  51. src/core/transport/chttp2_transport.c (30)
  52. src/core/transport/stream_op.c (2)
  53. src/core/transport/transport.h (4)
  54. src/core/transport/transport_op_string.c (5)
  55. src/cpp/client/client_context.cc (6)
  56. src/cpp/client/secure_credentials.cc (6)
  57. src/cpp/common/auth_property_iterator.cc (87)
  58. src/cpp/common/completion_queue.cc (6)
  59. src/cpp/common/secure_auth_context.cc (16)
  60. src/cpp/common/secure_auth_context.h (4)
  61. src/cpp/server/fixed_size_thread_pool.cc (3)
  62. src/cpp/server/server.cc (2)
  63. src/cpp/server/server_context.cc (2)
  64. src/cpp/util/time.cc (14)
  65. src/csharp/Grpc.Core.Tests/TimespecTest.cs (12)
  66. src/csharp/Grpc.Core/Internal/Enums.cs (15)
  67. src/csharp/Grpc.Core/Internal/Timespec.cs (8)
  68. src/csharp/ext/grpc_csharp_ext.c (7)
  69. src/node/ext/completion_queue_async_worker.cc (3)
  70. src/node/ext/server.cc (3)
  71. src/node/ext/timeval.cc (11)
  72. src/node/test/surface_test.js (42)
  73. src/objective-c/GRPCClient/GRPCCall.h (4)
  74. src/objective-c/GRPCClient/GRPCCall.m (4)
  75. src/objective-c/GRPCClient/private/GRPCDelegateWrapper.h (5)
  76. src/objective-c/GRPCClient/private/GRPCDelegateWrapper.m (4)
  77. src/objective-c/ProtoRPC/ProtoRPC.h (2)
  78. src/objective-c/ProtoRPC/ProtoRPC.m (16)
  79. src/objective-c/ProtoRPC/ProtoService.h (4)
  80. src/objective-c/ProtoRPC/ProtoService.m (2)
  81. src/objective-c/RxLibrary/GRXBufferedPipe.h (2)
  82. src/objective-c/RxLibrary/GRXForwardingWriter.h (43)
  83. src/objective-c/RxLibrary/GRXForwardingWriter.m (112)
  84. src/objective-c/RxLibrary/GRXImmediateWriter.h (14)
  85. src/objective-c/RxLibrary/GRXImmediateWriter.m (12)
  86. src/objective-c/RxLibrary/GRXWriter+Immediate.m (12)
  87. src/objective-c/RxLibrary/GRXWriter.h (11)
  88. src/objective-c/RxLibrary/GRXWriter.m (76)
  89. src/objective-c/RxLibrary/transformations/GRXMappingWriter.h (6)
  90. src/objective-c/RxLibrary/transformations/GRXMappingWriter.m (6)
  91. src/objective-c/tests/GRPCClientTests.m (4)
  92. src/objective-c/tests/InteropTests.m (2)
  93. src/objective-c/tests/LocalClearTextTests.m (6)
  94. src/php/ext/grpc/call.c (2)
  95. src/php/ext/grpc/completion_queue.c (5)
  96. src/php/ext/grpc/server.c (6)
  97. src/php/ext/grpc/timeval.c (11)
  98. src/python/src/grpc/_adapter/_c/utility.c (4)
  99. src/ruby/ext/grpc/rb_completion_queue.c (7)
  100. src/ruby/ext/grpc/rb_grpc.c (14)
  Some files were not shown because too many files have changed in this diff.

@ -641,6 +641,7 @@ cc_library(
"src/cpp/common/create_auth_context.h",
"src/cpp/client/secure_channel_arguments.cc",
"src/cpp/client/secure_credentials.cc",
"src/cpp/common/auth_property_iterator.cc",
"src/cpp/common/secure_auth_context.cc",
"src/cpp/common/secure_create_auth_context.cc",
"src/cpp/server/secure_server_credentials.cc",
@ -673,6 +674,7 @@ cc_library(
"include/grpc++/async_generic_service.h",
"include/grpc++/async_unary_call.h",
"include/grpc++/auth_context.h",
"include/grpc++/auth_property_iterator.h",
"include/grpc++/byte_buffer.h",
"include/grpc++/channel_arguments.h",
"include/grpc++/channel_interface.h",
@ -757,6 +759,7 @@ cc_library(
"include/grpc++/async_generic_service.h",
"include/grpc++/async_unary_call.h",
"include/grpc++/auth_context.h",
"include/grpc++/auth_property_iterator.h",
"include/grpc++/byte_buffer.h",
"include/grpc++/channel_arguments.h",
"include/grpc++/channel_interface.h",

File diff suppressed because one or more lines are too long

@ -31,6 +31,7 @@
"include/grpc++/async_generic_service.h",
"include/grpc++/async_unary_call.h",
"include/grpc++/auth_context.h",
"include/grpc++/auth_property_iterator.h",
"include/grpc++/byte_buffer.h",
"include/grpc++/channel_arguments.h",
"include/grpc++/channel_interface.h",
@ -571,6 +572,7 @@
"src": [
"src/cpp/client/secure_channel_arguments.cc",
"src/cpp/client/secure_credentials.cc",
"src/cpp/common/auth_property_iterator.cc",
"src/cpp/common/secure_auth_context.cc",
"src/cpp/common/secure_create_auth_context.cc",
"src/cpp/server/secure_server_credentials.cc"
@ -1786,20 +1788,6 @@
"gpr"
]
},
{
"name": "time_test",
"build": "test",
"language": "c",
"src": [
"test/core/support/time_test.c"
],
"deps": [
"grpc_test_util",
"grpc",
"gpr_test_util",
"gpr"
]
},
{
"name": "timeout_encoding_test",
"build": "test",
@ -1920,6 +1908,19 @@
"gpr"
]
},
{
"name": "auth_property_iterator_test",
"build": "test",
"language": "c++",
"src": [
"test/cpp/common/auth_property_iterator_test.cc"
],
"deps": [
"grpc++",
"grpc",
"gpr"
]
},
{
"name": "channel_arguments_test",
"build": "test",

@ -36,14 +36,13 @@
#include <vector>
#include <grpc++/auth_property_iterator.h>
#include <grpc++/config.h>
namespace grpc {
class AuthContext {
public:
typedef std::pair<grpc::string, grpc::string> Property;
virtual ~AuthContext() {}
// A peer identity, in general is one or more properties (in which case they
@ -54,6 +53,10 @@ class AuthContext {
// Returns all the property values with the given name.
virtual std::vector<grpc::string> FindPropertyValues(
const grpc::string& name) const = 0;
// Iteration over all the properties.
virtual AuthPropertyIterator begin() const = 0;
virtual AuthPropertyIterator end() const = 0;
};
} // namespace grpc

@ -0,0 +1,77 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPCXX_AUTH_PROPERTY_ITERATOR_H
#define GRPCXX_AUTH_PROPERTY_ITERATOR_H
#include <iterator>
#include <vector>
#include <grpc++/config.h>
struct grpc_auth_context;
struct grpc_auth_property;
struct grpc_auth_property_iterator;
namespace grpc {
class SecureAuthContext;
typedef std::pair<grpc::string, grpc::string> AuthProperty;
class AuthPropertyIterator
: public std::iterator<std::input_iterator_tag, const AuthProperty> {
public:
~AuthPropertyIterator();
AuthPropertyIterator& operator++();
AuthPropertyIterator operator++(int);
bool operator==(const AuthPropertyIterator& rhs) const;
bool operator!=(const AuthPropertyIterator& rhs) const;
const AuthProperty operator*();
protected:
AuthPropertyIterator();
AuthPropertyIterator(const grpc_auth_property* property,
const grpc_auth_property_iterator* iter);
private:
friend class SecureAuthContext;
const grpc_auth_property* property_;
// The following items form a grpc_auth_property_iterator.
const grpc_auth_context* ctx_;
size_t index_;
const char* name_;
};
} // namespace grpc
#endif // GRPCXX_AUTH_PROPERTY_ITERATOR_H
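
For context, a brief usage sketch of the new iterator (not part of the diff): the helper name and the act of printing are hypothetical, but AuthContext::begin()/end() and ServerContext::auth_context() are the APIs touched by this change.

#include <cstdio>
#include <memory>
#include <grpc++/auth_context.h>
#include <grpc++/server_context.h>

// Hypothetical helper: walk the peer's auth properties from a server handler.
void LogPeerAuthProperties(grpc::ServerContext* context) {
  std::shared_ptr<const grpc::AuthContext> auth = context->auth_context();
  for (grpc::AuthPropertyIterator it = auth->begin(); it != auth->end(); ++it) {
    const grpc::AuthProperty prop = *it;  // std::pair<name, value>
    std::printf("auth property: %s = %s\n", prop.first.c_str(),
                prop.second.c_str());
  }
}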

@ -105,7 +105,8 @@ class CompletionQueue : public GrpcLibrary {
// Returns false if the queue is ready for destruction, true if event
bool Next(void** tag, bool* ok) {
return (AsyncNextInternal(tag, ok, gpr_inf_future) != SHUTDOWN);
return (AsyncNextInternal(tag, ok, gpr_inf_future(GPR_CLOCK_REALTIME)) !=
SHUTDOWN);
}
// Shutdown has to be called, and the CompletionQueue can only be
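
As a reminder of how Next() is typically consumed (a generic sketch, not taken from this commit):

#include <grpc++/completion_queue.h>

// Generic completion-queue drain loop; the tag handling is a placeholder.
void Drain(grpc::CompletionQueue* cq) {
  void* tag;
  bool ok;
  // Next() now blocks with gpr_inf_future(GPR_CLOCK_REALTIME) and returns
  // false only once the queue has been shut down and fully drained.
  while (cq->Next(&tag, &ok)) {
    // Handle the completed operation identified by `tag`; `ok` reports success.
  }
}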

@ -31,8 +31,8 @@
*
*/
#ifndef GRPC_INTERNAL_CPP_SERVER_THREAD_POOL_H
#define GRPC_INTERNAL_CPP_SERVER_THREAD_POOL_H
#ifndef GRPCXX_FIXED_SIZE_THREAD_POOL_H
#define GRPCXX_FIXED_SIZE_THREAD_POOL_H
#include <grpc++/config.h>
@ -50,7 +50,7 @@ class FixedSizeThreadPool GRPC_FINAL : public ThreadPoolInterface {
explicit FixedSizeThreadPool(int num_threads);
~FixedSizeThreadPool();
void ScheduleCallback(const std::function<void()>& callback) GRPC_OVERRIDE;
void Add(const std::function<void()>& callback) GRPC_OVERRIDE;
private:
grpc::mutex mu_;
@ -64,4 +64,4 @@ class FixedSizeThreadPool GRPC_FINAL : public ThreadPoolInterface {
} // namespace grpc
#endif // GRPC_INTERNAL_CPP_SERVER_THREAD_POOL_H
#endif // GRPCXX_FIXED_SIZE_THREAD_POOL_H

@ -76,6 +76,10 @@ class CallOpBuffer;
class CompletionQueue;
class Server;
namespace testing {
class InteropContextInspector;
} // namespace testing
// Interface of server side rpc context.
class ServerContext {
public:
@ -93,7 +97,7 @@ class ServerContext {
void AddInitialMetadata(const grpc::string& key, const grpc::string& value);
void AddTrailingMetadata(const grpc::string& key, const grpc::string& value);
bool IsCancelled();
bool IsCancelled() const;
const std::multimap<grpc::string, grpc::string>& client_metadata() {
return client_metadata_;
@ -102,6 +106,7 @@ class ServerContext {
std::shared_ptr<const AuthContext> auth_context() const;
private:
friend class ::grpc::testing::InteropContextInspector;
friend class ::grpc::Server;
template <class W, class R>
friend class ::grpc::ServerAsyncReader;

@ -44,7 +44,7 @@ class ThreadPoolInterface {
virtual ~ThreadPoolInterface() {}
// Schedule the given callback for execution.
virtual void ScheduleCallback(const std::function<void()>& callback) = 0;
virtual void Add(const std::function<void()>& callback) = 0;
};
ThreadPoolInterface* CreateDefaultThreadPool();
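
A minimal sketch of a user-supplied pool against the renamed interface (the inline-execution strategy is illustrative only, not how FixedSizeThreadPool works):

#include <functional>
#include <grpc++/config.h>
#include <grpc++/thread_pool_interface.h>

// Toy implementation of the renamed hook: ScheduleCallback() is now Add().
class InlineThreadPool GRPC_FINAL : public grpc::ThreadPoolInterface {
 public:
  void Add(const std::function<void()>& callback) GRPC_OVERRIDE {
    // Run inline; a real pool would enqueue onto worker threads instead.
    callback();
  }
};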

@ -45,15 +45,30 @@
extern "C" {
#endif
/* The clocks we support. */
typedef enum {
/* Monotonic clock. Epoch undefined. Always moves forwards. */
GPR_CLOCK_MONOTONIC = 0,
/* Realtime clock. May jump forwards or backwards. Settable by
the system administrator. Has its epoch at 0:00:00 UTC 1 Jan 1970. */
GPR_CLOCK_REALTIME,
/* Unmeasurable clock type: no base, created by taking the difference
between two times */
GPR_TIMESPAN
} gpr_clock_type;
typedef struct gpr_timespec {
time_t tv_sec;
int tv_nsec;
/** Against which clock was this time measured? (or GPR_TIMESPAN if
this is a relative time meaure) */
gpr_clock_type clock_type;
} gpr_timespec;
/* Time constants. */
extern const gpr_timespec gpr_time_0; /* The zero time interval. */
extern const gpr_timespec gpr_inf_future; /* The far future */
extern const gpr_timespec gpr_inf_past; /* The far past. */
gpr_timespec gpr_time_0(gpr_clock_type type); /* The zero time interval. */
gpr_timespec gpr_inf_future(gpr_clock_type type); /* The far future */
gpr_timespec gpr_inf_past(gpr_clock_type type); /* The far past. */
#define GPR_MS_PER_SEC 1000
#define GPR_US_PER_SEC 1000000
@ -62,15 +77,6 @@ extern const gpr_timespec gpr_inf_past; /* The far past. */
#define GPR_NS_PER_US 1000
#define GPR_US_PER_MS 1000
/* The clocks we support. */
typedef enum {
/* Monotonic clock. Epoch undefined. Always moves forwards. */
GPR_CLOCK_MONOTONIC = 0,
/* Realtime clock. May jump forwards or backwards. Settable by
the system administrator. Has its epoch at 0:00:00 UTC 1 Jan 1970. */
GPR_CLOCK_REALTIME
} gpr_clock_type;
/* initialize time subsystem */
void gpr_time_init(void);
@ -90,12 +96,12 @@ gpr_timespec gpr_time_sub(gpr_timespec a, gpr_timespec b);
/* Return a timespec representing a given number of time units. LONG_MIN is
interpreted as gpr_inf_past, and LONG_MAX as gpr_inf_future. */
gpr_timespec gpr_time_from_micros(long x);
gpr_timespec gpr_time_from_nanos(long x);
gpr_timespec gpr_time_from_millis(long x);
gpr_timespec gpr_time_from_seconds(long x);
gpr_timespec gpr_time_from_minutes(long x);
gpr_timespec gpr_time_from_hours(long x);
gpr_timespec gpr_time_from_micros(long x, gpr_clock_type clock_type);
gpr_timespec gpr_time_from_nanos(long x, gpr_clock_type clock_type);
gpr_timespec gpr_time_from_millis(long x, gpr_clock_type clock_type);
gpr_timespec gpr_time_from_seconds(long x, gpr_clock_type clock_type);
gpr_timespec gpr_time_from_minutes(long x, gpr_clock_type clock_type);
gpr_timespec gpr_time_from_hours(long x, gpr_clock_type clock_type);
gpr_int32 gpr_time_to_millis(gpr_timespec timespec);
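
A short sketch of the resulting API surface (the 250ms budget is an arbitrary example): absolute timestamps now carry a clock, relative durations use GPR_TIMESPAN, and the arithmetic helpers assert that the two are not mixed incorrectly.

#include <grpc/support/time.h>

void deadline_example(void) {
  gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);               /* absolute */
  gpr_timespec budget = gpr_time_from_millis(250, GPR_TIMESPAN); /* duration */
  gpr_timespec deadline = gpr_time_add(now, budget); /* b must be a span */
  if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) < 0) {
    /* deadline is finite; both operands share GPR_CLOCK_REALTIME */
  }
}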

@ -52,4 +52,24 @@
b = x; \
} while (0)
/** Set the \a n-th bit of \a i (a mutable pointer). */
#define GPR_BITSET(i, n) ((*(i)) |= (1u << (n)))
/** Clear the \a n-th bit of \a i (a mutable pointer). */
#define GPR_BITCLEAR(i, n) ((*(i)) &= ~(1u << (n)))
/** Get the \a n-th bit of \a i */
#define GPR_BITGET(i, n) (((i) & (1u << (n))) != 0)
#define GPR_INTERNAL_HEXDIGIT_BITCOUNT(x) \
((x) - (((x) >> 1) & 0x77777777) - (((x) >> 2) & 0x33333333) - \
(((x) >> 3) & 0x11111111))
/** Returns number of bits set in bitset \a i */
#define GPR_BITCOUNT(i) \
(((GPR_INTERNAL_HEXDIGIT_BITCOUNT(i) + \
(GPR_INTERNAL_HEXDIGIT_BITCOUNT(i) >> 4)) & \
0x0f0f0f0f) % \
255)
#endif /* GRPC_SUPPORT_USEFUL_H */
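
A quick sketch of the new bit helpers on a plain unsigned flag word (the bit position is arbitrary):

#include <assert.h>
#include <grpc/support/useful.h>

void bit_macros_example(void) {
  unsigned flags = 0;
  GPR_BITSET(&flags, 3);              /* flags == 0x8 */
  assert(GPR_BITGET(flags, 3) == 1);
  assert(GPR_BITCOUNT(flags) == 1);
  GPR_BITCLEAR(&flags, 3);            /* flags == 0 */
  assert(GPR_BITCOUNT(flags) == 0);
}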

@ -67,7 +67,7 @@ void PrintMethodSignature(Printer *printer, const MethodDescriptor *method,
printer->Print(vars, "- ($return_type$)$method_name$With");
if (method->client_streaming()) {
printer->Print("RequestsWriter:(id<GRXWriter>)requestWriter");
printer->Print("RequestsWriter:(GRXWriter *)requestWriter");
} else {
printer->Print(vars, "Request:($request_class$ *)request");
}

@ -145,7 +145,7 @@ static void handle_op_after_cancellation(grpc_call_element *elem,
mdb.list.head = &calld->status;
mdb.list.tail = &calld->details;
mdb.garbage.head = mdb.garbage.tail = NULL;
mdb.deadline = gpr_inf_future;
mdb.deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
grpc_sopb_add_metadata(op->recv_ops, mdb);
*op->recv_state = GRPC_STREAM_CLOSED;
op->on_done_recv->cb(op->on_done_recv->cb_arg, 1);
@ -587,7 +587,7 @@ static void init_call_elem(grpc_call_element *elem,
gpr_mu_init(&calld->mu_state);
calld->elem = elem;
calld->state = CALL_CREATED;
calld->deadline = gpr_inf_future;
calld->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
}
/* Destructor for call_data */

@ -36,6 +36,8 @@
#include "src/core/client_config/lb_policy.h"
/** Returns a load balancing policy instance that picks up the first subchannel
* from \a subchannels to succesfully connect */
grpc_lb_policy *grpc_create_pick_first_lb_policy(grpc_subchannel **subchannels,
size_t num_subchannels);

@ -305,7 +305,7 @@ static void continue_connect(grpc_subchannel *c) {
static void start_connect(grpc_subchannel *c) {
gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
c->next_attempt = now;
c->backoff_delta = gpr_time_from_seconds(1);
c->backoff_delta = gpr_time_from_seconds(1, GPR_TIMESPAN);
continue_connect(c);
}

@ -102,7 +102,8 @@ void grpc_alarm_list_init(gpr_timespec now) {
void grpc_alarm_list_shutdown(void) {
int i;
while (run_some_expired_alarms(NULL, gpr_inf_future, NULL, 0))
while (run_some_expired_alarms(NULL, gpr_inf_future(GPR_CLOCK_REALTIME), NULL,
0))
;
for (i = 0; i < NUM_SHARDS; i++) {
shard_type *shard = &g_shards[i];
@ -127,6 +128,7 @@ static gpr_timespec dbl_to_ts(double d) {
gpr_timespec ts;
ts.tv_sec = d;
ts.tv_nsec = 1e9 * (d - ts.tv_sec);
ts.clock_type = GPR_TIMESPAN;
return ts;
}

@ -157,7 +157,7 @@ void grpc_iocp_shutdown(void) {
BOOL success;
gpr_event_set(&g_shutdown_iocp, (void *)1);
grpc_iocp_kick();
gpr_event_wait(&g_iocp_done, gpr_inf_future);
gpr_event_wait(&g_iocp_done, gpr_inf_future(GPR_CLOCK_REALTIME));
success = CloseHandle(g_iocp);
GPR_ASSERT(success);
}

@ -57,9 +57,9 @@ static grpc_iomgr_object g_root_object;
static void background_callback_executor(void *ignored) {
gpr_mu_lock(&g_mu);
while (!g_shutdown) {
gpr_timespec deadline = gpr_inf_future;
gpr_timespec short_deadline =
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100));
gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
gpr_timespec short_deadline = gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100, GPR_TIMESPAN));
if (g_cbs_head) {
grpc_iomgr_closure *closure = g_cbs_head;
g_cbs_head = closure->next;
@ -110,8 +110,8 @@ static size_t count_objects(void) {
void grpc_iomgr_shutdown(void) {
grpc_iomgr_object *obj;
grpc_iomgr_closure *closure;
gpr_timespec shutdown_deadline =
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(10));
gpr_timespec shutdown_deadline = gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(10, GPR_TIMESPAN));
gpr_timespec last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
gpr_mu_lock(&g_mu);
@ -119,7 +119,7 @@ void grpc_iomgr_shutdown(void) {
while (g_cbs_head != NULL || g_root_object.next != &g_root_object) {
if (gpr_time_cmp(
gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), last_warning_time),
gpr_time_from_seconds(1)) >= 0) {
gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) {
if (g_cbs_head != NULL && g_root_object.next != &g_root_object) {
gpr_log(GPR_DEBUG,
"Waiting for %d iomgr objects to be destroyed and executing "
@ -145,13 +145,13 @@ void grpc_iomgr_shutdown(void) {
} while (g_cbs_head);
continue;
}
if (grpc_alarm_check(&g_mu, gpr_inf_future, NULL)) {
if (grpc_alarm_check(&g_mu, gpr_inf_future(GPR_CLOCK_REALTIME), NULL)) {
continue;
}
if (g_root_object.next != &g_root_object) {
int timeout = 0;
gpr_timespec short_deadline =
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100));
gpr_timespec short_deadline = gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100, GPR_TIMESPAN));
while (gpr_cv_wait(&g_rcv, &g_mu, short_deadline) && g_cbs_head == NULL) {
if (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), shutdown_deadline) > 0) {
timeout = 1;
@ -173,7 +173,8 @@ void grpc_iomgr_shutdown(void) {
gpr_mu_unlock(&g_mu);
grpc_kick_poller();
gpr_event_wait(&g_background_callback_executor_done, gpr_inf_future);
gpr_event_wait(&g_background_callback_executor_done,
gpr_inf_future(GPR_CLOCK_REALTIME));
grpc_alarm_list_shutdown();

@ -50,12 +50,17 @@ typedef struct {
} pollset_hdr;
static void multipoll_with_epoll_pollset_add_fd(grpc_pollset *pollset,
grpc_fd *fd) {
grpc_fd *fd,
int and_unlock_pollset) {
pollset_hdr *h = pollset->data.ptr;
struct epoll_event ev;
int err;
grpc_fd_watcher watcher;
if (and_unlock_pollset) {
gpr_mu_unlock(&pollset->mu);
}
/* We pretend to be polling whilst adding an fd to keep the fd from being
closed during the add. This may result in a spurious wakeup being assigned
to this pollset whilst adding, but that should be benign. */
@ -76,9 +81,15 @@ static void multipoll_with_epoll_pollset_add_fd(grpc_pollset *pollset,
}
static void multipoll_with_epoll_pollset_del_fd(grpc_pollset *pollset,
grpc_fd *fd) {
grpc_fd *fd,
int and_unlock_pollset) {
pollset_hdr *h = pollset->data.ptr;
int err;
if (and_unlock_pollset) {
gpr_mu_unlock(&pollset->mu);
}
/* Note that this can race with concurrent poll, but that should be fine since
* at worst it creates a spurious read event on a reused grpc_fd object. */
err = epoll_ctl(h->epoll_fd, EPOLL_CTL_DEL, fd->fd, NULL);
@ -183,7 +194,7 @@ static void epoll_become_multipoller(grpc_pollset *pollset, grpc_fd **fds,
abort();
}
for (i = 0; i < nfds; i++) {
multipoll_with_epoll_pollset_add_fd(pollset, fds[i]);
multipoll_with_epoll_pollset_add_fd(pollset, fds[i], 0);
}
grpc_wakeup_fd_create(&h->wakeup_fd);

@ -66,12 +66,13 @@ typedef struct {
} pollset_hdr;
static void multipoll_with_poll_pollset_add_fd(grpc_pollset *pollset,
grpc_fd *fd) {
grpc_fd *fd,
int and_unlock_pollset) {
size_t i;
pollset_hdr *h = pollset->data.ptr;
/* TODO(ctiller): this is O(num_fds^2); maybe switch to a hash set here */
for (i = 0; i < h->fd_count; i++) {
if (h->fds[i] == fd) return;
if (h->fds[i] == fd) goto exit;
}
if (h->fd_count == h->fd_capacity) {
h->fd_capacity = GPR_MAX(h->fd_capacity + 8, h->fd_count * 3 / 2);
@ -79,10 +80,15 @@ static void multipoll_with_poll_pollset_add_fd(grpc_pollset *pollset,
}
h->fds[h->fd_count++] = fd;
GRPC_FD_REF(fd, "multipoller");
exit:
if (and_unlock_pollset) {
gpr_mu_unlock(&pollset->mu);
}
}
static void multipoll_with_poll_pollset_del_fd(grpc_pollset *pollset,
grpc_fd *fd) {
grpc_fd *fd,
int and_unlock_pollset) {
/* will get removed next poll cycle */
pollset_hdr *h = pollset->data.ptr;
if (h->del_count == h->del_capacity) {
@ -91,6 +97,9 @@ static void multipoll_with_poll_pollset_del_fd(grpc_pollset *pollset,
}
h->dels[h->del_count++] = fd;
GRPC_FD_REF(fd, "multipoller_del");
if (and_unlock_pollset) {
gpr_mu_unlock(&pollset->mu);
}
}
static void end_polling(grpc_pollset *pollset) {

@ -105,14 +105,28 @@ void grpc_pollset_init(grpc_pollset *pollset) {
void grpc_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
gpr_mu_lock(&pollset->mu);
pollset->vtable->add_fd(pollset, fd);
pollset->vtable->add_fd(pollset, fd, 1);
/* the following (enabled only in debug) will reacquire and then release
our lock - meaning that if the unlocking flag passed to add_fd above is
not respected, the code will deadlock (in a way that we have a chance of
debugging) */
#ifndef NDEBUG
gpr_mu_lock(&pollset->mu);
gpr_mu_unlock(&pollset->mu);
#endif
}
void grpc_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd) {
gpr_mu_lock(&pollset->mu);
pollset->vtable->del_fd(pollset, fd);
pollset->vtable->del_fd(pollset, fd, 1);
/* the following (enabled only in debug) will reacquire and then release
our lock - meaning that if the unlocking flag passed to del_fd above is
not respected, the code will deadlock (in a way that we have a chance of
debugging) */
#ifndef NDEBUG
gpr_mu_lock(&pollset->mu);
gpr_mu_unlock(&pollset->mu);
#endif
}
static void finish_shutdown(grpc_pollset *pollset) {
@ -191,17 +205,17 @@ int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline,
gpr_timespec now) {
gpr_timespec timeout;
static const int max_spin_polling_us = 10;
if (gpr_time_cmp(deadline, gpr_inf_future) == 0) {
if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) == 0) {
return -1;
}
if (gpr_time_cmp(
deadline,
gpr_time_add(now, gpr_time_from_micros(max_spin_polling_us))) <= 0) {
if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros(
max_spin_polling_us,
GPR_TIMESPAN))) <= 0) {
return 0;
}
timeout = gpr_time_sub(deadline, now);
return gpr_time_to_millis(
gpr_time_add(timeout, gpr_time_from_nanos(GPR_NS_PER_SEC - 1)));
return gpr_time_to_millis(gpr_time_add(
timeout, gpr_time_from_nanos(GPR_NS_PER_SEC - 1, GPR_TIMESPAN)));
}
/*
@ -257,7 +271,7 @@ static void basic_do_promote(void *args, int success) {
} else if (grpc_fd_is_orphaned(fd)) {
/* Don't try to add it to anything, we'll drop our ref on it below */
} else if (pollset->vtable != original_vtable) {
pollset->vtable->add_fd(pollset, fd);
pollset->vtable->add_fd(pollset, fd, 0);
} else if (fd != pollset->data.ptr) {
grpc_fd *fds[2];
fds[0] = pollset->data.ptr;
@ -287,10 +301,11 @@ static void basic_do_promote(void *args, int success) {
GRPC_FD_UNREF(fd, "basicpoll_add");
}
static void basic_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
static void basic_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd,
int and_unlock_pollset) {
grpc_unary_promote_args *up_args;
GPR_ASSERT(fd);
if (fd == pollset->data.ptr) return;
if (fd == pollset->data.ptr) goto exit;
if (!pollset->counter) {
/* Fast path -- no in flight cbs */
@ -313,7 +328,7 @@ static void basic_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
pollset->data.ptr = fd;
GRPC_FD_REF(fd, "basicpoll");
}
return;
goto exit;
}
/* Now we need to promote. This needs to happen when we're not polling. Since
@ -329,14 +344,24 @@ static void basic_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
grpc_iomgr_add_callback(&up_args->promotion_closure);
grpc_pollset_kick(pollset);
exit:
if (and_unlock_pollset) {
gpr_mu_unlock(&pollset->mu);
}
}
static void basic_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd) {
static void basic_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd,
int and_unlock_pollset) {
GPR_ASSERT(fd);
if (fd == pollset->data.ptr) {
GRPC_FD_UNREF(pollset->data.ptr, "basicpoll");
pollset->data.ptr = NULL;
}
if (and_unlock_pollset) {
gpr_mu_unlock(&pollset->mu);
}
}
static void basic_pollset_maybe_work(grpc_pollset *pollset,

@ -66,8 +66,10 @@ typedef struct grpc_pollset {
} grpc_pollset;
struct grpc_pollset_vtable {
void (*add_fd)(grpc_pollset *pollset, struct grpc_fd *fd);
void (*del_fd)(grpc_pollset *pollset, struct grpc_fd *fd);
void (*add_fd)(grpc_pollset *pollset, struct grpc_fd *fd,
int and_unlock_pollset);
void (*del_fd)(grpc_pollset *pollset, struct grpc_fd *fd,
int and_unlock_pollset);
void (*maybe_work)(grpc_pollset *pollset, gpr_timespec deadline,
gpr_timespec now, int allow_synchronous_callback);
void (*kick)(grpc_pollset *pollset);

@ -38,7 +38,7 @@
/* A grpc_pollset_set is a set of pollsets that are interested in an
action. Adding a pollset to a pollset_set automatically adds any
fd's (etc) that have been registered with the set_set with that pollset.
fd's (etc) that have been registered with the set_set to that pollset.
Registering fd's automatically adds them to all current pollsets. */
#ifdef GPR_POSIX_SOCKET

@ -116,7 +116,7 @@ void grpc_tcp_server_destroy(grpc_tcp_server *s,
}
/* This happens asynchronously. Wait while that happens. */
while (s->active_ports) {
gpr_cv_wait(&s->cv, &s->mu, gpr_inf_future);
gpr_cv_wait(&s->cv, &s->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
}
gpr_mu_unlock(&s->mu);

@ -324,7 +324,7 @@ static void jwt_reset_cache(grpc_jwt_credentials *c) {
gpr_free(c->cached.service_url);
c->cached.service_url = NULL;
}
c->cached.jwt_expiration = gpr_inf_past;
c->cached.jwt_expiration = gpr_inf_past(GPR_CLOCK_REALTIME);
}
static void jwt_destroy(grpc_credentials *creds) {
@ -347,8 +347,8 @@ static void jwt_get_request_metadata(grpc_credentials *creds,
grpc_credentials_metadata_cb cb,
void *user_data) {
grpc_jwt_credentials *c = (grpc_jwt_credentials *)creds;
gpr_timespec refresh_threshold = {GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS,
0};
gpr_timespec refresh_threshold = gpr_time_from_seconds(
GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS, GPR_TIMESPAN);
/* See if we can return a cached jwt. */
grpc_credentials_md_store *jwt_md = NULL;
@ -516,6 +516,7 @@ grpc_oauth2_token_fetcher_credentials_parse_server_response(
access_token->value);
token_lifetime->tv_sec = strtol(expires_in->value, NULL, 10);
token_lifetime->tv_nsec = 0;
token_lifetime->clock_type = GPR_TIMESPAN;
if (*token_md != NULL) grpc_credentials_md_store_unref(*token_md);
*token_md = grpc_credentials_md_store_create(1);
grpc_credentials_md_store_add_cstrings(
@ -552,7 +553,7 @@ static void on_oauth2_token_fetcher_http_response(
r->cb(r->user_data, c->access_token_md->entries,
c->access_token_md->num_entries, status);
} else {
c->token_expiration = gpr_inf_past;
c->token_expiration = gpr_inf_past(GPR_CLOCK_REALTIME);
r->cb(r->user_data, NULL, 0, status);
}
gpr_mu_unlock(&c->mu);
@ -564,8 +565,8 @@ static void oauth2_token_fetcher_get_request_metadata(
grpc_credentials_metadata_cb cb, void *user_data) {
grpc_oauth2_token_fetcher_credentials *c =
(grpc_oauth2_token_fetcher_credentials *)creds;
gpr_timespec refresh_threshold = {GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS,
0};
gpr_timespec refresh_threshold = gpr_time_from_seconds(
GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS, GPR_TIMESPAN);
grpc_credentials_md_store *cached_access_token_md = NULL;
{
gpr_mu_lock(&c->mu);
@ -596,7 +597,7 @@ static void init_oauth2_token_fetcher(grpc_oauth2_token_fetcher_credentials *c,
c->base.type = GRPC_CREDENTIALS_TYPE_OAUTH2;
gpr_ref_init(&c->base.refcount, 1);
gpr_mu_init(&c->mu);
c->token_expiration = gpr_inf_past;
c->token_expiration = gpr_inf_past(GPR_CLOCK_REALTIME);
c->fetch_func = fetch_func;
grpc_httpcli_context_init(&c->httpcli_context);
}

@ -91,7 +91,7 @@ static int is_stack_running_on_compute_engine(void) {
/* The http call is local. If it takes more than one sec, it is for sure not
on compute engine. */
gpr_timespec max_detection_delay = {1, 0};
gpr_timespec max_detection_delay = gpr_time_from_seconds(1, GPR_TIMESPAN);
grpc_pollset_init(&detector.pollset);
detector.is_done = 0;
@ -112,7 +112,7 @@ static int is_stack_running_on_compute_engine(void) {
called once for the lifetime of the process by the default credentials. */
gpr_mu_lock(GRPC_POLLSET_MU(&detector.pollset));
while (!detector.is_done) {
grpc_pollset_work(&detector.pollset, gpr_inf_future);
grpc_pollset_work(&detector.pollset, gpr_inf_future(GPR_CLOCK_REALTIME));
}
gpr_mu_unlock(GRPC_POLLSET_MU(&detector.pollset));

@ -49,7 +49,7 @@
/* --- Constants. --- */
/* 1 hour max. */
const gpr_timespec grpc_max_auth_token_lifetime = {3600, 0};
const gpr_timespec grpc_max_auth_token_lifetime = {3600, 0, GPR_TIMESPAN};
#define GRPC_JWT_RSA_SHA256_ALGORITHM "RS256"
#define GRPC_JWT_TYPE "JWT"

@ -109,7 +109,7 @@ static const char *validate_string_field(const grpc_json *json,
static gpr_timespec validate_time_field(const grpc_json *json,
const char *key) {
gpr_timespec result = gpr_time_0;
gpr_timespec result = gpr_time_0(GPR_CLOCK_REALTIME);
if (json->type != GRPC_JSON_NUMBER) {
gpr_log(GPR_ERROR, "Invalid %s field [%s]", key, json->value);
return result;
@ -221,17 +221,17 @@ const char *grpc_jwt_claims_audience(const grpc_jwt_claims *claims) {
}
gpr_timespec grpc_jwt_claims_issued_at(const grpc_jwt_claims *claims) {
if (claims == NULL) return gpr_inf_past;
if (claims == NULL) return gpr_inf_past(GPR_CLOCK_REALTIME);
return claims->iat;
}
gpr_timespec grpc_jwt_claims_expires_at(const grpc_jwt_claims *claims) {
if (claims == NULL) return gpr_inf_future;
if (claims == NULL) return gpr_inf_future(GPR_CLOCK_REALTIME);
return claims->exp;
}
gpr_timespec grpc_jwt_claims_not_before(const grpc_jwt_claims *claims) {
if (claims == NULL) return gpr_inf_past;
if (claims == NULL) return gpr_inf_past(GPR_CLOCK_REALTIME);
return claims->nbf;
}
@ -242,9 +242,9 @@ grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_json *json, gpr_slice buffer) {
memset(claims, 0, sizeof(grpc_jwt_claims));
claims->json = json;
claims->buffer = buffer;
claims->iat = gpr_inf_past;
claims->nbf = gpr_inf_past;
claims->exp = gpr_inf_future;
claims->iat = gpr_inf_past(GPR_CLOCK_REALTIME);
claims->nbf = gpr_inf_past(GPR_CLOCK_REALTIME);
claims->exp = gpr_inf_future(GPR_CLOCK_REALTIME);
/* Per the spec, all fields are optional. */
for (cur = json->child; cur != NULL; cur = cur->next) {
@ -262,13 +262,16 @@ grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_json *json, gpr_slice buffer) {
if (claims->jti == NULL) goto error;
} else if (strcmp(cur->key, "iat") == 0) {
claims->iat = validate_time_field(cur, "iat");
if (gpr_time_cmp(claims->iat, gpr_time_0) == 0) goto error;
if (gpr_time_cmp(claims->iat, gpr_time_0(GPR_CLOCK_REALTIME)) == 0)
goto error;
} else if (strcmp(cur->key, "exp") == 0) {
claims->exp = validate_time_field(cur, "exp");
if (gpr_time_cmp(claims->exp, gpr_time_0) == 0) goto error;
if (gpr_time_cmp(claims->exp, gpr_time_0(GPR_CLOCK_REALTIME)) == 0)
goto error;
} else if (strcmp(cur->key, "nbf") == 0) {
claims->nbf = validate_time_field(cur, "nbf");
if (gpr_time_cmp(claims->nbf, gpr_time_0) == 0) goto error;
if (gpr_time_cmp(claims->nbf, gpr_time_0(GPR_CLOCK_REALTIME)) == 0)
goto error;
}
}
return claims;
@ -359,10 +362,10 @@ void verifier_cb_ctx_destroy(verifier_cb_ctx *ctx) {
/* --- grpc_jwt_verifier object. --- */
/* Clock skew defaults to one minute. */
gpr_timespec grpc_jwt_verifier_clock_skew = {60, 0};
gpr_timespec grpc_jwt_verifier_clock_skew = {60, 0, GPR_TIMESPAN};
/* Max delay defaults to one minute. */
gpr_timespec grpc_jwt_verifier_max_delay = {60, 0};
gpr_timespec grpc_jwt_verifier_max_delay = {60, 0, GPR_TIMESPAN};
typedef struct {
char *email_domain;

@ -121,8 +121,9 @@ void gpr_cancellable_cancel(gpr_cancellable *c) {
} else {
gpr_event ev;
gpr_event_init(&ev);
gpr_event_wait(&ev, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_micros(1000)));
gpr_event_wait(
&ev, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_micros(1000, GPR_TIMESPAN)));
}
}
} while (failures != 0);

@ -65,8 +65,9 @@ typedef union lockfree_node {
} lockfree_node;
#define ENTRY_ALIGNMENT_BITS 3 /* make sure that entries aligned to 8-bytes */
#define INVALID_ENTRY_INDEX ((1 << 16) - 1) /* reserve this entry as invalid \
*/
#define INVALID_ENTRY_INDEX \
((1 << 16) - 1) /* reserve this entry as invalid \
*/
struct gpr_stack_lockfree {
lockfree_node *entries;
@ -96,7 +97,7 @@ void gpr_stack_lockfree_destroy(gpr_stack_lockfree *stack) {
gpr_free(stack);
}
void gpr_stack_lockfree_push(gpr_stack_lockfree *stack, int entry) {
int gpr_stack_lockfree_push(gpr_stack_lockfree *stack, int entry) {
lockfree_node head;
lockfree_node newhead;
@ -112,6 +113,7 @@ void gpr_stack_lockfree_push(gpr_stack_lockfree *stack, int entry) {
stack->entries[entry].contents.index = head.contents.index;
} while (!gpr_atm_rel_cas(&(stack->head.atm), head.atm, newhead.atm));
/* Use rel_cas above to make sure that entry index is set properly */
return head.contents.index == INVALID_ENTRY_INDEX;
}
int gpr_stack_lockfree_pop(gpr_stack_lockfree *stack) {

@ -42,7 +42,8 @@ gpr_stack_lockfree* gpr_stack_lockfree_create(int entries);
void gpr_stack_lockfree_destroy(gpr_stack_lockfree* stack);
/* Pass in a valid entry number for the next stack entry */
void gpr_stack_lockfree_push(gpr_stack_lockfree* stack, int entry);
/* Returns 1 if this is the first element on the stack, 0 otherwise */
int gpr_stack_lockfree_push(gpr_stack_lockfree*, int entry);
/* Returns -1 on empty or the actual entry number */
int gpr_stack_lockfree_pop(gpr_stack_lockfree* stack);
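
A small sketch of the revised contract (entry values are arbitrary; the header is internal to the tree):

#include "src/core/support/stack_lockfree.h"

void stack_lockfree_example(void) {
  gpr_stack_lockfree *stack = gpr_stack_lockfree_create(128);
  int was_empty = gpr_stack_lockfree_push(stack, 7); /* 1: stack was empty */
  gpr_stack_lockfree_push(stack, 9);                 /* returns 0 this time */
  int top = gpr_stack_lockfree_pop(stack);           /* 9 (LIFO order) */
  int next = gpr_stack_lockfree_pop(stack);          /* 7 */
  int empty = gpr_stack_lockfree_pop(stack);         /* -1: stack is empty */
  (void)was_empty; (void)top; (void)next; (void)empty;
  gpr_stack_lockfree_destroy(stack);
}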

@ -63,7 +63,7 @@ void gpr_cv_destroy(gpr_cv *cv) { GPR_ASSERT(pthread_cond_destroy(cv) == 0); }
int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline) {
int err = 0;
if (gpr_time_cmp(abs_deadline, gpr_inf_future) == 0) {
if (gpr_time_cmp(abs_deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) == 0) {
err = pthread_cond_wait(cv, mu);
} else {
struct timespec abs_deadline_ts;

@ -83,7 +83,7 @@ int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline) {
int timeout = 0;
DWORD timeout_max_ms;
mu->locked = 0;
if (gpr_time_cmp(abs_deadline, gpr_inf_future) == 0) {
if (gpr_time_cmp(abs_deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) == 0) {
SleepConditionVariableCS(cv, &mu->cs, INFINITE);
} else {
gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);

@ -41,6 +41,7 @@
int gpr_time_cmp(gpr_timespec a, gpr_timespec b) {
int cmp = (a.tv_sec > b.tv_sec) - (a.tv_sec < b.tv_sec);
GPR_ASSERT(a.clock_type == b.clock_type);
if (cmp == 0) {
cmp = (a.tv_nsec > b.tv_nsec) - (a.tv_nsec < b.tv_nsec);
}
@ -71,19 +72,40 @@ gpr_timespec gpr_time_max(gpr_timespec a, gpr_timespec b) {
((t)(TYPE_IS_SIGNED(t) ? (TOP_BIT_OF_TYPE(t) - 1) \
: ((TOP_BIT_OF_TYPE(t) - 1) << 1) + 1))
const gpr_timespec gpr_time_0 = {0, 0};
const gpr_timespec gpr_inf_future = {TYPE_MAX(time_t), 0};
const gpr_timespec gpr_inf_past = {TYPE_MIN(time_t), 0};
gpr_timespec gpr_time_0(gpr_clock_type type) {
gpr_timespec out;
out.tv_sec = 0;
out.tv_nsec = 0;
out.clock_type = type;
return out;
}
gpr_timespec gpr_inf_future(gpr_clock_type type) {
gpr_timespec out;
out.tv_sec = TYPE_MAX(time_t);
out.tv_nsec = 0;
out.clock_type = type;
return out;
}
gpr_timespec gpr_inf_past(gpr_clock_type type) {
gpr_timespec out;
out.tv_sec = TYPE_MIN(time_t);
out.tv_nsec = 0;
out.clock_type = type;
return out;
}
/* TODO(ctiller): consider merging _nanos, _micros, _millis into a single
function for maintainability. Similarly for _seconds, _minutes, and _hours */
gpr_timespec gpr_time_from_nanos(long ns) {
gpr_timespec gpr_time_from_nanos(long ns, gpr_clock_type type) {
gpr_timespec result;
result.clock_type = type;
if (ns == LONG_MAX) {
result = gpr_inf_future;
result = gpr_inf_future(type);
} else if (ns == LONG_MIN) {
result = gpr_inf_past;
result = gpr_inf_past(type);
} else if (ns >= 0) {
result.tv_sec = ns / GPR_NS_PER_SEC;
result.tv_nsec = (int)(ns - result.tv_sec * GPR_NS_PER_SEC);
@ -95,12 +117,13 @@ gpr_timespec gpr_time_from_nanos(long ns) {
return result;
}
gpr_timespec gpr_time_from_micros(long us) {
gpr_timespec gpr_time_from_micros(long us, gpr_clock_type type) {
gpr_timespec result;
result.clock_type = type;
if (us == LONG_MAX) {
result = gpr_inf_future;
result = gpr_inf_future(type);
} else if (us == LONG_MIN) {
result = gpr_inf_past;
result = gpr_inf_past(type);
} else if (us >= 0) {
result.tv_sec = us / 1000000;
result.tv_nsec = (int)((us - result.tv_sec * 1000000) * 1000);
@ -112,12 +135,13 @@ gpr_timespec gpr_time_from_micros(long us) {
return result;
}
gpr_timespec gpr_time_from_millis(long ms) {
gpr_timespec gpr_time_from_millis(long ms, gpr_clock_type type) {
gpr_timespec result;
result.clock_type = type;
if (ms == LONG_MAX) {
result = gpr_inf_future;
result = gpr_inf_future(type);
} else if (ms == LONG_MIN) {
result = gpr_inf_past;
result = gpr_inf_past(type);
} else if (ms >= 0) {
result.tv_sec = ms / 1000;
result.tv_nsec = (int)((ms - result.tv_sec * 1000) * 1000000);
@ -129,12 +153,13 @@ gpr_timespec gpr_time_from_millis(long ms) {
return result;
}
gpr_timespec gpr_time_from_seconds(long s) {
gpr_timespec gpr_time_from_seconds(long s, gpr_clock_type type) {
gpr_timespec result;
result.clock_type = type;
if (s == LONG_MAX) {
result = gpr_inf_future;
result = gpr_inf_future(type);
} else if (s == LONG_MIN) {
result = gpr_inf_past;
result = gpr_inf_past(type);
} else {
result.tv_sec = s;
result.tv_nsec = 0;
@ -142,12 +167,13 @@ gpr_timespec gpr_time_from_seconds(long s) {
return result;
}
gpr_timespec gpr_time_from_minutes(long m) {
gpr_timespec gpr_time_from_minutes(long m, gpr_clock_type type) {
gpr_timespec result;
result.clock_type = type;
if (m >= LONG_MAX / 60) {
result = gpr_inf_future;
result = gpr_inf_future(type);
} else if (m <= LONG_MIN / 60) {
result = gpr_inf_past;
result = gpr_inf_past(type);
} else {
result.tv_sec = m * 60;
result.tv_nsec = 0;
@ -155,12 +181,13 @@ gpr_timespec gpr_time_from_minutes(long m) {
return result;
}
gpr_timespec gpr_time_from_hours(long h) {
gpr_timespec gpr_time_from_hours(long h, gpr_clock_type type) {
gpr_timespec result;
result.clock_type = type;
if (h >= LONG_MAX / 3600) {
result = gpr_inf_future;
result = gpr_inf_future(type);
} else if (h <= LONG_MIN / 3600) {
result = gpr_inf_past;
result = gpr_inf_past(type);
} else {
result.tv_sec = h * 3600;
result.tv_nsec = 0;
@ -171,6 +198,8 @@ gpr_timespec gpr_time_from_hours(long h) {
gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b) {
gpr_timespec sum;
int inc = 0;
GPR_ASSERT(b.clock_type == GPR_TIMESPAN);
sum.clock_type = a.clock_type;
sum.tv_nsec = a.tv_nsec + b.tv_nsec;
if (sum.tv_nsec >= GPR_NS_PER_SEC) {
sum.tv_nsec -= GPR_NS_PER_SEC;
@ -180,14 +209,14 @@ gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b) {
sum = a;
} else if (b.tv_sec == TYPE_MAX(time_t) ||
(b.tv_sec >= 0 && a.tv_sec >= TYPE_MAX(time_t) - b.tv_sec)) {
sum = gpr_inf_future;
sum = gpr_inf_future(sum.clock_type);
} else if (b.tv_sec == TYPE_MIN(time_t) ||
(b.tv_sec <= 0 && a.tv_sec <= TYPE_MIN(time_t) - b.tv_sec)) {
sum = gpr_inf_past;
sum = gpr_inf_past(sum.clock_type);
} else {
sum.tv_sec = a.tv_sec + b.tv_sec;
if (inc != 0 && sum.tv_sec == TYPE_MAX(time_t) - 1) {
sum = gpr_inf_future;
sum = gpr_inf_future(sum.clock_type);
} else {
sum.tv_sec += inc;
}
@ -198,6 +227,12 @@ gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b) {
gpr_timespec gpr_time_sub(gpr_timespec a, gpr_timespec b) {
gpr_timespec diff;
int dec = 0;
if (b.clock_type == GPR_TIMESPAN) {
diff.clock_type = a.clock_type;
} else {
GPR_ASSERT(a.clock_type == b.clock_type);
diff.clock_type = GPR_TIMESPAN;
}
diff.tv_nsec = a.tv_nsec - b.tv_nsec;
if (diff.tv_nsec < 0) {
diff.tv_nsec += GPR_NS_PER_SEC;
@ -207,14 +242,14 @@ gpr_timespec gpr_time_sub(gpr_timespec a, gpr_timespec b) {
diff = a;
} else if (b.tv_sec == TYPE_MIN(time_t) ||
(b.tv_sec <= 0 && a.tv_sec >= TYPE_MAX(time_t) + b.tv_sec)) {
diff = gpr_inf_future;
diff = gpr_inf_future(GPR_CLOCK_REALTIME);
} else if (b.tv_sec == TYPE_MAX(time_t) ||
(b.tv_sec >= 0 && a.tv_sec <= TYPE_MIN(time_t) + b.tv_sec)) {
diff = gpr_inf_past;
diff = gpr_inf_past(GPR_CLOCK_REALTIME);
} else {
diff.tv_sec = a.tv_sec - b.tv_sec;
if (dec != 0 && diff.tv_sec == TYPE_MIN(time_t) + 1) {
diff = gpr_inf_past;
diff = gpr_inf_past(GPR_CLOCK_REALTIME);
} else {
diff.tv_sec -= dec;
}
@ -225,6 +260,9 @@ gpr_timespec gpr_time_sub(gpr_timespec a, gpr_timespec b) {
int gpr_time_similar(gpr_timespec a, gpr_timespec b, gpr_timespec threshold) {
int cmp_ab;
GPR_ASSERT(a.clock_type == b.clock_type);
GPR_ASSERT(threshold.clock_type == GPR_TIMESPAN);
cmp_ab = gpr_time_cmp(a, b);
if (cmp_ab == 0) return 1;
if (cmp_ab < 0) {

@ -38,6 +38,7 @@
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
static struct timespec timespec_from_gpr(gpr_timespec gts) {
@ -48,10 +49,12 @@ static struct timespec timespec_from_gpr(gpr_timespec gts) {
}
#if _POSIX_TIMERS > 0
static gpr_timespec gpr_from_timespec(struct timespec ts) {
static gpr_timespec gpr_from_timespec(struct timespec ts,
gpr_clock_type clock) {
gpr_timespec rv;
rv.tv_sec = ts.tv_sec;
rv.tv_nsec = (int)ts.tv_nsec;
rv.clock_type = clock;
return rv;
}
@ -62,8 +65,9 @@ void gpr_time_init(void) {}
gpr_timespec gpr_now(gpr_clock_type clock) {
struct timespec now;
GPR_ASSERT(clock != GPR_TIMESPAN);
clock_gettime(clockid_for_gpr_clock[clock], &now);
return gpr_from_timespec(now);
return gpr_from_timespec(now, clock);
}
#else
/* For some reason Apple's OSes haven't implemented clock_gettime. */
@ -88,6 +92,7 @@ gpr_timespec gpr_now(gpr_clock_type clock) {
struct timeval now_tv;
double now_dbl;
now.clock_type = clock;
switch (clock) {
case GPR_CLOCK_REALTIME:
gettimeofday(&now_tv, NULL);
@ -99,6 +104,8 @@ gpr_timespec gpr_now(gpr_clock_type clock) {
now.tv_sec = now_dbl * 1e-9;
now.tv_nsec = now_dbl - now.tv_sec * 1e9;
break;
case GPR_TIMESPAN:
abort();
}
return now;

@ -55,6 +55,7 @@ gpr_timespec gpr_now(gpr_clock_type clock) {
struct _timeb now_tb;
LARGE_INTEGER timestamp;
double now_dbl;
now_tv.clock_type = clock;
switch (clock) {
case GPR_CLOCK_REALTIME:
_ftime_s(&now_tb);

@ -62,6 +62,7 @@ int grpc_bbq_empty(grpc_byte_buffer_queue *q) {
}
void grpc_bbq_push(grpc_byte_buffer_queue *q, grpc_byte_buffer *buffer) {
q->bytes += grpc_byte_buffer_length(buffer);
bba_push(&q->filling, buffer);
}
@ -72,8 +73,11 @@ void grpc_bbq_flush(grpc_byte_buffer_queue *q) {
}
}
size_t grpc_bbq_bytes(grpc_byte_buffer_queue *q) { return q->bytes; }
grpc_byte_buffer *grpc_bbq_pop(grpc_byte_buffer_queue *q) {
grpc_bbq_array temp_array;
grpc_byte_buffer *out;
if (q->drain_pos == q->draining.count) {
if (q->filling.count == 0) {
@ -87,5 +91,7 @@ grpc_byte_buffer *grpc_bbq_pop(grpc_byte_buffer_queue *q) {
q->draining = temp_array;
}
return q->draining.data[q->drain_pos++];
out = q->draining.data[q->drain_pos++];
q->bytes -= grpc_byte_buffer_length(out);
return out;
}

@ -49,6 +49,7 @@ typedef struct {
size_t drain_pos;
grpc_bbq_array filling;
grpc_bbq_array draining;
size_t bytes;
} grpc_byte_buffer_queue;
void grpc_bbq_destroy(grpc_byte_buffer_queue *q);
@ -56,5 +57,6 @@ grpc_byte_buffer *grpc_bbq_pop(grpc_byte_buffer_queue *q);
void grpc_bbq_flush(grpc_byte_buffer_queue *q);
int grpc_bbq_empty(grpc_byte_buffer_queue *q);
void grpc_bbq_push(grpc_byte_buffer_queue *q, grpc_byte_buffer *bb);
size_t grpc_bbq_bytes(grpc_byte_buffer_queue *q);
#endif /* GRPC_INTERNAL_CORE_SURFACE_BYTE_BUFFER_QUEUE_H */

@ -347,7 +347,7 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq,
}
grpc_call_stack_init(channel_stack, server_transport_data, initial_op_ptr,
CALL_STACK_FROM_CALL(call));
if (gpr_time_cmp(send_deadline, gpr_inf_future) != 0) {
if (gpr_time_cmp(send_deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) != 0) {
set_deadline_alarm(call, send_deadline);
}
return call;
@ -513,6 +513,8 @@ static void unlock(grpc_call *call) {
int completing_requests = 0;
int start_op = 0;
int i;
const gpr_uint32 MAX_RECV_PEEK_AHEAD = 65536;
size_t buffered_bytes;
int cancel_alarm = 0;
memset(&op, 0, sizeof(op));
@ -528,6 +530,17 @@ static void unlock(grpc_call *call) {
op.recv_ops = &call->recv_ops;
op.recv_state = &call->recv_state;
op.on_done_recv = &call->on_done_recv;
if (grpc_bbq_empty(&call->incoming_queue) && call->reading_message) {
op.max_recv_bytes = call->incoming_message_length -
call->incoming_message.length + MAX_RECV_PEEK_AHEAD;
} else {
buffered_bytes = grpc_bbq_bytes(&call->incoming_queue);
if (buffered_bytes > MAX_RECV_PEEK_AHEAD) {
op.max_recv_bytes = 0;
} else {
op.max_recv_bytes = MAX_RECV_PEEK_AHEAD - buffered_bytes;
}
}
call->receiving = 1;
GRPC_CALL_INTERNAL_REF(call, "receiving");
start_op = 1;
@ -972,7 +985,7 @@ static int fill_send_ops(grpc_call *call, grpc_transport_stream_op *op) {
mdb.list = chain_metadata_from_app(call, data.send_metadata.count,
data.send_metadata.metadata);
mdb.garbage.head = mdb.garbage.tail = NULL;
mdb.deadline = gpr_inf_future;
mdb.deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
/* send status */
/* TODO(ctiller): cache common status values */
data = call->request_data[GRPC_IOREQ_SEND_STATUS];
@ -1325,7 +1338,7 @@ static void recv_metadata(grpc_call *call, grpc_metadata_batch *md) {
l->md = 0;
}
}
if (gpr_time_cmp(md->deadline, gpr_inf_future) != 0) {
if (gpr_time_cmp(md->deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) != 0) {
set_deadline_alarm(call, md->deadline);
}
if (!is_trailing) {

@ -116,7 +116,7 @@ void grpc_cq_begin_op(grpc_completion_queue *cc) {
void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, int success,
void (*done)(void *done_arg, grpc_cq_completion *storage),
void *done_arg, grpc_cq_completion *storage) {
int shutdown = gpr_unref(&cc->pending_events);
int shutdown;
storage->tag = tag;
storage->done = done;
@ -124,15 +124,15 @@ void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, int success,
storage->next =
((gpr_uintptr)&cc->completed_head) | ((gpr_uintptr)(success != 0));
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
shutdown = gpr_unref(&cc->pending_events);
if (!shutdown) {
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
cc->completed_tail->next =
((gpr_uintptr)storage) | (1u & (gpr_uintptr)cc->completed_tail->next);
cc->completed_tail = storage;
grpc_pollset_kick(&cc->pollset);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
} else {
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
cc->completed_tail->next =
((gpr_uintptr)storage) | (1u & (gpr_uintptr)cc->completed_tail->next);
cc->completed_tail = storage;
@ -260,8 +260,9 @@ grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc) {
void grpc_cq_hack_spin_pollset(grpc_completion_queue *cc) {
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
grpc_pollset_kick(&cc->pollset);
grpc_pollset_work(&cc->pollset, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_millis(100)));
grpc_pollset_work(&cc->pollset,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_millis(100, GPR_TIMESPAN)));
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}

@ -72,7 +72,7 @@ static void lame_start_transport_stream_op(grpc_call_element *elem,
mdb.list.head = &calld->status;
mdb.list.tail = &calld->details;
mdb.garbage.head = mdb.garbage.tail = NULL;
mdb.deadline = gpr_inf_future;
mdb.deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
grpc_sopb_add_metadata(op->recv_ops, mdb);
*op->recv_state = GRPC_STREAM_CLOSED;
op->on_done_recv->cb(op->on_done_recv->cb_arg, 1);

@ -36,22 +36,22 @@
#include <stdlib.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
#include "src/core/channel/census_filter.h"
#include "src/core/channel/channel_args.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/iomgr/iomgr.h"
#include "src/core/support/stack_lockfree.h"
#include "src/core/support/string.h"
#include "src/core/surface/call.h"
#include "src/core/surface/channel.h"
#include "src/core/surface/completion_queue.h"
#include "src/core/surface/init.h"
#include "src/core/transport/metadata.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
typedef enum { PENDING_START, CALL_LIST_COUNT } call_list;
typedef struct listener {
void *arg;
@ -74,8 +74,8 @@ typedef enum { BATCH_CALL, REGISTERED_CALL } requested_call_type;
typedef struct requested_call {
requested_call_type type;
struct requested_call *next;
void *tag;
grpc_server *server;
grpc_completion_queue *cq_bound_to_call;
grpc_completion_queue *cq_for_notification;
grpc_call **call;
@ -94,14 +94,6 @@ typedef struct requested_call {
} data;
} requested_call;
struct registered_method {
char *method;
char *host;
call_data *pending;
requested_call *requests;
registered_method *next;
};
typedef struct channel_registered_method {
registered_method *server_registered_method;
grpc_mdstr *method;
@ -130,44 +122,6 @@ typedef struct shutdown_tag {
grpc_cq_completion completion;
} shutdown_tag;
struct grpc_server {
size_t channel_filter_count;
const grpc_channel_filter **channel_filters;
grpc_channel_args *channel_args;
grpc_completion_queue **cqs;
grpc_pollset **pollsets;
size_t cq_count;
/* The two following mutexes control access to server-state
mu_global controls access to non-call-related state (e.g., channel state)
mu_call controls access to call-related state (e.g., the call lists)
If they are ever required to be nested, you must lock mu_global
before mu_call. This is currently used in shutdown processing
(grpc_server_shutdown_and_notify and maybe_finish_shutdown) */
gpr_mu mu_global; /* mutex for server and channel state */
gpr_mu mu_call; /* mutex for call-specific state */
registered_method *registered_methods;
requested_call *requests;
gpr_uint8 shutdown;
gpr_uint8 shutdown_published;
size_t num_shutdown_tags;
shutdown_tag *shutdown_tags;
call_data *lists[CALL_LIST_COUNT];
channel_data root_channel_data;
listener *listeners;
int listeners_destroyed;
gpr_refcount internal_refcount;
/** when did we print the last shutdown progress message */
gpr_timespec last_shutdown_message_time;
};
typedef enum {
/* waiting for metadata */
NOT_STARTED,
@ -179,6 +133,8 @@ typedef enum {
ZOMBIED
} call_state;
typedef struct request_matcher request_matcher;
struct call_data {
grpc_call *call;
@ -201,8 +157,20 @@ struct call_data {
grpc_iomgr_closure server_on_recv;
grpc_iomgr_closure kill_zombie_closure;
call_data **root[CALL_LIST_COUNT];
call_link links[CALL_LIST_COUNT];
call_data *pending_next;
};
struct request_matcher {
call_data *pending_head;
call_data *pending_tail;
gpr_stack_lockfree *requests;
};
struct registered_method {
char *method;
char *host;
request_matcher request_matcher;
registered_method *next;
};
typedef struct {
@ -210,6 +178,48 @@ typedef struct {
size_t num_channels;
} channel_broadcaster;
struct grpc_server {
size_t channel_filter_count;
const grpc_channel_filter **channel_filters;
grpc_channel_args *channel_args;
grpc_completion_queue **cqs;
grpc_pollset **pollsets;
size_t cq_count;
/* The two following mutexes control access to server-state
mu_global controls access to non-call-related state (e.g., channel state)
mu_call controls access to call-related state (e.g., the call lists)
If they are ever required to be nested, you must lock mu_global
before mu_call. This is currently used in shutdown processing
(grpc_server_shutdown_and_notify and maybe_finish_shutdown) */
gpr_mu mu_global; /* mutex for server and channel state */
gpr_mu mu_call; /* mutex for call-specific state */
registered_method *registered_methods;
request_matcher unregistered_request_matcher;
/** free list of available requested_calls indices */
gpr_stack_lockfree *request_freelist;
/** requested call backing data */
requested_call *requested_calls;
int max_requested_calls;
gpr_atm shutdown_flag;
gpr_uint8 shutdown_published;
size_t num_shutdown_tags;
shutdown_tag *shutdown_tags;
channel_data root_channel_data;
listener *listeners;
int listeners_destroyed;
gpr_refcount internal_refcount;
/** when did we print the last shutdown progress message */
gpr_timespec last_shutdown_message_time;
};
#define SERVER_FROM_CALL_ELEM(elem) \
(((channel_data *)(elem)->channel_data)->server)
@ -220,7 +230,9 @@ static void fail_call(grpc_server *server, requested_call *rc);
hold mu_call */
static void maybe_finish_shutdown(grpc_server *server);
/* channel broadcaster */
/*
* channel broadcaster
*/
/* assumes server locked */
static void channel_broadcaster_init(grpc_server *s, channel_broadcaster *cb) {
@ -281,55 +293,44 @@ static void channel_broadcaster_shutdown(channel_broadcaster *cb,
gpr_free(cb->channels);
}
/* call list */
/*
* request_matcher
*/
static int call_list_join(call_data **root, call_data *call, call_list list) {
GPR_ASSERT(!call->root[list]);
call->root[list] = root;
if (!*root) {
*root = call;
call->links[list].next = call->links[list].prev = call;
} else {
call->links[list].next = *root;
call->links[list].prev = (*root)->links[list].prev;
call->links[list].next->links[list].prev =
call->links[list].prev->links[list].next = call;
}
return 1;
static void request_matcher_init(request_matcher *request_matcher,
int entries) {
memset(request_matcher, 0, sizeof(*request_matcher));
request_matcher->requests = gpr_stack_lockfree_create(entries);
}
static call_data *call_list_remove_head(call_data **root, call_list list) {
call_data *out = *root;
if (out) {
out->root[list] = NULL;
if (out->links[list].next == out) {
*root = NULL;
} else {
*root = out->links[list].next;
out->links[list].next->links[list].prev = out->links[list].prev;
out->links[list].prev->links[list].next = out->links[list].next;
}
}
return out;
static void request_matcher_destroy(request_matcher *request_matcher) {
GPR_ASSERT(gpr_stack_lockfree_pop(request_matcher->requests) == -1);
gpr_stack_lockfree_destroy(request_matcher->requests);
}
static int call_list_remove(call_data *call, call_list list) {
call_data **root = call->root[list];
if (root == NULL) return 0;
call->root[list] = NULL;
if (*root == call) {
*root = call->links[list].next;
if (*root == call) {
*root = NULL;
return 1;
}
static void kill_zombie(void *elem, int success) {
grpc_call_destroy(grpc_call_from_top_element(elem));
}
static void request_matcher_zombify_all_pending_calls(
request_matcher *request_matcher) {
while (request_matcher->pending_head) {
call_data *calld = request_matcher->pending_head;
request_matcher->pending_head = calld->pending_next;
gpr_mu_lock(&calld->mu_state);
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
grpc_iomgr_closure_init(
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
grpc_iomgr_add_callback(&calld->kill_zombie_closure);
}
GPR_ASSERT(*root != call);
call->links[list].next->links[list].prev = call->links[list].prev;
call->links[list].prev->links[list].next = call->links[list].next;
return 1;
}
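The request_matcher machinery above replaces the intrusive call lists it is shown alongside: requested calls now live in a server-owned array, slots are handed out as integer ids from a lock-free freelist, and each matcher queues ids rather than linked-list nodes. A rough sketch of that id life cycle, assuming the server fields and helpers shown in this file (illustration only, not literal commit code):
/* a caller asks the server to wait for a call */
int id = gpr_stack_lockfree_pop(server->request_freelist);
if (id == -1) {
  fail_call(server, rc);                     /* no free slot: fail at once */
} else {
  server->requested_calls[id] = *rc;         /* copy into the backing array */
  gpr_stack_lockfree_push(rm->requests, id); /* publish the id to its matcher */
}
/* later, when an incoming RPC matches this matcher */
if ((id = gpr_stack_lockfree_pop(rm->requests)) != -1) {
  begin_call(server, calld, &server->requested_calls[id]);
}
/* once the completion is consumed, the id goes back on the freelist */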
/*
* server proper
*/
static void server_ref(grpc_server *server) {
gpr_ref(&server->internal_refcount);
}
@ -343,6 +344,7 @@ static void server_delete(grpc_server *server) {
gpr_free(server->channel_filters);
while ((rm = server->registered_methods) != NULL) {
server->registered_methods = rm->next;
request_matcher_destroy(&rm->request_matcher);
gpr_free(rm->method);
gpr_free(rm->host);
gpr_free(rm);
@ -350,9 +352,12 @@ static void server_delete(grpc_server *server) {
for (i = 0; i < server->cq_count; i++) {
GRPC_CQ_INTERNAL_UNREF(server->cqs[i], "server");
}
request_matcher_destroy(&server->unregistered_request_matcher);
gpr_stack_lockfree_destroy(server->request_freelist);
gpr_free(server->cqs);
gpr_free(server->pollsets);
gpr_free(server->shutdown_tags);
gpr_free(server->requested_calls);
gpr_free(server);
}
@ -391,25 +396,29 @@ static void destroy_channel(channel_data *chand) {
}
static void finish_start_new_rpc(grpc_server *server, grpc_call_element *elem,
call_data **pending_root,
requested_call **requests) {
requested_call *rc;
request_matcher *request_matcher) {
call_data *calld = elem->call_data;
gpr_mu_lock(&server->mu_call);
rc = *requests;
if (rc == NULL) {
int request_id;
request_id = gpr_stack_lockfree_pop(request_matcher->requests);
if (request_id == -1) {
gpr_mu_lock(&server->mu_call);
gpr_mu_lock(&calld->mu_state);
calld->state = PENDING;
gpr_mu_unlock(&calld->mu_state);
call_list_join(pending_root, calld, PENDING_START);
if (request_matcher->pending_head == NULL) {
request_matcher->pending_tail = request_matcher->pending_head = calld;
} else {
request_matcher->pending_tail->pending_next = calld;
request_matcher->pending_tail = calld;
}
calld->pending_next = NULL;
gpr_mu_unlock(&server->mu_call);
} else {
*requests = rc->next;
gpr_mu_lock(&calld->mu_state);
calld->state = ACTIVATED;
gpr_mu_unlock(&calld->mu_state);
gpr_mu_unlock(&server->mu_call);
begin_call(server, calld, rc);
begin_call(server, calld, &server->requested_calls[request_id]);
}
}
@ -431,8 +440,8 @@ static void start_new_rpc(grpc_call_element *elem) {
if (!rm) break;
if (rm->host != calld->host) continue;
if (rm->method != calld->path) continue;
finish_start_new_rpc(server, elem, &rm->server_registered_method->pending,
&rm->server_registered_method->requests);
finish_start_new_rpc(server, elem,
&rm->server_registered_method->request_matcher);
return;
}
/* check for a wildcard method definition (no host set) */
@ -443,17 +452,12 @@ static void start_new_rpc(grpc_call_element *elem) {
if (!rm) break;
if (rm->host != NULL) continue;
if (rm->method != calld->path) continue;
finish_start_new_rpc(server, elem, &rm->server_registered_method->pending,
&rm->server_registered_method->requests);
finish_start_new_rpc(server, elem,
&rm->server_registered_method->request_matcher);
return;
}
}
finish_start_new_rpc(server, elem, &server->lists[PENDING_START],
&server->requests);
}
static void kill_zombie(void *elem, int success) {
grpc_call_destroy(grpc_call_from_top_element(elem));
finish_start_new_rpc(server, elem, &server->unregistered_request_matcher);
}
static int num_listeners(grpc_server *server) {
@ -481,15 +485,15 @@ static int num_channels(grpc_server *server) {
static void maybe_finish_shutdown(grpc_server *server) {
size_t i;
if (!server->shutdown || server->shutdown_published) {
if (!gpr_atm_acq_load(&server->shutdown_flag) || server->shutdown_published) {
return;
}
if (server->root_channel_data.next != &server->root_channel_data ||
server->listeners_destroyed < num_listeners(server)) {
if (gpr_time_cmp(
gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), server->last_shutdown_message_time),
gpr_time_from_seconds(1)) >= 0) {
if (gpr_time_cmp(gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME),
server->last_shutdown_message_time),
gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) {
server->last_shutdown_message_time = gpr_now(GPR_CLOCK_REALTIME);
gpr_log(GPR_DEBUG,
"Waiting for %d channels and %d/%d listeners to be destroyed"
@ -526,7 +530,6 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
static void server_on_recv(void *ptr, int success) {
grpc_call_element *elem = ptr;
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
if (success && !calld->got_initial_metadata) {
size_t i;
@ -536,7 +539,8 @@ static void server_on_recv(void *ptr, int success) {
grpc_stream_op *op = &ops[i];
if (op->type != GRPC_OP_METADATA) continue;
grpc_metadata_batch_filter(&op->data.metadata, server_filter, elem);
if (0 != gpr_time_cmp(op->data.metadata.deadline, gpr_inf_future)) {
if (0 != gpr_time_cmp(op->data.metadata.deadline,
gpr_inf_future(GPR_CLOCK_REALTIME))) {
calld->deadline = op->data.metadata.deadline;
}
calld->got_initial_metadata = 1;
@ -571,11 +575,8 @@ static void server_on_recv(void *ptr, int success) {
} else if (calld->state == PENDING) {
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
gpr_mu_lock(&chand->server->mu_call);
call_list_remove(calld, PENDING_START);
gpr_mu_unlock(&chand->server->mu_call);
grpc_iomgr_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
grpc_iomgr_add_callback(&calld->kill_zombie_closure);
/* zombied call will be destroyed when it's removed from the pending
queue... later */
} else {
gpr_mu_unlock(&calld->mu_state);
}
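Taken together with queue_call_request further down, these branches implement a small per-call state machine guarded by calld->mu_state. The transitions visible in this change, summarized as a comment (states not touched by this diff are omitted):
/* Sketch of the calld->state transitions exercised in this change:
 *   PENDING -> ACTIVATED    a requested_call id is popped and begin_call runs
 *   PENDING -> ZOMBIED      shutdown zombifies the pending queue, or the
 *                           transport closes the stream before activation
 *   ZOMBIED -> (destroyed)  kill_zombie_closure runs on the iomgr thread
 */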
@ -610,7 +611,7 @@ static void accept_stream(void *cd, grpc_transport *transport,
channel_data *chand = cd;
/* create a call */
grpc_call_create(chand->channel, NULL, transport_server_data, NULL, 0,
gpr_inf_future);
gpr_inf_future(GPR_CLOCK_REALTIME));
}
static void channel_connectivity_changed(void *cd, int iomgr_status_ignored) {
@ -638,7 +639,7 @@ static void init_call_elem(grpc_call_element *elem,
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
memset(calld, 0, sizeof(call_data));
calld->deadline = gpr_inf_future;
calld->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
calld->call = grpc_call_from_top_element(elem);
gpr_mu_init(&calld->mu_state);
@ -653,11 +654,7 @@ static void destroy_call_elem(grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
if (calld->state == PENDING) {
gpr_mu_lock(&chand->server->mu_call);
call_list_remove(elem->call_data, PENDING_START);
gpr_mu_unlock(&chand->server->mu_call);
}
GPR_ASSERT(calld->state != PENDING);
if (calld->host) {
GRPC_MDSTR_UNREF(calld->host);
@ -764,6 +761,18 @@ grpc_server *grpc_server_create_from_filters(grpc_channel_filter **filters,
server->root_channel_data.next = server->root_channel_data.prev =
&server->root_channel_data;
/* TODO(ctiller): expose a channel_arg for this */
server->max_requested_calls = 32768;
server->request_freelist =
gpr_stack_lockfree_create(server->max_requested_calls);
for (i = 0; i < (size_t)server->max_requested_calls; i++) {
gpr_stack_lockfree_push(server->request_freelist, i);
}
request_matcher_init(&server->unregistered_request_matcher,
server->max_requested_calls);
server->requested_calls = gpr_malloc(server->max_requested_calls *
sizeof(*server->requested_calls));
/* Server filter stack is:
server_surface_filter - for making surface API calls
@ -811,6 +820,7 @@ void *grpc_server_register_method(grpc_server *server, const char *method,
}
m = gpr_malloc(sizeof(registered_method));
memset(m, 0, sizeof(*m));
request_matcher_init(&m->request_matcher, server->max_requested_calls);
m->method = gpr_strdup(method);
m->host = gpr_strdup(host);
m->next = server->registered_methods;
@ -926,13 +936,49 @@ void grpc_server_setup_transport(grpc_server *s, grpc_transport *transport,
grpc_transport_perform_op(transport, &op);
}
typedef struct {
requested_call **requests;
size_t count;
size_t capacity;
} request_killer;
static void request_killer_init(request_killer *rk) {
memset(rk, 0, sizeof(*rk));
}
static void request_killer_add(request_killer *rk, requested_call *rc) {
if (rk->capacity == rk->count) {
rk->capacity = GPR_MAX(8, rk->capacity * 2);
rk->requests =
gpr_realloc(rk->requests, rk->capacity * sizeof(*rk->requests));
}
rk->requests[rk->count++] = rc;
}
static void request_killer_add_request_matcher(request_killer *rk,
grpc_server *server,
request_matcher *rm) {
int request_id;
while ((request_id = gpr_stack_lockfree_pop(rm->requests)) != -1) {
request_killer_add(rk, &server->requested_calls[request_id]);
}
}
static void request_killer_run(request_killer *rk, grpc_server *server) {
size_t i;
for (i = 0; i < rk->count; i++) {
fail_call(server, rk->requests[i]);
}
gpr_free(rk->requests);
}
void grpc_server_shutdown_and_notify(grpc_server *server,
grpc_completion_queue *cq, void *tag) {
listener *l;
requested_call *requests = NULL;
registered_method *rm;
shutdown_tag *sdt;
channel_broadcaster broadcaster;
request_killer reqkill;
/* lock, and gather up some stuff to do */
gpr_mu_lock(&server->mu_global);
@ -943,7 +989,7 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
sdt = &server->shutdown_tags[server->num_shutdown_tags++];
sdt->tag = tag;
sdt->cq = cq;
if (server->shutdown) {
if (gpr_atm_acq_load(&server->shutdown_flag)) {
gpr_mu_unlock(&server->mu_global);
return;
}
@ -951,31 +997,26 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
server->last_shutdown_message_time = gpr_now(GPR_CLOCK_REALTIME);
channel_broadcaster_init(server, &broadcaster);
request_killer_init(&reqkill);
/* collect all unregistered then registered calls */
gpr_mu_lock(&server->mu_call);
requests = server->requests;
server->requests = NULL;
request_killer_add_request_matcher(&reqkill, server,
&server->unregistered_request_matcher);
request_matcher_zombify_all_pending_calls(
&server->unregistered_request_matcher);
for (rm = server->registered_methods; rm; rm = rm->next) {
while (rm->requests != NULL) {
requested_call *c = rm->requests;
rm->requests = c->next;
c->next = requests;
requests = c;
}
request_killer_add_request_matcher(&reqkill, server, &rm->request_matcher);
request_matcher_zombify_all_pending_calls(&rm->request_matcher);
}
gpr_mu_unlock(&server->mu_call);
server->shutdown = 1;
gpr_atm_rel_store(&server->shutdown_flag, 1);
maybe_finish_shutdown(server);
gpr_mu_unlock(&server->mu_global);
/* terminate all the requested calls */
while (requests != NULL) {
requested_call *next = requests->next;
fail_call(server, requests);
requests = next;
}
request_killer_run(&reqkill, server);
/* Shutdown listeners */
for (l = server->listeners; l; l = l->next) {
@ -1007,7 +1048,7 @@ void grpc_server_destroy(grpc_server *server) {
listener *l;
gpr_mu_lock(&server->mu_global);
GPR_ASSERT(server->shutdown || !server->listeners);
GPR_ASSERT(gpr_atm_acq_load(&server->shutdown_flag) || !server->listeners);
GPR_ASSERT(server->listeners_destroyed == num_listeners(server));
while (server->listeners) {
@ -1037,39 +1078,55 @@ void grpc_server_add_listener(grpc_server *server, void *arg,
static grpc_call_error queue_call_request(grpc_server *server,
requested_call *rc) {
call_data *calld = NULL;
requested_call **requests = NULL;
gpr_mu_lock(&server->mu_call);
if (server->shutdown) {
gpr_mu_unlock(&server->mu_call);
request_matcher *request_matcher = NULL;
int request_id;
if (gpr_atm_acq_load(&server->shutdown_flag)) {
fail_call(server, rc);
return GRPC_CALL_OK;
}
request_id = gpr_stack_lockfree_pop(server->request_freelist);
if (request_id == -1) {
/* out of request ids: just fail this one */
fail_call(server, rc);
return GRPC_CALL_OK;
}
switch (rc->type) {
case BATCH_CALL:
calld =
call_list_remove_head(&server->lists[PENDING_START], PENDING_START);
requests = &server->requests;
request_matcher = &server->unregistered_request_matcher;
break;
case REGISTERED_CALL:
calld = call_list_remove_head(
&rc->data.registered.registered_method->pending, PENDING_START);
requests = &rc->data.registered.registered_method->requests;
request_matcher = &rc->data.registered.registered_method->request_matcher;
break;
}
if (calld != NULL) {
gpr_mu_unlock(&server->mu_call);
gpr_mu_lock(&calld->mu_state);
GPR_ASSERT(calld->state == PENDING);
calld->state = ACTIVATED;
gpr_mu_unlock(&calld->mu_state);
begin_call(server, calld, rc);
return GRPC_CALL_OK;
} else {
rc->next = *requests;
*requests = rc;
server->requested_calls[request_id] = *rc;
gpr_free(rc);
if (gpr_stack_lockfree_push(request_matcher->requests, request_id)) {
/* this was the first queued request: we need to lock and start
matching calls */
gpr_mu_lock(&server->mu_call);
while ((calld = request_matcher->pending_head) != NULL) {
request_id = gpr_stack_lockfree_pop(request_matcher->requests);
if (request_id == -1) break;
request_matcher->pending_head = calld->pending_next;
gpr_mu_unlock(&server->mu_call);
gpr_mu_lock(&calld->mu_state);
if (calld->state == ZOMBIED) {
gpr_mu_unlock(&calld->mu_state);
grpc_iomgr_closure_init(
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
grpc_iomgr_add_callback(&calld->kill_zombie_closure);
} else {
GPR_ASSERT(calld->state == PENDING);
calld->state = ACTIVATED;
gpr_mu_unlock(&calld->mu_state);
begin_call(server, calld, &server->requested_calls[request_id]);
}
gpr_mu_lock(&server->mu_call);
}
gpr_mu_unlock(&server->mu_call);
return GRPC_CALL_OK;
}
return GRPC_CALL_OK;
}
grpc_call_error grpc_server_request_call(
@ -1087,6 +1144,7 @@ grpc_call_error grpc_server_request_call(
}
grpc_cq_begin_op(cq_for_notification);
rc->type = BATCH_CALL;
rc->server = server;
rc->tag = tag;
rc->cq_bound_to_call = cq_bound_to_call;
rc->cq_for_notification = cq_for_notification;
@ -1109,6 +1167,7 @@ grpc_call_error grpc_server_request_registered_call(
}
grpc_cq_begin_op(cq_for_notification);
rc->type = REGISTERED_CALL;
rc->server = server;
rc->tag = tag;
rc->cq_bound_to_call = cq_bound_to_call;
rc->cq_for_notification = cq_for_notification;
@ -1188,7 +1247,16 @@ static void begin_call(grpc_server *server, call_data *calld,
}
static void done_request_event(void *req, grpc_cq_completion *c) {
gpr_free(req);
requested_call *rc = req;
grpc_server *server = rc->server;
if (rc >= server->requested_calls &&
rc < server->requested_calls + server->max_requested_calls) {
gpr_stack_lockfree_push(server->request_freelist,
rc - server->requested_calls);
} else {
gpr_free(req);
}
}
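done_request_event now has to handle two allocation regimes: calls that were copied into the server-owned requested_calls array, whose slot index must return to the freelist, and the caller's original heap allocation, which reaches here when fail_call runs before the copy is made. A short illustration of the bounds check it relies on, using the same field names (a sketch, not the commit's code):
if (rc >= server->requested_calls &&
    rc < server->requested_calls + server->max_requested_calls) {
  int index = (int)(rc - server->requested_calls);          /* 0 .. max-1 */
  gpr_stack_lockfree_push(server->request_freelist, index); /* recycle slot */
} else {
  gpr_free(rc); /* not pool-backed: allocated by the request-call API */
}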
static void fail_call(grpc_server *server, requested_call *rc) {

@ -94,8 +94,8 @@ grpc_chttp2_parse_error grpc_chttp2_window_update_parser_parse(
}
GPR_ASSERT(is_last);
if (transport_parsing->incoming_stream_id) {
if (stream_parsing) {
if (transport_parsing->incoming_stream_id != 0) {
if (stream_parsing != NULL) {
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("update", transport_parsing,
stream_parsing, outgoing_window_update,
p->amount);

@ -42,7 +42,7 @@
void grpc_chttp2_incoming_metadata_buffer_init(
grpc_chttp2_incoming_metadata_buffer *buffer) {
buffer->deadline = gpr_inf_future;
buffer->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
}
void grpc_chttp2_incoming_metadata_buffer_destroy(
@ -87,7 +87,7 @@ void grpc_chttp2_incoming_metadata_buffer_place_metadata_batch_into(
b.list.tail = (void *)(gpr_intptr)buffer->count;
b.garbage.head = b.garbage.tail = NULL;
b.deadline = buffer->deadline;
buffer->deadline = gpr_inf_future;
buffer->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
grpc_sopb_add_metadata(sopb, b);
}

@ -353,7 +353,19 @@ typedef struct {
/** window available for us to send to peer */
gpr_int64 outgoing_window;
/** window available for peer to send to us - updated after parse */
/** The number of bytes the upper layers have offered to receive.
As the upper layer offers more bytes, this value increases.
As bytes are read, this value decreases. */
gpr_uint32 max_recv_bytes;
/** The number of bytes the upper layer has offered to read but we have
not yet announced to HTTP2 flow control.
As the upper layers offer to read more bytes, this value increases.
As we advertise incoming flow control window, this value decreases. */
gpr_uint32 unannounced_incoming_window;
/** The number of bytes of HTTP2 flow control we have advertised.
As we advertise incoming flow control window, this value increases.
As bytes are read, this value decreases.
Updated after parse. */
gpr_uint32 incoming_window;
/** stream ops the transport user would like to send */
grpc_stream_op_buffer *outgoing_sopb;
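The three counters documented above track the same bytes at different stages: max_recv_bytes is what the application has offered to receive, unannounced_incoming_window is the portion not yet turned into a WINDOW_UPDATE, and incoming_window is what has already been advertised to the peer. A sketch of the two transitions, using these field names plus announce_window from the writing state below (illustration only, not commit code):
/* the stream op offers to receive up to op->max_recv_bytes */
if (stream_global->max_recv_bytes < op->max_recv_bytes) {
  stream_global->unannounced_incoming_window +=
      op->max_recv_bytes - stream_global->max_recv_bytes;
  stream_global->max_recv_bytes = op->max_recv_bytes;
}
/* the writing path converts the unannounced amount into a WINDOW_UPDATE */
stream_writing->announce_window = stream_global->unannounced_incoming_window;
stream_global->incoming_window += stream_global->unannounced_incoming_window;
stream_global->unannounced_incoming_window = 0;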
@ -391,6 +403,8 @@ typedef struct {
grpc_stream_op_buffer sopb;
/** how strongly should we indicate closure with the next write */
grpc_chttp2_send_closed send_closed;
/** how much window should we announce? */
gpr_uint32 announce_window;
} grpc_chttp2_stream_writing;
struct grpc_chttp2_stream_parsing {
@ -501,7 +515,9 @@ void grpc_chttp2_list_add_writable_window_update_stream(
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_writable_window_update_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_writing **stream_writing);
void grpc_chttp2_list_remove_writable_window_update_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);

@ -173,7 +173,14 @@ void grpc_chttp2_publish_reads(
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
"parsed", transport_parsing, stream_parsing, incoming_window_delta,
-(gpr_int64)stream_parsing->incoming_window_delta);
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
"parsed", transport_parsing, stream_global, max_recv_bytes,
-(gpr_int64)stream_parsing->incoming_window_delta);
stream_global->incoming_window -= stream_parsing->incoming_window_delta;
GPR_ASSERT(stream_global->max_recv_bytes >=
stream_parsing->incoming_window_delta);
stream_global->max_recv_bytes -=
stream_parsing->incoming_window_delta;
stream_parsing->incoming_window_delta = 0;
grpc_chttp2_list_add_writable_window_update_stream(transport_global,
stream_global);
@ -594,7 +601,7 @@ static void on_header(void *tp, grpc_mdelem *md) {
cached_timeout)) {
gpr_log(GPR_ERROR, "Ignoring bad timeout value '%s'",
grpc_mdstr_as_c_string(md->value));
*cached_timeout = gpr_inf_future;
*cached_timeout = gpr_inf_future(GPR_CLOCK_REALTIME);
}
grpc_mdelem_set_user_data(md, free_timeout, cached_timeout);
}

@ -585,7 +585,8 @@ void grpc_chttp2_encode(grpc_stream_op *ops, size_t ops_count, int eof,
l->md = hpack_enc(compressor, l->md, &st);
need_unref |= l->md != NULL;
}
if (gpr_time_cmp(op->data.metadata.deadline, gpr_inf_future) != 0) {
if (gpr_time_cmp(op->data.metadata.deadline,
gpr_inf_future(GPR_CLOCK_REALTIME)) != 0) {
deadline_enc(compressor, op->data.metadata.deadline, &st);
}
curop++;

@ -139,6 +139,7 @@ static void stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
void grpc_chttp2_list_add_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
GPR_ASSERT(stream_global->id != 0);
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global), GRPC_CHTTP2_LIST_WRITABLE);
}
@ -204,6 +205,7 @@ int grpc_chttp2_list_pop_written_stream(
void grpc_chttp2_list_add_writable_window_update_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
GPR_ASSERT(stream_global->id != 0);
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_WRITABLE_WINDOW_UPDATE);
@ -211,11 +213,14 @@ void grpc_chttp2_list_add_writable_window_update_stream(
int grpc_chttp2_list_pop_writable_window_update_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_writing **stream_writing) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_WRITABLE_WINDOW_UPDATE);
*stream_global = &stream->global;
*stream_writing = &stream->writing;
return r;
}

@ -147,7 +147,7 @@ int grpc_chttp2_decode_timeout(const char *buffer, gpr_timespec *timeout) {
gpr_uint32 xp = x * 10 + *p - '0';
have_digit = 1;
if (xp < x) {
*timeout = gpr_inf_future;
*timeout = gpr_inf_future(GPR_CLOCK_REALTIME);
return 1;
}
x = xp;
@ -159,22 +159,22 @@ int grpc_chttp2_decode_timeout(const char *buffer, gpr_timespec *timeout) {
/* decode unit specifier */
switch (*p) {
case 'n':
*timeout = gpr_time_from_nanos(x);
*timeout = gpr_time_from_nanos(x, GPR_TIMESPAN);
break;
case 'u':
*timeout = gpr_time_from_micros(x);
*timeout = gpr_time_from_micros(x, GPR_TIMESPAN);
break;
case 'm':
*timeout = gpr_time_from_millis(x);
*timeout = gpr_time_from_millis(x, GPR_TIMESPAN);
break;
case 'S':
*timeout = gpr_time_from_seconds(x);
*timeout = gpr_time_from_seconds(x, GPR_TIMESPAN);
break;
case 'M':
*timeout = gpr_time_from_minutes(x);
*timeout = gpr_time_from_minutes(x, GPR_TIMESPAN);
break;
case 'H':
*timeout = gpr_time_from_hours(x);
*timeout = gpr_time_from_hours(x, GPR_TIMESPAN);
break;
default:
return 0;
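grpc_chttp2_decode_timeout parses a grpc-timeout header value, a run of digits followed by one unit letter, into a GPR_TIMESPAN timespec; it returns 0 when the unit is unrecognized. A small usage sketch (example values chosen for illustration):
gpr_timespec timeout;
if (grpc_chttp2_decode_timeout("250m", &timeout)) {
  /* 250 milliseconds: equivalent to gpr_time_from_millis(250, GPR_TIMESPAN) */
}
if (grpc_chttp2_decode_timeout("5M", &timeout)) {
  /* 5 minutes: equivalent to gpr_time_from_minutes(5, GPR_TIMESPAN) */
}
if (!grpc_chttp2_decode_timeout("10x", &timeout)) {
  /* unknown unit 'x': the parser rejects the value */
}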

@ -66,11 +66,9 @@ int grpc_chttp2_unlocking_check_writes(
/* for each grpc_chttp2_stream that's become writable, frame its data
(according to
available window sizes) and add to the output buffer */
while (transport_global->outgoing_window &&
grpc_chttp2_list_pop_writable_stream(transport_global,
while (grpc_chttp2_list_pop_writable_stream(transport_global,
transport_writing, &stream_global,
&stream_writing) &&
stream_global->outgoing_window > 0) {
&stream_writing)) {
stream_writing->id = stream_global->id;
window_delta = grpc_chttp2_preencode(
stream_global->outgoing_sopb->ops, &stream_global->outgoing_sopb->nops,
@ -106,20 +104,21 @@ int grpc_chttp2_unlocking_check_writes(
/* for each grpc_chttp2_stream that wants to update its window, add that
* window here */
while (grpc_chttp2_list_pop_writable_window_update_stream(transport_global,
&stream_global)) {
window_delta =
transport_global->settings[GRPC_LOCAL_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE] -
stream_global->incoming_window;
if (!stream_global->read_closed && window_delta > 0) {
gpr_slice_buffer_add(
&transport_writing->outbuf,
grpc_chttp2_window_update_create(stream_global->id, window_delta));
transport_writing,
&stream_global,
&stream_writing)) {
stream_writing->id = stream_global->id;
if (!stream_global->read_closed && stream_global->unannounced_incoming_window > 0) {
stream_writing->announce_window = stream_global->unannounced_incoming_window;
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("write", transport_global, stream_global,
incoming_window, window_delta);
stream_global->incoming_window += window_delta;
incoming_window, stream_global->unannounced_incoming_window);
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("write", transport_global, stream_global,
unannounced_incoming_window, -(gpr_int64)stream_global->unannounced_incoming_window);
stream_global->incoming_window += stream_global->unannounced_incoming_window;
stream_global->unannounced_incoming_window = 0;
grpc_chttp2_list_add_incoming_window_updated(transport_global,
stream_global);
grpc_chttp2_list_add_writing_stream(transport_writing, stream_writing);
}
}
@ -169,10 +168,19 @@ static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing) {
while (
grpc_chttp2_list_pop_writing_stream(transport_writing, &stream_writing)) {
grpc_chttp2_encode(stream_writing->sopb.ops, stream_writing->sopb.nops,
stream_writing->send_closed != GRPC_DONT_SEND_CLOSED,
stream_writing->id, &transport_writing->hpack_compressor,
&transport_writing->outbuf);
if (stream_writing->sopb.nops > 0 || stream_writing->send_closed != GRPC_DONT_SEND_CLOSED) {
grpc_chttp2_encode(stream_writing->sopb.ops, stream_writing->sopb.nops,
stream_writing->send_closed != GRPC_DONT_SEND_CLOSED,
stream_writing->id, &transport_writing->hpack_compressor,
&transport_writing->outbuf);
}
if (stream_writing->announce_window > 0) {
gpr_slice_buffer_add(
&transport_writing->outbuf,
grpc_chttp2_window_update_create(
stream_writing->id, stream_writing->announce_window));
stream_writing->announce_window = 0;
}
stream_writing->sopb.nops = 0;
if (stream_writing->send_closed == GRPC_SEND_CLOSED_WITH_RST_STREAM) {
gpr_slice_buffer_add(&transport_writing->outbuf,
@ -197,7 +205,8 @@ void grpc_chttp2_cleanup_writing(
while (grpc_chttp2_list_pop_written_stream(
transport_global, transport_writing, &stream_global, &stream_writing)) {
if (stream_global->outgoing_sopb->nops == 0) {
if (stream_global->outgoing_sopb != NULL &&
stream_global->outgoing_sopb->nops == 0) {
stream_global->outgoing_sopb = NULL;
grpc_chttp2_schedule_closure(transport_global,
stream_global->send_done_closure, 1);

@ -360,7 +360,9 @@ static int init_stream(grpc_transport *gt, grpc_stream *gs,
s->global.outgoing_window =
t->global.settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
s->parsing.incoming_window = s->global.incoming_window =
s->global.max_recv_bytes =
s->parsing.incoming_window =
s->global.incoming_window =
t->global.settings[GRPC_SENT_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
*t->accepting_stream = s;
@ -564,6 +566,8 @@ static void maybe_start_some_streams(
stream_global->incoming_window =
transport_global->settings[GRPC_SENT_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
stream_global->max_recv_bytes =
GPR_MAX(stream_global->incoming_window, stream_global->max_recv_bytes);
grpc_chttp2_stream_map_add(
&TRANSPORT_FROM_GLOBAL(transport_global)->new_stream_map,
stream_global->id, STREAM_FROM_GLOBAL(stream_global));
@ -572,6 +576,9 @@ static void maybe_start_some_streams(
grpc_chttp2_list_add_incoming_window_updated(transport_global,
stream_global);
grpc_chttp2_list_add_writable_stream(transport_global, stream_global);
grpc_chttp2_list_add_writable_window_update_stream(transport_global,
stream_global);
}
/* cancel out streams that will never be started */
while (transport_global->next_stream_id >= MAX_CLIENT_STREAM_ID &&
@ -622,12 +629,23 @@ static void perform_stream_op_locked(
stream_global->publish_sopb = op->recv_ops;
stream_global->publish_sopb->nops = 0;
stream_global->publish_state = op->recv_state;
if (stream_global->max_recv_bytes < op->max_recv_bytes) {
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("op", transport_global, stream_global,
max_recv_bytes, op->max_recv_bytes - stream_global->max_recv_bytes);
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
"op", transport_global, stream_global, unannounced_incoming_window,
op->max_recv_bytes - stream_global->max_recv_bytes);
stream_global->unannounced_incoming_window += op->max_recv_bytes - stream_global->max_recv_bytes;
stream_global->max_recv_bytes = op->max_recv_bytes;
}
grpc_chttp2_incoming_metadata_live_op_buffer_end(
&stream_global->outstanding_metadata);
grpc_chttp2_list_add_read_write_state_changed(transport_global,
stream_global);
grpc_chttp2_list_add_writable_window_update_stream(transport_global,
stream_global);
if (stream_global->id != 0) {
grpc_chttp2_list_add_read_write_state_changed(transport_global,
stream_global);
grpc_chttp2_list_add_writable_window_update_stream(transport_global,
stream_global);
}
}
if (op->bind_pollset) {
@ -1056,7 +1074,7 @@ void grpc_chttp2_flowctl_trace(const char *file, int line, const char *reason,
identifier = gpr_strdup(context_scope);
}
gpr_log(GPR_INFO,
"FLOWCTL: %s %-10s %8s %-23s %8lld %c %8lld = %8lld %-10s [%s:%d]",
"FLOWCTL: %s %-10s %8s %-27s %8lld %c %8lld = %8lld %-10s [%s:%d]",
is_client ? "client" : "server", identifier, context_thread, var,
current_value, delta < 0 ? '-' : '+', delta < 0 ? -delta : delta,
current_value + delta, reason, file, line);

@ -205,7 +205,7 @@ void grpc_metadata_batch_assert_ok(grpc_metadata_batch *batch) {
void grpc_metadata_batch_init(grpc_metadata_batch *batch) {
batch->list.head = batch->list.tail = batch->garbage.head = batch->garbage.tail =
NULL;
batch->deadline = gpr_inf_future;
batch->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
}
void grpc_metadata_batch_destroy(grpc_metadata_batch *batch) {

@ -72,6 +72,10 @@ typedef struct grpc_transport_stream_op {
grpc_stream_op_buffer *recv_ops;
grpc_stream_state *recv_state;
/** The number of bytes this peer is currently prepared to receive.
These bytes will be eventually used to replenish per-stream flow control
windows. */
gpr_uint32 max_recv_bytes;
grpc_iomgr_closure *on_done_recv;
grpc_pollset *bind_pollset;

@ -61,7 +61,7 @@ static void put_metadata_list(gpr_strvec *b, grpc_metadata_batch md) {
if (m != md.list.head) gpr_strvec_add(b, gpr_strdup(", "));
put_metadata(b, m->md);
}
if (gpr_time_cmp(md.deadline, gpr_inf_future) != 0) {
if (gpr_time_cmp(md.deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) != 0) {
char *tmp;
gpr_asprintf(&tmp, " deadline=%d.%09d", md.deadline.tv_sec,
md.deadline.tv_nsec);
@ -128,7 +128,8 @@ char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
if (op->recv_ops) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_strvec_add(&b, gpr_strdup("RECV"));
gpr_asprintf(&tmp, "RECV:max_recv_bytes=%d", op->max_recv_bytes);
gpr_strvec_add(&b, tmp);
}
if (op->bind_pollset) {

@ -44,7 +44,7 @@ ClientContext::ClientContext()
: initial_metadata_received_(false),
call_(nullptr),
cq_(nullptr),
deadline_(gpr_inf_future) {}
deadline_(gpr_inf_future(GPR_CLOCK_REALTIME)) {}
ClientContext::~ClientContext() {
if (call_) {
@ -53,8 +53,8 @@ ClientContext::~ClientContext() {
if (cq_) {
// Drain cq_.
grpc_completion_queue_shutdown(cq_);
while (grpc_completion_queue_next(cq_, gpr_inf_future).type !=
GRPC_QUEUE_SHUTDOWN)
while (grpc_completion_queue_next(cq_, gpr_inf_future(GPR_CLOCK_REALTIME))
.type != GRPC_QUEUE_SHUTDOWN)
;
grpc_completion_queue_destroy(cq_);
}

@ -92,7 +92,8 @@ std::shared_ptr<Credentials> ServiceAccountCredentials(
"with non-positive lifetime");
return WrapCredentials(nullptr);
}
gpr_timespec lifetime = gpr_time_from_seconds(token_lifetime_seconds);
gpr_timespec lifetime =
gpr_time_from_seconds(token_lifetime_seconds, GPR_TIMESPAN);
return WrapCredentials(grpc_service_account_credentials_create(
json_key.c_str(), scope.c_str(), lifetime));
}
@ -105,7 +106,8 @@ std::shared_ptr<Credentials> JWTCredentials(const grpc::string& json_key,
"Trying to create JWTCredentials with non-positive lifetime");
return WrapCredentials(nullptr);
}
gpr_timespec lifetime = gpr_time_from_seconds(token_lifetime_seconds);
gpr_timespec lifetime =
gpr_time_from_seconds(token_lifetime_seconds, GPR_TIMESPAN);
return WrapCredentials(
grpc_jwt_credentials_create(json_key.c_str(), lifetime));
}
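A change that recurs throughout this merge is the gpr time API gaining an explicit clock type: duration constructors now take GPR_TIMESPAN, and points in time carry the clock they were read from. A short sketch of the updated calls, mirroring the shutdown-progress check in server.c (illustration only; start_time is a hypothetical variable):
gpr_timespec start_time = gpr_now(GPR_CLOCK_REALTIME);            /* an instant */
gpr_timespec one_second = gpr_time_from_seconds(1, GPR_TIMESPAN); /* a duration */
gpr_timespec elapsed =
    gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), start_time);        /* a duration */
if (gpr_time_cmp(elapsed, one_second) >= 0) {
  /* at least one second has passed */
}
if (gpr_time_cmp(start_time, gpr_inf_future(GPR_CLOCK_REALTIME)) < 0) {
  /* every finite realtime value sorts below the realtime infinity */
}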

@ -0,0 +1,87 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <grpc++/auth_property_iterator.h>
#include <grpc/grpc_security.h>
namespace grpc {
AuthPropertyIterator::AuthPropertyIterator()
: property_(nullptr), ctx_(nullptr), index_(0), name_(nullptr) {}
AuthPropertyIterator::AuthPropertyIterator(
const grpc_auth_property* property, const grpc_auth_property_iterator* iter)
: property_(property),
ctx_(iter->ctx),
index_(iter->index),
name_(iter->name) {}
AuthPropertyIterator::~AuthPropertyIterator() {}
AuthPropertyIterator& AuthPropertyIterator::operator++() {
grpc_auth_property_iterator iter = {ctx_, index_, name_};
property_ = grpc_auth_property_iterator_next(&iter);
ctx_ = iter.ctx;
index_ = iter.index;
name_ = iter.name;
return *this;
}
AuthPropertyIterator AuthPropertyIterator::operator++(int) {
AuthPropertyIterator tmp(*this);
operator++();
return tmp;
}
bool AuthPropertyIterator::operator==(
const AuthPropertyIterator& rhs) const {
if (property_ == nullptr || rhs.property_ == nullptr) {
return property_ == rhs.property_;
} else {
return index_ == rhs.index_;
}
}
bool AuthPropertyIterator::operator!=(
const AuthPropertyIterator& rhs) const {
return !operator==(rhs);
}
const AuthProperty AuthPropertyIterator::operator*() {
return std::make_pair<grpc::string, grpc::string>(
grpc::string(property_->name),
grpc::string(property_->value, property_->value_length));
}
} // namespace grpc
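The C++ AuthPropertyIterator above is a thin wrapper over the C iterator from grpc/grpc_security.h, the same calls that SecureAuthContext::begin() makes below. A sketch of the underlying C loop (dump_auth_properties and the printf are illustrative, not part of the library):
#include <stdio.h>
#include <grpc/grpc_security.h>

static void dump_auth_properties(const grpc_auth_context *ctx) {
  grpc_auth_property_iterator iter = grpc_auth_context_property_iterator(ctx);
  const grpc_auth_property *prop;
  while ((prop = grpc_auth_property_iterator_next(&iter)) != NULL) {
    /* property values are length-delimited, not necessarily NUL-terminated */
    printf("%s = %.*s\n", prop->name, (int)prop->value_length, prop->value);
  }
}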

@ -70,7 +70,8 @@ CompletionQueue::NextStatus CompletionQueue::AsyncNextInternal(
}
bool CompletionQueue::Pluck(CompletionQueueTag* tag) {
auto ev = grpc_completion_queue_pluck(cq_, tag, gpr_inf_future);
auto ev =
grpc_completion_queue_pluck(cq_, tag, gpr_inf_future(GPR_CLOCK_REALTIME));
bool ok = ev.success != 0;
void* ignored = tag;
GPR_ASSERT(tag->FinalizeResult(&ignored, &ok));
@ -80,7 +81,8 @@ bool CompletionQueue::Pluck(CompletionQueueTag* tag) {
}
void CompletionQueue::TryPluck(CompletionQueueTag* tag) {
auto ev = grpc_completion_queue_pluck(cq_, tag, gpr_time_0);
auto ev =
grpc_completion_queue_pluck(cq_, tag, gpr_time_0(GPR_CLOCK_REALTIME));
if (ev.type == GRPC_QUEUE_TIMEOUT) return;
bool ok = ev.success != 0;
void* ignored = tag;

@ -77,4 +77,20 @@ std::vector<grpc::string> SecureAuthContext::FindPropertyValues(
return values;
}
AuthPropertyIterator SecureAuthContext::begin() const {
if (ctx_) {
grpc_auth_property_iterator iter =
grpc_auth_context_property_iterator(ctx_);
const grpc_auth_property* property =
grpc_auth_property_iterator_next(&iter);
return AuthPropertyIterator(property, &iter);
} else {
return end();
}
}
AuthPropertyIterator SecureAuthContext::end() const {
return AuthPropertyIterator();
}
} // namespace grpc

@ -53,6 +53,10 @@ class SecureAuthContext GRPC_FINAL : public AuthContext {
std::vector<grpc::string> FindPropertyValues(const grpc::string& name) const
GRPC_OVERRIDE;
AuthPropertyIterator begin() const GRPC_OVERRIDE;
AuthPropertyIterator end() const GRPC_OVERRIDE;
private:
grpc_auth_context* ctx_;
};

@ -76,8 +76,7 @@ FixedSizeThreadPool::~FixedSizeThreadPool() {
}
}
void FixedSizeThreadPool::ScheduleCallback(
const std::function<void()>& callback) {
void FixedSizeThreadPool::Add(const std::function<void()>& callback) {
grpc::lock_guard<grpc::mutex> lock(mu_);
callbacks_.push(callback);
cv_.notify_one();

@ -383,7 +383,7 @@ void Server::ScheduleCallback() {
grpc::unique_lock<grpc::mutex> lock(mu_);
num_running_cb_++;
}
thread_pool_->ScheduleCallback(std::bind(&Server::RunRpc, this));
thread_pool_->Add(std::bind(&Server::RunRpc, this));
}
void Server::RunRpc() {

@ -144,7 +144,7 @@ void ServerContext::AddTrailingMetadata(const grpc::string& key,
trailing_metadata_.insert(std::make_pair(key, value));
}
bool ServerContext::IsCancelled() {
bool ServerContext::IsCancelled() const {
return completion_op_ && completion_op_->CheckCancelled(cq_);
}

@ -51,13 +51,15 @@ void Timepoint2Timespec(const system_clock::time_point& from,
system_clock::duration deadline = from.time_since_epoch();
seconds secs = duration_cast<seconds>(deadline);
if (from == system_clock::time_point::max() ||
secs.count() >= gpr_inf_future.tv_sec || secs.count() < 0) {
*to = gpr_inf_future;
secs.count() >= gpr_inf_future(GPR_CLOCK_REALTIME).tv_sec ||
secs.count() < 0) {
*to = gpr_inf_future(GPR_CLOCK_REALTIME);
return;
}
nanoseconds nsecs = duration_cast<nanoseconds>(deadline - secs);
to->tv_sec = secs.count();
to->tv_nsec = nsecs.count();
to->clock_type = GPR_CLOCK_REALTIME;
}
void TimepointHR2Timespec(const high_resolution_clock::time_point& from,
@ -65,17 +67,19 @@ void TimepointHR2Timespec(const high_resolution_clock::time_point& from,
high_resolution_clock::duration deadline = from.time_since_epoch();
seconds secs = duration_cast<seconds>(deadline);
if (from == high_resolution_clock::time_point::max() ||
secs.count() >= gpr_inf_future.tv_sec || secs.count() < 0) {
*to = gpr_inf_future;
secs.count() >= gpr_inf_future(GPR_CLOCK_REALTIME).tv_sec ||
secs.count() < 0) {
*to = gpr_inf_future(GPR_CLOCK_REALTIME);
return;
}
nanoseconds nsecs = duration_cast<nanoseconds>(deadline - secs);
to->tv_sec = secs.count();
to->tv_nsec = nsecs.count();
to->clock_type = GPR_CLOCK_REALTIME;
}
system_clock::time_point Timespec2Timepoint(gpr_timespec t) {
if (gpr_time_cmp(t, gpr_inf_future) == 0) {
if (gpr_time_cmp(t, gpr_inf_future(GPR_CLOCK_REALTIME)) == 0) {
return system_clock::time_point::max();
}
system_clock::time_point tp;

@ -61,28 +61,28 @@ namespace Grpc.Core.Internal.Tests
[Test]
public void Add()
{
var t = new Timespec { tv_sec = new IntPtr(12345), tv_nsec = new IntPtr(123456789) };
var t = new Timespec { tv_sec = new IntPtr(12345), tv_nsec = 123456789 };
var result = t.Add(TimeSpan.FromTicks(TimeSpan.TicksPerSecond * 10));
Assert.AreEqual(result.tv_sec, new IntPtr(12355));
Assert.AreEqual(result.tv_nsec, new IntPtr(123456789));
Assert.AreEqual(result.tv_nsec, 123456789);
}
[Test]
public void Add_Nanos()
{
var t = new Timespec { tv_sec = new IntPtr(12345), tv_nsec = new IntPtr(123456789) };
var t = new Timespec { tv_sec = new IntPtr(12345), tv_nsec = 123456789 };
var result = t.Add(TimeSpan.FromTicks(10));
Assert.AreEqual(result.tv_sec, new IntPtr(12345));
Assert.AreEqual(result.tv_nsec, new IntPtr(123456789 + 1000));
Assert.AreEqual(result.tv_nsec, 123456789 + 1000);
}
[Test]
public void Add_NanosOverflow()
{
var t = new Timespec { tv_sec = new IntPtr(12345), tv_nsec = new IntPtr(999999999) };
var t = new Timespec { tv_sec = new IntPtr(12345), tv_nsec = 999999999 };
var result = t.Add(TimeSpan.FromTicks(TimeSpan.TicksPerSecond * 10 + 10));
Assert.AreEqual(result.tv_sec, new IntPtr(12356));
Assert.AreEqual(result.tv_nsec, new IntPtr(999));
Assert.AreEqual(result.tv_nsec, 999);
}
}
}

@ -90,4 +90,19 @@ namespace Grpc.Core.Internal
/* operation completion */
OpComplete
}
/// <summary>
/// gpr_clock_type from grpc/support/time.h
/// </summary>
internal enum GPRClockType
{
/* Monotonic clock */
Monotonic,
/* Realtime clock */
Realtime,
/* Timespan - the distance between two time points */
Timespan
}
}

@ -55,7 +55,8 @@ namespace Grpc.Core.Internal
// NOTE: on linux 64bit sizeof(gpr_timespec) = 16, on windows 32bit sizeof(gpr_timespec) = 8
// so IntPtr seems to have the right size to work on both.
public System.IntPtr tv_sec;
public System.IntPtr tv_nsec;
public int tv_nsec;
public GPRClockType clock_type;
/// <summary>
/// Timespec a long time in the future.
@ -99,12 +100,13 @@ namespace Grpc.Core.Internal
public Timespec Add(TimeSpan timeSpan)
{
long nanos = tv_nsec.ToInt64() + (timeSpan.Ticks % TimeSpan.TicksPerSecond) * NanosPerTick;
long nanos = (long)tv_nsec + (timeSpan.Ticks % TimeSpan.TicksPerSecond) * NanosPerTick;
long overflow_sec = (nanos > NanosPerSecond) ? 1 : 0;
Timespec result;
result.tv_nsec = new IntPtr(nanos % NanosPerSecond);
result.tv_nsec = (int)(nanos % NanosPerSecond);
result.tv_sec = new IntPtr(tv_sec.ToInt64() + (timeSpan.Ticks / TimeSpan.TicksPerSecond) + overflow_sec);
result.clock_type = GPRClockType.Realtime;
return result;
}
}

@ -302,12 +302,13 @@ grpcsharp_completion_queue_destroy(grpc_completion_queue *cq) {
GPR_EXPORT grpc_event GPR_CALLTYPE
grpcsharp_completion_queue_next(grpc_completion_queue *cq) {
return grpc_completion_queue_next(cq, gpr_inf_future);
return grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME));
}
GPR_EXPORT grpc_event GPR_CALLTYPE
grpcsharp_completion_queue_pluck(grpc_completion_queue *cq, void *tag) {
return grpc_completion_queue_pluck(cq, tag, gpr_inf_future);
return grpc_completion_queue_pluck(cq, tag,
gpr_inf_future(GPR_CLOCK_REALTIME));
}
/* Channel */
@ -382,7 +383,7 @@ grpcsharp_channel_args_destroy(grpc_channel_args *args) {
GPR_EXPORT gpr_timespec GPR_CALLTYPE gprsharp_now(void) { return gpr_now(GPR_CLOCK_REALTIME); }
GPR_EXPORT gpr_timespec GPR_CALLTYPE gprsharp_inf_future(void) {
return gpr_inf_future;
return gpr_inf_future(GPR_CLOCK_REALTIME);
}
GPR_EXPORT gpr_int32 GPR_CALLTYPE gprsharp_sizeof_timespec(void) {

@ -62,7 +62,8 @@ CompletionQueueAsyncWorker::CompletionQueueAsyncWorker()
CompletionQueueAsyncWorker::~CompletionQueueAsyncWorker() {}
void CompletionQueueAsyncWorker::Execute() {
result = grpc_completion_queue_next(queue, gpr_inf_future);
result =
grpc_completion_queue_next(queue, gpr_inf_future(GPR_CLOCK_REALTIME));
if (!result.success) {
SetErrorMessage("The batch encountered an error");
}

@ -161,7 +161,8 @@ void Server::ShutdownServer() {
grpc_server_shutdown_and_notify(this->wrapped_server,
this->shutdown_queue,
NULL);
grpc_completion_queue_pluck(this->shutdown_queue, NULL, gpr_inf_future);
grpc_completion_queue_pluck(this->shutdown_queue, NULL,
gpr_inf_future(GPR_CLOCK_REALTIME));
this->wrapped_server = NULL;
}
}

@ -42,18 +42,19 @@ namespace node {
gpr_timespec MillisecondsToTimespec(double millis) {
if (millis == std::numeric_limits<double>::infinity()) {
return gpr_inf_future;
return gpr_inf_future(GPR_CLOCK_REALTIME);
} else if (millis == -std::numeric_limits<double>::infinity()) {
return gpr_inf_past;
return gpr_inf_past(GPR_CLOCK_REALTIME);
} else {
return gpr_time_from_micros(static_cast<int64_t>(millis * 1000));
return gpr_time_from_micros(static_cast<int64_t>(millis * 1000),
GPR_CLOCK_REALTIME);
}
}
double TimespecToMilliseconds(gpr_timespec timespec) {
if (gpr_time_cmp(timespec, gpr_inf_future) == 0) {
if (gpr_time_cmp(timespec, gpr_inf_future(GPR_CLOCK_REALTIME)) == 0) {
return std::numeric_limits<double>::infinity();
} else if (gpr_time_cmp(timespec, gpr_inf_past) == 0) {
} else if (gpr_time_cmp(timespec, gpr_inf_past(GPR_CLOCK_REALTIME)) == 0) {
return -std::numeric_limits<double>::infinity();
} else {
return (static_cast<double>(timespec.tv_sec) * 1000 +

@ -418,6 +418,48 @@ describe('Other conditions', function() {
});
});
});
describe('Error object should contain the status', function() {
it('for a unary call', function(done) {
client.unary({error: true}, function(err, data) {
assert(err);
assert.strictEqual(err.code, grpc.status.UNKNOWN);
assert.strictEqual(err.message, 'Requested error');
done();
});
});
it('for a client stream call', function(done) {
var call = client.clientStream(function(err, data) {
assert(err);
assert.strictEqual(err.code, grpc.status.UNKNOWN);
assert.strictEqual(err.message, 'Requested error');
done();
});
call.write({error: false});
call.write({error: true});
call.end();
});
it('for a server stream call', function(done) {
var call = client.serverStream({error: true});
call.on('data', function(){});
call.on('error', function(error) {
assert.strictEqual(error.code, grpc.status.UNKNOWN);
assert.strictEqual(error.message, 'Requested error');
done();
});
});
it('for a bidi stream call', function(done) {
var call = client.bidiStream();
call.write({error: false});
call.write({error: true});
call.end();
call.on('data', function(){});
call.on('error', function(error) {
assert.strictEqual(error.code, grpc.status.UNKNOWN);
assert.strictEqual(error.message, 'Requested error');
done();
});
});
});
});
describe('Cancelling surface client', function() {
var client;

@ -52,7 +52,7 @@
extern id const kGRPCStatusMetadataKey;
// Represents a single gRPC remote call.
@interface GRPCCall : NSObject<GRXWriter>
@interface GRPCCall : GRXWriter
// These HTTP headers will be passed to the server as part of this call. Each HTTP header is a
// name-value pair with string names and either string or binary values.
@ -89,7 +89,7 @@ extern id const kGRPCStatusMetadataKey;
// To finish a call right away, invoke cancel.
- (instancetype)initWithHost:(NSString *)host
path:(NSString *)path
requestsWriter:(id<GRXWriter>)requestsWriter NS_DESIGNATED_INITIALIZER;
requestsWriter:(GRXWriter *)requestsWriter NS_DESIGNATED_INITIALIZER;
// Finishes the request side of this call, notifies the server that the RPC
// should be cancelled, and finishes the response side of the call with an error

@ -79,7 +79,7 @@ NSString * const kGRPCStatusMetadataKey = @"io.grpc.StatusMetadataKey";
// all. This wrapper over our actual writeable ensures thread-safety and
// correct ordering.
GRPCDelegateWrapper *_responseWriteable;
id<GRXWriter> _requestWriter;
GRXWriter *_requestWriter;
NSMutableDictionary *_requestMetadata;
NSMutableDictionary *_responseMetadata;
@ -94,7 +94,7 @@ NSString * const kGRPCStatusMetadataKey = @"io.grpc.StatusMetadataKey";
// Designated initializer
- (instancetype)initWithHost:(NSString *)host
path:(NSString *)path
requestsWriter:(id<GRXWriter>)requestWriter {
requestsWriter:(GRXWriter *)requestWriter {
if (!host || !path) {
[NSException raise:NSInvalidArgumentException format:@"Neither host nor method can be nil."];
}

@ -33,8 +33,9 @@
#import <Foundation/Foundation.h>
#import <RxLibrary/GRXWriter.h>
@protocol GRXWriteable;
@protocol GRXWriter;
// This is a thread-safe wrapper over a GRXWriteable instance. It lets one
// enqueue calls to a GRXWriteable instance for the main thread, guaranteeing
@ -54,7 +55,7 @@
// writesFinishedWithError: is sent to the writeable, and released after that.
// This is used to create a retain cycle that keeps both objects alive until the
// writing is explicitly finished.
- (instancetype)initWithWriteable:(id<GRXWriteable>)writeable writer:(id<GRXWriter>)writer
- (instancetype)initWithWriteable:(id<GRXWriteable>)writeable writer:(GRXWriter *)writer
NS_DESIGNATED_INITIALIZER;
// Enqueues writeValue: to be sent to the writeable in the main thread.

@ -38,7 +38,7 @@
@interface GRPCDelegateWrapper ()
// These are atomic so that cancellation can nillify them from any thread.
@property(atomic, strong) id<GRXWriteable> writeable;
@property(atomic, strong) id<GRXWriter> writer;
@property(atomic, strong) GRXWriter *writer;
@end
@implementation GRPCDelegateWrapper {
@ -52,7 +52,7 @@
}
// Designated initializer
- (instancetype)initWithWriteable:(id<GRXWriteable>)writeable writer:(id<GRXWriter>)writer {
- (instancetype)initWithWriteable:(id<GRXWriteable>)writeable writer:(GRXWriter *)writer {
if (self = [super init]) {
_writeableQueue = dispatch_get_main_queue();
_writeable = writeable;

@ -40,7 +40,7 @@
- (instancetype)initWithHost:(NSString *)host
method:(ProtoMethod *)method
requestsWriter:(id<GRXWriter>)requestsWriter
requestsWriter:(GRXWriter *)requestsWriter
responseClass:(Class)responseClass
responsesWriteable:(id<GRXWriteable>)responsesWriteable NS_DESIGNATED_INITIALIZER;

@ -35,7 +35,6 @@
#import <GPBProtocolBuffers.h>
#import <RxLibrary/GRXWriteable.h>
#import <RxLibrary/GRXWriter.h>
#import <RxLibrary/GRXWriter+Transformations.h>
@implementation ProtoRPC {
@ -46,7 +45,7 @@
#pragma clang diagnostic ignored "-Wobjc-designated-initializers"
- (instancetype)initWithHost:(NSString *)host
path:(NSString *)path
requestsWriter:(id<GRXWriter>)requestsWriter {
requestsWriter:(GRXWriter *)requestsWriter {
[NSException raise:NSInvalidArgumentException
format:@"Please use ProtoRPC's designated initializer instead."];
return nil;
@ -56,7 +55,7 @@
// Designated initializer
- (instancetype)initWithHost:(NSString *)host
method:(ProtoMethod *)method
requestsWriter:(id<GRXWriter>)requestsWriter
requestsWriter:(GRXWriter *)requestsWriter
responseClass:(Class)responseClass
responsesWriteable:(id<GRXWriteable>)responsesWriteable {
// Because we can't tell the type system to constrain the class, we need to check at runtime:
@ -65,12 +64,11 @@
format:@"A protobuf class to parse the responses must be provided."];
}
// A writer that serializes the proto messages to send.
id<GRXWriter> bytesWriter =
[[[GRXWriter alloc] initWithWriter:requestsWriter] map:^id(GPBMessage *proto) {
// TODO(jcanizales): Fail with an understandable error message if the requestsWriter isn't
// sending GPBMessages.
return [proto data];
}];
GRXWriter *bytesWriter = [requestsWriter map:^id(GPBMessage *proto) {
// TODO(jcanizales): Fail with an understandable error message if the requestsWriter isn't
// sending GPBMessages.
return [proto data];
}];
if ((self = [super initWithHost:host path:method.HTTPPath requestsWriter:bytesWriter])) {
// A writeable that parses the proto messages received.
_responseWriteable = [[GRXWriteable alloc] initWithValueHandler:^(NSData *value) {

@ -35,7 +35,7 @@
@class ProtoRPC;
@protocol GRXWriteable;
@protocol GRXWriter;
@class GRXWriter;
@interface ProtoService : NSObject
- (instancetype)initWithHost:(NSString *)host
@ -43,7 +43,7 @@
serviceName:(NSString *)serviceName NS_DESIGNATED_INITIALIZER;
- (ProtoRPC *)RPCToMethod:(NSString *)method
requestsWriter:(id<GRXWriter>)requestsWriter
requestsWriter:(GRXWriter *)requestsWriter
responseClass:(Class)responseClass
responsesWriteable:(id<GRXWriteable>)responsesWriteable;
@end

@ -66,7 +66,7 @@
}
- (ProtoRPC *)RPCToMethod:(NSString *)method
requestsWriter:(id<GRXWriter>)requestsWriter
requestsWriter:(GRXWriter *)requestsWriter
responseClass:(Class)responseClass
responsesWriteable:(id<GRXWriteable>)responsesWriteable {
ProtoMethod *methodName = [[ProtoMethod alloc] initWithPackage:_packageName

@ -51,7 +51,7 @@
// pipe will keep buffering all data written to it, your application could run out of memory and
// crash. If you want to react to flow control signals to prevent that, instead of using this class
// you can implement an object that conforms to GRXWriter.
@interface GRXBufferedPipe : NSObject<GRXWriteable, GRXWriter>
@interface GRXBufferedPipe : GRXWriter<GRXWriteable>
// Convenience constructor.
+ (instancetype)pipe;

@ -0,0 +1,43 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#import "GRXWriter.h"
// A "proxy" class that simply forwards values, completion, and errors from its
// input writer to its writeable.
// It is useful as a superclass for pipes that act as a transformation of their
// input writer, and for classes that represent objects with input and
// output sequences of values, like an RPC.
@interface GRXForwardingWriter : GRXWriter
- (instancetype)initWithWriter:(GRXWriter *)writer NS_DESIGNATED_INITIALIZER;
@end

@ -0,0 +1,112 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#import "GRXForwardingWriter.h"
@interface GRXForwardingWriter () <GRXWriteable>
@end
@implementation GRXForwardingWriter {
GRXWriter *_writer;
id<GRXWriteable> _writeable;
}
- (instancetype)init {
return [self initWithWriter:nil];
}
// Designated initializer
- (instancetype)initWithWriter:(GRXWriter *)writer {
if (!writer) {
[NSException raise:NSInvalidArgumentException format:@"writer can't be nil."];
}
if ((self = [super init])) {
_writer = writer;
}
return self;
}
// This is used to send a completion or an error to the writeable. It nillifies
// our reference to it in order to guarantee no more messages are sent to it,
// and to release it.
- (void)finishOutputWithError:(NSError *)errorOrNil {
id<GRXWriteable> writeable = _writeable;
_writeable = nil;
[writeable writesFinishedWithError:errorOrNil];
}
// This is used to stop the input writer. It nillifies our reference to it
// to release it.
- (void)finishInput {
GRXWriter *writer = _writer;
_writer = nil;
writer.state = GRXWriterStateFinished;
}
#pragma mark GRXWriteable implementation
- (void)writeValue:(id)value {
[_writeable writeValue:value];
}
- (void)writesFinishedWithError:(NSError *)errorOrNil {
_writer = nil;
[self finishOutputWithError:errorOrNil];
}
#pragma mark GRXWriter implementation
- (GRXWriterState)state {
return _writer ? _writer.state : GRXWriterStateFinished;
}
- (void)setState:(GRXWriterState)state {
if (state == GRXWriterStateFinished) {
_writeable = nil;
[self finishInput];
} else {
_writer.state = state;
}
}
- (void)startWithWriteable:(id<GRXWriteable>)writeable {
_writeable = writeable;
[_writer startWithWriteable:self];
}
- (void)finishWithError:(NSError *)errorOrNil {
[self finishOutputWithError:errorOrNil];
[self finishInput];
}
@end

@ -40,15 +40,15 @@
//
// Unless the writeable callback pauses them or stops them early, these writers will do all their
// interactions with the writeable before the start method returns.
@interface GRXImmediateWriter : NSObject<GRXWriter>
@interface GRXImmediateWriter : GRXWriter
// Returns a writer that pulls values from the passed NSEnumerator instance and pushes them to
// its writeable. The NSEnumerator is released when it finishes.
+ (id<GRXWriter>)writerWithEnumerator:(NSEnumerator *)enumerator;
+ (GRXWriter *)writerWithEnumerator:(NSEnumerator *)enumerator;
// Returns a writer that pushes to its writeable the successive values returned by the passed
// block. When the block first returns nil, it is released.
+ (id<GRXWriter>)writerWithValueSupplier:(id (^)())block;
+ (GRXWriter *)writerWithValueSupplier:(id (^)())block;
// Returns a writer that iterates over the values of the passed container and pushes them to
// its writeable. The container is released when the iteration is over.
@@ -56,18 +56,18 @@
// Note that the usual speed gain of NSFastEnumeration over NSEnumerator results from not having to
// call one method per element. Because GRXWriteable instances accept values one by one, that speed
// gain doesn't happen here.
+ (id<GRXWriter>)writerWithContainer:(id<NSFastEnumeration>)container;
+ (GRXWriter *)writerWithContainer:(id<NSFastEnumeration>)container;
// Returns a writer that sends the passed value to its writeable and then finishes (releasing the
// value).
+ (id<GRXWriter>)writerWithValue:(id)value;
+ (GRXWriter *)writerWithValue:(id)value;
// Returns a writer that, as part of its start method, sends the passed error to the writeable
// (then releasing the error).
+ (id<GRXWriter>)writerWithError:(NSError *)error;
+ (GRXWriter *)writerWithError:(NSError *)error;
// Returns a writer that, as part of its start method, finishes immediately without sending any
// values to its writeable.
+ (id<GRXWriter>)emptyWriter;
+ (GRXWriter *)emptyWriter;
@end

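Not from the diff: a short sketch of the updated factory signatures, which now return GRXWriter * (the new base class) rather than id<GRXWriter>; the values shown are arbitrary.

    GRXWriter *one  = [GRXImmediateWriter writerWithValue:@42];            // one value, then finishes
    GRXWriter *many = [GRXImmediateWriter writerWithContainer:@[@1, @2]];  // one value per element
    GRXWriter *none = [GRXImmediateWriter emptyWriter];                    // finishes without values
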
@@ -63,19 +63,19 @@
return [[self alloc] initWithEnumerator:enumerator error:errorOrNil];
}
+ (id<GRXWriter>)writerWithEnumerator:(NSEnumerator *)enumerator {
+ (GRXWriter *)writerWithEnumerator:(NSEnumerator *)enumerator {
return [self writerWithEnumerator:enumerator error:nil];
}
+ (id<GRXWriter>)writerWithValueSupplier:(id (^)())block {
+ (GRXWriter *)writerWithValueSupplier:(id (^)())block {
return [self writerWithEnumerator:[NSEnumerator grx_enumeratorWithValueSupplier:block]];
}
+ (id<GRXWriter>)writerWithContainer:(id<NSFastEnumeration>)container {
+ (GRXWriter *)writerWithContainer:(id<NSFastEnumeration>)container {
return [self writerWithEnumerator:[NSEnumerator grx_enumeratorWithContainer:container]];;
}
+ (id<GRXWriter>)writerWithValue:(id)value {
+ (GRXWriter *)writerWithValue:(id)value {
if (value) {
return [self writerWithEnumerator:[NSEnumerator grx_enumeratorWithSingleValue:value]];
} else {
@@ -83,7 +83,7 @@
}
}
+ (id<GRXWriter>)writerWithError:(NSError *)error {
+ (GRXWriter *)writerWithError:(NSError *)error {
if (error) {
return [self writerWithEnumerator:nil error:error];
} else {
@@ -91,7 +91,7 @@
}
}
+ (id<GRXWriter>)emptyWriter {
+ (GRXWriter *)emptyWriter {
static GRXImmediateWriter *emptyWriter;
static dispatch_once_t onceToken;
dispatch_once(&onceToken, ^{

@@ -38,27 +38,27 @@
@implementation GRXWriter (Immediate)
+ (instancetype)writerWithEnumerator:(NSEnumerator *)enumerator {
return [[self alloc] initWithWriter:[GRXImmediateWriter writerWithEnumerator:enumerator]];
return [GRXImmediateWriter writerWithEnumerator:enumerator];
}
+ (instancetype)writerWithValueSupplier:(id (^)())block {
return [[self alloc] initWithWriter:[GRXImmediateWriter writerWithValueSupplier:block]];
return [GRXImmediateWriter writerWithValueSupplier:block];
}
+ (instancetype)writerWithContainer:(id<NSFastEnumeration>)container {
return [[self alloc] initWithWriter:[GRXImmediateWriter writerWithContainer:container]];
return [GRXImmediateWriter writerWithContainer:container];
}
+ (instancetype)writerWithValue:(id)value {
return [[self alloc] initWithWriter:[GRXImmediateWriter writerWithValue:value]];
return [GRXImmediateWriter writerWithValue:value];
}
+ (instancetype)writerWithError:(NSError *)error {
return [[self alloc] initWithWriter:[GRXImmediateWriter writerWithError:error]];
return [GRXImmediateWriter writerWithError:error];
}
+ (instancetype)emptyWriter {
return [[self alloc] initWithWriter:[GRXImmediateWriter emptyWriter]];
return [GRXImmediateWriter emptyWriter];
}
@end

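Illustration only: the GRXWriter (Immediate) category now forwards straight to GRXImmediateWriter instead of wrapping it, so the two calls below produce equivalent writers and existing call sites (such as the tests further down) keep working unchanged.

    GRXWriter *a = [GRXWriter writerWithValue:@"x"];           // category method, forwards to...
    GRXWriter *b = [GRXImmediateWriter writerWithValue:@"x"];  // ...this factory; same behaviour
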
@@ -85,7 +85,7 @@ typedef NS_ENUM(NSInteger, GRXWriterState) {
// Unless otherwise indicated by a conforming class, no messages should be sent
// concurrently to a GRXWriter. I.e., conforming classes aren't required to
// be thread-safe.
@protocol GRXWriter <NSObject>
@interface GRXWriter : NSObject
// This property can be used to query the current state of the writer, which
// determines how it might currently use its writeable. Some state transitions can
@@ -116,12 +116,3 @@ typedef NS_ENUM(NSInteger, GRXWriterState) {
// can't remember the details in the presence of concurrency.
- (void)finishWithError:(NSError *)errorOrNil;
@end
// A "proxy" class that simply forwards values, completion, and errors from its
// input writer to its writeable.
// It is useful as a superclass for pipes that act as a transformation of their
// input writer, and for classes that represent objects with input and
// output sequences of values, like an RPC.
@interface GRXWriter : NSObject<GRXWriter>
- (instancetype)initWithWriter:(id<GRXWriter>)writer NS_DESIGNATED_INITIALIZER;
@end

@@ -33,80 +33,6 @@
#import "GRXWriter.h"
@interface GRXWriter () <GRXWriteable>
@end
@implementation GRXWriter {
id<GRXWriter> _writer;
id<GRXWriteable> _writeable;
}
- (instancetype)init {
return [self initWithWriter:nil];
}
// Designated initializer
- (instancetype)initWithWriter:(id<GRXWriter>)writer {
if (!writer) {
[NSException raise:NSInvalidArgumentException format:@"writer can't be nil."];
}
if ((self = [super init])) {
_writer = writer;
}
return self;
}
// This is used to send a completion or an error to the writeable. It nillifies
// our reference to it in order to guarantee no more messages are sent to it,
// and to release it.
- (void)finishOutputWithError:(NSError *)errorOrNil {
id<GRXWriteable> writeable = _writeable;
_writeable = nil;
[writeable writesFinishedWithError:errorOrNil];
}
// This is used to stop the input writer. It nillifies our reference to it
// to release it.
- (void)finishInput {
id<GRXWriter> writer = _writer;
_writer = nil;
writer.state = GRXWriterStateFinished;
}
#pragma mark GRXWriteable implementation
- (void)writeValue:(id)value {
[_writeable writeValue:value];
}
- (void)writesFinishedWithError:(NSError *)errorOrNil {
_writer = nil;
[self finishOutputWithError:errorOrNil];
}
#pragma mark GRXWriter implementation
- (GRXWriterState)state {
return _writer ? _writer.state : GRXWriterStateFinished;
}
- (void)setState:(GRXWriterState)state {
if (state == GRXWriterStateFinished) {
_writeable = nil;
[self finishInput];
} else {
_writer.state = state;
}
}
- (void)startWithWriteable:(id<GRXWriteable>)writeable {
_writeable = writeable;
[_writer startWithWriteable:self];
}
- (void)finishWithError:(NSError *)errorOrNil {
[self finishOutputWithError:errorOrNil];
[self finishInput];
}
@implementation GRXWriter
@end

@@ -31,10 +31,10 @@
*
*/
#import "RxLibrary/GRXWriter.h"
#import "RxLibrary/GRXForwardingWriter.h"
// A "proxy" writer that transforms all the values of its input writer by using a mapping function.
@interface GRXMappingWriter : GRXWriter
- (instancetype)initWithWriter:(id<GRXWriter>)writer map:(id (^)(id value))map
@interface GRXMappingWriter : GRXForwardingWriter
- (instancetype)initWithWriter:(GRXWriter *)writer map:(id (^)(id value))map
NS_DESIGNATED_INITIALIZER;
@end

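A hedged sketch (not in this commit) of the pattern GRXMappingWriter now relies on: subclass GRXForwardingWriter and override writeValue: to transform values, inheriting all the state and lifecycle handling. MyUppercaseWriter and its behaviour are hypothetical.

    #import "RxLibrary/GRXForwardingWriter.h"

    // Make the superclass's GRXWriteable conformance visible, as GRXMappingWriter.m does below.
    @interface GRXForwardingWriter () <GRXWriteable>
    @end

    @interface MyUppercaseWriter : GRXForwardingWriter
    @end

    @implementation MyUppercaseWriter
    // Transform each value on its way to the writeable; everything else is inherited.
    - (void)writeValue:(id)value {
      [super writeValue:[(NSString *)value uppercaseString]];
    }
    @end
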
@@ -37,19 +37,19 @@ static id (^kIdentity)(id value) = ^id(id value) {
return value;
};
@interface GRXWriter () <GRXWriteable>
@interface GRXForwardingWriter () <GRXWriteable>
@end
@implementation GRXMappingWriter {
id (^_map)(id value);
}
- (instancetype)initWithWriter:(id<GRXWriter>)writer {
- (instancetype)initWithWriter:(GRXWriter *)writer {
return [self initWithWriter:writer map:nil];
}
// Designated initializer
- (instancetype)initWithWriter:(id<GRXWriter>)writer map:(id (^)(id value))map {
- (instancetype)initWithWriter:(GRXWriter *)writer map:(id (^)(id value))map {
if ((self = [super initWithWriter:writer])) {
_map = map ?: kIdentity;
}

@@ -120,7 +120,7 @@ static ProtoMethod *kUnaryCallMethod;
request.responseSize = 100;
request.fillUsername = YES;
request.fillOauthScope = YES;
id<GRXWriter> requestsWriter = [GRXWriter writerWithValue:[request data]];
GRXWriter *requestsWriter = [GRXWriter writerWithValue:[request data]];
GRPCCall *call = [[GRPCCall alloc] initWithHost:kHostAddress
path:kUnaryCallMethod.HTTPPath
@@ -150,7 +150,7 @@ static ProtoMethod *kUnaryCallMethod;
RMTSimpleRequest *request = [RMTSimpleRequest message];
request.fillUsername = YES;
request.fillOauthScope = YES;
id<GRXWriter> requestsWriter = [GRXWriter writerWithValue:[request data]];
GRXWriter *requestsWriter = [GRXWriter writerWithValue:[request data]];
GRPCCall *call = [[GRPCCall alloc] initWithHost:kHostAddress
path:kUnaryCallMethod.HTTPPath

@@ -143,7 +143,7 @@
RMTStreamingInputCallRequest *request4 = [RMTStreamingInputCallRequest message];
request4.payload.body = [NSMutableData dataWithLength:45904];
id<GRXWriter> writer = [GRXWriter writerWithContainer:@[request1, request2, request3, request4]];
GRXWriter *writer = [GRXWriter writerWithContainer:@[request1, request2, request3, request4]];
[_service streamingInputCallWithRequestsWriter:writer
handler:^(RMTStreamingInputCallResponse *response,

@@ -64,7 +64,7 @@ static NSString * const kService = @"RouteGuide";
// interface:kService
// method:@"EmptyCall"];
//
// id<GRXWriter> requestsWriter = [GRXWriter writerWithValue:[NSData data]];
// GRXWriter *requestsWriter = [GRXWriter writerWithValue:[NSData data]];
//
// GRPCCall *call = [[GRPCCall alloc] initWithHost:kRouteGuideHost
// method:method
@@ -91,7 +91,7 @@ static NSString * const kService = @"RouteGuide";
service:kService
method:@"RecordRoute"];
id<GRXWriter> requestsWriter = [GRXWriter emptyWriter];
GRXWriter *requestsWriter = [GRXWriter emptyWriter];
GRPCCall *call = [[GRPCCall alloc] initWithHost:kRouteGuideHost
path:method.HTTPPath
@@ -122,7 +122,7 @@ static NSString * const kService = @"RouteGuide";
RGDPoint *point = [RGDPoint message];
point.latitude = 28E7;
point.longitude = -15E7;
id<GRXWriter> requestsWriter = [GRXWriter writerWithValue:[point data]];
GRXWriter *requestsWriter = [GRXWriter writerWithValue:[point data]];
GRPCCall *call = [[GRPCCall alloc] initWithHost:kRouteGuideHost
path:method.HTTPPath

@@ -408,7 +408,7 @@ PHP_METHOD(Call, startBatch) {
goto cleanup;
}
event = grpc_completion_queue_pluck(completion_queue, call->wrapped,
gpr_inf_future);
gpr_inf_future(GPR_CLOCK_REALTIME));
if (!event.success) {
zend_throw_exception(spl_ce_LogicException,
"The batch failed for some reason",

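The PHP, Python and Ruby hunks from here on track a single C-core API change: gpr_inf_future, gpr_inf_past and gpr_time_0 are now functions that take a gpr_clock_type rather than global constants. A minimal C sketch of the new call shape (example_deadline is a hypothetical helper, not from this diff):

    #include <grpc/support/time.h>

    /* The former constants are now functions parameterized by clock type. */
    static gpr_timespec example_deadline(void) {
      gpr_timespec never = gpr_inf_past(GPR_CLOCK_REALTIME);    /* was: gpr_inf_past   */
      (void)never;
      return gpr_inf_future(GPR_CLOCK_REALTIME);                /* was: gpr_inf_future */
    }
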
@@ -43,8 +43,9 @@ void grpc_php_init_completion_queue(TSRMLS_D) {
void grpc_php_shutdown_completion_queue(TSRMLS_D) {
grpc_completion_queue_shutdown(completion_queue);
while (grpc_completion_queue_next(completion_queue, gpr_inf_future).type !=
GRPC_QUEUE_SHUTDOWN)
while (grpc_completion_queue_next(completion_queue,
gpr_inf_future(GPR_CLOCK_REALTIME))
.type != GRPC_QUEUE_SHUTDOWN)
;
grpc_completion_queue_destroy(completion_queue);
}

@@ -64,7 +64,8 @@ void free_wrapped_grpc_server(void *object TSRMLS_DC) {
wrapped_grpc_server *server = (wrapped_grpc_server *)object;
if (server->wrapped != NULL) {
grpc_server_shutdown_and_notify(server->wrapped, completion_queue, NULL);
grpc_completion_queue_pluck(completion_queue, NULL, gpr_inf_future);
grpc_completion_queue_pluck(completion_queue, NULL,
gpr_inf_future(GPR_CLOCK_REALTIME));
grpc_server_destroy(server->wrapped);
}
efree(server);
@@ -143,7 +144,8 @@ PHP_METHOD(Server, requestCall) {
(long)error_code TSRMLS_CC);
goto cleanup;
}
event = grpc_completion_queue_pluck(completion_queue, NULL, gpr_inf_future);
event = grpc_completion_queue_pluck(completion_queue, NULL,
gpr_inf_future(GPR_CLOCK_REALTIME));
if (!event.success) {
zend_throw_exception(spl_ce_LogicException,
"Failed to request a call for some reason",

@@ -98,7 +98,7 @@ PHP_METHOD(Timeval, __construct) {
"Timeval expects a long", 1 TSRMLS_CC);
return;
}
gpr_timespec time = gpr_time_from_micros(microseconds);
gpr_timespec time = gpr_time_from_micros(microseconds, GPR_TIMESPAN);
memcpy(&timeval->wrapped, &time, sizeof(gpr_timespec));
}
@@ -217,7 +217,8 @@ PHP_METHOD(Timeval, now) {
* @return Timeval Zero length time interval
*/
PHP_METHOD(Timeval, zero) {
zval *grpc_php_timeval_zero = grpc_php_wrap_timeval(gpr_time_0);
zval *grpc_php_timeval_zero =
grpc_php_wrap_timeval(gpr_time_0(GPR_CLOCK_REALTIME));
RETURN_ZVAL(grpc_php_timeval_zero,
false, /* Copy original before returning? */
true /* Destroy original before returning */);
@@ -228,7 +229,8 @@ PHP_METHOD(Timeval, zero) {
* @return Timeval Infinite future time value
*/
PHP_METHOD(Timeval, infFuture) {
zval *grpc_php_timeval_inf_future = grpc_php_wrap_timeval(gpr_inf_future);
zval *grpc_php_timeval_inf_future =
grpc_php_wrap_timeval(gpr_inf_future(GPR_CLOCK_REALTIME));
RETURN_DESTROY_ZVAL(grpc_php_timeval_inf_future);
}
@@ -237,7 +239,8 @@ PHP_METHOD(Timeval, infFuture) {
* @return Timeval Infinite past time value
*/
PHP_METHOD(Timeval, infPast) {
zval *grpc_php_timeval_inf_past = grpc_php_wrap_timeval(gpr_inf_past);
zval *grpc_php_timeval_inf_past =
grpc_php_wrap_timeval(gpr_inf_past(GPR_CLOCK_REALTIME));
RETURN_DESTROY_ZVAL(grpc_php_timeval_inf_past);
}

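Relative durations are now tagged GPR_TIMESPAN, and gpr_time_add combines an absolute time with such a span, exactly as the Ruby completion-queue hunk further down does. A sketch with a hypothetical helper:

    #include <grpc/support/time.h>

    /* Deadline = now (realtime clock) + a span of the given number of microseconds. */
    static gpr_timespec deadline_in_micros(long micros) {
      gpr_timespec span = gpr_time_from_micros(micros, GPR_TIMESPAN);
      return gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), span);
    }
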
@@ -385,10 +385,12 @@ static int pygrpc_isinf(double x) {
gpr_timespec pygrpc_cast_double_to_gpr_timespec(double seconds) {
gpr_timespec result;
if (pygrpc_isinf(seconds)) {
result = seconds > 0.0 ? gpr_inf_future : gpr_inf_past;
result = seconds > 0.0 ? gpr_inf_future(GPR_CLOCK_REALTIME)
: gpr_inf_past(GPR_CLOCK_REALTIME);
} else {
result.tv_sec = (time_t)seconds;
result.tv_nsec = ((seconds - result.tv_sec) * 1e9);
result.clock_type = GPR_CLOCK_REALTIME;
}
return result;
}

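The Python hunk above shows the other half of the change: a gpr_timespec assembled field by field now also carries its clock type. Sketch with a hypothetical helper:

    #include <grpc/support/time.h>

    /* Ten seconds past the epoch on the realtime clock; clock_type is the field
       these hunks now set explicitly. */
    static gpr_timespec ten_seconds_from_epoch(void) {
      gpr_timespec t;
      t.tv_sec     = 10;
      t.tv_nsec    = 0;
      t.clock_type = GPR_CLOCK_REALTIME;
      return t;
    }
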
@@ -82,7 +82,7 @@ static void grpc_rb_completion_queue_shutdown_drain(grpc_completion_queue *cq) {
next_call.cq = cq;
next_call.event.type = GRPC_QUEUE_TIMEOUT;
/* TODO: the timeout should be a module level constant that defaults
* to gpr_inf_future.
* to gpr_inf_future(GPR_CLOCK_REALTIME).
*
* - at the moment this does not work, it stalls. Using a small timeout like
* this one works, and leads to fast test run times; a longer timeout was
@@ -91,7 +91,8 @@ static void grpc_rb_completion_queue_shutdown_drain(grpc_completion_queue *cq) {
* - investigate further, this is probably another example of C-level cleanup
* not working consistently in all cases.
*/
next_call.timeout = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_micros(5e3));
next_call.timeout = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_micros(5e3, GPR_TIMESPAN));
do {
rb_thread_call_without_gvl(grpc_rb_completion_queue_next_no_gil,
(void *)&next_call, NULL, NULL);
@@ -143,7 +144,7 @@ grpc_event grpc_rb_completion_queue_pluck_event(VALUE self, VALUE tag,
TypedData_Get_Struct(self, grpc_completion_queue,
&grpc_rb_completion_queue_data_type, next_call.cq);
if (TYPE(timeout) == T_NIL) {
next_call.timeout = gpr_inf_future;
next_call.timeout = gpr_inf_future(GPR_CLOCK_REALTIME);
} else {
next_call.timeout = grpc_rb_time_timeval(timeout, /* absolute time*/ 0);
}

@@ -98,6 +98,7 @@ gpr_timespec grpc_rb_time_timeval(VALUE time, int interval) {
const char *tstr = interval ? "time interval" : "time";
const char *want = " want <secs from epoch>|<Time>|<GRPC::TimeConst.*>";
t.clock_type = GPR_CLOCK_REALTIME;
switch (TYPE(time)) {
case T_DATA:
if (CLASS_OF(time) == grpc_rb_cTimeVal) {
@@ -222,24 +223,31 @@ static VALUE grpc_rb_time_val_to_s(VALUE self) {
return rb_funcall(grpc_rb_time_val_to_time(self), id_to_s, 0);
}
static gpr_timespec zero_realtime;
static gpr_timespec inf_future_realtime;
static gpr_timespec inf_past_realtime;
/* Adds a module with constants that map to gpr's static timeval structs. */
static void Init_grpc_time_consts() {
VALUE grpc_rb_mTimeConsts =
rb_define_module_under(grpc_rb_mGrpcCore, "TimeConsts");
grpc_rb_cTimeVal =
rb_define_class_under(grpc_rb_mGrpcCore, "TimeSpec", rb_cObject);
zero_realtime = gpr_time_0(GPR_CLOCK_REALTIME);
inf_future_realtime = gpr_inf_future(GPR_CLOCK_REALTIME);
inf_past_realtime = gpr_inf_past(GPR_CLOCK_REALTIME);
rb_define_const(
grpc_rb_mTimeConsts, "ZERO",
TypedData_Wrap_Struct(grpc_rb_cTimeVal, &grpc_rb_timespec_data_type,
(void *)&gpr_time_0));
(void *)&zero_realtime));
rb_define_const(
grpc_rb_mTimeConsts, "INFINITE_FUTURE",
TypedData_Wrap_Struct(grpc_rb_cTimeVal, &grpc_rb_timespec_data_type,
(void *)&gpr_inf_future));
(void *)&inf_future_realtime));
rb_define_const(
grpc_rb_mTimeConsts, "INFINITE_PAST",
TypedData_Wrap_Struct(grpc_rb_cTimeVal, &grpc_rb_timespec_data_type,
(void *)&gpr_inf_past));
(void *)&inf_past_realtime));
rb_define_method(grpc_rb_cTimeVal, "to_time", grpc_rb_time_val_to_time, 0);
rb_define_method(grpc_rb_cTimeVal, "inspect", grpc_rb_time_val_inspect, 0);
rb_define_method(grpc_rb_cTimeVal, "to_s", grpc_rb_time_val_to_s, 0);
