Merge github.com:grpc/grpc into shutdown-c++

pull/2977/head
Craig Tiller
commit 5c6e6276be
Files changed (changed lines per file):
  1. include/grpc++/async_unary_call.h (4)
  2. include/grpc++/auth_context.h (2)
  3. include/grpc++/client_context.h (4)
  4. include/grpc++/dynamic_thread_pool.h (5)
  5. include/grpc++/generic_stub.h (4)
  6. include/grpc++/impl/call.h (24)
  7. include/grpc++/impl/grpc_library.h (1)
  8. include/grpc++/impl/sync_no_cxx11.h (8)
  9. include/grpc++/impl/thd_no_cxx11.h (21)
  10. include/grpc++/server.h (5)
  11. include/grpc++/server_builder.h (11)
  12. include/grpc++/stream.h (7)
  13. include/grpc/grpc.h (3)
  14. include/grpc/support/atm_win32.h (35)
  15. include/grpc/support/port_platform.h (3)
  16. include/grpc/support/sync_generic.h (12)
  17. include/grpc/support/time.h (3)
  18. include/grpc/support/tls_gcc.h (8)
  19. include/grpc/support/tls_msvc.h (8)
  20. src/core/channel/client_channel.c (30)
  21. src/core/channel/client_channel.h (3)
  22. src/core/channel/compress_filter.c (23)
  23. src/core/client_config/resolvers/dns_resolver.c (3)
  24. src/core/client_config/resolvers/zookeeper_resolver.c (2)
  25. src/core/client_config/subchannel_factory_decorators/add_channel_arg.h (3)
  26. src/core/client_config/subchannel_factory_decorators/merge_channel_args.h (3)
  27. src/core/compression/algorithm.c (2)
  28. src/core/debug/trace.c (6)
  29. src/core/httpcli/format_request.c (6)
  30. src/core/iomgr/alarm.c (3)
  31. src/core/iomgr/alarm_heap.c (4)
  32. src/core/iomgr/endpoint.c (3)
  33. src/core/iomgr/endpoint.h (3)
  34. src/core/iomgr/endpoint_pair_windows.c (20)
  35. src/core/iomgr/iocp_windows.c (33)
  36. src/core/iomgr/iocp_windows.h (8)
  37. src/core/iomgr/pollset_multipoller_with_epoll.c (3)
  38. src/core/iomgr/pollset_multipoller_with_poll_posix.c (3)
  39. src/core/iomgr/pollset_posix.c (4)
  40. src/core/iomgr/pollset_posix.h (3)
  41. src/core/iomgr/pollset_windows.c (6)
  42. src/core/iomgr/resolve_address_posix.c (5)
  43. src/core/iomgr/sockaddr_utils.c (6)
  44. src/core/iomgr/socket_windows.h (2)
  45. src/core/iomgr/tcp_client_posix.c (3)
  46. src/core/iomgr/tcp_posix.c (3)
  47. src/core/iomgr/tcp_server_windows.c (10)
  48. src/core/iomgr/tcp_windows.c (57)
  49. src/core/iomgr/udp_server.c (12)
  50. src/core/iomgr/wakeup_fd_eventfd.c (3)
  51. src/core/iomgr/wakeup_fd_nospecial.c (7)
  52. src/core/iomgr/wakeup_fd_posix.c (4)
  53. src/core/json/json_reader.c (28)
  54. src/core/json/json_string.c (50)
  55. src/core/json/json_writer.c (33)
  56. src/core/json/json_writer.h (12)
  57. src/core/security/client_auth_filter.c (8)
  58. src/core/security/credentials.c (4)
  59. src/core/security/credentials.h (5)
  60. src/core/security/credentials_metadata.c (4)
  61. src/core/security/google_default_credentials.c (4)
  62. src/core/security/jwt_verifier.h (1)
  63. src/core/security/security_context.c (4)
  64. src/core/security/security_context.h (1)
  65. src/core/security/server_auth_filter.c (3)
  66. src/core/statistics/census_tracing.c (7)
  67. src/core/support/cpu_iphone.c (8)
  68. src/core/support/histogram.c (7)
  69. src/core/support/slice.c (3)
  70. src/core/support/slice_buffer.c (3)
  71. src/core/support/stack_lockfree.c (14)
  72. src/core/support/string.c (15)
  73. src/core/support/string_win32.c (8)
  74. src/core/support/sync_posix.c (3)
  75. src/core/support/sync_win32.c (3)
  76. src/core/support/thd.c (4)
  77. src/core/support/thd_posix.c (14)
  78. src/core/support/thd_win32.c (4)
  79. src/core/support/time.c (3)
  80. src/core/support/tls_pthread.c (2)
  81. src/core/surface/call.c (13)
  82. src/core/surface/call_log_batch.c (15)
  83. src/core/surface/channel.c (18)
  84. src/core/surface/channel_connectivity.c (14)
  85. src/core/surface/completion_queue.c (6)
  86. src/core/surface/init_unsecure.c (3)
  87. src/core/surface/server.c (3)
  88. src/core/surface/server_create.c (2)
  89. src/core/surface/version.c (4)
  90. src/core/transport/chttp2/parsing.c (3)
  91. src/core/transport/chttp2/stream_map.c (3)
  92. src/core/transport/chttp2/writing.c (29)
  93. src/core/transport/chttp2_transport.c (12)
  94. src/core/transport/metadata.c (9)
  95. src/core/transport/metadata.h (3)
  96. src/core/transport/stream_op.c (4)
  97. src/core/tsi/fake_transport_security.c (11)
  98. src/core/tsi/ssl_transport_security.c (25)
  99. src/cpp/client/channel.cc (6)
  100. src/cpp/client/channel.h (3)
Some files were not shown because too many files have changed in this diff.

@@ -121,8 +121,8 @@ class ServerAsyncResponseWriter GRPC_FINAL
     }
     // The response is dropped if the status is not OK.
     if (status.ok()) {
-      finish_buf_.ServerSendStatus(
-          ctx_->trailing_metadata_, finish_buf_.SendMessage(msg));
+      finish_buf_.ServerSendStatus(ctx_->trailing_metadata_,
+                                   finish_buf_.SendMessage(msg));
     } else {
       finish_buf_.ServerSendStatus(ctx_->trailing_metadata_, status);
     }

@ -62,6 +62,7 @@ class AuthPropertyIterator
AuthPropertyIterator(); AuthPropertyIterator();
AuthPropertyIterator(const grpc_auth_property* property, AuthPropertyIterator(const grpc_auth_property* property,
const grpc_auth_property_iterator* iter); const grpc_auth_property_iterator* iter);
private: private:
friend class SecureAuthContext; friend class SecureAuthContext;
const grpc_auth_property* property_; const grpc_auth_property* property_;
@ -92,4 +93,3 @@ class AuthContext {
} // namespace grpc } // namespace grpc
#endif // GRPCXX_AUTH_CONTEXT_H #endif // GRPCXX_AUTH_CONTEXT_H

@@ -185,7 +185,9 @@ class ClientContext {
   // Get and set census context
   void set_census_context(struct census_context* ccp) { census_context_ = ccp; }
-  struct census_context* census_context() const { return census_context_; }
+  struct census_context* census_context() const {
+    return census_context_;
+  }
 
   void TryCancel();

@@ -56,10 +56,11 @@ class DynamicThreadPool GRPC_FINAL : public ThreadPoolInterface {
  private:
   class DynamicThread {
    public:
-    DynamicThread(DynamicThreadPool *pool);
+    DynamicThread(DynamicThreadPool* pool);
     ~DynamicThread();
+
    private:
-    DynamicThreadPool *pool_;
+    DynamicThreadPool* pool_;
     std::unique_ptr<grpc::thread> thd_;
     void ThreadFunc();
   };

@@ -52,8 +52,8 @@ class GenericStub GRPC_FINAL {
   // begin a call to a named method
   std::unique_ptr<GenericClientAsyncReaderWriter> Call(
-      ClientContext* context, const grpc::string& method,
-      CompletionQueue* cq, void* tag);
+      ClientContext* context, const grpc::string& method, CompletionQueue* cq,
+      void* tag);
 
  private:
   std::shared_ptr<ChannelInterface> channel_;

@@ -67,14 +67,10 @@ class WriteOptions {
   WriteOptions(const WriteOptions& other) : flags_(other.flags_) {}
 
   /// Clear all flags.
-  inline void Clear() {
-    flags_ = 0;
-  }
+  inline void Clear() { flags_ = 0; }
 
   /// Returns raw flags bitset.
-  inline gpr_uint32 flags() const {
-    return flags_;
-  }
+  inline gpr_uint32 flags() const { return flags_; }
 
   /// Sets flag for the disabling of compression for the next message write.
   ///
@@ -122,9 +118,7 @@ class WriteOptions {
   /// not go out on the wire immediately.
   ///
   /// \sa GRPC_WRITE_BUFFER_HINT
-  inline bool get_buffer_hint() const {
-    return GetBit(GRPC_WRITE_BUFFER_HINT);
-  }
+  inline bool get_buffer_hint() const { return GetBit(GRPC_WRITE_BUFFER_HINT); }
 
   WriteOptions& operator=(const WriteOptions& rhs) {
     flags_ = rhs.flags_;
@@ -132,17 +126,11 @@ class WriteOptions {
   }
 
  private:
-  void SetBit(const gpr_int32 mask) {
-    flags_ |= mask;
-  }
+  void SetBit(const gpr_int32 mask) { flags_ |= mask; }
 
-  void ClearBit(const gpr_int32 mask) {
-    flags_ &= ~mask;
-  }
+  void ClearBit(const gpr_int32 mask) { flags_ &= ~mask; }
 
-  bool GetBit(const gpr_int32 mask) const {
-    return flags_ & mask;
-  }
+  bool GetBit(const gpr_int32 mask) const { return flags_ & mask; }
 
   gpr_uint32 flags_;
 };
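
The WriteOptions hunks above are pure formatting churn, but the class itself is a handy pattern: a typed wrapper over a flag bitmask with SetBit/ClearBit/GetBit helpers. A minimal stand-alone sketch of the same idiom follows (illustrative only, not gRPC code; the class name and flag constant are invented):

#include <cstdint>
#include <iostream>

// Minimal stand-in for a flag holder in the style of WriteOptions.
// kBufferHint is a hypothetical flag value, not GRPC_WRITE_BUFFER_HINT.
class FlagSet {
 public:
  void set_buffer_hint() { SetBit(kBufferHint); }
  void clear_buffer_hint() { ClearBit(kBufferHint); }
  bool get_buffer_hint() const { return GetBit(kBufferHint); }

 private:
  static const uint32_t kBufferHint = 0x1;
  void SetBit(uint32_t mask) { flags_ |= mask; }
  void ClearBit(uint32_t mask) { flags_ &= ~mask; }
  bool GetBit(uint32_t mask) const { return (flags_ & mask) != 0; }
  uint32_t flags_ = 0;
};

int main() {
  FlagSet opts;
  opts.set_buffer_hint();
  std::cout << opts.get_buffer_hint() << "\n";  // prints 1
  opts.clear_buffer_hint();
  std::cout << opts.get_buffer_hint() << "\n";  // prints 0
}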

@@ -46,5 +46,4 @@ class GrpcLibrary {
 }  // namespace grpc
 
 #endif  // GRPCXX_IMPL_GRPC_LIBRARY_H
-

@@ -38,7 +38,7 @@
 namespace grpc {
 
-template<class mutex>
+template <class mutex>
 class lock_guard;
 class condition_variable;
@@ -46,6 +46,7 @@ class mutex {
  public:
   mutex() { gpr_mu_init(&mu_); }
   ~mutex() { gpr_mu_destroy(&mu_); }
+
  private:
   ::gpr_mu mu_;
   template <class mutex>
@@ -58,6 +59,7 @@ class lock_guard {
  public:
   lock_guard(mutex &mu) : mu_(mu), locked(true) { gpr_mu_lock(&mu.mu_); }
   ~lock_guard() { unlock_internal(); }
+
  protected:
   void lock_internal() {
     if (!locked) gpr_mu_lock(&mu_.mu_);
@@ -67,6 +69,7 @@ class lock_guard {
     if (locked) gpr_mu_unlock(&mu_.mu_);
     locked = false;
   }
+
  private:
   mutex &mu_;
   bool locked;
@@ -76,7 +79,7 @@ class lock_guard {
 template <class mutex>
 class unique_lock : public lock_guard<mutex> {
  public:
-  unique_lock(mutex &mu) : lock_guard<mutex>(mu) { }
+  unique_lock(mutex &mu) : lock_guard<mutex>(mu) {}
   void lock() { this->lock_internal(); }
   void unlock() { this->unlock_internal(); }
 };
@@ -92,6 +95,7 @@ class condition_variable {
   }
   void notify_one() { gpr_cv_signal(&cv_); }
   void notify_all() { gpr_cv_broadcast(&cv_); }
+
  private:
   gpr_cv cv_;
 };
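
These wrappers emulate a small subset of std::mutex, std::lock_guard and std::condition_variable on top of gpr_mu/gpr_cv for toolchains without C++11. A hedged usage sketch, assuming the wrappers are reached through the usual grpc++ sync header and behave like their std counterparts (the counter and function names are invented):

#include <grpc++/impl/sync.h>  // picks sync_cxx11.h or sync_no_cxx11.h (assumed)

// Illustrative sketch only: a counter guarded by the grpc::mutex wrapper.
static grpc::mutex g_mu;
static int g_counter = 0;

void Increment() {
  grpc::lock_guard<grpc::mutex> lock(g_mu);  // gpr_mu_lock in the constructor
  ++g_counter;
}  // gpr_mu_unlock when `lock` goes out of scope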

@@ -40,7 +40,8 @@ namespace grpc {
 
 class thread {
  public:
-  template<class T> thread(void (T::*fptr)(), T *obj) {
+  template <class T>
+  thread(void (T::*fptr)(), T *obj) {
     func_ = new thread_function<T>(fptr, obj);
     joined_ = false;
     start();
@@ -53,28 +54,28 @@ class thread {
     gpr_thd_join(thd_);
     joined_ = true;
   }
+
  private:
   void start() {
     gpr_thd_options options = gpr_thd_options_default();
     gpr_thd_options_set_joinable(&options);
-    gpr_thd_new(&thd_, thread_func, (void *) func_, &options);
+    gpr_thd_new(&thd_, thread_func, (void *)func_, &options);
   }
   static void thread_func(void *arg) {
-    thread_function_base *func = (thread_function_base *) arg;
+    thread_function_base *func = (thread_function_base *)arg;
     func->call();
   }
   class thread_function_base {
    public:
-    virtual ~thread_function_base() { }
+    virtual ~thread_function_base() {}
     virtual void call() = 0;
   };
-  template<class T>
+  template <class T>
   class thread_function : public thread_function_base {
    public:
-    thread_function(void (T::*fptr)(), T *obj)
-      : fptr_(fptr)
-      , obj_(obj) { }
+    thread_function(void (T::*fptr)(), T *obj) : fptr_(fptr), obj_(obj) {}
     virtual void call() { (obj_->*fptr_)(); }
+
    private:
     void (T::*fptr_)();
     T *obj_;
@@ -84,8 +85,8 @@ class thread {
   bool joined_;
 
   // Disallow copy and assign.
-  thread(const thread&);
-  void operator=(const thread&);
+  thread(const thread &);
+  void operator=(const thread &);
 };
 
 }  // namespace grpc
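
The grpc::thread shim shown above takes a member-function pointer plus an object, launches it on a gpr thread, and must be join()ed before destruction. A hedged usage sketch (the Worker class is invented; the constructor and join() are the ones visible in the hunk):

// Illustrative sketch only, assuming the pre-C++11 grpc::thread wrapper above.
class Worker {
 public:
  void Run() { /* do some work */ }
};

void SpawnAndJoin() {
  Worker w;
  grpc::thread t(&Worker::Run, &w);  // starts a gpr thread via thread_function<T>
  t.join();                          // gpr_thd_join under the hood
}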

@@ -90,8 +90,9 @@ class Server GRPC_FINAL : public GrpcLibrary, private CallHook {
          int max_message_size);
   // Register a service. This call does not take ownership of the service.
   // The service must exist for the lifetime of the Server instance.
-  bool RegisterService(const grpc::string *host, RpcService* service);
-  bool RegisterAsyncService(const grpc::string *host, AsynchronousService* service);
+  bool RegisterService(const grpc::string* host, RpcService* service);
+  bool RegisterAsyncService(const grpc::string* host,
+                            AsynchronousService* service);
   void RegisterAsyncGenericService(AsyncGenericService* service);
   // Add a listening port. Can be called multiple times.
   int AddListeningPort(const grpc::string& addr, ServerCredentials* creds);

@@ -76,8 +76,7 @@ class ServerBuilder {
   // The service must exist for the lifetime of the Server instance returned by
   // BuildAndStart().
   // Only matches requests with :authority \a host
-  void RegisterService(const grpc::string& host,
-                       SynchronousService* service);
+  void RegisterService(const grpc::string& host, SynchronousService* service);
 
   // Register an asynchronous service.
   // This call does not take ownership of the service or completion queue.
@@ -117,9 +116,10 @@ class ServerBuilder {
   };
 
   typedef std::unique_ptr<grpc::string> HostString;
-  template <class T> struct NamedService {
+  template <class T>
+  struct NamedService {
     explicit NamedService(T* s) : service(s) {}
-    NamedService(const grpc::string& h, T *s)
+    NamedService(const grpc::string& h, T* s)
         : host(new grpc::string(h)), service(s) {}
     HostString host;
     T* service;
@@ -127,7 +127,8 @@ class ServerBuilder {
   int max_message_size_;
   std::vector<std::unique_ptr<NamedService<RpcService>>> services_;
-  std::vector<std::unique_ptr<NamedService<AsynchronousService>>> async_services_;
+  std::vector<std::unique_ptr<NamedService<AsynchronousService>>>
+      async_services_;
   std::vector<Port> ports_;
   std::vector<ServerCompletionQueue*> cqs_;
   std::shared_ptr<ServerCredentials> creds_;

@@ -85,9 +85,7 @@ class WriterInterface {
   // Returns false when the stream has been closed.
   virtual bool Write(const W& msg, const WriteOptions& options) = 0;
 
-  inline bool Write(const W& msg) {
-    return Write(msg, WriteOptions());
-  }
+  inline bool Write(const W& msg) { return Write(msg, WriteOptions()); }
 };
 
 template <class R>
@@ -640,8 +638,7 @@ class ServerAsyncReader GRPC_FINAL : public ServerAsyncStreamingInterface,
     }
     // The response is dropped if the status is not OK.
     if (status.ok()) {
-      finish_ops_.ServerSendStatus(
-          ctx_->trailing_metadata_,
-          finish_ops_.SendMessage(msg));
+      finish_ops_.ServerSendStatus(ctx_->trailing_metadata_,
+                                   finish_ops_.SendMessage(msg));
     } else {
       finish_ops_.ServerSendStatus(ctx_->trailing_metadata_, status);

@@ -629,8 +629,7 @@ grpc_call_error grpc_server_request_registered_call(
     be specified with args. If no additional configuration is needed, args can
     be NULL. See grpc_channel_args for more. The data in 'args' need only live
     through the invocation of this function. */
-grpc_server *grpc_server_create(const grpc_channel_args *args,
-                                void *reserved);
+grpc_server *grpc_server_create(const grpc_channel_args *args, void *reserved);
 
 /** Register a completion queue with the server. Must be done for any
     notification completion queue that is passed to grpc_server_request_*_call

@@ -66,31 +66,31 @@ static __inline int gpr_atm_no_barrier_cas(gpr_atm *p, gpr_atm o, gpr_atm n) {
 /* InterlockedCompareExchangePointerNoFence() not available on vista or
    windows7 */
 #ifdef GPR_ARCH_64
-  return o == (gpr_atm)InterlockedCompareExchangeAcquire64((volatile LONGLONG *) p,
-                                                           (LONGLONG) n, (LONGLONG) o);
+  return o == (gpr_atm)InterlockedCompareExchangeAcquire64(
+                  (volatile LONGLONG *)p, (LONGLONG)n, (LONGLONG)o);
 #else
-  return o == (gpr_atm)InterlockedCompareExchangeAcquire((volatile LONG *) p,
-                                                         (LONG) n, (LONG) o);
+  return o == (gpr_atm)InterlockedCompareExchangeAcquire((volatile LONG *)p,
+                                                         (LONG)n, (LONG)o);
 #endif
 }
 
 static __inline int gpr_atm_acq_cas(gpr_atm *p, gpr_atm o, gpr_atm n) {
 #ifdef GPR_ARCH_64
-  return o == (gpr_atm)InterlockedCompareExchangeAcquire64((volatile LONGLONG *) p,
-                                                           (LONGLONG) n, (LONGLONG) o);
+  return o == (gpr_atm)InterlockedCompareExchangeAcquire64(
+                  (volatile LONGLONG *)p, (LONGLONG)n, (LONGLONG)o);
 #else
-  return o == (gpr_atm)InterlockedCompareExchangeAcquire((volatile LONG *) p,
-                                                         (LONG) n, (LONG) o);
+  return o == (gpr_atm)InterlockedCompareExchangeAcquire((volatile LONG *)p,
+                                                         (LONG)n, (LONG)o);
 #endif
 }
 
 static __inline int gpr_atm_rel_cas(gpr_atm *p, gpr_atm o, gpr_atm n) {
 #ifdef GPR_ARCH_64
-  return o == (gpr_atm)InterlockedCompareExchangeRelease64((volatile LONGLONG *) p,
-                                                           (LONGLONG) n, (LONGLONG) o);
+  return o == (gpr_atm)InterlockedCompareExchangeRelease64(
+                  (volatile LONGLONG *)p, (LONGLONG)n, (LONGLONG)o);
 #else
-  return o == (gpr_atm)InterlockedCompareExchangeRelease((volatile LONG *) p,
-                                                         (LONG) n, (LONG) o);
+  return o == (gpr_atm)InterlockedCompareExchangeRelease((volatile LONG *)p,
+                                                         (LONG)n, (LONG)o);
 #endif
 }
@@ -110,15 +110,14 @@ static __inline gpr_atm gpr_atm_full_fetch_add(gpr_atm *p, gpr_atm delta) {
 #ifdef GPR_ARCH_64
   do {
     old = *p;
-  } while (old != (gpr_atm)InterlockedCompareExchange64((volatile LONGLONG *) p,
-                                                        (LONGLONG) old + delta,
-                                                        (LONGLONG) old));
+  } while (old != (gpr_atm)InterlockedCompareExchange64((volatile LONGLONG *)p,
+                                                        (LONGLONG)old + delta,
+                                                        (LONGLONG)old));
 #else
   do {
     old = *p;
-  } while (old != (gpr_atm)InterlockedCompareExchange((volatile LONG *) p,
-                                                      (LONG) old + delta,
-                                                      (LONG) old));
+  } while (old != (gpr_atm)InterlockedCompareExchange(
+                      (volatile LONG *)p, (LONG)old + delta, (LONG)old));
 #endif
   return old;
 }
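
The fetch-add fallback above is the classic compare-and-swap retry loop, just spelled with Win32 Interlocked* intrinsics. A portable sketch of the same pattern follows (illustrative only, using std::atomic rather than gpr_atm):

#include <atomic>

// Not gRPC code: the same fetch-add-via-CAS idea that gpr_atm_full_fetch_add()
// implements above, expressed with std::atomic.
long fetch_add_with_cas(std::atomic<long>* p, long delta) {
  long old = p->load();
  // Retry until no other thread changed *p between the load and the exchange.
  // compare_exchange_weak reloads the current value into `old` on failure.
  while (!p->compare_exchange_weak(old, old + delta)) {
  }
  return old;
}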

@@ -64,7 +64,8 @@
 #undef GRPC_NOMINMAX_WAS_NOT_DEFINED
 #undef NOMINMAX
 #endif /* GRPC_WIN32_LEAN_AND_MEAN_WAS_NOT_DEFINED */
-#endif /* defined(_WIN64) || defined(WIN64) || defined(_WIN32) || defined(WIN32) */
+#endif /* defined(_WIN64) || defined(WIN64) || defined(_WIN32) || \
+          defined(WIN32) */
 
 /* Override this file with one for your platform if you need to redefine
    things. */

@@ -38,22 +38,16 @@
 #include <grpc/support/atm.h>
 
 /* gpr_event */
-typedef struct {
-  gpr_atm state;
-} gpr_event;
+typedef struct { gpr_atm state; } gpr_event;
 
 #define GPR_EVENT_INIT \
   { 0 }
 
 /* gpr_refcount */
-typedef struct {
-  gpr_atm count;
-} gpr_refcount;
+typedef struct { gpr_atm count; } gpr_refcount;
 
 /* gpr_stats_counter */
-typedef struct {
-  gpr_atm value;
-} gpr_stats_counter;
+typedef struct { gpr_atm value; } gpr_stats_counter;
 
 #define GPR_STATS_INIT \
   { 0 }
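
gpr_event, gpr_refcount and gpr_stats_counter are all one-word wrappers over a gpr_atm. As a usage illustration for the refcount flavour, a hedged sketch follows (the object type and helper names are invented; gpr_ref_init/gpr_ref/gpr_unref and gpr_malloc/gpr_free are the public gpr helpers, with gpr_unref reporting when the count reaches zero):

#include <grpc/support/alloc.h>
#include <grpc/support/sync.h>

// Hypothetical object whose lifetime is managed with a gpr_refcount.
typedef struct my_obj {
  gpr_refcount refs;
  int payload;
} my_obj;

static my_obj *my_obj_create(void) {
  my_obj *o = (my_obj *)gpr_malloc(sizeof(*o));
  gpr_ref_init(&o->refs, 1); /* starts with one owner */
  o->payload = 0;
  return o;
}

static void my_obj_ref(my_obj *o) { gpr_ref(&o->refs); }

static void my_obj_unref(my_obj *o) {
  if (gpr_unref(&o->refs)) { /* nonzero when the count hits zero */
    gpr_free(o);
  }
}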

@@ -84,7 +84,8 @@ void gpr_time_init(void);
 gpr_timespec gpr_now(gpr_clock_type clock);
 
 /* Convert a timespec from one clock to another */
-gpr_timespec gpr_convert_clock_type(gpr_timespec t, gpr_clock_type target_clock);
+gpr_timespec gpr_convert_clock_type(gpr_timespec t,
+                                    gpr_clock_type target_clock);
 
 /* Return -ve, 0, or +ve according to whether a < b, a == b, or a > b
    respectively. */

@@ -44,8 +44,12 @@ struct gpr_gcc_thread_local {
 #define GPR_TLS_DECL(name) \
   static __thread struct gpr_gcc_thread_local name = {0}
 
-#define gpr_tls_init(tls) do {} while (0)
-#define gpr_tls_destroy(tls) do {} while (0)
+#define gpr_tls_init(tls) \
+  do {                    \
+  } while (0)
+#define gpr_tls_destroy(tls) \
+  do {                       \
+  } while (0)
 #define gpr_tls_set(tls, new_value) (((tls)->value) = (new_value))
 #define gpr_tls_get(tls) ((tls)->value)

@@ -44,8 +44,12 @@ struct gpr_msvc_thread_local {
 #define GPR_TLS_DECL(name) \
   static __declspec(thread) struct gpr_msvc_thread_local name = {0}
 
-#define gpr_tls_init(tls) do {} while (0)
-#define gpr_tls_destroy(tls) do {} while (0)
+#define gpr_tls_init(tls) \
+  do {                    \
+  } while (0)
+#define gpr_tls_destroy(tls) \
+  do {                       \
+  } while (0)
 #define gpr_tls_set(tls, new_value) (((tls)->value) = (new_value))
 #define gpr_tls_get(tls) ((tls)->value)
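
Both TLS headers expand gpr_tls_init and gpr_tls_destroy to an empty do { } while (0), the usual trick for making a no-op macro behave like a single statement after an if. A hedged usage sketch of the macros shown above (the variable name and values are invented; on other platforms init/destroy are not no-ops, so they are still called):

#include <grpc/support/tls.h> /* assumed umbrella header selecting the right backend */

GPR_TLS_DECL(g_request_id); /* one slot per thread, hypothetical name */

static void module_init(void) { gpr_tls_init(&g_request_id); }

static void handle_request(long id) {
  gpr_tls_set(&g_request_id, id);
  /* ... later, possibly deep in the call stack on the same thread ... */
  long current = (long)gpr_tls_get(&g_request_id);
  (void)current;
}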

@@ -84,8 +84,10 @@ typedef struct {
   grpc_pollset_set pollset_set;
 } channel_data;
 
-/** We create one watcher for each new lb_policy that is returned from a resolver,
-    to watch for state changes from the lb_policy. When a state change is seen, we
-    update the channel, and create a new watcher */
+/** We create one watcher for each new lb_policy that is returned from a
+    resolver,
+    to watch for state changes from the lb_policy. When a state change is seen,
+    we
+    update the channel, and create a new watcher */
 typedef struct {
   channel_data *chand;
@@ -380,7 +382,8 @@ static void perform_transport_stream_op(grpc_call_element *elem,
       if (lb_policy) {
         grpc_transport_stream_op *op = &calld->waiting_op;
         grpc_pollset *bind_pollset = op->bind_pollset;
-        grpc_metadata_batch *initial_metadata = &op->send_ops->ops[0].data.metadata;
+        grpc_metadata_batch *initial_metadata =
+            &op->send_ops->ops[0].data.metadata;
         GRPC_LB_POLICY_REF(lb_policy, "pick");
         gpr_mu_unlock(&chand->mu_config);
         calld->state = CALL_WAITING_FOR_PICK;
@@ -388,13 +391,14 @@ static void perform_transport_stream_op(grpc_call_element *elem,
         GPR_ASSERT(op->bind_pollset);
         GPR_ASSERT(op->send_ops);
         GPR_ASSERT(op->send_ops->nops >= 1);
-        GPR_ASSERT(
-            op->send_ops->ops[0].type == GRPC_OP_METADATA);
+        GPR_ASSERT(op->send_ops->ops[0].type == GRPC_OP_METADATA);
         gpr_mu_unlock(&calld->mu_state);
 
-        grpc_iomgr_closure_init(&calld->async_setup_task, picked_target, calld);
+        grpc_iomgr_closure_init(&calld->async_setup_task, picked_target,
+                                calld);
         grpc_lb_policy_pick(lb_policy, bind_pollset, initial_metadata,
-                            &calld->picked_channel, &calld->async_setup_task);
+                            &calld->picked_channel,
+                            &calld->async_setup_task);
 
         GRPC_LB_POLICY_UNREF(lb_policy, "pick");
       } else if (chand->resolver != NULL) {
@@ -430,7 +434,8 @@ static void cc_start_transport_stream_op(grpc_call_element *elem,
   perform_transport_stream_op(elem, op, 0);
 }
 
-static void watch_lb_policy(channel_data *chand, grpc_lb_policy *lb_policy, grpc_connectivity_state current_state);
+static void watch_lb_policy(channel_data *chand, grpc_lb_policy *lb_policy,
+                            grpc_connectivity_state current_state);
 
 static void on_lb_policy_state_changed(void *arg, int iomgr_success) {
   lb_policy_connectivity_watcher *w = arg;
@@ -450,7 +455,8 @@ static void on_lb_policy_state_changed(void *arg, int iomgr_success) {
   gpr_free(w);
 }
 
-static void watch_lb_policy(channel_data *chand, grpc_lb_policy *lb_policy, grpc_connectivity_state current_state) {
+static void watch_lb_policy(channel_data *chand, grpc_lb_policy *lb_policy,
+                            grpc_connectivity_state current_state) {
   lb_policy_connectivity_watcher *w = gpr_malloc(sizeof(*w));
   GRPC_CHANNEL_INTERNAL_REF(chand->master, "watch_lb_policy");
@@ -663,7 +669,8 @@ static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
   grpc_iomgr_closure_init(&chand->on_config_changed, cc_on_config_changed,
                           chand);
 
-  grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE, "client_channel");
+  grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
+                               "client_channel");
 }
 
 /* Destructor for channel_data */
@@ -747,7 +754,8 @@ void grpc_client_channel_watch_connectivity_state(
   gpr_mu_unlock(&chand->mu_config);
 }
 
-grpc_pollset_set *grpc_client_channel_get_connecting_pollset_set(grpc_channel_element *elem) {
+grpc_pollset_set *grpc_client_channel_get_connecting_pollset_set(
+    grpc_channel_element *elem) {
   channel_data *chand = elem->channel_data;
   return &chand->pollset_set;
 }

@@ -59,7 +59,8 @@ void grpc_client_channel_watch_connectivity_state(
     grpc_channel_element *elem, grpc_connectivity_state *state,
     grpc_iomgr_closure *on_complete);
 
-grpc_pollset_set *grpc_client_channel_get_connecting_pollset_set(grpc_channel_element *elem);
+grpc_pollset_set *grpc_client_channel_get_connecting_pollset_set(
+    grpc_channel_element *elem);
 
 void grpc_client_channel_add_interested_party(grpc_channel_element *channel,
                                               grpc_pollset *pollset);

@@ -93,7 +93,7 @@ static int compress_send_sb(grpc_compression_algorithm algorithm,
 /** For each \a md element from the incoming metadata, filter out the entry for
  * "grpc-encoding", using its value to populate the call data's
  * compression_algorithm field. */
-static grpc_mdelem* compression_md_filter(void *user_data, grpc_mdelem *md) {
+static grpc_mdelem *compression_md_filter(void *user_data, grpc_mdelem *md) {
   grpc_call_element *elem = user_data;
   call_data *calld = elem->call_data;
   channel_data *channeld = elem->channel_data;
@@ -293,7 +293,7 @@ static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
                               int is_first, int is_last) {
   channel_data *channeld = elem->channel_data;
   grpc_compression_algorithm algo_idx;
-  const char* supported_algorithms_names[GRPC_COMPRESS_ALGORITHMS_COUNT-1];
+  const char *supported_algorithms_names[GRPC_COMPRESS_ALGORITHMS_COUNT - 1];
   char *accept_encoding_str;
   size_t accept_encoding_str_len;
@@ -318,22 +318,18 @@ static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
         GRPC_MDSTR_REF(channeld->mdstr_outgoing_compression_algorithm_key),
         grpc_mdstr_from_string(mdctx, algorithm_name, 0));
     if (algo_idx > 0) {
-      supported_algorithms_names[algo_idx-1] = algorithm_name;
+      supported_algorithms_names[algo_idx - 1] = algorithm_name;
     }
   }
 
   /* TODO(dgq): gpr_strjoin_sep could be made to work with statically allocated
    * arrays, as to avoid the heap allocs */
-  accept_encoding_str =
-      gpr_strjoin_sep(supported_algorithms_names,
-                      GPR_ARRAY_SIZE(supported_algorithms_names),
-                      ", ",
-                      &accept_encoding_str_len);
+  accept_encoding_str = gpr_strjoin_sep(
+      supported_algorithms_names, GPR_ARRAY_SIZE(supported_algorithms_names),
+      ", ", &accept_encoding_str_len);
 
-  channeld->mdelem_accept_encoding =
-      grpc_mdelem_from_metadata_strings(
-          mdctx,
-          GRPC_MDSTR_REF(channeld->mdstr_compression_capabilities_key),
-          grpc_mdstr_from_string(mdctx, accept_encoding_str, 0));
+  channeld->mdelem_accept_encoding = grpc_mdelem_from_metadata_strings(
+      mdctx, GRPC_MDSTR_REF(channeld->mdstr_compression_capabilities_key),
+      grpc_mdstr_from_string(mdctx, accept_encoding_str, 0));
 
   gpr_free(accept_encoding_str);
@@ -348,8 +344,7 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
   GRPC_MDSTR_UNREF(channeld->mdstr_request_compression_algorithm_key);
   GRPC_MDSTR_UNREF(channeld->mdstr_outgoing_compression_algorithm_key);
   GRPC_MDSTR_UNREF(channeld->mdstr_compression_capabilities_key);
-  for (algo_idx = 0; algo_idx < GRPC_COMPRESS_ALGORITHMS_COUNT;
-       ++algo_idx) {
+  for (algo_idx = 0; algo_idx < GRPC_COMPRESS_ALGORITHMS_COUNT; ++algo_idx) {
     GRPC_MDELEM_UNREF(channeld->mdelem_compression_algorithms[algo_idx]);
   }
   GRPC_MDELEM_UNREF(channeld->mdelem_accept_encoding);

@@ -219,7 +219,8 @@ static grpc_resolver *dns_create(
   default_host_arg.type = GRPC_ARG_STRING;
   default_host_arg.key = GRPC_ARG_DEFAULT_AUTHORITY;
   default_host_arg.value.string = host;
-  subchannel_factory = grpc_subchannel_factory_add_channel_arg(subchannel_factory, &default_host_arg);
+  subchannel_factory = grpc_subchannel_factory_add_channel_arg(
+      subchannel_factory, &default_host_arg);
   gpr_free(host);
   gpr_free(port);

@@ -249,7 +249,7 @@ static char *zookeeper_parse_address(const char *value, int value_len) {
   grpc_json *cur;
   const char *host;
   const char *port;
-  char* buffer;
+  char *buffer;
   char *address = NULL;
 
   buffer = gpr_malloc(value_len);

@@ -42,4 +42,5 @@
 grpc_subchannel_factory *grpc_subchannel_factory_add_channel_arg(
     grpc_subchannel_factory *input, const grpc_arg *arg);
 
-#endif /* GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_DECORATORS_ADD_CHANNEL_ARG_H */
+#endif /* GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_DECORATORS_ADD_CHANNEL_ARG_H \
+          */

@@ -42,4 +42,5 @@
 grpc_subchannel_factory *grpc_subchannel_factory_merge_channel_args(
     grpc_subchannel_factory *input, const grpc_channel_args *args);
 
-#endif /* GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_DECORATORS_MERGE_CHANNEL_ARGS_H */
+#endif /* GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_DECORATORS_MERGE_CHANNEL_ARGS_H \
+          */

@@ -35,7 +35,7 @@
 #include <string.h>
 
 #include <grpc/compression.h>
 
-int grpc_compression_algorithm_parse(const char* name, size_t name_length,
+int grpc_compression_algorithm_parse(const char *name, size_t name_length,
                                      grpc_compression_algorithm *algorithm) {
   /* we use strncmp not only because it's safer (even though in this case it
    * doesn't matter, given that we are comparing against string literals, but

@@ -61,8 +61,8 @@ static void add(const char *beg, const char *end, char ***ss, size_t *ns) {
   size_t np = n + 1;
   char *s = gpr_malloc(end - beg + 1);
   memcpy(s, beg, end - beg);
-  s[end-beg] = 0;
-  *ss = gpr_realloc(*ss, sizeof(char**) * np);
+  s[end - beg] = 0;
+  *ss = gpr_realloc(*ss, sizeof(char **) * np);
   (*ss)[n] = s;
   *ns = np;
 }
@@ -73,7 +73,7 @@ static void split(const char *s, char ***ss, size_t *ns) {
     add(s, s + strlen(s), ss, ns);
   } else {
     add(s, c, ss, ns);
-    split(c+1, ss, ns);
+    split(c + 1, ss, ns);
   }
 }

@@ -43,7 +43,8 @@
 #include <grpc/support/string_util.h>
 #include <grpc/support/useful.h>
 
-static void fill_common_header(const grpc_httpcli_request *request, gpr_strvec *buf) {
+static void fill_common_header(const grpc_httpcli_request *request,
+                               gpr_strvec *buf) {
   size_t i;
   gpr_strvec_add(buf, gpr_strdup(request->path));
   gpr_strvec_add(buf, gpr_strdup(" HTTP/1.0\r\n"));
@@ -52,7 +53,8 @@ static void fill_common_header(const grpc_httpcli_request *request,
   gpr_strvec_add(buf, gpr_strdup(request->host));
   gpr_strvec_add(buf, gpr_strdup("\r\n"));
   gpr_strvec_add(buf, gpr_strdup("Connection: close\r\n"));
-  gpr_strvec_add(buf, gpr_strdup("User-Agent: "GRPC_HTTPCLI_USER_AGENT"\r\n"));
+  gpr_strvec_add(buf,
+                 gpr_strdup("User-Agent: " GRPC_HTTPCLI_USER_AGENT "\r\n"));
   /* user supplied headers */
   for (i = 0; i < request->hdr_count; i++) {
     gpr_strvec_add(buf, gpr_strdup(request->hdrs[i].key));

@@ -105,8 +105,7 @@ void grpc_alarm_list_init(gpr_timespec now) {
 
 void grpc_alarm_list_shutdown(void) {
   int i;
-  while (run_some_expired_alarms(NULL, gpr_inf_future(g_clock_type), NULL,
-                                 0))
+  while (run_some_expired_alarms(NULL, gpr_inf_future(g_clock_type), NULL, 0))
     ;
   for (i = 0; i < NUM_SHARDS; i++) {
     shard_type *shard = &g_shards[i];

@@ -66,8 +66,8 @@ static void adjust_downwards(grpc_alarm **first, int i, int length,
     int next_i;
     if (left_child >= length) break;
     right_child = left_child + 1;
-    next_i =
-        right_child < length && gpr_time_cmp(first[left_child]->deadline,
-                                             first[right_child]->deadline) < 0
-            ? right_child
-            : left_child;
+    next_i = right_child < length &&
+                     gpr_time_cmp(first[left_child]->deadline,
+                                  first[right_child]->deadline) < 0
+                 ? right_child
+                 : left_child;

@@ -50,7 +50,8 @@ void grpc_endpoint_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) {
   ep->vtable->add_to_pollset(ep, pollset);
 }
 
-void grpc_endpoint_add_to_pollset_set(grpc_endpoint *ep, grpc_pollset_set *pollset_set) {
+void grpc_endpoint_add_to_pollset_set(grpc_endpoint *ep,
+                                      grpc_pollset_set *pollset_set) {
   ep->vtable->add_to_pollset_set(ep, pollset_set);
 }

@@ -103,7 +103,8 @@ void grpc_endpoint_destroy(grpc_endpoint *ep);
 /* Add an endpoint to a pollset, so that when the pollset is polled, events from
    this endpoint are considered */
 void grpc_endpoint_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset);
-void grpc_endpoint_add_to_pollset_set(grpc_endpoint *ep, grpc_pollset_set *pollset_set);
+void grpc_endpoint_add_to_pollset_set(grpc_endpoint *ep,
+                                      grpc_pollset_set *pollset_set);
 
 struct grpc_endpoint {
   const grpc_endpoint_vtable *vtable;

@@ -52,21 +52,26 @@ static void create_sockets(SOCKET sv[2]) {
   SOCKADDR_IN addr;
   int addr_len = sizeof(addr);
 
-  lst_sock = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, 0, WSA_FLAG_OVERLAPPED);
+  lst_sock = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, 0,
+                       WSA_FLAG_OVERLAPPED);
   GPR_ASSERT(lst_sock != INVALID_SOCKET);
 
   memset(&addr, 0, sizeof(addr));
   addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
   addr.sin_family = AF_INET;
-  GPR_ASSERT(bind(lst_sock, (struct sockaddr*)&addr, sizeof(addr)) != SOCKET_ERROR);
+  GPR_ASSERT(bind(lst_sock, (struct sockaddr *)&addr, sizeof(addr)) !=
+             SOCKET_ERROR);
   GPR_ASSERT(listen(lst_sock, SOMAXCONN) != SOCKET_ERROR);
-  GPR_ASSERT(getsockname(lst_sock, (struct sockaddr*)&addr, &addr_len) != SOCKET_ERROR);
+  GPR_ASSERT(getsockname(lst_sock, (struct sockaddr *)&addr, &addr_len) !=
+             SOCKET_ERROR);
 
-  cli_sock = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, 0, WSA_FLAG_OVERLAPPED);
+  cli_sock = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, 0,
+                       WSA_FLAG_OVERLAPPED);
   GPR_ASSERT(cli_sock != INVALID_SOCKET);
 
-  GPR_ASSERT(WSAConnect(cli_sock, (struct sockaddr*)&addr, addr_len, NULL, NULL, NULL, NULL) == 0);
-  svr_sock = accept(lst_sock, (struct sockaddr*)&addr, &addr_len);
+  GPR_ASSERT(WSAConnect(cli_sock, (struct sockaddr *)&addr, addr_len, NULL,
+                        NULL, NULL, NULL) == 0);
+  svr_sock = accept(lst_sock, (struct sockaddr *)&addr, &addr_len);
   GPR_ASSERT(svr_sock != INVALID_SOCKET);
 
   closesocket(lst_sock);
@@ -77,7 +82,8 @@ static void create_sockets(SOCKET sv[2]) {
   sv[0] = svr_sock;
 }
 
-grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name, size_t read_slice_size) {
+grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
+                                                   size_t read_slice_size) {
   SOCKET sv[2];
   grpc_endpoint_pair p;
   create_sockets(sv);

@@ -65,18 +65,17 @@ static void do_iocp_work() {
   LPOVERLAPPED overlapped;
   grpc_winsocket *socket;
   grpc_winsocket_callback_info *info;
-  void(*f)(void *, int) = NULL;
+  void (*f)(void *, int) = NULL;
   void *opaque = NULL;
-  success = GetQueuedCompletionStatus(g_iocp, &bytes,
-                                      &completion_key, &overlapped,
-                                      INFINITE);
+  success = GetQueuedCompletionStatus(g_iocp, &bytes, &completion_key,
+                                      &overlapped, INFINITE);
   /* success = 0 and overlapped = NULL means the deadline got attained.
      Which is impossible. since our wait time is +inf */
   GPR_ASSERT(success || overlapped);
   GPR_ASSERT(completion_key && overlapped);
   if (overlapped == &g_iocp_custom_overlap) {
     gpr_atm_full_fetch_add(&g_custom_events, -1);
-    if (completion_key == (ULONG_PTR) &g_iocp_kick_token) {
+    if (completion_key == (ULONG_PTR)&g_iocp_kick_token) {
       /* We were awoken from a kick. */
       return;
     }
@@ -84,7 +83,7 @@ static void do_iocp_work() {
     abort();
   }
-  socket = (grpc_winsocket*) completion_key;
+  socket = (grpc_winsocket *)completion_key;
   if (overlapped == &socket->write_info.overlapped) {
     info = &socket->write_info;
   } else if (overlapped == &socket->read_info.overlapped) {
@@ -121,8 +120,7 @@ static void do_iocp_work() {
 }
 
 static void iocp_loop(void *p) {
-  while (gpr_atm_acq_load(&g_orphans) ||
-         gpr_atm_acq_load(&g_custom_events) ||
+  while (gpr_atm_acq_load(&g_orphans) || gpr_atm_acq_load(&g_custom_events) ||
          !gpr_event_get(&g_shutdown_iocp)) {
     grpc_maybe_call_delayed_callbacks(NULL, 1);
     do_iocp_work();
@@ -134,8 +132,8 @@ static void iocp_loop(void *p) {
 void grpc_iocp_init(void) {
   gpr_thd_id id;
 
-  g_iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE,
-                                  NULL, (ULONG_PTR)NULL, 0);
+  g_iocp =
+      CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, (ULONG_PTR)NULL, 0);
   GPR_ASSERT(g_iocp);
 
   gpr_event_init(&g_iocp_done);
@@ -147,8 +145,7 @@ void grpc_iocp_kick(void) {
   BOOL success;
 
   gpr_atm_full_fetch_add(&g_custom_events, 1);
-  success = PostQueuedCompletionStatus(g_iocp, 0,
-                                       (ULONG_PTR) &g_iocp_kick_token,
-                                       &g_iocp_custom_overlap);
+  success = PostQueuedCompletionStatus(g_iocp, 0, (ULONG_PTR)&g_iocp_kick_token,
+                                       &g_iocp_custom_overlap);
   GPR_ASSERT(success);
 }
@@ -165,8 +162,8 @@ void grpc_iocp_shutdown(void) {
 void grpc_iocp_add_socket(grpc_winsocket *socket) {
   HANDLE ret;
   if (socket->added_to_iocp) return;
-  ret = CreateIoCompletionPort((HANDLE)socket->socket,
-                               g_iocp, (gpr_uintptr) socket, 0);
+  ret = CreateIoCompletionPort((HANDLE)socket->socket, g_iocp,
+                               (gpr_uintptr)socket, 0);
   if (!ret) {
     char *utf8_message = gpr_format_message(WSAGetLastError());
     gpr_log(GPR_ERROR, "Unable to add socket to iocp: %s", utf8_message);
@@ -189,7 +186,7 @@ void grpc_iocp_socket_orphan(grpc_winsocket *socket) {
      the callback now.
      -) The IOCP hasn't completed yet, and we're queuing it for later. */
 static void socket_notify_on_iocp(grpc_winsocket *socket,
-                                  void(*cb)(void *, int), void *opaque,
+                                  void (*cb)(void *, int), void *opaque,
                                   grpc_winsocket_callback_info *info) {
   int run_now = 0;
   GPR_ASSERT(!info->cb);
@@ -206,12 +203,12 @@ static void socket_notify_on_iocp(grpc_winsocket *socket,
 }
 
 void grpc_socket_notify_on_write(grpc_winsocket *socket,
-                                 void(*cb)(void *, int), void *opaque) {
+                                 void (*cb)(void *, int), void *opaque) {
   socket_notify_on_iocp(socket, cb, opaque, &socket->write_info);
 }
 
-void grpc_socket_notify_on_read(grpc_winsocket *socket,
-                                void(*cb)(void *, int), void *opaque) {
+void grpc_socket_notify_on_read(grpc_winsocket *socket, void (*cb)(void *, int),
+                                void *opaque) {
   socket_notify_on_iocp(socket, cb, opaque, &socket->read_info);
 }
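
For readers who do not live in the Windows port: the loop above is a standard I/O completion port consumer. A stripped-down sketch of the same GetQueuedCompletionStatus pattern follows (stand-alone Win32 code, not gRPC; error handling reduced to an assert):

#include <windows.h>
#include <cassert>

void DrainOneCompletion(HANDLE iocp) {
  DWORD bytes = 0;
  ULONG_PTR completion_key = 0;
  LPOVERLAPPED overlapped = NULL;
  // Blocks until some overlapped operation registered with `iocp` completes.
  BOOL success = GetQueuedCompletionStatus(iocp, &bytes, &completion_key,
                                           &overlapped, INFINITE);
  assert(success || overlapped);  // with an infinite wait, a timeout cannot happen
  // `completion_key` identifies the handle and `overlapped` the operation,
  // which is how do_iocp_work() above routes to read_info / write_info.
}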

@@ -44,10 +44,10 @@ void grpc_iocp_shutdown(void);
 void grpc_iocp_add_socket(grpc_winsocket *);
 void grpc_iocp_socket_orphan(grpc_winsocket *);
 
-void grpc_socket_notify_on_write(grpc_winsocket *, void(*cb)(void *, int success),
-                                 void *opaque);
+void grpc_socket_notify_on_write(grpc_winsocket *,
+                                 void (*cb)(void *, int success), void *opaque);
 
-void grpc_socket_notify_on_read(grpc_winsocket *, void(*cb)(void *, int success),
-                                void *opaque);
+void grpc_socket_notify_on_read(grpc_winsocket *,
+                                void (*cb)(void *, int success), void *opaque);
 
 #endif /* GRPC_INTERNAL_CORE_IOMGR_IOCP_WINDOWS_H */

@@ -234,8 +234,7 @@ static void multipoll_with_epoll_pollset_destroy(grpc_pollset *pollset) {
 }
 
 static const grpc_pollset_vtable multipoll_with_epoll_pollset = {
-    multipoll_with_epoll_pollset_add_fd,
-    multipoll_with_epoll_pollset_del_fd,
+    multipoll_with_epoll_pollset_add_fd, multipoll_with_epoll_pollset_del_fd,
     multipoll_with_epoll_pollset_maybe_work,
     multipoll_with_epoll_pollset_finish_shutdown,
     multipoll_with_epoll_pollset_destroy};

@@ -202,8 +202,7 @@ static void multipoll_with_poll_pollset_destroy(grpc_pollset *pollset) {
 }
 
 static const grpc_pollset_vtable multipoll_with_poll_pollset = {
-    multipoll_with_poll_pollset_add_fd,
-    multipoll_with_poll_pollset_del_fd,
+    multipoll_with_poll_pollset_add_fd, multipoll_with_poll_pollset_del_fd,
     multipoll_with_poll_pollset_maybe_work,
     multipoll_with_poll_pollset_finish_shutdown,
     multipoll_with_poll_pollset_destroy};

@@ -140,7 +140,7 @@ void grpc_pollset_init(grpc_pollset *pollset) {
 void grpc_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
   gpr_mu_lock(&pollset->mu);
   pollset->vtable->add_fd(pollset, fd, 1);
   /* the following (enabled only in debug) will reacquire and then release
      our lock - meaning that if the unlocking flag passed to del_fd above is
      not respected, the code will deadlock (in a way that we have a chance of
      debugging) */
@@ -153,7 +153,7 @@ void grpc_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
 void grpc_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd) {
   gpr_mu_lock(&pollset->mu);
   pollset->vtable->del_fd(pollset, fd, 1);
   /* the following (enabled only in debug) will reacquire and then release
      our lock - meaning that if the unlocking flag passed to del_fd above is
      not respected, the code will deadlock (in a way that we have a chance of
      debugging) */

@@ -102,7 +102,8 @@ void grpc_kick_drain(grpc_pollset *p);
    - longer than a millisecond polls are rounded up to the next nearest
      millisecond to avoid spinning
    - infinite timeouts are converted to -1 */
-int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline, gpr_timespec now);
+int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline,
+                                         gpr_timespec now);
 
 /* turn a pollset into a multipoller: platform specific */
 typedef void (*grpc_platform_become_multipoller_type)(grpc_pollset *pollset,

@@ -56,8 +56,7 @@ static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) {
     grpc_pollset_worker *w = p->root_worker.next;
     remove_worker(p, w);
     return w;
-  }
-  else {
+  } else {
     return NULL;
   }
 }
@@ -100,7 +99,8 @@ void grpc_pollset_destroy(grpc_pollset *pollset) {
   gpr_mu_destroy(&pollset->mu);
 }
 
-int grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker, gpr_timespec deadline) {
+int grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker,
+                      gpr_timespec deadline) {
   gpr_timespec now;
   int added_worker = 0;
   now = gpr_now(GPR_CLOCK_MONOTONIC);

@@ -105,10 +105,7 @@ grpc_resolved_addresses *grpc_blocking_resolve_address(
   s = getaddrinfo(host, port, &hints, &result);
   if (s != 0) {
     /* Retry if well-known service name is recognized */
-    char *svc[][2] = {
-      {"http", "80"},
-      {"https", "443"}
-    };
+    char *svc[][2] = {{"http", "80"}, {"https", "443"}};
     int i;
     for (i = 0; i < (int)(sizeof(svc) / sizeof(svc[0])); i++) {
       if (strcmp(port, svc[i][0]) == 0) {

@@ -206,7 +206,8 @@ int grpc_sockaddr_get_port(const struct sockaddr *addr) {
     case AF_UNIX:
       return 1;
     default:
-      gpr_log(GPR_ERROR, "Unknown socket family %d in grpc_sockaddr_get_port", addr->sa_family);
+      gpr_log(GPR_ERROR, "Unknown socket family %d in grpc_sockaddr_get_port",
+              addr->sa_family);
       return 0;
   }
 }
@@ -220,7 +221,8 @@ int grpc_sockaddr_set_port(const struct sockaddr *addr, int port) {
       ((struct sockaddr_in6 *)addr)->sin6_port = htons(port);
       return 1;
     default:
-      gpr_log(GPR_ERROR, "Unknown socket family %d in grpc_sockaddr_set_port", addr->sa_family);
+      gpr_log(GPR_ERROR, "Unknown socket family %d in grpc_sockaddr_set_port",
+              addr->sa_family);
       return 0;
   }
 }

@@ -54,7 +54,7 @@ typedef struct grpc_winsocket_callback_info {
   OVERLAPPED overlapped;
   /* The callback information for the pending operation. May be empty if the
      caller hasn't registered a callback yet. */
-  void(*cb)(void *opaque, int success);
+  void (*cb)(void *opaque, int success);
   void *opaque;
   /* A boolean to describe if the IO Completion Port got a notification for
      that operation. This will happen if the operation completed before the

@@ -264,7 +264,8 @@ void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *ep),
   ac->write_closure.cb_arg = ac;
 
   gpr_mu_lock(&ac->mu);
-  grpc_alarm_init(&ac->alarm, gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
+  grpc_alarm_init(&ac->alarm,
+                  gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
                   tc_on_alarm, ac, gpr_now(GPR_CLOCK_MONOTONIC));
   grpc_fd_notify_on_write(ac->fd, &ac->write_closure);
   gpr_mu_unlock(&ac->mu);

@@ -572,7 +572,8 @@ static void grpc_tcp_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) {
   grpc_pollset_add_fd(pollset, tcp->em_fd);
 }
 
-static void grpc_tcp_add_to_pollset_set(grpc_endpoint *ep, grpc_pollset_set *pollset_set) {
+static void grpc_tcp_add_to_pollset_set(grpc_endpoint *ep,
+                                        grpc_pollset_set *pollset_set) {
   grpc_tcp *tcp = (grpc_tcp *)ep;
   grpc_pollset_set_add_fd(pollset_set, tcp->em_fd);
 }

@@ -79,7 +79,8 @@ struct grpc_tcp_server {
   /* active port count: how many ports are actually still listening */
   int active_ports;
-  /* number of iomgr callbacks that have been explicitly scheduled during shutdown */
+  /* number of iomgr callbacks that have been explicitly scheduled during
+   * shutdown */
   int iomgr_callbacks_pending;
 
   /* all listening ports */
@@ -309,16 +310,15 @@ static void on_accept(void *arg, int from_iocp) {
   if (!sp->shutting_down) {
     peer_name_string = NULL;
     err = setsockopt(sock, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
-                     (char *)&sp->socket->socket,
-                     sizeof(sp->socket->socket));
+                     (char *)&sp->socket->socket, sizeof(sp->socket->socket));
     if (err) {
       char *utf8_message = gpr_format_message(WSAGetLastError());
       gpr_log(GPR_ERROR, "setsockopt error: %s", utf8_message);
       gpr_free(utf8_message);
     }
-    err = getpeername(sock, (struct sockaddr*)&peer_name, &peer_name_len);
+    err = getpeername(sock, (struct sockaddr *)&peer_name, &peer_name_len);
     if (!err) {
-      peer_name_string = grpc_sockaddr_to_uri((struct sockaddr*)&peer_name);
+      peer_name_string = grpc_sockaddr_to_uri((struct sockaddr *)&peer_name);
     } else {
       char *utf8_message = gpr_format_message(WSAGetLastError());
       gpr_log(GPR_ERROR, "getpeername error: %s", utf8_message);

@ -55,24 +55,22 @@ static int set_non_block(SOCKET sock) {
int status; int status;
unsigned long param = 1; unsigned long param = 1;
DWORD ret; DWORD ret;
-  status = WSAIoctl(sock, FIONBIO, &param, sizeof(param), NULL, 0, &ret,
-                    NULL, NULL);
+  status =
+      WSAIoctl(sock, FIONBIO, &param, sizeof(param), NULL, 0, &ret, NULL, NULL);
return status == 0; return status == 0;
} }
static int set_dualstack(SOCKET sock) { static int set_dualstack(SOCKET sock) {
int status; int status;
unsigned long param = 0; unsigned long param = 0;
-  status = setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY,
-                      (const char *) &param, sizeof(param));
+  status = setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, (const char *)&param,
+                      sizeof(param));
return status == 0; return status == 0;
} }
int grpc_tcp_prepare_socket(SOCKET sock) { int grpc_tcp_prepare_socket(SOCKET sock) {
-  if (!set_non_block(sock))
-    return 0;
-  if (!set_dualstack(sock))
-    return 0;
+  if (!set_non_block(sock)) return 0;
+  if (!set_dualstack(sock)) return 0;
   return 1;
} }
@ -100,9 +98,7 @@ typedef struct grpc_tcp {
char *peer_string; char *peer_string;
} grpc_tcp; } grpc_tcp;
-static void tcp_ref(grpc_tcp *tcp) {
-  gpr_ref(&tcp->refcount);
-}
+static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
static void tcp_unref(grpc_tcp *tcp) { static void tcp_unref(grpc_tcp *tcp) {
if (gpr_unref(&tcp->refcount)) { if (gpr_unref(&tcp->refcount)) {
@ -116,7 +112,7 @@ static void tcp_unref(grpc_tcp *tcp) {
/* Asynchronous callback from the IOCP, or the background thread. */ /* Asynchronous callback from the IOCP, or the background thread. */
static void on_read(void *tcpp, int from_iocp) { static void on_read(void *tcpp, int from_iocp) {
grpc_tcp *tcp = (grpc_tcp *) tcpp; grpc_tcp *tcp = (grpc_tcp *)tcpp;
grpc_winsocket *socket = tcp->socket; grpc_winsocket *socket = tcp->socket;
gpr_slice sub; gpr_slice sub;
gpr_slice *slice = NULL; gpr_slice *slice = NULL;
@ -175,9 +171,9 @@ static void on_read(void *tcpp, int from_iocp) {
cb(opaque, slice, nslices, status); cb(opaque, slice, nslices, status);
} }
-static void win_notify_on_read(grpc_endpoint *ep,
-                               grpc_endpoint_read_cb cb, void *arg) {
-  grpc_tcp *tcp = (grpc_tcp *) ep;
+static void win_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb,
+                               void *arg) {
+  grpc_tcp *tcp = (grpc_tcp *)ep;
grpc_winsocket *handle = tcp->socket; grpc_winsocket *handle = tcp->socket;
grpc_winsocket_callback_info *info = &handle->read_info; grpc_winsocket_callback_info *info = &handle->read_info;
int status; int status;
@ -201,8 +197,8 @@ static void win_notify_on_read(grpc_endpoint *ep,
buffer.buf = (char *)GPR_SLICE_START_PTR(tcp->read_slice); buffer.buf = (char *)GPR_SLICE_START_PTR(tcp->read_slice);
/* First let's try a synchronous, non-blocking read. */ /* First let's try a synchronous, non-blocking read. */
-  status = WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags,
-                   NULL, NULL);
+  status =
+      WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags, NULL, NULL);
info->wsa_error = status == 0 ? 0 : WSAGetLastError(); info->wsa_error = status == 0 ? 0 : WSAGetLastError();
/* Did we get data immediately ? Yay. */ /* Did we get data immediately ? Yay. */
@ -232,7 +228,7 @@ static void win_notify_on_read(grpc_endpoint *ep,
/* Asynchronous callback from the IOCP, or the background thread. */ /* Asynchronous callback from the IOCP, or the background thread. */
static void on_write(void *tcpp, int from_iocp) { static void on_write(void *tcpp, int from_iocp) {
grpc_tcp *tcp = (grpc_tcp *) tcpp; grpc_tcp *tcp = (grpc_tcp *)tcpp;
grpc_winsocket *handle = tcp->socket; grpc_winsocket *handle = tcp->socket;
grpc_winsocket_callback_info *info = &handle->write_info; grpc_winsocket_callback_info *info = &handle->write_info;
grpc_endpoint_cb_status status = GRPC_ENDPOINT_CB_OK; grpc_endpoint_cb_status status = GRPC_ENDPOINT_CB_OK;
@ -286,7 +282,7 @@ static grpc_endpoint_write_status win_write(grpc_endpoint *ep,
gpr_slice *slices, size_t nslices, gpr_slice *slices, size_t nslices,
grpc_endpoint_write_cb cb, grpc_endpoint_write_cb cb,
void *arg) { void *arg) {
grpc_tcp *tcp = (grpc_tcp *) ep; grpc_tcp *tcp = (grpc_tcp *)ep;
grpc_winsocket *socket = tcp->socket; grpc_winsocket *socket = tcp->socket;
grpc_winsocket_callback_info *info = &socket->write_info; grpc_winsocket_callback_info *info = &socket->write_info;
unsigned i; unsigned i;
@ -309,7 +305,7 @@ static grpc_endpoint_write_status win_write(grpc_endpoint *ep,
gpr_slice_buffer_addn(&tcp->write_slices, slices, nslices); gpr_slice_buffer_addn(&tcp->write_slices, slices, nslices);
if (tcp->write_slices.count > GPR_ARRAY_SIZE(local_buffers)) { if (tcp->write_slices.count > GPR_ARRAY_SIZE(local_buffers)) {
buffers = (WSABUF *) gpr_malloc(sizeof(WSABUF) * tcp->write_slices.count); buffers = (WSABUF *)gpr_malloc(sizeof(WSABUF) * tcp->write_slices.count);
allocated = buffers; allocated = buffers;
} }
@ -370,15 +366,15 @@ static grpc_endpoint_write_status win_write(grpc_endpoint *ep,
static void win_add_to_pollset(grpc_endpoint *ep, grpc_pollset *ps) { static void win_add_to_pollset(grpc_endpoint *ep, grpc_pollset *ps) {
grpc_tcp *tcp; grpc_tcp *tcp;
(void) ps; (void)ps;
tcp = (grpc_tcp *) ep; tcp = (grpc_tcp *)ep;
grpc_iocp_add_socket(tcp->socket); grpc_iocp_add_socket(tcp->socket);
} }
static void win_add_to_pollset_set(grpc_endpoint *ep, grpc_pollset_set *pss) { static void win_add_to_pollset_set(grpc_endpoint *ep, grpc_pollset_set *pss) {
grpc_tcp *tcp; grpc_tcp *tcp;
(void) pss; (void)pss;
tcp = (grpc_tcp *) ep; tcp = (grpc_tcp *)ep;
grpc_iocp_add_socket(tcp->socket); grpc_iocp_add_socket(tcp->socket);
} }
@ -389,7 +385,7 @@ static void win_add_to_pollset_set(grpc_endpoint *ep, grpc_pollset_set *pss) {
callback will happen from another thread, so we need to protect against callback will happen from another thread, so we need to protect against
concurrent access of the data structure in that regard. */ concurrent access of the data structure in that regard. */
static void win_shutdown(grpc_endpoint *ep) { static void win_shutdown(grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *) ep; grpc_tcp *tcp = (grpc_tcp *)ep;
int extra_refs = 0; int extra_refs = 0;
gpr_mu_lock(&tcp->mu); gpr_mu_lock(&tcp->mu);
/* At that point, what may happen is that we're already inside the IOCP /* At that point, what may happen is that we're already inside the IOCP
@ -401,7 +397,7 @@ static void win_shutdown(grpc_endpoint *ep) {
} }
static void win_destroy(grpc_endpoint *ep) { static void win_destroy(grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *) ep; grpc_tcp *tcp = (grpc_tcp *)ep;
tcp_unref(tcp); tcp_unref(tcp);
} }
@ -410,13 +406,12 @@ static char *win_get_peer(grpc_endpoint *ep) {
return gpr_strdup(tcp->peer_string); return gpr_strdup(tcp->peer_string);
} }
-static grpc_endpoint_vtable vtable = {win_notify_on_read, win_write,
-                                      win_add_to_pollset, win_add_to_pollset_set,
-                                      win_shutdown, win_destroy,
-                                      win_get_peer};
+static grpc_endpoint_vtable vtable = {
+    win_notify_on_read, win_write, win_add_to_pollset, win_add_to_pollset_set,
+    win_shutdown, win_destroy, win_get_peer};
grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string) { grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string) {
grpc_tcp *tcp = (grpc_tcp *) gpr_malloc(sizeof(grpc_tcp)); grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
memset(tcp, 0, sizeof(grpc_tcp)); memset(tcp, 0, sizeof(grpc_tcp));
tcp->base.vtable = &vtable; tcp->base.vtable = &vtable;
tcp->socket = socket; tcp->socket = socket;

@ -232,11 +232,11 @@ static int prepare_socket(int fd, const struct sockaddr *addr, int addr_len) {
} }
get_local_ip = 1; get_local_ip = 1;
-  rc = setsockopt(fd, IPPROTO_IP, IP_PKTINFO,
-                  &get_local_ip, sizeof(get_local_ip));
+  rc = setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &get_local_ip,
+                  sizeof(get_local_ip));
   if (rc == 0 && addr->sa_family == AF_INET6) {
-    rc = setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO,
-                    &get_local_ip, sizeof(get_local_ip));
+    rc = setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &get_local_ip,
+                    sizeof(get_local_ip));
} }
if (bind(fd, addr, addr_len) < 0) { if (bind(fd, addr, addr_len) < 0) {
@ -317,8 +317,8 @@ static int add_socket_to_server(grpc_udp_server *s, int fd,
return port; return port;
} }
-int grpc_udp_server_add_port(grpc_udp_server *s, const void *addr,
-                             int addr_len, grpc_udp_server_read_cb read_cb) {
+int grpc_udp_server_add_port(grpc_udp_server *s, const void *addr, int addr_len,
+                             grpc_udp_server_read_cb read_cb) {
int allocated_port1 = -1; int allocated_port1 = -1;
int allocated_port2 = -1; int allocated_port2 = -1;
unsigned i; unsigned i;

@ -76,7 +76,6 @@ static int eventfd_check_availability(void) {
 const grpc_wakeup_fd_vtable grpc_specialized_wakeup_fd_vtable = {
     eventfd_create, eventfd_consume, eventfd_wakeup, eventfd_destroy,
-    eventfd_check_availability
-};
+    eventfd_check_availability};
#endif /* GPR_LINUX_EVENTFD */ #endif /* GPR_LINUX_EVENTFD */

@ -43,12 +43,9 @@
#include "src/core/iomgr/wakeup_fd_posix.h" #include "src/core/iomgr/wakeup_fd_posix.h"
#include <stddef.h> #include <stddef.h>
-static int check_availability_invalid(void) {
-  return 0;
-}
+static int check_availability_invalid(void) { return 0; }
 const grpc_wakeup_fd_vtable grpc_specialized_wakeup_fd_vtable = {
-    NULL, NULL, NULL, NULL, check_availability_invalid
-};
+    NULL, NULL, NULL, NULL, check_availability_invalid};
#endif /* GPR_POSIX_NO_SPECIAL_WAKEUP_FD */ #endif /* GPR_POSIX_NO_SPECIAL_WAKEUP_FD */

@ -53,9 +53,7 @@ void grpc_wakeup_fd_global_init_force_fallback(void) {
wakeup_fd_vtable = &grpc_pipe_wakeup_fd_vtable; wakeup_fd_vtable = &grpc_pipe_wakeup_fd_vtable;
} }
-void grpc_wakeup_fd_global_destroy(void) {
-  wakeup_fd_vtable = NULL;
-}
+void grpc_wakeup_fd_global_destroy(void) { wakeup_fd_vtable = NULL; }
void grpc_wakeup_fd_init(grpc_wakeup_fd *fd_info) { void grpc_wakeup_fd_init(grpc_wakeup_fd *fd_info) {
wakeup_fd_vtable->init(fd_info); wakeup_fd_vtable->init(fd_info);

@ -51,8 +51,7 @@ static void json_reader_string_add_utf32(grpc_json_reader* reader,
reader->vtable->string_add_utf32(reader->userdata, utf32); reader->vtable->string_add_utf32(reader->userdata, utf32);
} }
-static gpr_uint32
-grpc_json_reader_read_char(grpc_json_reader* reader) {
+static gpr_uint32 grpc_json_reader_read_char(grpc_json_reader* reader) {
return reader->vtable->read_char(reader->userdata); return reader->vtable->read_char(reader->userdata);
} }
@ -61,8 +60,8 @@ static void json_reader_container_begins(grpc_json_reader* reader,
reader->vtable->container_begins(reader->userdata, type); reader->vtable->container_begins(reader->userdata, type);
} }
-static grpc_json_type
-grpc_json_reader_container_ends(grpc_json_reader* reader) {
+static grpc_json_type grpc_json_reader_container_ends(
+    grpc_json_reader* reader) {
return reader->vtable->container_ends(reader->userdata); return reader->vtable->container_ends(reader->userdata);
} }
@ -101,7 +100,8 @@ void grpc_json_reader_init(grpc_json_reader* reader,
} }
int grpc_json_reader_is_complete(grpc_json_reader* reader) { int grpc_json_reader_is_complete(grpc_json_reader* reader) {
-  return ((reader->depth == 0) && ((reader->state == GRPC_JSON_STATE_END) ||
-                                   (reader->state == GRPC_JSON_STATE_VALUE_END)));
+  return ((reader->depth == 0) &&
+          ((reader->state == GRPC_JSON_STATE_END) ||
+           (reader->state == GRPC_JSON_STATE_VALUE_END)));
} }
@ -143,7 +143,8 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
case GRPC_JSON_STATE_OBJECT_KEY_STRING: case GRPC_JSON_STATE_OBJECT_KEY_STRING:
case GRPC_JSON_STATE_VALUE_STRING: case GRPC_JSON_STATE_VALUE_STRING:
if (c != ' ') return GRPC_JSON_PARSE_ERROR; if (c != ' ') return GRPC_JSON_PARSE_ERROR;
-        if (reader->unicode_high_surrogate != 0) return GRPC_JSON_PARSE_ERROR;
+        if (reader->unicode_high_surrogate != 0)
+          return GRPC_JSON_PARSE_ERROR;
json_reader_string_add_char(reader, c); json_reader_string_add_char(reader, c);
break; break;
@ -169,7 +170,8 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
switch (reader->state) { switch (reader->state) {
case GRPC_JSON_STATE_OBJECT_KEY_STRING: case GRPC_JSON_STATE_OBJECT_KEY_STRING:
case GRPC_JSON_STATE_VALUE_STRING: case GRPC_JSON_STATE_VALUE_STRING:
-        if (reader->unicode_high_surrogate != 0) return GRPC_JSON_PARSE_ERROR;
+        if (reader->unicode_high_surrogate != 0)
+          return GRPC_JSON_PARSE_ERROR;
json_reader_string_add_char(reader, c); json_reader_string_add_char(reader, c);
break; break;
@ -253,7 +255,8 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
/* This is the \\ case. */ /* This is the \\ case. */
case GRPC_JSON_STATE_STRING_ESCAPE: case GRPC_JSON_STATE_STRING_ESCAPE:
-        if (reader->unicode_high_surrogate != 0) return GRPC_JSON_PARSE_ERROR;
+        if (reader->unicode_high_surrogate != 0)
+          return GRPC_JSON_PARSE_ERROR;
json_reader_string_add_char(reader, '\\'); json_reader_string_add_char(reader, '\\');
if (reader->escaped_string_was_key) { if (reader->escaped_string_was_key) {
reader->state = GRPC_JSON_STATE_OBJECT_KEY_STRING; reader->state = GRPC_JSON_STATE_OBJECT_KEY_STRING;
@ -276,7 +279,8 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
break; break;
case GRPC_JSON_STATE_OBJECT_KEY_STRING: case GRPC_JSON_STATE_OBJECT_KEY_STRING:
-        if (reader->unicode_high_surrogate != 0) return GRPC_JSON_PARSE_ERROR;
+        if (reader->unicode_high_surrogate != 0)
+          return GRPC_JSON_PARSE_ERROR;
if (c == '"') { if (c == '"') {
reader->state = GRPC_JSON_STATE_OBJECT_KEY_END; reader->state = GRPC_JSON_STATE_OBJECT_KEY_END;
json_reader_set_key(reader); json_reader_set_key(reader);
@ -288,7 +292,8 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
break; break;
case GRPC_JSON_STATE_VALUE_STRING: case GRPC_JSON_STATE_VALUE_STRING:
-        if (reader->unicode_high_surrogate != 0) return GRPC_JSON_PARSE_ERROR;
+        if (reader->unicode_high_surrogate != 0)
+          return GRPC_JSON_PARSE_ERROR;
if (c == '"') { if (c == '"') {
reader->state = GRPC_JSON_STATE_VALUE_END; reader->state = GRPC_JSON_STATE_VALUE_END;
json_reader_set_string(reader); json_reader_set_string(reader);
@ -438,7 +443,8 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
if (reader->unicode_high_surrogate == 0) if (reader->unicode_high_surrogate == 0)
return GRPC_JSON_PARSE_ERROR; return GRPC_JSON_PARSE_ERROR;
utf32 = 0x10000; utf32 = 0x10000;
-          utf32 += (gpr_uint32)((reader->unicode_high_surrogate - 0xd800) * 0x400);
+          utf32 += (gpr_uint32)(
+              (reader->unicode_high_surrogate - 0xd800) * 0x400);
utf32 += (gpr_uint32)(reader->unicode_char - 0xdc00); utf32 += (gpr_uint32)(reader->unicode_char - 0xdc00);
json_reader_string_add_utf32(reader, utf32); json_reader_string_add_utf32(reader, utf32);
reader->unicode_high_surrogate = 0; reader->unicode_high_surrogate = 0;

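The surrogate-pair handling reflowed above decodes a supplementary code point as 0x10000 + (high - 0xd800) * 0x400 + (low - 0xdc00). A minimal standalone check of that arithmetic, with an illustrative pair that is not part of the patch:

#include <assert.h>
#include <stdint.h>

/* Recombine a UTF-16 surrogate pair into a code point, mirroring the
   arithmetic in grpc_json_reader_run above. */
static uint32_t combine_surrogates(uint32_t high, uint32_t low) {
  uint32_t utf32 = 0x10000;
  utf32 += (high - 0xd800) * 0x400;
  utf32 += low - 0xdc00;
  return utf32;
}

int main(void) {
  /* U+1F600 is transmitted as the pair D83D DE00. */
  assert(combine_surrogates(0xd83d, 0xde00) == 0x1f600);
  return 0;
}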
@ -73,7 +73,6 @@ typedef struct {
size_t allocated; size_t allocated;
} json_writer_userdata; } json_writer_userdata;
/* This function checks if there's enough space left in the output buffer, /* This function checks if there's enough space left in the output buffer,
* and will enlarge it if necessary. We're only allocating chunks of 256 * and will enlarge it if necessary. We're only allocating chunks of 256
* bytes at a time (or multiples thereof). * bytes at a time (or multiples thereof).
@ -97,8 +96,8 @@ static void json_writer_output_char(void* userdata, char c) {
state->free_space--; state->free_space--;
} }
-static void json_writer_output_string_with_len(void* userdata,
-                                               const char* str, size_t len) {
+static void json_writer_output_string_with_len(void* userdata, const char* str,
+                                               size_t len) {
json_writer_userdata* state = userdata; json_writer_userdata* state = userdata;
json_writer_output_check(userdata, len); json_writer_output_check(userdata, len);
memcpy(state->output + state->string_len, str, len); memcpy(state->output + state->string_len, str, len);
@ -106,8 +105,7 @@ static void json_writer_output_string_with_len(void* userdata,
state->free_space -= len; state->free_space -= len;
} }
-static void json_writer_output_string(void* userdata,
-                                      const char* str) {
+static void json_writer_output_string(void* userdata, const char* str) {
size_t len = strlen(str); size_t len = strlen(str);
json_writer_output_string_with_len(userdata, str, len); json_writer_output_string_with_len(userdata, str, len);
} }
@ -184,8 +182,7 @@ static gpr_uint32 json_reader_read_char(void* userdata) {
/* Helper function to create a new grpc_json object and link it into /* Helper function to create a new grpc_json object and link it into
* our tree-in-progress inside our opaque structure. * our tree-in-progress inside our opaque structure.
*/ */
-static grpc_json* json_create_and_link(void* userdata,
-                                       grpc_json_type type) {
+static grpc_json* json_create_and_link(void* userdata, grpc_json_type type) {
json_reader_userdata* state = userdata; json_reader_userdata* state = userdata;
grpc_json* json = grpc_json_create(type); grpc_json* json = grpc_json_create(type);
@ -201,7 +198,7 @@ static grpc_json* json_create_and_link(void* userdata,
json->parent->child = json; json->parent->child = json;
} }
if (json->parent->type == GRPC_JSON_OBJECT) { if (json->parent->type == GRPC_JSON_OBJECT) {
json->key = (char*) state->key; json->key = (char*)state->key;
} }
} }
if (!state->top) { if (!state->top) {
@ -261,13 +258,13 @@ static void json_reader_set_key(void* userdata) {
static void json_reader_set_string(void* userdata) { static void json_reader_set_string(void* userdata) {
json_reader_userdata* state = userdata; json_reader_userdata* state = userdata;
grpc_json* json = json_create_and_link(userdata, GRPC_JSON_STRING); grpc_json* json = json_create_and_link(userdata, GRPC_JSON_STRING);
json->value = (char*) state->string; json->value = (char*)state->string;
} }
static int json_reader_set_number(void* userdata) { static int json_reader_set_number(void* userdata) {
json_reader_userdata* state = userdata; json_reader_userdata* state = userdata;
grpc_json* json = json_create_and_link(userdata, GRPC_JSON_NUMBER); grpc_json* json = json_create_and_link(userdata, GRPC_JSON_NUMBER);
json->value = (char*) state->string; json->value = (char*)state->string;
return 1; return 1;
} }
@ -287,32 +284,25 @@ static void json_reader_set_null(void* userdata) {
} }
 static grpc_json_reader_vtable reader_vtable = {
-    json_reader_string_clear,
-    json_reader_string_add_char,
-    json_reader_string_add_utf32,
-    json_reader_read_char,
-    json_reader_container_begins,
-    json_reader_container_ends,
-    json_reader_set_key,
-    json_reader_set_string,
-    json_reader_set_number,
-    json_reader_set_true,
-    json_reader_set_false,
-    json_reader_set_null
-};
+    json_reader_string_clear,     json_reader_string_add_char,
+    json_reader_string_add_utf32, json_reader_read_char,
+    json_reader_container_begins, json_reader_container_ends,
+    json_reader_set_key,          json_reader_set_string,
+    json_reader_set_number,       json_reader_set_true,
+    json_reader_set_false,        json_reader_set_null};
/* And finally, let's define our public API. */ /* And finally, let's define our public API. */
grpc_json* grpc_json_parse_string_with_len(char* input, size_t size) { grpc_json* grpc_json_parse_string_with_len(char* input, size_t size) {
grpc_json_reader reader; grpc_json_reader reader;
json_reader_userdata state; json_reader_userdata state;
grpc_json *json = NULL; grpc_json* json = NULL;
grpc_json_reader_status status; grpc_json_reader_status status;
if (!input) return NULL; if (!input) return NULL;
state.top = state.current_container = state.current_value = NULL; state.top = state.current_container = state.current_value = NULL;
state.string = state.key = NULL; state.string = state.key = NULL;
state.string_ptr = state.input = (gpr_uint8*) input; state.string_ptr = state.input = (gpr_uint8*)input;
state.remaining_input = size; state.remaining_input = size;
grpc_json_reader_init(&reader, &reader_vtable, &state); grpc_json_reader_init(&reader, &reader_vtable, &state);
@ -333,8 +323,8 @@ grpc_json* grpc_json_parse_string(char* input) {
return grpc_json_parse_string_with_len(input, UNBOUND_JSON_STRING_LENGTH); return grpc_json_parse_string_with_len(input, UNBOUND_JSON_STRING_LENGTH);
} }
-static void json_dump_recursive(grpc_json_writer* writer,
-                                grpc_json* json, int in_object) {
+static void json_dump_recursive(grpc_json_writer* writer, grpc_json* json,
+                                int in_object) {
while (json) { while (json) {
if (in_object) grpc_json_writer_object_key(writer, json->key); if (in_object) grpc_json_writer_object_key(writer, json->key);
@ -370,10 +360,8 @@ static void json_dump_recursive(grpc_json_writer* writer,
} }
 static grpc_json_writer_vtable writer_vtable = {
-    json_writer_output_char,
-    json_writer_output_string,
-    json_writer_output_string_with_len
-};
+    json_writer_output_char, json_writer_output_string,
+    json_writer_output_string_with_len};
char* grpc_json_dump_to_string(grpc_json* json, int indent) { char* grpc_json_dump_to_string(grpc_json* json, int indent) {
grpc_json_writer writer; grpc_json_writer writer;

@ -41,11 +41,13 @@ static void json_writer_output_char(grpc_json_writer* writer, char c) {
writer->vtable->output_char(writer->userdata, c); writer->vtable->output_char(writer->userdata, c);
} }
-static void json_writer_output_string(grpc_json_writer* writer, const char* str) {
+static void json_writer_output_string(grpc_json_writer* writer,
+                                      const char* str) {
writer->vtable->output_string(writer->userdata, str); writer->vtable->output_string(writer->userdata, str);
} }
-static void json_writer_output_string_with_len(grpc_json_writer* writer, const char* str, size_t len) {
+static void json_writer_output_string_with_len(grpc_json_writer* writer,
+                                               const char* str, size_t len) {
writer->vtable->output_string_with_len(writer->userdata, str, len); writer->vtable->output_string_with_len(writer->userdata, str, len);
} }
@ -58,8 +60,7 @@ void grpc_json_writer_init(grpc_json_writer* writer, int indent,
writer->userdata = userdata; writer->userdata = userdata;
} }
-static void json_writer_output_indent(
-    grpc_json_writer* writer) {
+static void json_writer_output_indent(grpc_json_writer* writer) {
static const char spacesstr[] = static const char spacesstr[] =
" " " "
" " " "
@ -99,14 +100,15 @@ static void json_writer_value_end(grpc_json_writer* writer) {
} }
} }
-static void json_writer_escape_utf16(grpc_json_writer* writer, gpr_uint16 utf16) {
+static void json_writer_escape_utf16(grpc_json_writer* writer,
+                                     gpr_uint16 utf16) {
static const char hex[] = "0123456789abcdef"; static const char hex[] = "0123456789abcdef";
json_writer_output_string_with_len(writer, "\\u", 2); json_writer_output_string_with_len(writer, "\\u", 2);
json_writer_output_char(writer, hex[(utf16 >> 12) & 0x0f]); json_writer_output_char(writer, hex[(utf16 >> 12) & 0x0f]);
json_writer_output_char(writer, hex[(utf16 >> 8) & 0x0f]); json_writer_output_char(writer, hex[(utf16 >> 8) & 0x0f]);
json_writer_output_char(writer, hex[(utf16 >> 4) & 0x0f]); json_writer_output_char(writer, hex[(utf16 >> 4) & 0x0f]);
json_writer_output_char(writer, hex[(utf16) & 0x0f]); json_writer_output_char(writer, hex[(utf16)&0x0f]);
} }
static void json_writer_escape_string(grpc_json_writer* writer, static void json_writer_escape_string(grpc_json_writer* writer,
@ -173,8 +175,8 @@ static void json_writer_escape_string(grpc_json_writer* writer,
* Any other range is technically reserved for future usage, so if we * Any other range is technically reserved for future usage, so if we
* don't want the software to break in the future, we have to allow * don't want the software to break in the future, we have to allow
* anything else. The first non-unicode character is 0x110000. */ * anything else. The first non-unicode character is 0x110000. */
-      if (((utf32 >= 0xd800) && (utf32 <= 0xdfff)) ||
-          (utf32 >= 0x110000)) break;
+      if (((utf32 >= 0xd800) && (utf32 <= 0xdfff)) || (utf32 >= 0x110000))
+        break;
if (utf32 >= 0x10000) { if (utf32 >= 0x10000) {
/* If utf32 contains a character that is above 0xffff, it needs to be /* If utf32 contains a character that is above 0xffff, it needs to be
* broken down into a utf-16 surrogate pair. A surrogate pair is first * broken down into a utf-16 surrogate pair. A surrogate pair is first
@ -194,7 +196,8 @@ static void json_writer_escape_string(grpc_json_writer* writer,
*/ */
utf32 -= 0x10000; utf32 -= 0x10000;
json_writer_escape_utf16(writer, (gpr_uint16)(0xd800 | (utf32 >> 10))); json_writer_escape_utf16(writer, (gpr_uint16)(0xd800 | (utf32 >> 10)));
-      json_writer_escape_utf16(writer, (gpr_uint16)(0xdc00 | (utf32 & 0x3ff)));
+      json_writer_escape_utf16(writer,
+                               (gpr_uint16)(0xdc00 | (utf32 & 0x3ff)));
} else { } else {
json_writer_escape_utf16(writer, (gpr_uint16)utf32); json_writer_escape_utf16(writer, (gpr_uint16)utf32);
} }
@ -204,7 +207,8 @@ static void json_writer_escape_string(grpc_json_writer* writer,
json_writer_output_char(writer, '"'); json_writer_output_char(writer, '"');
} }
-void grpc_json_writer_container_begins(grpc_json_writer* writer, grpc_json_type type) {
+void grpc_json_writer_container_begins(grpc_json_writer* writer,
+                                       grpc_json_type type) {
if (!writer->got_key) json_writer_value_end(writer); if (!writer->got_key) json_writer_value_end(writer);
json_writer_output_indent(writer); json_writer_output_indent(writer);
json_writer_output_char(writer, type == GRPC_JSON_OBJECT ? '{' : '['); json_writer_output_char(writer, type == GRPC_JSON_OBJECT ? '{' : '[');
@ -213,7 +217,8 @@ void grpc_json_writer_container_begins(grpc_json_writer* writer, grpc_json_type
writer->depth++; writer->depth++;
} }
-void grpc_json_writer_container_ends(grpc_json_writer* writer, grpc_json_type type) {
+void grpc_json_writer_container_ends(grpc_json_writer* writer,
+                                     grpc_json_type type) {
if (writer->indent && !writer->container_empty) if (writer->indent && !writer->container_empty)
json_writer_output_char(writer, '\n'); json_writer_output_char(writer, '\n');
writer->depth--; writer->depth--;
@ -238,14 +243,16 @@ void grpc_json_writer_value_raw(grpc_json_writer* writer, const char* string) {
writer->got_key = 0; writer->got_key = 0;
} }
-void grpc_json_writer_value_raw_with_len(grpc_json_writer* writer, const char* string, size_t len) {
+void grpc_json_writer_value_raw_with_len(grpc_json_writer* writer,
+                                         const char* string, size_t len) {
if (!writer->got_key) json_writer_value_end(writer); if (!writer->got_key) json_writer_value_end(writer);
json_writer_output_indent(writer); json_writer_output_indent(writer);
json_writer_output_string_with_len(writer, string, len); json_writer_output_string_with_len(writer, string, len);
writer->got_key = 0; writer->got_key = 0;
} }
-void grpc_json_writer_value_string(grpc_json_writer* writer, const char* string) {
+void grpc_json_writer_value_string(grpc_json_writer* writer,
+                                   const char* string) {
if (!writer->got_key) json_writer_value_end(writer); if (!writer->got_key) json_writer_value_end(writer);
json_writer_output_indent(writer); json_writer_output_indent(writer);
json_writer_escape_string(writer, string); json_writer_escape_string(writer, string);

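The escaping path reflowed above performs the inverse split: after subtracting 0x10000, the high surrogate is 0xd800 | (utf32 >> 10) and the low surrogate is 0xdc00 | (utf32 & 0x3ff). A small sketch of that split, illustrative only and not part of the patch:

#include <assert.h>
#include <stdint.h>

/* Split a code point above 0xffff into a UTF-16 surrogate pair, the same
   operation json_writer_escape_string performs before emitting \u escapes. */
static void split_surrogates(uint32_t utf32, uint16_t *high, uint16_t *low) {
  utf32 -= 0x10000;
  *high = (uint16_t)(0xd800 | (utf32 >> 10));
  *low = (uint16_t)(0xdc00 | (utf32 & 0x3ff));
}

int main(void) {
  uint16_t high, low;
  split_surrogates(0x1f600, &high, &low); /* U+1F600 */
  assert(high == 0xd83d);
  assert(low == 0xde00);
  return 0;
}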
@ -78,16 +78,20 @@ void grpc_json_writer_init(grpc_json_writer* writer, int indent,
grpc_json_writer_vtable* vtable, void* userdata); grpc_json_writer_vtable* vtable, void* userdata);
/* Signals the beginning of a container. */ /* Signals the beginning of a container. */
-void grpc_json_writer_container_begins(grpc_json_writer* writer, grpc_json_type type);
+void grpc_json_writer_container_begins(grpc_json_writer* writer,
+                                       grpc_json_type type);
/* Signals the end of a container. */ /* Signals the end of a container. */
-void grpc_json_writer_container_ends(grpc_json_writer* writer, grpc_json_type type);
+void grpc_json_writer_container_ends(grpc_json_writer* writer,
+                                     grpc_json_type type);
/* Writes down an object key for the next value. */ /* Writes down an object key for the next value. */
void grpc_json_writer_object_key(grpc_json_writer* writer, const char* string); void grpc_json_writer_object_key(grpc_json_writer* writer, const char* string);
/* Sets a raw value. Useful for numbers. */ /* Sets a raw value. Useful for numbers. */
void grpc_json_writer_value_raw(grpc_json_writer* writer, const char* string); void grpc_json_writer_value_raw(grpc_json_writer* writer, const char* string);
/* Sets a raw value with its length. Useful for values like true or false. */ /* Sets a raw value with its length. Useful for values like true or false. */
-void grpc_json_writer_value_raw_with_len(grpc_json_writer* writer, const char* string, size_t len);
+void grpc_json_writer_value_raw_with_len(grpc_json_writer* writer,
+                                         const char* string, size_t len);
/* Sets a string value. It'll be escaped, and utf-8 validated. */ /* Sets a string value. It'll be escaped, and utf-8 validated. */
-void grpc_json_writer_value_string(grpc_json_writer* writer, const char* string);
+void grpc_json_writer_value_string(grpc_json_writer* writer,
+                                   const char* string);
#endif /* GRPC_INTERNAL_CORE_JSON_JSON_WRITER_H */ #endif /* GRPC_INTERNAL_CORE_JSON_JSON_WRITER_H */

@ -200,7 +200,7 @@ static void auth_start_transport_op(grpc_call_element *elem,
channel_data *chand = elem->channel_data; channel_data *chand = elem->channel_data;
grpc_linked_mdelem *l; grpc_linked_mdelem *l;
size_t i; size_t i;
grpc_client_security_context* sec_ctx = NULL; grpc_client_security_context *sec_ctx = NULL;
if (calld->security_context_set == 0) { if (calld->security_context_set == 0) {
calld->security_context_set = 1; calld->security_context_set = 1;
@ -316,9 +316,11 @@ static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
(grpc_channel_security_connector *)GRPC_SECURITY_CONNECTOR_REF( (grpc_channel_security_connector *)GRPC_SECURITY_CONNECTOR_REF(
sc, "client_auth_filter"); sc, "client_auth_filter");
chand->md_ctx = metadata_context; chand->md_ctx = metadata_context;
-  chand->authority_string = grpc_mdstr_from_string(chand->md_ctx, ":authority", 0);
+  chand->authority_string =
+      grpc_mdstr_from_string(chand->md_ctx, ":authority", 0);
   chand->path_string = grpc_mdstr_from_string(chand->md_ctx, ":path", 0);
-  chand->error_msg_key = grpc_mdstr_from_string(chand->md_ctx, "grpc-message", 0);
+  chand->error_msg_key =
+      grpc_mdstr_from_string(chand->md_ctx, "grpc-message", 0);
chand->status_key = grpc_mdstr_from_string(chand->md_ctx, "grpc-status", 0); chand->status_key = grpc_mdstr_from_string(chand->md_ctx, "grpc-status", 0);
} }

@ -793,8 +793,8 @@ void on_simulated_token_fetch_done(void *user_data, int success) {
(grpc_credentials_metadata_request *)user_data; (grpc_credentials_metadata_request *)user_data;
grpc_md_only_test_credentials *c = (grpc_md_only_test_credentials *)r->creds; grpc_md_only_test_credentials *c = (grpc_md_only_test_credentials *)r->creds;
GPR_ASSERT(success); GPR_ASSERT(success);
-  r->cb(r->user_data, c->md_store->entries,
-        c->md_store->num_entries, GRPC_CREDENTIALS_OK);
+  r->cb(r->user_data, c->md_store->entries, c->md_store->num_entries,
+        GRPC_CREDENTIALS_OK);
grpc_credentials_metadata_request_destroy(r); grpc_credentials_metadata_request_destroy(r);
} }

@ -192,8 +192,9 @@ void grpc_flush_cached_google_default_credentials(void);
/* Metadata-only credentials with the specified key and value where /* Metadata-only credentials with the specified key and value where
asynchronicity can be simulated for testing. */ asynchronicity can be simulated for testing. */
-grpc_credentials *grpc_md_only_test_credentials_create(
-    const char *md_key, const char *md_value, int is_async);
+grpc_credentials *grpc_md_only_test_credentials_create(const char *md_key,
+                                                       const char *md_value,
+                                                       int is_async);
/* Private constructor for jwt credentials from an already parsed json key. /* Private constructor for jwt credentials from an already parsed json key.
Takes ownership of the key. */ Takes ownership of the key. */

@ -47,7 +47,8 @@ static void store_ensure_capacity(grpc_credentials_md_store *store) {
grpc_credentials_md_store *grpc_credentials_md_store_create( grpc_credentials_md_store *grpc_credentials_md_store_create(
size_t initial_capacity) { size_t initial_capacity) {
-  grpc_credentials_md_store *store = gpr_malloc(sizeof(grpc_credentials_md_store));
+  grpc_credentials_md_store *store =
+      gpr_malloc(sizeof(grpc_credentials_md_store));
memset(store, 0, sizeof(grpc_credentials_md_store)); memset(store, 0, sizeof(grpc_credentials_md_store));
if (initial_capacity > 0) { if (initial_capacity > 0) {
store->entries = gpr_malloc(initial_capacity * sizeof(grpc_credentials_md)); store->entries = gpr_malloc(initial_capacity * sizeof(grpc_credentials_md));
@ -98,4 +99,3 @@ void grpc_credentials_md_store_unref(grpc_credentials_md_store *store) {
gpr_free(store); gpr_free(store);
} }
} }

@ -203,8 +203,8 @@ end:
/* Blend with default ssl credentials and add a global reference so that it /* Blend with default ssl credentials and add a global reference so that it
can be cached and re-served. */ can be cached and re-served. */
grpc_credentials *ssl_creds = grpc_ssl_credentials_create(NULL, NULL); grpc_credentials *ssl_creds = grpc_ssl_credentials_create(NULL, NULL);
-  default_credentials = grpc_credentials_ref(grpc_composite_credentials_create(
-      ssl_creds, result));
+  default_credentials = grpc_credentials_ref(
+      grpc_composite_credentials_create(ssl_creds, result));
GPR_ASSERT(default_credentials != NULL); GPR_ASSERT(default_credentials != NULL);
grpc_credentials_unref(ssl_creds); grpc_credentials_unref(ssl_creds);
grpc_credentials_unref(result); grpc_credentials_unref(result);

@ -133,4 +133,3 @@ grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims *claims,
const char *audience); const char *audience);
#endif /* GRPC_INTERNAL_CORE_SECURITY_JWT_VERIFIER_H */ #endif /* GRPC_INTERNAL_CORE_SECURITY_JWT_VERIFIER_H */

@ -204,8 +204,7 @@ int grpc_auth_context_set_peer_identity_property_name(grpc_auth_context *ctx,
return 1; return 1;
} }
-int grpc_auth_context_peer_is_authenticated(
-    const grpc_auth_context *ctx) {
+int grpc_auth_context_peer_is_authenticated(const grpc_auth_context *ctx) {
return ctx->peer_identity_property_name == NULL ? 0 : 1; return ctx->peer_identity_property_name == NULL ? 0 : 1;
} }
@ -326,4 +325,3 @@ grpc_auth_metadata_processor *grpc_find_auth_metadata_processor_in_args(
} }
return NULL; return NULL;
} }

@ -113,4 +113,3 @@ grpc_auth_metadata_processor *grpc_find_auth_metadata_processor_in_args(
const grpc_channel_args *args); const grpc_channel_args *args);
#endif /* GRPC_INTERNAL_CORE_SECURITY_SECURITY_CONTEXT_H */ #endif /* GRPC_INTERNAL_CORE_SECURITY_SECURITY_CONTEXT_H */

@ -212,8 +212,7 @@ static void init_call_elem(grpc_call_element *elem,
} }
/* Destructor for call_data */ /* Destructor for call_data */
-static void destroy_call_elem(grpc_call_element *elem) {
-}
+static void destroy_call_elem(grpc_call_element *elem) {}
/* Constructor for channel_data */ /* Constructor for channel_data */
static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master, static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,

@ -60,8 +60,11 @@ static void delete_trace_obj(void* obj) {
} }
 static const census_ht_option ht_opt = {
-    CENSUS_HT_UINT64 /* key type*/, 571 /* n_of_buckets */, NULL /* hash */,
-    NULL /* compare_keys */, delete_trace_obj /* delete data */,
+    CENSUS_HT_UINT64 /* key type*/,
+    571 /* n_of_buckets */,
+    NULL /* hash */,
+    NULL /* compare_keys */,
+    delete_trace_obj /* delete data */,
     NULL /* delete key */
 };

@ -36,9 +36,7 @@
#ifdef GPR_CPU_IPHONE #ifdef GPR_CPU_IPHONE
/* Probably 2 instead of 1, but see comment on gpr_cpu_current_cpu. */ /* Probably 2 instead of 1, but see comment on gpr_cpu_current_cpu. */
-unsigned gpr_cpu_num_cores(void) {
-  return 1;
-}
+unsigned gpr_cpu_num_cores(void) { return 1; }
/* Most code that's using this is using it to shard across work queues. So /* Most code that's using this is using it to shard across work queues. So
unless profiling shows it's a problem or there appears a way to detect the unless profiling shows it's a problem or there appears a way to detect the
@ -46,8 +44,6 @@ unsigned gpr_cpu_num_cores(void) {
Note that the interface in cpu.h lets gpr_cpu_num_cores return 0, but doing Note that the interface in cpu.h lets gpr_cpu_num_cores return 0, but doing
it makes it impossible for gpr_cpu_current_cpu to satisfy its stated range, it makes it impossible for gpr_cpu_current_cpu to satisfy its stated range,
and some code might be relying on it. */ and some code might be relying on it. */
-unsigned gpr_cpu_current_cpu(void) {
-  return 0;
-}
+unsigned gpr_cpu_current_cpu(void) { return 0; }
#endif /* GPR_CPU_IPHONE */ #endif /* GPR_CPU_IPHONE */

@ -191,13 +191,16 @@ static double threshold_for_count_below(gpr_histogram *h, double count_below) {
break; break;
} }
} }
-    return (bucket_start(h, (double)lower_idx) + bucket_start(h, (double)upper_idx)) / 2.0;
+    return (bucket_start(h, (double)lower_idx) +
+            bucket_start(h, (double)upper_idx)) /
+           2.0;
} else { } else {
/* treat values as uniform throughout the bucket, and find where this value /* treat values as uniform throughout the bucket, and find where this value
should lie */ should lie */
lower_bound = bucket_start(h, (double)lower_idx); lower_bound = bucket_start(h, (double)lower_idx);
upper_bound = bucket_start(h, (double)(lower_idx + 1)); upper_bound = bucket_start(h, (double)(lower_idx + 1));
-    return GPR_CLAMP(upper_bound - (upper_bound - lower_bound) *
-                                       (count_so_far - count_below) /
-                                       h->buckets[lower_idx],
-                     h->min_seen, h->max_seen);
+    return GPR_CLAMP(upper_bound -
+                         (upper_bound - lower_bound) *
+                             (count_so_far - count_below) /
+                             h->buckets[lower_idx],
+                     h->min_seen, h->max_seen);

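The interpolation reflowed above treats samples as uniformly distributed inside a bucket: starting from the bucket's upper bound, it walks back by the fraction of the bucket's count that lies above the requested rank, then clamps to the observed min/max. A tiny sketch of that formula with made-up numbers, not the gpr_histogram API:

#include <assert.h>

#define CLAMP(x, lo, hi) ((x) < (lo) ? (lo) : ((x) > (hi) ? (hi) : (x)))

/* Linear interpolation inside one histogram bucket, mirroring
   threshold_for_count_below: count_so_far is the cumulative count through
   this bucket, count_below the requested rank. */
static double interpolate(double lower_bound, double upper_bound,
                          double bucket_count, double count_so_far,
                          double count_below, double min_seen,
                          double max_seen) {
  return CLAMP(upper_bound - (upper_bound - lower_bound) *
                                 (count_so_far - count_below) / bucket_count,
               min_seen, max_seen);
}

int main(void) {
  /* Bucket [10, 20) holds 4 samples, cumulative count reaches 10 here, and
     we ask for the threshold below which 8 samples fall: half a bucket back. */
  assert(interpolate(10.0, 20.0, 4.0, 10.0, 8.0, 0.0, 100.0) == 15.0);
  return 0;
}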
@ -284,7 +284,8 @@ gpr_slice gpr_slice_split_head(gpr_slice *source, size_t split) {
head.refcount = NULL; head.refcount = NULL;
head.data.inlined.length = (gpr_uint8)split; head.data.inlined.length = (gpr_uint8)split;
memcpy(head.data.inlined.bytes, source->data.inlined.bytes, split); memcpy(head.data.inlined.bytes, source->data.inlined.bytes, split);
-    source->data.inlined.length = (gpr_uint8)(source->data.inlined.length - split);
+    source->data.inlined.length =
+        (gpr_uint8)(source->data.inlined.length - split);
memmove(source->data.inlined.bytes, source->data.inlined.bytes + split, memmove(source->data.inlined.bytes, source->data.inlined.bytes + split,
source->data.inlined.length); source->data.inlined.length);
} else if (split < sizeof(head.data.inlined.bytes)) { } else if (split < sizeof(head.data.inlined.bytes)) {

@ -116,7 +116,8 @@ void gpr_slice_buffer_add(gpr_slice_buffer *sb, gpr_slice s) {
GPR_SLICE_INLINED_SIZE) { GPR_SLICE_INLINED_SIZE) {
memcpy(back->data.inlined.bytes + back->data.inlined.length, memcpy(back->data.inlined.bytes + back->data.inlined.length,
s.data.inlined.bytes, s.data.inlined.length); s.data.inlined.bytes, s.data.inlined.length);
-    back->data.inlined.length = (gpr_uint8)(back->data.inlined.length + s.data.inlined.length);
+    back->data.inlined.length =
+        (gpr_uint8)(back->data.inlined.length + s.data.inlined.length);
} else { } else {
size_t cp1 = GPR_SLICE_INLINED_SIZE - back->data.inlined.length; size_t cp1 = GPR_SLICE_INLINED_SIZE - back->data.inlined.length;
memcpy(back->data.inlined.bytes + back->data.inlined.length, memcpy(back->data.inlined.bytes + back->data.inlined.length,

@ -75,7 +75,7 @@ struct gpr_stack_lockfree {
#ifndef NDEBUG #ifndef NDEBUG
/* Bitmap of pushed entries to check for double-push or pop */ /* Bitmap of pushed entries to check for double-push or pop */
gpr_atm pushed[(INVALID_ENTRY_INDEX+1)/(8*sizeof(gpr_atm))]; gpr_atm pushed[(INVALID_ENTRY_INDEX + 1) / (8 * sizeof(gpr_atm))];
#endif #endif
}; };
@ -123,13 +123,13 @@ int gpr_stack_lockfree_push(gpr_stack_lockfree *stack, int entry) {
#ifndef NDEBUG #ifndef NDEBUG
/* Check for double push */ /* Check for double push */
{ {
int pushed_index = entry / (8*sizeof(gpr_atm)); int pushed_index = entry / (8 * sizeof(gpr_atm));
int pushed_bit = entry % (8*sizeof(gpr_atm)); int pushed_bit = entry % (8 * sizeof(gpr_atm));
gpr_atm old_val; gpr_atm old_val;
old_val = gpr_atm_no_barrier_fetch_add(&stack->pushed[pushed_index], old_val = gpr_atm_no_barrier_fetch_add(&stack->pushed[pushed_index],
(gpr_atm)(1UL << pushed_bit)); (gpr_atm)(1UL << pushed_bit));
GPR_ASSERT((old_val & (1UL<<pushed_bit)) == 0); GPR_ASSERT((old_val & (1UL << pushed_bit)) == 0);
} }
#endif #endif
@ -161,13 +161,13 @@ int gpr_stack_lockfree_pop(gpr_stack_lockfree *stack) {
#ifndef NDEBUG #ifndef NDEBUG
/* Check for valid pop */ /* Check for valid pop */
{ {
int pushed_index = head.contents.index / (8*sizeof(gpr_atm)); int pushed_index = head.contents.index / (8 * sizeof(gpr_atm));
int pushed_bit = head.contents.index % (8*sizeof(gpr_atm)); int pushed_bit = head.contents.index % (8 * sizeof(gpr_atm));
gpr_atm old_val; gpr_atm old_val;
old_val = gpr_atm_no_barrier_fetch_add(&stack->pushed[pushed_index], old_val = gpr_atm_no_barrier_fetch_add(&stack->pushed[pushed_index],
-(gpr_atm)(1UL << pushed_bit)); -(gpr_atm)(1UL << pushed_bit));
GPR_ASSERT((old_val & (1UL<<pushed_bit)) != 0); GPR_ASSERT((old_val & (1UL << pushed_bit)) != 0);
} }
#endif #endif

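The NDEBUG-only checks reflowed above keep one bit per stack entry in an array of gpr_atm words: entry / (8 * sizeof(word)) picks the word, entry % (8 * sizeof(word)) picks the bit, and the atomic fetch-and-add of 1UL << bit doubles as a test-and-set. A plain, non-atomic sketch of the same index math, for illustration only:

#include <assert.h>
#include <stddef.h>

#define BITS_PER_WORD (8 * sizeof(unsigned long))

/* One bit per entry, packed into unsigned longs, same word/bit split as the
   pushed[] bitmap above (without the gpr_atm atomics). */
static void mark_pushed(unsigned long *bitmap, size_t entry) {
  size_t word = entry / BITS_PER_WORD;
  size_t bit = entry % BITS_PER_WORD;
  assert((bitmap[word] & (1UL << bit)) == 0); /* would catch a double push */
  bitmap[word] |= 1UL << bit;
}

static void mark_popped(unsigned long *bitmap, size_t entry) {
  size_t word = entry / BITS_PER_WORD;
  size_t bit = entry % BITS_PER_WORD;
  assert((bitmap[word] & (1UL << bit)) != 0); /* would catch an invalid pop */
  bitmap[word] &= ~(1UL << bit);
}

int main(void) {
  unsigned long bitmap[4] = {0};
  mark_pushed(bitmap, 75);
  mark_popped(bitmap, 75);
  return 0;
}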
@ -125,7 +125,6 @@ char *gpr_dump_slice(gpr_slice s, gpr_uint32 flags) {
flags); flags);
} }
int gpr_parse_bytes_to_uint32(const char *buf, size_t len, gpr_uint32 *result) { int gpr_parse_bytes_to_uint32(const char *buf, size_t len, gpr_uint32 *result) {
gpr_uint32 out = 0; gpr_uint32 out = 0;
gpr_uint32 new; gpr_uint32 new;
@ -214,10 +213,8 @@ char *gpr_strjoin_sep(const char **strs, size_t nstrs, const char *sep,
* str. * str.
* *
* Returns 1 and updates \a begin and \a end. Returns 0 otherwise. */ * Returns 1 and updates \a begin and \a end. Returns 0 otherwise. */
-static int slice_find_separator_offset(const gpr_slice str,
-                                       const char *sep,
-                                       const size_t read_offset,
-                                       size_t *begin,
+static int slice_find_separator_offset(const gpr_slice str, const char *sep,
+                                       const size_t read_offset, size_t *begin,
                                        size_t *end) {
size_t i; size_t i;
const gpr_uint8 *str_ptr = GPR_SLICE_START_PTR(str) + read_offset; const gpr_uint8 *str_ptr = GPR_SLICE_START_PTR(str) + read_offset;
@ -255,9 +252,7 @@ void gpr_slice_split(gpr_slice str, const char *sep, gpr_slice_buffer *dst) {
} }
} }
-void gpr_strvec_init(gpr_strvec *sv) {
-  memset(sv, 0, sizeof(*sv));
-}
+void gpr_strvec_init(gpr_strvec *sv) { memset(sv, 0, sizeof(*sv)); }
void gpr_strvec_destroy(gpr_strvec *sv) { void gpr_strvec_destroy(gpr_strvec *sv) {
size_t i; size_t i;
@ -270,11 +265,11 @@ void gpr_strvec_destroy(gpr_strvec *sv) {
void gpr_strvec_add(gpr_strvec *sv, char *str) { void gpr_strvec_add(gpr_strvec *sv, char *str) {
if (sv->count == sv->capacity) { if (sv->count == sv->capacity) {
sv->capacity = GPR_MAX(sv->capacity + 8, sv->capacity * 2); sv->capacity = GPR_MAX(sv->capacity + 8, sv->capacity * 2);
sv->strs = gpr_realloc(sv->strs, sizeof(char*) * sv->capacity); sv->strs = gpr_realloc(sv->strs, sizeof(char *) * sv->capacity);
} }
sv->strs[sv->count++] = str; sv->strs[sv->count++] = str;
} }
char *gpr_strvec_flatten(gpr_strvec *sv, size_t *final_length) { char *gpr_strvec_flatten(gpr_strvec *sv, size_t *final_length) {
return gpr_strjoin((const char**)sv->strs, sv->count, final_length); return gpr_strjoin((const char **)sv->strs, sv->count, final_length);
} }

@ -99,13 +99,9 @@ LPSTR gpr_tchar_to_char(LPCTSTR input) {
return ret; return ret;
} }
#else #else
-char *gpr_tchar_to_char(LPTSTR input) {
-  return gpr_strdup(input);
-}
-char *gpr_char_to_tchar(LPTSTR input) {
-  return gpr_strdup(input);
-}
+char *gpr_tchar_to_char(LPTSTR input) { return gpr_strdup(input); }
+char *gpr_char_to_tchar(LPTSTR input) { return gpr_strdup(input); }
#endif #endif
#endif /* GPR_WIN32 */ #endif /* GPR_WIN32 */

@ -63,7 +63,8 @@ void gpr_cv_destroy(gpr_cv *cv) { GPR_ASSERT(pthread_cond_destroy(cv) == 0); }
int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline) { int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline) {
int err = 0; int err = 0;
-  if (gpr_time_cmp(abs_deadline, gpr_inf_future(abs_deadline.clock_type)) == 0) {
+  if (gpr_time_cmp(abs_deadline, gpr_inf_future(abs_deadline.clock_type)) ==
+      0) {
err = pthread_cond_wait(cv, mu); err = pthread_cond_wait(cv, mu);
} else { } else {
struct timespec abs_deadline_ts; struct timespec abs_deadline_ts;

@ -83,7 +83,8 @@ int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline) {
int timeout = 0; int timeout = 0;
DWORD timeout_max_ms; DWORD timeout_max_ms;
mu->locked = 0; mu->locked = 0;
-  if (gpr_time_cmp(abs_deadline, gpr_inf_future(abs_deadline.clock_type)) == 0) {
+  if (gpr_time_cmp(abs_deadline, gpr_inf_future(abs_deadline.clock_type)) ==
+      0) {
SleepConditionVariableCS(cv, &mu->cs, INFINITE); SleepConditionVariableCS(cv, &mu->cs, INFINITE);
} else { } else {
gpr_timespec now = gpr_now(abs_deadline.clock_type); gpr_timespec now = gpr_now(abs_deadline.clock_type);

@ -37,9 +37,7 @@
#include <grpc/support/thd.h> #include <grpc/support/thd.h>
-enum {
-  GPR_THD_JOINABLE = 1
-};
+enum { GPR_THD_JOINABLE = 1 };
gpr_thd_options gpr_thd_options_default(void) { gpr_thd_options gpr_thd_options_default(void) {
gpr_thd_options options; gpr_thd_options options;

@ -69,9 +69,11 @@ int gpr_thd_new(gpr_thd_id *t, void (*thd_body)(void *arg), void *arg,
GPR_ASSERT(pthread_attr_init(&attr) == 0); GPR_ASSERT(pthread_attr_init(&attr) == 0);
if (gpr_thd_options_is_detached(options)) { if (gpr_thd_options_is_detached(options)) {
-    GPR_ASSERT(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0);
+    GPR_ASSERT(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) ==
+               0);
   } else {
-    GPR_ASSERT(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE) == 0);
+    GPR_ASSERT(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE) ==
+               0);
} }
thread_started = (pthread_create(&p, &attr, &thread_body, a) == 0); thread_started = (pthread_create(&p, &attr, &thread_body, a) == 0);
GPR_ASSERT(pthread_attr_destroy(&attr) == 0); GPR_ASSERT(pthread_attr_destroy(&attr) == 0);
@ -82,12 +84,8 @@ int gpr_thd_new(gpr_thd_id *t, void (*thd_body)(void *arg), void *arg,
return thread_started; return thread_started;
} }
-gpr_thd_id gpr_thd_currentid(void) {
-  return (gpr_thd_id)pthread_self();
-}
-void gpr_thd_join(gpr_thd_id t) {
-  pthread_join((pthread_t)t, NULL);
-}
+gpr_thd_id gpr_thd_currentid(void) { return (gpr_thd_id)pthread_self(); }
+void gpr_thd_join(gpr_thd_id t) { pthread_join((pthread_t)t, NULL); }
#endif /* GPR_POSIX_SYNC */ #endif /* GPR_POSIX_SYNC */

@ -105,9 +105,7 @@ int gpr_thd_new(gpr_thd_id *t, void (*thd_body)(void *arg), void *arg,
return handle != NULL; return handle != NULL;
} }
-gpr_thd_id gpr_thd_currentid(void) {
-  return (gpr_thd_id)g_thd_info;
-}
+gpr_thd_id gpr_thd_currentid(void) { return (gpr_thd_id)g_thd_info; }
void gpr_thd_join(gpr_thd_id t) { void gpr_thd_join(gpr_thd_id t) {
struct thd_info *info = (struct thd_info *)t; struct thd_info *info = (struct thd_info *)t;

@ -315,5 +315,6 @@ gpr_timespec gpr_convert_clock_type(gpr_timespec t, gpr_clock_type clock_type) {
return gpr_time_add(gpr_now(clock_type), t); return gpr_time_add(gpr_now(clock_type), t);
} }
-  return gpr_time_add(gpr_now(clock_type), gpr_time_sub(t, gpr_now(t.clock_type)));
+  return gpr_time_add(gpr_now(clock_type),
+                      gpr_time_sub(t, gpr_now(t.clock_type)));
} }

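The conversion reflowed above re-anchors an absolute timestamp from one clock onto another: result = now(target_clock) + (t - now(source_clock)), so the distance from "now" is preserved even though the two clocks have different epochs. The same identity in plain arithmetic, illustrative only and not the gpr time API:

#include <assert.h>

/* Re-anchor an absolute time t_src (seconds on the source clock) onto a
   target clock, given current readings of both clocks. */
static double convert_clock(double t_src, double now_src, double now_target) {
  return now_target + (t_src - now_src);
}

int main(void) {
  /* A deadline 5 seconds ahead on one clock stays 5 seconds ahead on the
     other, whatever their absolute readings are. */
  assert(convert_clock(1005.0, 1000.0, 42.0) == 47.0);
  return 0;
}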
@ -38,7 +38,7 @@
#include <grpc/support/tls.h> #include <grpc/support/tls.h>
gpr_intptr gpr_tls_set(struct gpr_pthread_thread_local *tls, gpr_intptr value) { gpr_intptr gpr_tls_set(struct gpr_pthread_thread_local *tls, gpr_intptr value) {
GPR_ASSERT(0 == pthread_setspecific(tls->key, (void*)value)); GPR_ASSERT(0 == pthread_setspecific(tls->key, (void *)value));
return value; return value;
} }

@ -276,7 +276,8 @@ struct grpc_call {
/** completion events - for completion queue use */ /** completion events - for completion queue use */
grpc_cq_completion completions[MAX_CONCURRENT_COMPLETIONS]; grpc_cq_completion completions[MAX_CONCURRENT_COMPLETIONS];
-  /** siblings: children of the same parent form a list, and this list is protected under
+  /** siblings: children of the same parent form a list, and this list is
+      protected under
       parent->mu */
grpc_call *sibling_next; grpc_call *sibling_next;
grpc_call *sibling_prev; grpc_call *sibling_prev;
@ -398,7 +399,8 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
} else { } else {
call->sibling_next = parent_call->first_child; call->sibling_next = parent_call->first_child;
call->sibling_prev = parent_call->first_child->sibling_prev; call->sibling_prev = parent_call->first_child->sibling_prev;
-    call->sibling_next->sibling_prev = call->sibling_prev->sibling_next = call;
+    call->sibling_next->sibling_prev = call->sibling_prev->sibling_next =
+        call;
} }
gpr_mu_unlock(&parent_call->mu); gpr_mu_unlock(&parent_call->mu);
@ -536,9 +538,8 @@ grpc_compression_algorithm grpc_call_get_compression_algorithm(
return call->compression_algorithm; return call->compression_algorithm;
} }
-static void set_encodings_accepted_by_peer(grpc_call *call,
-                                           const gpr_slice accept_encoding_slice) {
+static void set_encodings_accepted_by_peer(
+    grpc_call *call, const gpr_slice accept_encoding_slice) {
size_t i; size_t i;
grpc_compression_algorithm algorithm; grpc_compression_algorithm algorithm;
gpr_slice_buffer accept_encoding_parts; gpr_slice_buffer accept_encoding_parts;
@ -1324,7 +1325,7 @@ grpc_call_error grpc_call_cancel_with_status(grpc_call *c,
const char *description, const char *description,
void *reserved) { void *reserved) {
grpc_call_error r; grpc_call_error r;
(void) reserved; (void)reserved;
lock(c); lock(c);
r = cancel_with_status(c, status, description); r = cancel_with_status(c, status, description);
unlock(c); unlock(c);

@ -41,7 +41,7 @@ int grpc_trace_batch = 0;
static void add_metadata(gpr_strvec *b, const grpc_metadata *md, size_t count) { static void add_metadata(gpr_strvec *b, const grpc_metadata *md, size_t count) {
size_t i; size_t i;
for(i = 0; i < count; i++) { for (i = 0; i < count; i++) {
gpr_strvec_add(b, gpr_strdup("\nkey=")); gpr_strvec_add(b, gpr_strdup("\nkey="));
gpr_strvec_add(b, gpr_strdup(md[i].key)); gpr_strvec_add(b, gpr_strdup(md[i].key));
@ -113,8 +113,9 @@ void grpc_call_log_batch(char *file, int line, gpr_log_severity severity,
char *tmp; char *tmp;
size_t i; size_t i;
   gpr_log(file, line, severity,
-          "grpc_call_start_batch(call=%p, ops=%p, nops=%d, tag=%p)", call, ops, nops, tag);
-  for(i = 0; i < nops; i++) {
+          "grpc_call_start_batch(call=%p, ops=%p, nops=%d, tag=%p)", call, ops,
+          nops, tag);
+  for (i = 0; i < nops; i++) {
tmp = grpc_op_string(&ops[i]); tmp = grpc_op_string(&ops[i]);
gpr_log(file, line, severity, "ops[%d]: %s", i, tmp); gpr_log(file, line, severity, "ops[%d]: %s", i, tmp);
gpr_free(tmp); gpr_free(tmp);
@ -123,8 +124,7 @@ void grpc_call_log_batch(char *file, int line, gpr_log_severity severity,
void grpc_server_log_request_call(char *file, int line, void grpc_server_log_request_call(char *file, int line,
gpr_log_severity severity, gpr_log_severity severity,
-                                  grpc_server *server,
-                                  grpc_call **call,
+                                  grpc_server *server, grpc_call **call,
grpc_call_details *details, grpc_call_details *details,
grpc_metadata_array *initial_metadata, grpc_metadata_array *initial_metadata,
grpc_completion_queue *cq_bound_to_call, grpc_completion_queue *cq_bound_to_call,
@ -133,8 +133,9 @@ void grpc_server_log_request_call(char *file, int line,
gpr_log(file, line, severity, gpr_log(file, line, severity,
"grpc_server_request_call(server=%p, call=%p, details=%p, " "grpc_server_request_call(server=%p, call=%p, details=%p, "
"initial_metadata=%p, cq_bound_to_call=%p, cq_for_notification=%p, " "initial_metadata=%p, cq_bound_to_call=%p, cq_for_notification=%p, "
"tag=%p)", server, call, details, initial_metadata, "tag=%p)",
cq_bound_to_call, cq_for_notification, tag); server, call, details, initial_metadata, cq_bound_to_call,
cq_for_notification, tag);
} }
void grpc_server_log_shutdown(char *file, int line, gpr_log_severity severity, void grpc_server_log_shutdown(char *file, int line, gpr_log_severity severity,

@ -179,10 +179,11 @@ grpc_call *grpc_channel_create_call(grpc_channel *channel,
grpc_mdelem_from_metadata_strings( grpc_mdelem_from_metadata_strings(
channel->metadata_context, GRPC_MDSTR_REF(channel->path_string), channel->metadata_context, GRPC_MDSTR_REF(channel->path_string),
grpc_mdstr_from_string(channel->metadata_context, method, 0)), grpc_mdstr_from_string(channel->metadata_context, method, 0)),
-      host ?
-          grpc_mdelem_from_metadata_strings(
-              channel->metadata_context, GRPC_MDSTR_REF(channel->authority_string),
-              grpc_mdstr_from_string(channel->metadata_context, host, 0)) : NULL,
+      host ? grpc_mdelem_from_metadata_strings(
+                 channel->metadata_context,
+                 GRPC_MDSTR_REF(channel->authority_string),
+                 grpc_mdstr_from_string(channel->metadata_context, host, 0))
+           : NULL,
deadline); deadline);
} }
@ -193,9 +194,12 @@ void *grpc_channel_register_call(grpc_channel *channel, const char *method,
rc->path = grpc_mdelem_from_metadata_strings( rc->path = grpc_mdelem_from_metadata_strings(
channel->metadata_context, GRPC_MDSTR_REF(channel->path_string), channel->metadata_context, GRPC_MDSTR_REF(channel->path_string),
grpc_mdstr_from_string(channel->metadata_context, method, 0)); grpc_mdstr_from_string(channel->metadata_context, method, 0));
-  rc->authority = host ? grpc_mdelem_from_metadata_strings(
-      channel->metadata_context, GRPC_MDSTR_REF(channel->authority_string),
-      grpc_mdstr_from_string(channel->metadata_context, host, 0)) : NULL;
+  rc->authority =
+      host ? grpc_mdelem_from_metadata_strings(
+                 channel->metadata_context,
+                 GRPC_MDSTR_REF(channel->authority_string),
+                 grpc_mdstr_from_string(channel->metadata_context, host, 0))
+           : NULL;
gpr_mu_lock(&channel->registered_call_mu); gpr_mu_lock(&channel->registered_call_mu);
rc->next = channel->registered_calls; rc->next = channel->registered_calls;
channel->registered_calls = rc; channel->registered_calls = rc;

@ -77,9 +77,10 @@ typedef struct {
} state_watcher; } state_watcher;
static void delete_state_watcher(state_watcher *w) { static void delete_state_watcher(state_watcher *w) {
-  grpc_channel_element *client_channel_elem =
-      grpc_channel_stack_last_element(grpc_channel_get_channel_stack(w->channel));
-  grpc_client_channel_del_interested_party(client_channel_elem, grpc_cq_pollset(w->cq));
+  grpc_channel_element *client_channel_elem = grpc_channel_stack_last_element(
+      grpc_channel_get_channel_stack(w->channel));
+  grpc_client_channel_del_interested_party(client_channel_elem,
+                                           grpc_cq_pollset(w->cq));
GRPC_CHANNEL_INTERNAL_UNREF(w->channel, "watch_connectivity"); GRPC_CHANNEL_INTERNAL_UNREF(w->channel, "watch_connectivity");
gpr_mu_destroy(&w->mu); gpr_mu_destroy(&w->mu);
gpr_free(w); gpr_free(w);
@ -166,8 +167,8 @@ void grpc_channel_watch_connectivity_state(
w->tag = tag; w->tag = tag;
w->channel = channel; w->channel = channel;
-  grpc_alarm_init(
-      &w->alarm, gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
-      timeout_complete, w, gpr_now(GPR_CLOCK_MONOTONIC));
+  grpc_alarm_init(&w->alarm,
+                  gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
+                  timeout_complete, w, gpr_now(GPR_CLOCK_MONOTONIC));
if (client_channel_elem->filter != &grpc_client_channel_filter) { if (client_channel_elem->filter != &grpc_client_channel_filter) {
@ -178,7 +179,8 @@ void grpc_channel_watch_connectivity_state(
grpc_iomgr_add_delayed_callback(&w->on_complete, 1); grpc_iomgr_add_delayed_callback(&w->on_complete, 1);
} else { } else {
GRPC_CHANNEL_INTERNAL_REF(channel, "watch_connectivity"); GRPC_CHANNEL_INTERNAL_REF(channel, "watch_connectivity");
-    grpc_client_channel_add_interested_party(client_channel_elem, grpc_cq_pollset(cq));
+    grpc_client_channel_add_interested_party(client_channel_elem,
+                                             grpc_cq_pollset(cq));
grpc_client_channel_watch_connectivity_state(client_channel_elem, &w->state, grpc_client_channel_watch_connectivity_state(client_channel_elem, &w->state,
&w->on_complete); &w->on_complete);
} }

@ -167,8 +167,7 @@ void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, int success,
} }
grpc_event grpc_completion_queue_next(grpc_completion_queue *cc, grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
-                                      gpr_timespec deadline,
-                                      void *reserved) {
+                                      gpr_timespec deadline, void *reserved) {
grpc_event ret; grpc_event ret;
grpc_pollset_worker worker; grpc_pollset_worker worker;
GPR_ASSERT(!reserved); GPR_ASSERT(!reserved);
@ -273,7 +272,8 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
} }
if (!add_plucker(cc, tag, &worker)) { if (!add_plucker(cc, tag, &worker)) {
gpr_log(GPR_DEBUG, gpr_log(GPR_DEBUG,
"Too many outstanding grpc_completion_queue_pluck calls: maximum is %d", "Too many outstanding grpc_completion_queue_pluck calls: maximum "
"is %d",
GRPC_MAX_COMPLETION_QUEUE_PLUCKERS); GRPC_MAX_COMPLETION_QUEUE_PLUCKERS);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset)); gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
memset(&ret, 0, sizeof(ret)); memset(&ret, 0, sizeof(ret));

@@ -33,5 +33,4 @@
 
 #include "src/core/surface/init.h"
 
-void grpc_security_pre_init(void) {
-}
+void grpc_security_pre_init(void) {}

@@ -712,7 +712,8 @@ static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
   chand->server = NULL;
   chand->channel = NULL;
   chand->path_key = grpc_mdstr_from_string(metadata_context, ":path", 0);
-  chand->authority_key = grpc_mdstr_from_string(metadata_context, ":authority", 0);
+  chand->authority_key =
+      grpc_mdstr_from_string(metadata_context, ":authority", 0);
   chand->next = chand->prev = chand;
   chand->registered_methods = NULL;
   chand->connectivity_state = GRPC_CHANNEL_IDLE;

@@ -38,7 +38,7 @@
 
 grpc_server *grpc_server_create(const grpc_channel_args *args, void *reserved) {
   const grpc_channel_filter *filters[] = {&grpc_compress_filter};
-  (void) reserved;
+  (void)reserved;
   return grpc_server_create_from_filters(filters, GPR_ARRAY_SIZE(filters),
                                          args);
 }

@@ -36,6 +36,4 @@
 
 #include <grpc/grpc.h>
 
-const char *grpc_version_string(void) {
-  return "0.10.1.0";
-}
+const char *grpc_version_string(void) { return "0.10.1.0"; }

@@ -179,8 +179,7 @@ void grpc_chttp2_publish_reads(
       stream_global->incoming_window -= stream_parsing->incoming_window_delta;
       GPR_ASSERT(stream_global->max_recv_bytes >=
                  stream_parsing->incoming_window_delta);
-      stream_global->max_recv_bytes -=
-          stream_parsing->incoming_window_delta;
+      stream_global->max_recv_bytes -= stream_parsing->incoming_window_delta;
       stream_parsing->incoming_window_delta = 0;
       grpc_chttp2_list_add_writable_stream(transport_global, stream_global);
     }

@@ -123,8 +123,7 @@ void grpc_chttp2_stream_map_move_into(grpc_chttp2_stream_map *src,
     dst->values = gpr_realloc(dst->values, dst->capacity * sizeof(void *));
   }
   memcpy(dst->keys + dst->count, src->keys, src->count * sizeof(gpr_uint32));
-  memcpy(dst->values + dst->count, src->values,
-         src->count * sizeof(void*));
+  memcpy(dst->values + dst->count, src->values, src->count * sizeof(void *));
   dst->count += src->count;
   dst->free += src->free;
   src->count = 0;

@@ -112,13 +112,18 @@ int grpc_chttp2_unlocking_check_writes(
       }
     }
-    if (!stream_global->read_closed && stream_global->unannounced_incoming_window > 0) {
-      stream_writing->announce_window = stream_global->unannounced_incoming_window;
-      GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("write", transport_global, stream_global,
-                                       incoming_window, stream_global->unannounced_incoming_window);
-      GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("write", transport_global, stream_global,
-                                       unannounced_incoming_window, -(gpr_int64)stream_global->unannounced_incoming_window);
-      stream_global->incoming_window += stream_global->unannounced_incoming_window;
+    if (!stream_global->read_closed &&
+        stream_global->unannounced_incoming_window > 0) {
+      stream_writing->announce_window =
+          stream_global->unannounced_incoming_window;
+      GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
+          "write", transport_global, stream_global, incoming_window,
+          stream_global->unannounced_incoming_window);
+      GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
+          "write", transport_global, stream_global, unannounced_incoming_window,
+          -(gpr_int64)stream_global->unannounced_incoming_window);
+      stream_global->incoming_window +=
+          stream_global->unannounced_incoming_window;
       stream_global->unannounced_incoming_window = 0;
       grpc_chttp2_list_add_incoming_window_updated(transport_global,
                                                    stream_global);
@@ -179,18 +184,20 @@ static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing) {
   while (
       grpc_chttp2_list_pop_writing_stream(transport_writing, &stream_writing)) {
-    if (stream_writing->sopb.nops > 0 || stream_writing->send_closed != GRPC_DONT_SEND_CLOSED) {
+    if (stream_writing->sopb.nops > 0 ||
+        stream_writing->send_closed != GRPC_DONT_SEND_CLOSED) {
       grpc_chttp2_encode(stream_writing->sopb.ops, stream_writing->sopb.nops,
                          stream_writing->send_closed != GRPC_DONT_SEND_CLOSED,
-                         stream_writing->id, &transport_writing->hpack_compressor,
+                         stream_writing->id,
+                         &transport_writing->hpack_compressor,
                          &transport_writing->outbuf);
       stream_writing->sopb.nops = 0;
     }
     if (stream_writing->announce_window > 0) {
       gpr_slice_buffer_add(
           &transport_writing->outbuf,
-          grpc_chttp2_window_update_create(
-              stream_writing->id, stream_writing->announce_window));
+          grpc_chttp2_window_update_create(stream_writing->id,
+                                           stream_writing->announce_window));
       stream_writing->announce_window = 0;
     }
     if (stream_writing->send_closed == GRPC_SEND_CLOSED_WITH_RST_STREAM) {

@@ -368,8 +368,7 @@ static int init_stream(grpc_transport *gt, grpc_stream *gs,
     s->global.outgoing_window =
         t->global.settings[GRPC_PEER_SETTINGS]
                           [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
-    s->global.max_recv_bytes =
-        s->parsing.incoming_window =
+    s->global.max_recv_bytes = s->parsing.incoming_window =
         s->global.incoming_window =
             t->global.settings[GRPC_SENT_SETTINGS]
                               [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
@@ -590,7 +589,6 @@ static void maybe_start_some_streams(
     grpc_chttp2_list_add_incoming_window_updated(transport_global,
                                                  stream_global);
     grpc_chttp2_list_add_writable_stream(transport_global, stream_global);
   }
-
   /* cancel out streams that will never be started */
   while (transport_global->next_stream_id >= MAX_CLIENT_STREAM_ID &&
@@ -648,12 +646,14 @@ static void perform_stream_op_locked(
     stream_global->publish_sopb->nops = 0;
     stream_global->publish_state = op->recv_state;
     if (stream_global->max_recv_bytes < op->max_recv_bytes) {
-      GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("op", transport_global, stream_global,
-                                       max_recv_bytes, op->max_recv_bytes - stream_global->max_recv_bytes);
+      GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
+          "op", transport_global, stream_global, max_recv_bytes,
+          op->max_recv_bytes - stream_global->max_recv_bytes);
       GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
           "op", transport_global, stream_global, unannounced_incoming_window,
           op->max_recv_bytes - stream_global->max_recv_bytes);
-      stream_global->unannounced_incoming_window += op->max_recv_bytes - stream_global->max_recv_bytes;
+      stream_global->unannounced_incoming_window +=
+          op->max_recv_bytes - stream_global->max_recv_bytes;
       stream_global->max_recv_bytes = op->max_recv_bytes;
     }
     grpc_chttp2_incoming_metadata_live_op_buffer_end(

@@ -133,7 +133,7 @@ static void unlock(grpc_mdctx *ctx) {
      case), since otherwise we can be stuck waiting for a garbage collection
      that will never happen. */
   if (ctx->refs == 0) {
     /* uncomment if you're having trouble diagnosing an mdelem leak to make
        things clearer (slows down destruction a lot, however) */
 #ifdef GRPC_METADATA_REFCOUNT_DEBUG
     gc_mdtab(ctx);
@@ -311,7 +311,8 @@ static void slice_unref(void *p) {
   unlock(ctx);
 }
 
-grpc_mdstr *grpc_mdstr_from_string(grpc_mdctx *ctx, const char *str, int canonicalize_key) {
+grpc_mdstr *grpc_mdstr_from_string(grpc_mdctx *ctx, const char *str,
+                                   int canonicalize_key) {
   if (canonicalize_key) {
     size_t len;
     size_t i;
@@ -522,8 +523,8 @@ grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_mdctx *ctx,
 grpc_mdelem *grpc_mdelem_from_strings(grpc_mdctx *ctx, const char *key,
                                       const char *value) {
-  return grpc_mdelem_from_metadata_strings(ctx,
-                                           grpc_mdstr_from_string(ctx, key, 0),
-                                           grpc_mdstr_from_string(ctx, value, 0));
+  return grpc_mdelem_from_metadata_strings(
+      ctx, grpc_mdstr_from_string(ctx, key, 0),
+      grpc_mdstr_from_string(ctx, value, 0));
 }

@@ -95,7 +95,8 @@ size_t grpc_mdctx_get_mdtab_free_test_only(grpc_mdctx *mdctx);
 
 /* Constructors for grpc_mdstr instances; take a variety of data types that
    clients may have handy */
-grpc_mdstr *grpc_mdstr_from_string(grpc_mdctx *ctx, const char *str, int perform_key_canonicalization);
+grpc_mdstr *grpc_mdstr_from_string(grpc_mdctx *ctx, const char *str,
+                                   int perform_key_canonicalization);
 /* Unrefs the slice. */
 grpc_mdstr *grpc_mdstr_from_slice(grpc_mdctx *ctx, gpr_slice slice);
 grpc_mdstr *grpc_mdstr_from_buffer(grpc_mdctx *ctx, const gpr_uint8 *str,

@@ -203,8 +203,8 @@ void grpc_metadata_batch_assert_ok(grpc_metadata_batch *batch) {
 #endif /* NDEBUG */
 
 void grpc_metadata_batch_init(grpc_metadata_batch *batch) {
-  batch->list.head = batch->list.tail = batch->garbage.head = batch->garbage.tail =
-      NULL;
+  batch->list.head = batch->list.tail = batch->garbage.head =
+      batch->garbage.tail = NULL;
   batch->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
 }

@@ -121,7 +121,7 @@ static void store32_little_endian(gpr_uint32 value, unsigned char* buf) {
   buf[3] = (unsigned char)(value >> 24) & 0xFF;
   buf[2] = (unsigned char)(value >> 16) & 0xFF;
   buf[1] = (unsigned char)(value >> 8) & 0xFF;
-  buf[0] = (unsigned char)(value) & 0xFF;
+  buf[0] = (unsigned char)(value)&0xFF;
 }
 
 static void tsi_fake_frame_reset(tsi_fake_frame* frame, int needs_draining) {
@@ -370,7 +370,8 @@ static void fake_protector_destroy(tsi_frame_protector* self) {
 
 static const tsi_frame_protector_vtable frame_protector_vtable = {
     fake_protector_protect, fake_protector_protect_flush,
-    fake_protector_unprotect, fake_protector_destroy, };
+    fake_protector_unprotect, fake_protector_destroy,
+};
 
 /* --- tsi_handshaker methods implementation. ---*/
@@ -393,7 +394,8 @@ static tsi_result fake_handshaker_get_bytes_to_send_to_peer(
     next_message_to_send = TSI_FAKE_HANDSHAKE_MESSAGE_MAX;
   }
   if (tsi_tracing_enabled) {
-    gpr_log(GPR_INFO, "%s prepared %s.", impl->is_client ? "Client" : "Server",
+    gpr_log(GPR_INFO, "%s prepared %s.",
+            impl->is_client ? "Client" : "Server",
             tsi_fake_handshake_message_to_string(impl->next_message_to_send));
   }
   impl->next_message_to_send = next_message_to_send;
@@ -493,7 +495,8 @@ static const tsi_handshaker_vtable handshaker_vtable = {
     fake_handshaker_get_result,
     fake_handshaker_extract_peer,
     fake_handshaker_create_frame_protector,
-    fake_handshaker_destroy, };
+    fake_handshaker_destroy,
+};
 
 tsi_handshaker* tsi_create_fake_handshaker(int is_client) {
   tsi_fake_handshaker* impl = calloc(1, sizeof(tsi_fake_handshaker));
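The store32_little_endian hunk above only splits an expression differently; for orientation, the 4-byte little-endian framing it implements can be sketched standalone as follows (the load32_le inverse and the main driver are illustrative, not part of this diff):

    #include <stdio.h>

    /* Lowest byte first, matching the layout written by store32_little_endian. */
    static void store32_le(unsigned int value, unsigned char *buf) {
      buf[0] = (unsigned char)(value) & 0xFF;
      buf[1] = (unsigned char)(value >> 8) & 0xFF;
      buf[2] = (unsigned char)(value >> 16) & 0xFF;
      buf[3] = (unsigned char)(value >> 24) & 0xFF;
    }

    /* Illustrative inverse: reassemble the value from the 4 bytes. */
    static unsigned int load32_le(const unsigned char *buf) {
      return (unsigned int)buf[0] | ((unsigned int)buf[1] << 8) |
             ((unsigned int)buf[2] << 16) | ((unsigned int)buf[3] << 24);
    }

    int main(void) {
      unsigned char header[4];
      store32_le(0x12345678u, header);
      printf("%x\n", load32_le(header)); /* prints 12345678 */
      return 0;
    }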

@@ -54,7 +54,6 @@
 #define TSI_SSL_MAX_PROTECTED_FRAME_SIZE_UPPER_BOUND 16384
 #define TSI_SSL_MAX_PROTECTED_FRAME_SIZE_LOWER_BOUND 1024
-
 /* Putting a macro like this and littering the source file with #if is really
    bad practice.
    TODO(jboeuf): refactor all the #if / #endif in a separate module. */
@@ -116,7 +115,7 @@ typedef struct {
 /* --- Library Initialization. ---*/
 
 static gpr_once init_openssl_once = GPR_ONCE_INIT;
-static gpr_mu *openssl_mutexes = NULL;
+static gpr_mu* openssl_mutexes = NULL;
 
 static void openssl_locking_cb(int mode, int type, const char* file, int line) {
   if (mode & CRYPTO_LOCK) {
@@ -195,7 +194,7 @@ static void ssl_info_callback(const SSL* ssl, int where, int ret) {
 /* Returns 1 if name looks like an IP address, 0 otherwise.
    This is a very rough heuristic as it does not handle IPV6 or things like:
    0300.0250.00.01, 0xC0.0Xa8.0x0.0x1, 000030052000001, 0xc0.052000001 */
-static int looks_like_ip_address(const char *name) {
+static int looks_like_ip_address(const char* name) {
   size_t i;
   size_t dot_count = 0;
   size_t num_size = 0;
@@ -215,7 +214,6 @@ static int looks_like_ip_address(const char *name) {
   return 1;
 }
 
-
 /* Gets the subject CN from an X509 cert. */
 static tsi_result ssl_get_x509_common_name(X509* cert, unsigned char** utf8,
                                            size_t* utf8_size) {
@@ -630,7 +628,8 @@ static tsi_result build_alpn_protocol_name_list(
   }
   /* Safety check. */
   if ((current < *protocol_name_list) ||
-      ((gpr_uintptr)(current - *protocol_name_list) != *protocol_name_list_length)) {
+      ((gpr_uintptr)(current - *protocol_name_list) !=
+       *protocol_name_list_length)) {
     return TSI_INTERNAL_ERROR;
   }
   return TSI_OK;
@@ -768,7 +767,8 @@ static void ssl_protector_destroy(tsi_frame_protector* self) {
 
 static const tsi_frame_protector_vtable frame_protector_vtable = {
     ssl_protector_protect, ssl_protector_protect_flush, ssl_protector_unprotect,
-    ssl_protector_destroy, };
+    ssl_protector_destroy,
+};
 
 /* --- tsi_handshaker methods implementation. ---*/
@@ -948,7 +948,8 @@ static const tsi_handshaker_vtable handshaker_vtable = {
     ssl_handshaker_get_result,
     ssl_handshaker_extract_peer,
    ssl_handshaker_create_frame_protector,
-    ssl_handshaker_destroy, };
+    ssl_handshaker_destroy,
+};
 
 /* --- tsi_ssl_handshaker_factory common methods. --- */
@@ -1075,9 +1076,11 @@ static void ssl_client_handshaker_factory_destroy(
   free(impl);
 }
 
-static int client_handshaker_factory_npn_callback(
-    SSL* ssl, unsigned char** out, unsigned char* outlen,
-    const unsigned char* in, unsigned int inlen, void* arg) {
+static int client_handshaker_factory_npn_callback(SSL* ssl, unsigned char** out,
+                                                  unsigned char* outlen,
+                                                  const unsigned char* in,
+                                                  unsigned int inlen,
+                                                  void* arg) {
   tsi_ssl_client_handshaker_factory* factory =
       (tsi_ssl_client_handshaker_factory*)arg;
   return select_protocol_list((const unsigned char**)out, outlen,
@@ -1121,7 +1124,7 @@ static void ssl_server_handshaker_factory_destroy(
 
 static int does_entry_match_name(const char* entry, size_t entry_length,
                                  const char* name) {
-  const char *dot;
+  const char* dot;
   const char* name_subdomain = NULL;
   size_t name_length = strlen(name);
   size_t name_subdomain_length;
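The looks_like_ip_address hunk above changes only pointer spacing; the heuristic its comment describes (digits and dots only, at most three digits per segment, exactly three dots) can be sketched standalone roughly as below. This is an illustration of the documented behaviour, not the function body from this file:

    #include <string.h>

    /* Rough dotted-quad check: digits and dots only, segments of at most three
       digits, exactly three dots. IPv6 and octal/hex forms are deliberately
       not handled, as the comment in the diff notes. */
    static int looks_like_ipv4_sketch(const char *name) {
      size_t i;
      size_t dot_count = 0;
      size_t num_size = 0;
      for (i = 0; i < strlen(name); i++) {
        if (name[i] >= '0' && name[i] <= '9') {
          if (++num_size > 3) return 0;
        } else if (name[i] == '.') {
          if (num_size == 0) return 0; /* reject empty segments */
          dot_count++;
          num_size = 0;
        } else {
          return 0;
        }
      }
      return dot_count == 3 && num_size > 0;
    }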

@@ -98,9 +98,8 @@ void Channel::PerformOpsOnCall(CallOpSetInterface* ops, Call* call) {
 }
 
 void* Channel::RegisterMethod(const char* method) {
-  return grpc_channel_register_call(c_channel_, method,
-                                    host_.empty() ? NULL : host_.c_str(),
-                                    nullptr);
+  return grpc_channel_register_call(
+      c_channel_, method, host_.empty() ? NULL : host_.c_str(), nullptr);
 }
 
 grpc_connectivity_state Channel::GetState(bool try_to_connect) {
@@ -117,6 +116,7 @@ class TagSaver GRPC_FINAL : public CompletionQueueTag {
     delete this;
    return true;
   }
+
  private:
   void* tag_;
 };

@@ -59,8 +59,7 @@ class Channel GRPC_FINAL : public GrpcLibrary, public ChannelInterface {
   void* RegisterMethod(const char* method) GRPC_OVERRIDE;
   Call CreateCall(const RpcMethod& method, ClientContext* context,
                   CompletionQueue* cq) GRPC_OVERRIDE;
-  void PerformOpsOnCall(CallOpSetInterface* ops,
-                        Call* call) GRPC_OVERRIDE;
+  void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) GRPC_OVERRIDE;
   grpc_connectivity_state GetState(bool try_to_connect) GRPC_OVERRIDE;

Some files were not shown because too many files have changed in this diff.