Merge github.com:grpc/grpc into shutdown-c++

pull/2977/head
Craig Tiller 9 years ago
commit 5c6e6276be
Changed files (number of changed lines in parentheses):
  1. include/grpc++/async_unary_call.h (4)
  2. include/grpc++/auth_context.h (2)
  3. include/grpc++/byte_buffer.h (4)
  4. include/grpc++/client_context.h (4)
  5. include/grpc++/dynamic_thread_pool.h (9)
  6. include/grpc++/generic_stub.h (4)
  7. include/grpc++/impl/call.h (24)
  8. include/grpc++/impl/grpc_library.h (1)
  9. include/grpc++/impl/serialization_traits.h (6)
  10. include/grpc++/impl/sync_no_cxx11.h (8)
  11. include/grpc++/impl/thd_no_cxx11.h (21)
  12. include/grpc++/server.h (5)
  13. include/grpc++/server_builder.h (13)
  14. include/grpc++/stream.h (9)
  15. include/grpc/grpc.h (9)
  16. include/grpc/status.h (2)
  17. include/grpc/support/alloc.h (2)
  18. include/grpc/support/atm.h (2)
  19. include/grpc/support/atm_gcc_atomic.h (2)
  20. include/grpc/support/atm_gcc_sync.h (2)
  21. include/grpc/support/atm_win32.h (37)
  22. include/grpc/support/cmdline.h (2)
  23. include/grpc/support/cpu.h (2)
  24. include/grpc/support/histogram.h (2)
  25. include/grpc/support/host_port.h (2)
  26. include/grpc/support/log.h (2)
  27. include/grpc/support/log_win32.h (2)
  28. include/grpc/support/port_platform.h (3)
  29. include/grpc/support/slice.h (2)
  30. include/grpc/support/string_util.h (2)
  31. include/grpc/support/subprocess.h (2)
  32. include/grpc/support/sync.h (2)
  33. include/grpc/support/sync_generic.h (14)
  34. include/grpc/support/sync_posix.h (2)
  35. include/grpc/support/sync_win32.h (2)
  36. include/grpc/support/thd.h (2)
  37. include/grpc/support/time.h (3)
  38. include/grpc/support/tls.h (4)
  39. include/grpc/support/tls_gcc.h (10)
  40. include/grpc/support/tls_msvc.h (10)
  41. include/grpc/support/useful.h (10)
  42. src/core/channel/census_filter.h (2)
  43. src/core/channel/client_channel.c (34)
  44. src/core/channel/client_channel.h (7)
  45. src/core/channel/compress_filter.c (41)
  46. src/core/channel/compress_filter.h (2)
  47. src/core/channel/http_client_filter.h (2)
  48. src/core/channel/http_server_filter.h (2)
  49. src/core/channel/noop_filter.h (2)
  50. src/core/client_config/resolvers/dns_resolver.c (3)
  51. src/core/client_config/resolvers/zookeeper_resolver.c (8)
  52. src/core/client_config/subchannel_factory_decorators/add_channel_arg.c (10)
  53. src/core/client_config/subchannel_factory_decorators/add_channel_arg.h (5)
  54. src/core/client_config/subchannel_factory_decorators/merge_channel_args.c (4)
  55. src/core/client_config/subchannel_factory_decorators/merge_channel_args.h (5)
  56. src/core/compression/algorithm.c (2)
  57. src/core/debug/trace.c (8)
  58. src/core/debug/trace.h (2)
  59. src/core/httpcli/format_request.c (6)
  60. src/core/httpcli/format_request.h (2)
  61. src/core/httpcli/parser.h (2)
  62. src/core/iomgr/alarm.c (5)
  63. src/core/iomgr/alarm.h (2)
  64. src/core/iomgr/alarm_heap.c (10)
  65. src/core/iomgr/alarm_heap.h (2)
  66. src/core/iomgr/alarm_internal.h (2)
  67. src/core/iomgr/endpoint.c (3)
  68. src/core/iomgr/endpoint.h (5)
  69. src/core/iomgr/endpoint_pair.h (2)
  70. src/core/iomgr/endpoint_pair_windows.c (20)
  71. src/core/iomgr/iocp_windows.c (35)
  72. src/core/iomgr/iocp_windows.h (10)
  73. src/core/iomgr/iomgr.h (2)
  74. src/core/iomgr/iomgr_internal.h (2)
  75. src/core/iomgr/iomgr_posix.c (2)
  76. src/core/iomgr/iomgr_posix.h (2)
  77. src/core/iomgr/iomgr_windows.c (2)
  78. src/core/iomgr/pollset_multipoller_with_epoll.c (3)
  79. src/core/iomgr/pollset_multipoller_with_poll_posix.c (5)
  80. src/core/iomgr/pollset_posix.c (16)
  81. src/core/iomgr/pollset_posix.h (3)
  82. src/core/iomgr/pollset_windows.c (10)
  83. src/core/iomgr/resolve_address.h (2)
  84. src/core/iomgr/resolve_address_posix.c (5)
  85. src/core/iomgr/sockaddr.h (2)
  86. src/core/iomgr/sockaddr_posix.h (2)
  87. src/core/iomgr/sockaddr_utils.c (6)
  88. src/core/iomgr/sockaddr_utils.h (2)
  89. src/core/iomgr/sockaddr_win32.h (2)
  90. src/core/iomgr/socket_utils_posix.h (2)
  91. src/core/iomgr/socket_windows.c (2)
  92. src/core/iomgr/socket_windows.h (4)
  93. src/core/iomgr/tcp_client.h (2)
  94. src/core/iomgr/tcp_client_posix.c (3)
  95. src/core/iomgr/tcp_posix.c (3)
  96. src/core/iomgr/tcp_posix.h (2)
  97. src/core/iomgr/tcp_server_windows.c (12)
  98. src/core/iomgr/tcp_windows.c (59)
  99. src/core/iomgr/tcp_windows.h (2)
  100. src/core/iomgr/time_averaged_stats.h (2)
Some files were not shown because too many files have changed in this diff.

@ -121,8 +121,8 @@ class ServerAsyncResponseWriter GRPC_FINAL
}
// The response is dropped if the status is not OK.
if (status.ok()) {
finish_buf_.ServerSendStatus(
ctx_->trailing_metadata_, finish_buf_.SendMessage(msg));
finish_buf_.ServerSendStatus(ctx_->trailing_metadata_,
finish_buf_.SendMessage(msg));
} else {
finish_buf_.ServerSendStatus(ctx_->trailing_metadata_, status);
}

@ -62,6 +62,7 @@ class AuthPropertyIterator
AuthPropertyIterator();
AuthPropertyIterator(const grpc_auth_property* property,
const grpc_auth_property_iterator* iter);
private:
friend class SecureAuthContext;
const grpc_auth_property* property_;
@ -92,4 +93,3 @@ class AuthContext {
} // namespace grpc
#endif // GRPCXX_AUTH_CONTEXT_H

@ -91,8 +91,8 @@ class SerializationTraits<ByteBuffer, void> {
dest->set_buffer(byte_buffer);
return Status::OK;
}
static Status Serialize(const ByteBuffer& source, grpc_byte_buffer** buffer,
bool* own_buffer) {
static Status Serialize(const ByteBuffer& source, grpc_byte_buffer** buffer,
bool* own_buffer) {
*buffer = source.buffer();
*own_buffer = false;
return Status::OK;

@ -185,7 +185,9 @@ class ClientContext {
// Get and set census context
void set_census_context(struct census_context* ccp) { census_context_ = ccp; }
struct census_context* census_context() const { return census_context_; }
struct census_context* census_context() const {
return census_context_;
}
void TryCancel();

@ -55,11 +55,12 @@ class DynamicThreadPool GRPC_FINAL : public ThreadPoolInterface {
private:
class DynamicThread {
public:
DynamicThread(DynamicThreadPool *pool);
public:
DynamicThread(DynamicThreadPool* pool);
~DynamicThread();
private:
DynamicThreadPool *pool_;
private:
DynamicThreadPool* pool_;
std::unique_ptr<grpc::thread> thd_;
void ThreadFunc();
};

@ -52,8 +52,8 @@ class GenericStub GRPC_FINAL {
// begin a call to a named method
std::unique_ptr<GenericClientAsyncReaderWriter> Call(
ClientContext* context, const grpc::string& method,
CompletionQueue* cq, void* tag);
ClientContext* context, const grpc::string& method, CompletionQueue* cq,
void* tag);
private:
std::shared_ptr<ChannelInterface> channel_;

@ -67,14 +67,10 @@ class WriteOptions {
WriteOptions(const WriteOptions& other) : flags_(other.flags_) {}
/// Clear all flags.
inline void Clear() {
flags_ = 0;
}
inline void Clear() { flags_ = 0; }
/// Returns raw flags bitset.
inline gpr_uint32 flags() const {
return flags_;
}
inline gpr_uint32 flags() const { return flags_; }
/// Sets flag for the disabling of compression for the next message write.
///
@ -122,9 +118,7 @@ class WriteOptions {
/// not go out on the wire immediately.
///
/// \sa GRPC_WRITE_BUFFER_HINT
inline bool get_buffer_hint() const {
return GetBit(GRPC_WRITE_BUFFER_HINT);
}
inline bool get_buffer_hint() const { return GetBit(GRPC_WRITE_BUFFER_HINT); }
WriteOptions& operator=(const WriteOptions& rhs) {
flags_ = rhs.flags_;
@ -132,17 +126,11 @@ class WriteOptions {
}
private:
void SetBit(const gpr_int32 mask) {
flags_ |= mask;
}
void SetBit(const gpr_int32 mask) { flags_ |= mask; }
void ClearBit(const gpr_int32 mask) {
flags_ &= ~mask;
}
void ClearBit(const gpr_int32 mask) { flags_ &= ~mask; }
bool GetBit(const gpr_int32 mask) const {
return flags_ & mask;
}
bool GetBit(const gpr_int32 mask) const { return flags_ & mask; }
gpr_uint32 flags_;
};
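For context, WriteOptions is a thin bit-set over the GRPC_WRITE_* flags and is consumed by WriterInterface::Write (see the stream.h hunk further down). A minimal usage sketch, not part of this diff, assuming set_buffer_hint() is the setter counterpart of the get_buffer_hint() accessor shown above:

    #include <grpc++/stream.h>

    // Hedged sketch: pass a buffer-hinted WriteOptions to a stream writer.
    // set_buffer_hint() is assumed; "writer" is any grpc::WriterInterface<W>.
    template <class W>
    bool BufferedWrite(grpc::WriterInterface<W>* writer, const W& msg) {
      grpc::WriteOptions options;
      options.set_buffer_hint();  // sets the GRPC_WRITE_BUFFER_HINT bit
      return writer->Write(msg, options);
    }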

@ -46,5 +46,4 @@ class GrpcLibrary {
} // namespace grpc
#endif // GRPCXX_IMPL_GRPC_LIBRARY_H

@ -37,12 +37,12 @@
namespace grpc {
/// Defines how to serialize and deserialize some type.
///
///
/// Used for hooking different message serialization API's into GRPC.
/// Each SerializationTraits implementation must provide the following
/// functions:
/// static Status Serialize(const Message& msg,
/// grpc_byte_buffer** buffer,
/// grpc_byte_buffer** buffer,
// bool* own_buffer);
/// static Status Deserialize(grpc_byte_buffer* buffer,
/// Message* msg,
@ -57,7 +57,7 @@ namespace grpc {
/// msg. max_message_size is passed in as a bound on the maximum number of
/// message bytes Deserialize should accept.
///
/// Both functions return a Status, allowing them to explain what went
/// Both functions return a Status, allowing them to explain what went
/// wrong if required.
template <class Message,
class UnusedButHereForPartialTemplateSpecialization = void>
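A minimal sketch, not part of this diff, of a user-supplied specialization following the Serialize/Deserialize contract documented above. RawMessage is a made-up type that already holds its wire form, so the bodies simply mirror the ByteBuffer specialization shown earlier:

    #include <grpc++/byte_buffer.h>
    #include <grpc++/impl/serialization_traits.h>

    // Hypothetical message type that already owns a raw byte buffer.
    struct RawMessage {
      grpc_byte_buffer* bb;
    };

    namespace grpc {
    template <>
    class SerializationTraits<RawMessage, void> {
     public:
      static Status Serialize(const RawMessage& source, grpc_byte_buffer** buffer,
                              bool* own_buffer) {
        *buffer = source.bb;   // hand the existing buffer to the library
        *own_buffer = false;   // caller retains ownership
        return Status::OK;
      }
      static Status Deserialize(grpc_byte_buffer* buffer, RawMessage* dest,
                                int max_message_size) {
        dest->bb = buffer;     // adopt the incoming buffer as-is (bound unused here)
        return Status::OK;
      }
    };
    }  // namespace grpc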

@ -38,7 +38,7 @@
namespace grpc {
template<class mutex>
template <class mutex>
class lock_guard;
class condition_variable;
@ -46,6 +46,7 @@ class mutex {
public:
mutex() { gpr_mu_init(&mu_); }
~mutex() { gpr_mu_destroy(&mu_); }
private:
::gpr_mu mu_;
template <class mutex>
@ -58,6 +59,7 @@ class lock_guard {
public:
lock_guard(mutex &mu) : mu_(mu), locked(true) { gpr_mu_lock(&mu.mu_); }
~lock_guard() { unlock_internal(); }
protected:
void lock_internal() {
if (!locked) gpr_mu_lock(&mu_.mu_);
@ -67,6 +69,7 @@ class lock_guard {
if (locked) gpr_mu_unlock(&mu_.mu_);
locked = false;
}
private:
mutex &mu_;
bool locked;
@ -76,7 +79,7 @@ class lock_guard {
template <class mutex>
class unique_lock : public lock_guard<mutex> {
public:
unique_lock(mutex &mu) : lock_guard<mutex>(mu) { }
unique_lock(mutex &mu) : lock_guard<mutex>(mu) {}
void lock() { this->lock_internal(); }
void unlock() { this->unlock_internal(); }
};
@ -92,6 +95,7 @@ class condition_variable {
}
void notify_one() { gpr_cv_signal(&cv_); }
void notify_all() { gpr_cv_broadcast(&cv_); }
private:
gpr_cv cv_;
};
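A minimal sketch, not part of this diff, tying together the pre-C++11 shims declared above (grpc::mutex, grpc::lock_guard, grpc::condition_variable); in real code this header is normally reached through the grpc++ config/sync headers rather than included directly:

    #include <grpc++/impl/sync_no_cxx11.h>

    // Hedged sketch: a counter guarded by the grpc:: locking shims.
    static grpc::mutex g_mu;
    static grpc::condition_variable g_cv;
    static int g_pending = 0;

    void AddPending() {
      grpc::lock_guard<grpc::mutex> lock(g_mu);  // gpr_mu_lock in ctor, unlock in dtor
      ++g_pending;
      g_cv.notify_one();  // gpr_cv_signal, as shown above
    }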

@ -40,7 +40,8 @@ namespace grpc {
class thread {
public:
template<class T> thread(void (T::*fptr)(), T *obj) {
template <class T>
thread(void (T::*fptr)(), T *obj) {
func_ = new thread_function<T>(fptr, obj);
joined_ = false;
start();
@ -53,28 +54,28 @@ class thread {
gpr_thd_join(thd_);
joined_ = true;
}
private:
void start() {
gpr_thd_options options = gpr_thd_options_default();
gpr_thd_options_set_joinable(&options);
gpr_thd_new(&thd_, thread_func, (void *) func_, &options);
gpr_thd_new(&thd_, thread_func, (void *)func_, &options);
}
static void thread_func(void *arg) {
thread_function_base *func = (thread_function_base *) arg;
thread_function_base *func = (thread_function_base *)arg;
func->call();
}
class thread_function_base {
public:
virtual ~thread_function_base() { }
virtual ~thread_function_base() {}
virtual void call() = 0;
};
template<class T>
template <class T>
class thread_function : public thread_function_base {
public:
thread_function(void (T::*fptr)(), T *obj)
: fptr_(fptr)
, obj_(obj) { }
thread_function(void (T::*fptr)(), T *obj) : fptr_(fptr), obj_(obj) {}
virtual void call() { (obj_->*fptr_)(); }
private:
void (T::*fptr_)();
T *obj_;
@ -84,8 +85,8 @@ class thread {
bool joined_;
// Disallow copy and assign.
thread(const thread&);
void operator=(const thread&);
thread(const thread &);
void operator=(const thread &);
};
} // namespace grpc
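The constructor shown above takes a pointer-to-member plus an object and runs (obj->*fptr)() on a new gpr thread. A minimal sketch, not part of this diff, with a made-up Worker type (join() is assumed from the gpr_thd_join call visible in the hunk):

    #include <grpc++/impl/thd_no_cxx11.h>

    class Worker {
     public:
      void Run() { /* do work on the new thread */ }
    };

    void SpawnAndJoin() {
      Worker w;
      grpc::thread t(&Worker::Run, &w);  // starts immediately via gpr_thd_new
      t.join();                          // wait before w and t leave scope
    }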

@ -90,8 +90,9 @@ class Server GRPC_FINAL : public GrpcLibrary, private CallHook {
int max_message_size);
// Register a service. This call does not take ownership of the service.
// The service must exist for the lifetime of the Server instance.
bool RegisterService(const grpc::string *host, RpcService* service);
bool RegisterAsyncService(const grpc::string *host, AsynchronousService* service);
bool RegisterService(const grpc::string* host, RpcService* service);
bool RegisterAsyncService(const grpc::string* host,
AsynchronousService* service);
void RegisterAsyncGenericService(AsyncGenericService* service);
// Add a listening port. Can be called multiple times.
int AddListeningPort(const grpc::string& addr, ServerCredentials* creds);

@ -76,15 +76,14 @@ class ServerBuilder {
// The service must exist for the lifetime of the Server instance returned by
// BuildAndStart().
// Only matches requests with :authority \a host
void RegisterService(const grpc::string& host,
SynchronousService* service);
void RegisterService(const grpc::string& host, SynchronousService* service);
// Register an asynchronous service.
// This call does not take ownership of the service or completion queue.
// The service and completion queue must exist for the lifetime of the Server

// instance returned by BuildAndStart().
// Only matches requests with :authority \a host
void RegisterAsyncService(const grpc::string& host,
void RegisterAsyncService(const grpc::string& host,
AsynchronousService* service);
// Set max message size in bytes.
@ -117,9 +116,10 @@ class ServerBuilder {
};
typedef std::unique_ptr<grpc::string> HostString;
template <class T> struct NamedService {
template <class T>
struct NamedService {
explicit NamedService(T* s) : service(s) {}
NamedService(const grpc::string& h, T *s)
NamedService(const grpc::string& h, T* s)
: host(new grpc::string(h)), service(s) {}
HostString host;
T* service;
@ -127,7 +127,8 @@ class ServerBuilder {
int max_message_size_;
std::vector<std::unique_ptr<NamedService<RpcService>>> services_;
std::vector<std::unique_ptr<NamedService<AsynchronousService>>> async_services_;
std::vector<std::unique_ptr<NamedService<AsynchronousService>>>
async_services_;
std::vector<Port> ports_;
std::vector<ServerCompletionQueue*> cqs_;
std::shared_ptr<ServerCredentials> creds_;

@ -85,9 +85,7 @@ class WriterInterface {
// Returns false when the stream has been closed.
virtual bool Write(const W& msg, const WriteOptions& options) = 0;
inline bool Write(const W& msg) {
return Write(msg, WriteOptions());
}
inline bool Write(const W& msg) { return Write(msg, WriteOptions()); }
};
template <class R>
@ -640,9 +638,8 @@ class ServerAsyncReader GRPC_FINAL : public ServerAsyncStreamingInterface,
}
// The response is dropped if the status is not OK.
if (status.ok()) {
finish_ops_.ServerSendStatus(
ctx_->trailing_metadata_,
finish_ops_.SendMessage(msg));
finish_ops_.ServerSendStatus(ctx_->trailing_metadata_,
finish_ops_.SendMessage(msg));
} else {
finish_ops_.ServerSendStatus(ctx_->trailing_metadata_, status);
}

@ -378,11 +378,11 @@ typedef struct grpc_op {
/** Registers a plugin to be initialized and destroyed with the library.
The \a init and \a destroy functions will be invoked as part of
\a grpc_init() and \a grpc_shutdown(), respectively.
The \a init and \a destroy functions will be invoked as part of
\a grpc_init() and \a grpc_shutdown(), respectively.
Note that these functions can be invoked an arbitrary number of times
(and hence so will \a init and \a destroy).
It is safe to pass NULL to either argument. Plugins are destroyed in
It is safe to pass NULL to either argument. Plugins are destroyed in
the reverse order they were initialized. */
void grpc_register_plugin(void (*init)(void), void (*destroy)(void));
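A minimal sketch, not part of this diff, of the registration pattern the comment above describes; the my_plugin_* hooks are made up, and registration happens before grpc_init() so both hooks run inside init/shutdown:

    #include <grpc/grpc.h>

    static void my_plugin_init(void) { /* set up hypothetical plugin state */ }
    static void my_plugin_destroy(void) { /* tear it down */ }

    int main(void) {
      grpc_register_plugin(my_plugin_init, my_plugin_destroy);
      grpc_init();      /* invokes my_plugin_init */
      /* ... use the library ... */
      grpc_shutdown();  /* invokes my_plugin_destroy; plugins unwind in reverse order */
      return 0;
    }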
@ -629,8 +629,7 @@ grpc_call_error grpc_server_request_registered_call(
be specified with args. If no additional configuration is needed, args can
be NULL. See grpc_channel_args for more. The data in 'args' need only live
through the invocation of this function. */
grpc_server *grpc_server_create(const grpc_channel_args *args,
void *reserved);
grpc_server *grpc_server_create(const grpc_channel_args *args, void *reserved);
/** Register a completion queue with the server. Must be done for any
notification completion queue that is passed to grpc_server_request_*_call

@ -160,4 +160,4 @@ typedef enum {
}
#endif
#endif /* GRPC_STATUS_H */
#endif /* GRPC_STATUS_H */

@ -55,4 +55,4 @@ void gpr_free_aligned(void *ptr);
}
#endif
#endif /* GRPC_SUPPORT_ALLOC_H */
#endif /* GRPC_SUPPORT_ALLOC_H */

@ -89,4 +89,4 @@
#error could not determine platform for atm
#endif
#endif /* GRPC_SUPPORT_ATM_H */
#endif /* GRPC_SUPPORT_ATM_H */

@ -69,4 +69,4 @@ static __inline int gpr_atm_rel_cas(gpr_atm *p, gpr_atm o, gpr_atm n) {
__ATOMIC_RELAXED);
}
#endif /* GRPC_SUPPORT_ATM_GCC_ATOMIC_H */
#endif /* GRPC_SUPPORT_ATM_GCC_ATOMIC_H */

@ -84,4 +84,4 @@ static __inline void gpr_atm_no_barrier_store(gpr_atm *p, gpr_atm value) {
#define gpr_atm_acq_cas(p, o, n) (__sync_bool_compare_and_swap((p), (o), (n)))
#define gpr_atm_rel_cas(p, o, n) gpr_atm_acq_cas((p), (o), (n))
#endif /* GRPC_SUPPORT_ATM_GCC_SYNC_H */
#endif /* GRPC_SUPPORT_ATM_GCC_SYNC_H */

@ -66,31 +66,31 @@ static __inline int gpr_atm_no_barrier_cas(gpr_atm *p, gpr_atm o, gpr_atm n) {
/* InterlockedCompareExchangePointerNoFence() not available on vista or
windows7 */
#ifdef GPR_ARCH_64
return o == (gpr_atm)InterlockedCompareExchangeAcquire64((volatile LONGLONG *) p,
(LONGLONG) n, (LONGLONG) o);
return o == (gpr_atm)InterlockedCompareExchangeAcquire64(
(volatile LONGLONG *)p, (LONGLONG)n, (LONGLONG)o);
#else
return o == (gpr_atm)InterlockedCompareExchangeAcquire((volatile LONG *) p,
(LONG) n, (LONG) o);
return o == (gpr_atm)InterlockedCompareExchangeAcquire((volatile LONG *)p,
(LONG)n, (LONG)o);
#endif
}
static __inline int gpr_atm_acq_cas(gpr_atm *p, gpr_atm o, gpr_atm n) {
#ifdef GPR_ARCH_64
return o == (gpr_atm)InterlockedCompareExchangeAcquire64((volatile LONGLONG *) p,
(LONGLONG) n, (LONGLONG) o);
return o == (gpr_atm)InterlockedCompareExchangeAcquire64(
(volatile LONGLONG *)p, (LONGLONG)n, (LONGLONG)o);
#else
return o == (gpr_atm)InterlockedCompareExchangeAcquire((volatile LONG *) p,
(LONG) n, (LONG) o);
return o == (gpr_atm)InterlockedCompareExchangeAcquire((volatile LONG *)p,
(LONG)n, (LONG)o);
#endif
}
static __inline int gpr_atm_rel_cas(gpr_atm *p, gpr_atm o, gpr_atm n) {
#ifdef GPR_ARCH_64
return o == (gpr_atm)InterlockedCompareExchangeRelease64((volatile LONGLONG *) p,
(LONGLONG) n, (LONGLONG) o);
return o == (gpr_atm)InterlockedCompareExchangeRelease64(
(volatile LONGLONG *)p, (LONGLONG)n, (LONGLONG)o);
#else
return o == (gpr_atm)InterlockedCompareExchangeRelease((volatile LONG *) p,
(LONG) n, (LONG) o);
return o == (gpr_atm)InterlockedCompareExchangeRelease((volatile LONG *)p,
(LONG)n, (LONG)o);
#endif
}
@ -110,17 +110,16 @@ static __inline gpr_atm gpr_atm_full_fetch_add(gpr_atm *p, gpr_atm delta) {
#ifdef GPR_ARCH_64
do {
old = *p;
} while (old != (gpr_atm)InterlockedCompareExchange64((volatile LONGLONG *) p,
(LONGLONG) old + delta,
(LONGLONG) old));
} while (old != (gpr_atm)InterlockedCompareExchange64((volatile LONGLONG *)p,
(LONGLONG)old + delta,
(LONGLONG)old));
#else
do {
old = *p;
} while (old != (gpr_atm)InterlockedCompareExchange((volatile LONG *) p,
(LONG) old + delta,
(LONG) old));
} while (old != (gpr_atm)InterlockedCompareExchange(
(volatile LONG *)p, (LONG)old + delta, (LONG)old));
#endif
return old;
}
#endif /* GRPC_SUPPORT_ATM_WIN32_H */
#endif /* GRPC_SUPPORT_ATM_WIN32_H */
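Independent of the Win32 plumbing above, the gpr_atm primitives are used the same way on every platform. A minimal sketch, not part of this diff, of an acquire-CAS retry loop for a made-up bounded counter, built only from functions that appear in this merge:

    #include <grpc/support/atm.h>

    /* Hedged sketch: increment *counter but never past limit. Returns 1 on
       success, 0 if the cap was already reached. */
    static int bounded_increment(gpr_atm *counter, gpr_atm limit) {
      for (;;) {
        gpr_atm old = gpr_atm_acq_load(counter);
        if (old >= limit) return 0;
        if (gpr_atm_acq_cas(counter, old, old + 1)) return 1;
        /* CAS lost a race: reload and retry */
      }
    }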

@ -94,4 +94,4 @@ char *gpr_cmdline_usage_string(gpr_cmdline *cl, const char *argv0);
}
#endif
#endif /* GRPC_SUPPORT_CMDLINE_H */
#endif /* GRPC_SUPPORT_CMDLINE_H */

@ -54,4 +54,4 @@ unsigned gpr_cpu_current_cpu(void);
} // extern "C"
#endif
#endif /* GRPC_SUPPORT_CPU_H */
#endif /* GRPC_SUPPORT_CPU_H */

@ -73,4 +73,4 @@ void gpr_histogram_merge_contents(gpr_histogram *histogram,
}
#endif
#endif /* GRPC_SUPPORT_HISTOGRAM_H */
#endif /* GRPC_SUPPORT_HISTOGRAM_H */

@ -61,4 +61,4 @@ int gpr_split_host_port(const char *name, char **host, char **port);
}
#endif
#endif /* GRPC_SUPPORT_HOST_PORT_H */
#endif /* GRPC_SUPPORT_HOST_PORT_H */

@ -105,4 +105,4 @@ void gpr_set_log_function(gpr_log_func func);
}
#endif
#endif /* GRPC_SUPPORT_LOG_H */
#endif /* GRPC_SUPPORT_LOG_H */

@ -48,4 +48,4 @@ char *gpr_format_message(DWORD messageid);
}
#endif
#endif /* GRPC_SUPPORT_LOG_WIN32_H */
#endif /* GRPC_SUPPORT_LOG_WIN32_H */

@ -64,7 +64,8 @@
#undef GRPC_NOMINMAX_WAS_NOT_DEFINED
#undef NOMINMAX
#endif /* GRPC_WIN32_LEAN_AND_MEAN_WAS_NOT_DEFINED */
#endif /* defined(_WIN64) || defined(WIN64) || defined(_WIN32) || defined(WIN32) */
#endif /* defined(_WIN64) || defined(WIN64) || defined(_WIN32) || \
defined(WIN32) */
/* Override this file with one for your platform if you need to redefine
things. */

@ -96,7 +96,7 @@ typedef struct gpr_slice {
#define GPR_SLICE_LENGTH(slice) \
((slice).refcount ? (slice).data.refcounted.length \
: (slice).data.inlined.length)
#define GPR_SLICE_SET_LENGTH(slice, newlen) \
#define GPR_SLICE_SET_LENGTH(slice, newlen) \
((slice).refcount ? ((slice).data.refcounted.length = (size_t)(newlen)) \
: ((slice).data.inlined.length = (gpr_uint8)(newlen)))
#define GPR_SLICE_END_PTR(slice) \

@ -58,4 +58,4 @@ int gpr_asprintf(char **strp, const char *format, ...);
}
#endif
#endif /* GRPC_SUPPORT_STRING_UTIL_H */
#endif /* GRPC_SUPPORT_STRING_UTIL_H */

@ -36,7 +36,7 @@
#ifdef __cplusplus
extern "C" {
#endif
#endif
typedef struct gpr_subprocess gpr_subprocess;

@ -312,4 +312,4 @@ gpr_intptr gpr_stats_read(const gpr_stats_counter *c);
}
#endif
#endif /* GRPC_SUPPORT_SYNC_H */
#endif /* GRPC_SUPPORT_SYNC_H */

@ -38,24 +38,18 @@
#include <grpc/support/atm.h>
/* gpr_event */
typedef struct {
gpr_atm state;
} gpr_event;
typedef struct { gpr_atm state; } gpr_event;
#define GPR_EVENT_INIT \
{ 0 }
/* gpr_refcount */
typedef struct {
gpr_atm count;
} gpr_refcount;
typedef struct { gpr_atm count; } gpr_refcount;
/* gpr_stats_counter */
typedef struct {
gpr_atm value;
} gpr_stats_counter;
typedef struct { gpr_atm value; } gpr_stats_counter;
#define GPR_STATS_INIT \
{ 0 }
#endif /* GRPC_SUPPORT_SYNC_GENERIC_H */
#endif /* GRPC_SUPPORT_SYNC_GENERIC_H */

@ -44,4 +44,4 @@ typedef pthread_once_t gpr_once;
#define GPR_ONCE_INIT PTHREAD_ONCE_INIT
#endif /* GRPC_SUPPORT_SYNC_POSIX_H */
#endif /* GRPC_SUPPORT_SYNC_POSIX_H */

@ -46,4 +46,4 @@ typedef CONDITION_VARIABLE gpr_cv;
typedef INIT_ONCE gpr_once;
#define GPR_ONCE_INIT INIT_ONCE_STATIC_INIT
#endif /* GRPC_SUPPORT_SYNC_WIN32_H */
#endif /* GRPC_SUPPORT_SYNC_WIN32_H */

@ -88,4 +88,4 @@ void gpr_thd_join(gpr_thd_id t);
}
#endif
#endif /* GRPC_SUPPORT_THD_H */
#endif /* GRPC_SUPPORT_THD_H */

@ -84,7 +84,8 @@ void gpr_time_init(void);
gpr_timespec gpr_now(gpr_clock_type clock);
/* Convert a timespec from one clock to another */
gpr_timespec gpr_convert_clock_type(gpr_timespec t, gpr_clock_type target_clock);
gpr_timespec gpr_convert_clock_type(gpr_timespec t,
gpr_clock_type target_clock);
/* Return -ve, 0, or +ve according to whether a < b, a == b, or a > b
respectively. */

@ -47,7 +47,7 @@
GPR_TLS_DECL(foo);
Thread locals always have static scope.
Initializing a thread local (must be done at library initialization
Initializing a thread local (must be done at library initialization
time):
gpr_tls_init(&foo);
@ -58,7 +58,7 @@
gpr_tls_set(&foo, new_value);
Accessing a thread local:
current_value = gpr_tls_get(&foo, value);
current_value = gpr_tls_get(&foo, value);
ALL functions here may be implemented as macros. */
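A minimal end-to-end sketch, not part of this diff, of the sequence described above. g_request_id is a made-up slot; the one-argument gpr_tls_get form and the gpr_intptr value type are assumed from the gcc/msvc macros shown in the next two files:

    #include <grpc/support/tls.h>

    GPR_TLS_DECL(g_request_id);  /* static-scope, per-thread slot */

    void tls_demo(void) {
      gpr_tls_init(&g_request_id);     /* normally done at library-initialization time */
      gpr_tls_set(&g_request_id, 42);  /* per-thread write */
      gpr_intptr id = gpr_tls_get(&g_request_id);
      (void)id;
      gpr_tls_destroy(&g_request_id);
    }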

@ -42,10 +42,14 @@ struct gpr_gcc_thread_local {
};
#define GPR_TLS_DECL(name) \
static __thread struct gpr_gcc_thread_local name = {0}
static __thread struct gpr_gcc_thread_local name = {0}
#define gpr_tls_init(tls) do {} while (0)
#define gpr_tls_destroy(tls) do {} while (0)
#define gpr_tls_init(tls) \
do { \
} while (0)
#define gpr_tls_destroy(tls) \
do { \
} while (0)
#define gpr_tls_set(tls, new_value) (((tls)->value) = (new_value))
#define gpr_tls_get(tls) ((tls)->value)

@ -42,10 +42,14 @@ struct gpr_msvc_thread_local {
};
#define GPR_TLS_DECL(name) \
static __declspec(thread) struct gpr_msvc_thread_local name = {0}
static __declspec(thread) struct gpr_msvc_thread_local name = {0}
#define gpr_tls_init(tls) do {} while (0)
#define gpr_tls_destroy(tls) do {} while (0)
#define gpr_tls_init(tls) \
do { \
} while (0)
#define gpr_tls_destroy(tls) \
do { \
} while (0)
#define gpr_tls_set(tls, new_value) (((tls)->value) = (new_value))
#define gpr_tls_get(tls) ((tls)->value)

@ -46,10 +46,10 @@
#define GPR_ARRAY_SIZE(array) (sizeof(array) / sizeof(*(array)))
#define GPR_SWAP(type, a, b) \
do { \
type x = a; \
a = b; \
b = x; \
do { \
type x = a; \
a = b; \
b = x; \
} while (0)
/** Set the \a n-th bit of \a i (a mutable pointer). */
@ -72,4 +72,4 @@
0x0f0f0f0f) % \
255)
#endif /* GRPC_SUPPORT_USEFUL_H */
#endif /* GRPC_SUPPORT_USEFUL_H */

@ -41,4 +41,4 @@
extern const grpc_channel_filter grpc_client_census_filter;
extern const grpc_channel_filter grpc_server_census_filter;
#endif /* GRPC_INTERNAL_CORE_CHANNEL_CENSUS_FILTER_H */
#endif /* GRPC_INTERNAL_CORE_CHANNEL_CENSUS_FILTER_H */

@ -84,8 +84,10 @@ typedef struct {
grpc_pollset_set pollset_set;
} channel_data;
/** We create one watcher for each new lb_policy that is returned from a resolver,
to watch for state changes from the lb_policy. When a state change is seen, we
/** We create one watcher for each new lb_policy that is returned from a
resolver,
to watch for state changes from the lb_policy. When a state change is seen,
we
update the channel, and create a new watcher */
typedef struct {
channel_data *chand;
@ -380,7 +382,8 @@ static void perform_transport_stream_op(grpc_call_element *elem,
if (lb_policy) {
grpc_transport_stream_op *op = &calld->waiting_op;
grpc_pollset *bind_pollset = op->bind_pollset;
grpc_metadata_batch *initial_metadata = &op->send_ops->ops[0].data.metadata;
grpc_metadata_batch *initial_metadata =
&op->send_ops->ops[0].data.metadata;
GRPC_LB_POLICY_REF(lb_policy, "pick");
gpr_mu_unlock(&chand->mu_config);
calld->state = CALL_WAITING_FOR_PICK;
@ -388,13 +391,14 @@ static void perform_transport_stream_op(grpc_call_element *elem,
GPR_ASSERT(op->bind_pollset);
GPR_ASSERT(op->send_ops);
GPR_ASSERT(op->send_ops->nops >= 1);
GPR_ASSERT(
op->send_ops->ops[0].type == GRPC_OP_METADATA);
GPR_ASSERT(op->send_ops->ops[0].type == GRPC_OP_METADATA);
gpr_mu_unlock(&calld->mu_state);
grpc_iomgr_closure_init(&calld->async_setup_task, picked_target, calld);
grpc_iomgr_closure_init(&calld->async_setup_task, picked_target,
calld);
grpc_lb_policy_pick(lb_policy, bind_pollset, initial_metadata,
&calld->picked_channel, &calld->async_setup_task);
&calld->picked_channel,
&calld->async_setup_task);
GRPC_LB_POLICY_UNREF(lb_policy, "pick");
} else if (chand->resolver != NULL) {
@ -430,7 +434,8 @@ static void cc_start_transport_stream_op(grpc_call_element *elem,
perform_transport_stream_op(elem, op, 0);
}
static void watch_lb_policy(channel_data *chand, grpc_lb_policy *lb_policy, grpc_connectivity_state current_state);
static void watch_lb_policy(channel_data *chand, grpc_lb_policy *lb_policy,
grpc_connectivity_state current_state);
static void on_lb_policy_state_changed(void *arg, int iomgr_success) {
lb_policy_connectivity_watcher *w = arg;
@ -450,7 +455,8 @@ static void on_lb_policy_state_changed(void *arg, int iomgr_success) {
gpr_free(w);
}
static void watch_lb_policy(channel_data *chand, grpc_lb_policy *lb_policy, grpc_connectivity_state current_state) {
static void watch_lb_policy(channel_data *chand, grpc_lb_policy *lb_policy,
grpc_connectivity_state current_state) {
lb_policy_connectivity_watcher *w = gpr_malloc(sizeof(*w));
GRPC_CHANNEL_INTERNAL_REF(chand->master, "watch_lb_policy");
@ -663,7 +669,8 @@ static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
grpc_iomgr_closure_init(&chand->on_config_changed, cc_on_config_changed,
chand);
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE, "client_channel");
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
"client_channel");
}
/* Destructor for channel_data */
@ -747,19 +754,20 @@ void grpc_client_channel_watch_connectivity_state(
gpr_mu_unlock(&chand->mu_config);
}
grpc_pollset_set *grpc_client_channel_get_connecting_pollset_set(grpc_channel_element *elem) {
grpc_pollset_set *grpc_client_channel_get_connecting_pollset_set(
grpc_channel_element *elem) {
channel_data *chand = elem->channel_data;
return &chand->pollset_set;
}
void grpc_client_channel_add_interested_party(grpc_channel_element *elem,
grpc_pollset *pollset) {
grpc_pollset *pollset) {
channel_data *chand = elem->channel_data;
grpc_pollset_set_add_pollset(&chand->pollset_set, pollset);
}
void grpc_client_channel_del_interested_party(grpc_channel_element *elem,
grpc_pollset *pollset) {
grpc_pollset *pollset) {
channel_data *chand = elem->channel_data;
grpc_pollset_set_del_pollset(&chand->pollset_set, pollset);
}

@ -59,11 +59,12 @@ void grpc_client_channel_watch_connectivity_state(
grpc_channel_element *elem, grpc_connectivity_state *state,
grpc_iomgr_closure *on_complete);
grpc_pollset_set *grpc_client_channel_get_connecting_pollset_set(grpc_channel_element *elem);
grpc_pollset_set *grpc_client_channel_get_connecting_pollset_set(
grpc_channel_element *elem);
void grpc_client_channel_add_interested_party(grpc_channel_element *channel,
grpc_pollset *pollset);
grpc_pollset *pollset);
void grpc_client_channel_del_interested_party(grpc_channel_element *channel,
grpc_pollset *pollset);
grpc_pollset *pollset);
#endif /* GRPC_INTERNAL_CORE_CHANNEL_CLIENT_CHANNEL_H */

@ -53,7 +53,7 @@ typedef struct call_data {
/** Compression algorithm we'll try to use. It may be given by incoming
* metadata, or by the channel's default compression settings. */
grpc_compression_algorithm compression_algorithm;
/** If true, contents of \a compression_algorithm are authoritative */
/** If true, contents of \a compression_algorithm are authoritative */
int has_compression_algorithm;
} call_data;
@ -78,7 +78,7 @@ typedef struct channel_data {
*
* Returns 1 if the data was actually compressed and 0 otherwise. */
static int compress_send_sb(grpc_compression_algorithm algorithm,
gpr_slice_buffer *slices) {
gpr_slice_buffer *slices) {
int did_compress;
gpr_slice_buffer tmp;
gpr_slice_buffer_init(&tmp);
@ -93,7 +93,7 @@ static int compress_send_sb(grpc_compression_algorithm algorithm,
/** For each \a md element from the incoming metadata, filter out the entry for
* "grpc-encoding", using its value to populate the call data's
* compression_algorithm field. */
static grpc_mdelem* compression_md_filter(void *user_data, grpc_mdelem *md) {
static grpc_mdelem *compression_md_filter(void *user_data, grpc_mdelem *md) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
@ -115,10 +115,10 @@ static grpc_mdelem* compression_md_filter(void *user_data, grpc_mdelem *md) {
static int skip_compression(channel_data *channeld, call_data *calld) {
if (calld->has_compression_algorithm) {
if (calld->compression_algorithm == GRPC_COMPRESS_NONE) {
return 1;
}
return 0; /* we have an actual call-specific algorithm */
if (calld->compression_algorithm == GRPC_COMPRESS_NONE) {
return 1;
}
return 0; /* we have an actual call-specific algorithm */
}
/* no per-call compression override */
return channeld->default_compression_algorithm == GRPC_COMPRESS_NONE;
@ -191,7 +191,7 @@ static void process_send_ops(grpc_call_element *elem,
* given by GRPC_OP_BEGIN_MESSAGE) */
calld->remaining_slice_bytes = sop->data.begin_message.length;
if (sop->data.begin_message.flags & GRPC_WRITE_NO_COMPRESS) {
calld->has_compression_algorithm = 1; /* GPR_TRUE */
calld->has_compression_algorithm = 1; /* GPR_TRUE */
calld->compression_algorithm = GRPC_COMPRESS_NONE;
}
break;
@ -293,7 +293,7 @@ static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
int is_first, int is_last) {
channel_data *channeld = elem->channel_data;
grpc_compression_algorithm algo_idx;
const char* supported_algorithms_names[GRPC_COMPRESS_ALGORITHMS_COUNT-1];
const char *supported_algorithms_names[GRPC_COMPRESS_ALGORITHMS_COUNT - 1];
char *accept_encoding_str;
size_t accept_encoding_str_len;
@ -318,23 +318,19 @@ static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
GRPC_MDSTR_REF(channeld->mdstr_outgoing_compression_algorithm_key),
grpc_mdstr_from_string(mdctx, algorithm_name, 0));
if (algo_idx > 0) {
supported_algorithms_names[algo_idx-1] = algorithm_name;
supported_algorithms_names[algo_idx - 1] = algorithm_name;
}
}
/* TODO(dgq): gpr_strjoin_sep could be made to work with statically allocated
* arrays, as to avoid the heap allocs */
accept_encoding_str =
gpr_strjoin_sep(supported_algorithms_names,
GPR_ARRAY_SIZE(supported_algorithms_names),
", ",
&accept_encoding_str_len);
channeld->mdelem_accept_encoding =
grpc_mdelem_from_metadata_strings(
mdctx,
GRPC_MDSTR_REF(channeld->mdstr_compression_capabilities_key),
grpc_mdstr_from_string(mdctx, accept_encoding_str, 0));
accept_encoding_str = gpr_strjoin_sep(
supported_algorithms_names, GPR_ARRAY_SIZE(supported_algorithms_names),
", ", &accept_encoding_str_len);
channeld->mdelem_accept_encoding = grpc_mdelem_from_metadata_strings(
mdctx, GRPC_MDSTR_REF(channeld->mdstr_compression_capabilities_key),
grpc_mdstr_from_string(mdctx, accept_encoding_str, 0));
gpr_free(accept_encoding_str);
GPR_ASSERT(!is_last);
@ -348,8 +344,7 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
GRPC_MDSTR_UNREF(channeld->mdstr_request_compression_algorithm_key);
GRPC_MDSTR_UNREF(channeld->mdstr_outgoing_compression_algorithm_key);
GRPC_MDSTR_UNREF(channeld->mdstr_compression_capabilities_key);
for (algo_idx = 0; algo_idx < GRPC_COMPRESS_ALGORITHMS_COUNT;
++algo_idx) {
for (algo_idx = 0; algo_idx < GRPC_COMPRESS_ALGORITHMS_COUNT; ++algo_idx) {
GRPC_MDELEM_UNREF(channeld->mdelem_compression_algorithms[algo_idx]);
}
GRPC_MDELEM_UNREF(channeld->mdelem_accept_encoding);

@ -62,4 +62,4 @@
extern const grpc_channel_filter grpc_compress_filter;
#endif /* GRPC_INTERNAL_CORE_CHANNEL_COMPRESS_FILTER_H */
#endif /* GRPC_INTERNAL_CORE_CHANNEL_COMPRESS_FILTER_H */

@ -41,4 +41,4 @@ extern const grpc_channel_filter grpc_http_client_filter;
#define GRPC_ARG_HTTP2_SCHEME "grpc.http2_scheme"
#endif /* GRPC_INTERNAL_CORE_CHANNEL_HTTP_CLIENT_FILTER_H */
#endif /* GRPC_INTERNAL_CORE_CHANNEL_HTTP_CLIENT_FILTER_H */

@ -39,4 +39,4 @@
/* Processes metadata on the client side for HTTP2 transports */
extern const grpc_channel_filter grpc_http_server_filter;
#endif /* GRPC_INTERNAL_CORE_CHANNEL_HTTP_SERVER_FILTER_H */
#endif /* GRPC_INTERNAL_CORE_CHANNEL_HTTP_SERVER_FILTER_H */

@ -41,4 +41,4 @@
customize for their own filters */
extern const grpc_channel_filter grpc_no_op_filter;
#endif /* GRPC_INTERNAL_CORE_CHANNEL_NOOP_FILTER_H */
#endif /* GRPC_INTERNAL_CORE_CHANNEL_NOOP_FILTER_H */

@ -219,7 +219,8 @@ static grpc_resolver *dns_create(
default_host_arg.type = GRPC_ARG_STRING;
default_host_arg.key = GRPC_ARG_DEFAULT_AUTHORITY;
default_host_arg.value.string = host;
subchannel_factory = grpc_subchannel_factory_add_channel_arg(subchannel_factory, &default_host_arg);
subchannel_factory = grpc_subchannel_factory_add_channel_arg(
subchannel_factory, &default_host_arg);
gpr_free(host);
gpr_free(port);

@ -142,7 +142,7 @@ static void zookeeper_next(grpc_resolver *resolver,
gpr_mu_unlock(&r->mu);
}
/** Zookeeper global watcher for connection management
/** Zookeeper global watcher for connection management
TODO: better connection management besides logs */
static void zookeeper_global_watcher(zhandle_t *zookeeper_handle, int type,
int state, const char *path,
@ -172,7 +172,7 @@ static void zookeeper_watcher(zhandle_t *zookeeper_handle, int type, int state,
}
}
/** Callback function after getting all resolved addresses
/** Callback function after getting all resolved addresses
Creates a subchannel for each address */
static void zookeeper_on_resolved(void *arg,
grpc_resolved_addresses *addresses) {
@ -249,7 +249,7 @@ static char *zookeeper_parse_address(const char *value, int value_len) {
grpc_json *cur;
const char *host;
const char *port;
char* buffer;
char *buffer;
char *address = NULL;
buffer = gpr_malloc(value_len);
@ -449,7 +449,7 @@ static grpc_resolver *zookeeper_create(
gpr_mu_init(&r->mu);
grpc_resolver_init(&r->base, &zookeeper_resolver_vtable);
r->name = gpr_strdup(path);
r->subchannel_factory = subchannel_factory;
r->lb_policy_factory = lb_policy_factory;
grpc_subchannel_factory_ref(subchannel_factory);

@ -35,9 +35,9 @@
#include "src/core/client_config/subchannel_factory_decorators/merge_channel_args.h"
grpc_subchannel_factory *grpc_subchannel_factory_add_channel_arg(
grpc_subchannel_factory *input, const grpc_arg *arg) {
grpc_channel_args args;
args.num_args = 1;
args.args = (grpc_arg *)arg;
return grpc_subchannel_factory_merge_channel_args(input, &args);
grpc_subchannel_factory *input, const grpc_arg *arg) {
grpc_channel_args args;
args.num_args = 1;
args.args = (grpc_arg *)arg;
return grpc_subchannel_factory_merge_channel_args(input, &args);
}

@ -40,6 +40,7 @@
channel_args by adding a new argument; ownership of input, arg is retained
by the caller. */
grpc_subchannel_factory *grpc_subchannel_factory_add_channel_arg(
grpc_subchannel_factory *input, const grpc_arg *arg);
grpc_subchannel_factory *input, const grpc_arg *arg);
#endif /* GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_DECORATORS_ADD_CHANNEL_ARG_H */
#endif /* GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_DECORATORS_ADD_CHANNEL_ARG_H \
*/

@ -50,7 +50,7 @@ static void merge_args_factory_ref(grpc_subchannel_factory *scf) {
static void merge_args_factory_unref(grpc_subchannel_factory *scf) {
merge_args_factory *f = (merge_args_factory *)scf;
if (gpr_unref(&f->refs)) {
grpc_subchannel_factory_unref(f->wrapped);
grpc_subchannel_factory_unref(f->wrapped);
grpc_channel_args_destroy(f->merge_args);
gpr_free(f);
}
@ -73,7 +73,7 @@ static const grpc_subchannel_factory_vtable merge_args_factory_vtable = {
merge_args_factory_create_subchannel};
grpc_subchannel_factory *grpc_subchannel_factory_merge_channel_args(
grpc_subchannel_factory *input, const grpc_channel_args *args) {
grpc_subchannel_factory *input, const grpc_channel_args *args) {
merge_args_factory *f = gpr_malloc(sizeof(*f));
f->base.vtable = &merge_args_factory_vtable;
gpr_ref_init(&f->refs, 1);

@ -40,6 +40,7 @@
channel_args by adding a new argument; ownership of input, args is retained
by the caller. */
grpc_subchannel_factory *grpc_subchannel_factory_merge_channel_args(
grpc_subchannel_factory *input, const grpc_channel_args *args);
grpc_subchannel_factory *input, const grpc_channel_args *args);
#endif /* GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_DECORATORS_MERGE_CHANNEL_ARGS_H */
#endif /* GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_DECORATORS_MERGE_CHANNEL_ARGS_H \
*/

@ -35,7 +35,7 @@
#include <string.h>
#include <grpc/compression.h>
int grpc_compression_algorithm_parse(const char* name, size_t name_length,
int grpc_compression_algorithm_parse(const char *name, size_t name_length,
grpc_compression_algorithm *algorithm) {
/* we use strncmp not only because it's safer (even though in this case it
* doesn't matter, given that we are comparing against string literals, but

@ -61,8 +61,8 @@ static void add(const char *beg, const char *end, char ***ss, size_t *ns) {
size_t np = n + 1;
char *s = gpr_malloc(end - beg + 1);
memcpy(s, beg, end - beg);
s[end-beg] = 0;
*ss = gpr_realloc(*ss, sizeof(char**) * np);
s[end - beg] = 0;
*ss = gpr_realloc(*ss, sizeof(char **) * np);
(*ss)[n] = s;
*ns = np;
}
@ -73,7 +73,7 @@ static void split(const char *s, char ***ss, size_t *ns) {
add(s, s + strlen(s), ss, ns);
} else {
add(s, c, ss, ns);
split(c+1, ss, ns);
split(c + 1, ss, ns);
}
}
@ -125,7 +125,7 @@ int grpc_tracer_set_enabled(const char *name, int enabled) {
}
if (!found) {
gpr_log(GPR_ERROR, "Unknown trace var: '%s'", name);
return 0; /* early return */
return 0; /* early return */
}
}
return 1;

@ -40,4 +40,4 @@ void grpc_register_tracer(const char *name, int *flag);
void grpc_tracer_init(const char *env_var_name);
void grpc_tracer_shutdown(void);
#endif /* GRPC_INTERNAL_CORE_DEBUG_TRACE_H */
#endif /* GRPC_INTERNAL_CORE_DEBUG_TRACE_H */
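A minimal sketch, not part of this diff, of how the tracer registry declared above is meant to be used; my_feature is a made-up tracer name, and "GRPC_TRACE" is assumed to be the environment-variable name passed in by core initialization:

    #include "src/core/debug/trace.h"

    static int my_feature_trace = 0;  /* flipped to 1 when the tracer is enabled */

    void register_my_tracer(void) {
      grpc_register_tracer("my_feature", &my_feature_trace);
      grpc_tracer_init("GRPC_TRACE");  /* reads the env var, enables listed tracers */
    }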

@ -43,7 +43,8 @@
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
static void fill_common_header(const grpc_httpcli_request *request, gpr_strvec *buf) {
static void fill_common_header(const grpc_httpcli_request *request,
gpr_strvec *buf) {
size_t i;
gpr_strvec_add(buf, gpr_strdup(request->path));
gpr_strvec_add(buf, gpr_strdup(" HTTP/1.0\r\n"));
@ -52,7 +53,8 @@ static void fill_common_header(const grpc_httpcli_request *request, gpr_strvec *
gpr_strvec_add(buf, gpr_strdup(request->host));
gpr_strvec_add(buf, gpr_strdup("\r\n"));
gpr_strvec_add(buf, gpr_strdup("Connection: close\r\n"));
gpr_strvec_add(buf, gpr_strdup("User-Agent: "GRPC_HTTPCLI_USER_AGENT"\r\n"));
gpr_strvec_add(buf,
gpr_strdup("User-Agent: " GRPC_HTTPCLI_USER_AGENT "\r\n"));
/* user supplied headers */
for (i = 0; i < request->hdr_count; i++) {
gpr_strvec_add(buf, gpr_strdup(request->hdrs[i].key));

@ -42,4 +42,4 @@ gpr_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request,
const char *body_bytes,
size_t body_size);
#endif /* GRPC_INTERNAL_CORE_HTTPCLI_FORMAT_REQUEST_H */
#endif /* GRPC_INTERNAL_CORE_HTTPCLI_FORMAT_REQUEST_H */

@ -61,4 +61,4 @@ void grpc_httpcli_parser_destroy(grpc_httpcli_parser *parser);
int grpc_httpcli_parser_parse(grpc_httpcli_parser *parser, gpr_slice slice);
int grpc_httpcli_parser_eof(grpc_httpcli_parser *parser);
#endif /* GRPC_INTERNAL_CORE_HTTPCLI_PARSER_H */
#endif /* GRPC_INTERNAL_CORE_HTTPCLI_PARSER_H */

@ -105,8 +105,7 @@ void grpc_alarm_list_init(gpr_timespec now) {
void grpc_alarm_list_shutdown(void) {
int i;
while (run_some_expired_alarms(NULL, gpr_inf_future(g_clock_type), NULL,
0))
while (run_some_expired_alarms(NULL, gpr_inf_future(g_clock_type), NULL, 0))
;
for (i = 0; i < NUM_SHARDS; i++) {
shard_type *shard = &g_shards[i];
@ -362,7 +361,7 @@ static int run_some_expired_alarms(gpr_mu *drop_mu, gpr_timespec now,
int grpc_alarm_check(gpr_mu *drop_mu, gpr_timespec now, gpr_timespec *next) {
GPR_ASSERT(now.clock_type == g_clock_type);
return run_some_expired_alarms(
drop_mu, now, next,
drop_mu, now, next,
gpr_time_cmp(now, gpr_inf_future(now.clock_type)) != 0);
}

@ -86,4 +86,4 @@ void grpc_alarm_init(grpc_alarm *alarm, gpr_timespec deadline,
Requires: cancel() must happen after add() on a given alarm */
void grpc_alarm_cancel(grpc_alarm *alarm);
#endif /* GRPC_INTERNAL_CORE_IOMGR_ALARM_H */
#endif /* GRPC_INTERNAL_CORE_IOMGR_ALARM_H */

@ -66,11 +66,11 @@ static void adjust_downwards(grpc_alarm **first, int i, int length,
int next_i;
if (left_child >= length) break;
right_child = left_child + 1;
next_i =
right_child < length && gpr_time_cmp(first[left_child]->deadline,
first[right_child]->deadline) < 0
? right_child
: left_child;
next_i = right_child < length &&
gpr_time_cmp(first[left_child]->deadline,
first[right_child]->deadline) < 0
? right_child
: left_child;
if (gpr_time_cmp(t->deadline, first[next_i]->deadline) >= 0) break;
first[i] = first[next_i];
first[i]->heap_index = i;

@ -54,4 +54,4 @@ void grpc_alarm_heap_pop(grpc_alarm_heap *heap);
int grpc_alarm_heap_is_empty(grpc_alarm_heap *heap);
#endif /* GRPC_INTERNAL_CORE_IOMGR_ALARM_HEAP_H */
#endif /* GRPC_INTERNAL_CORE_IOMGR_ALARM_HEAP_H */

@ -59,4 +59,4 @@ gpr_timespec grpc_alarm_list_next_timeout(void);
void grpc_kick_poller(void);
#endif /* GRPC_INTERNAL_CORE_IOMGR_ALARM_INTERNAL_H */
#endif /* GRPC_INTERNAL_CORE_IOMGR_ALARM_INTERNAL_H */

@ -50,7 +50,8 @@ void grpc_endpoint_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) {
ep->vtable->add_to_pollset(ep, pollset);
}
void grpc_endpoint_add_to_pollset_set(grpc_endpoint *ep, grpc_pollset_set *pollset_set) {
void grpc_endpoint_add_to_pollset_set(grpc_endpoint *ep,
grpc_pollset_set *pollset_set) {
ep->vtable->add_to_pollset_set(ep, pollset_set);
}

@ -103,10 +103,11 @@ void grpc_endpoint_destroy(grpc_endpoint *ep);
/* Add an endpoint to a pollset, so that when the pollset is polled, events from
this endpoint are considered */
void grpc_endpoint_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset);
void grpc_endpoint_add_to_pollset_set(grpc_endpoint *ep, grpc_pollset_set *pollset_set);
void grpc_endpoint_add_to_pollset_set(grpc_endpoint *ep,
grpc_pollset_set *pollset_set);
struct grpc_endpoint {
const grpc_endpoint_vtable *vtable;
};
#endif /* GRPC_INTERNAL_CORE_IOMGR_ENDPOINT_H */
#endif /* GRPC_INTERNAL_CORE_IOMGR_ENDPOINT_H */

@ -44,4 +44,4 @@ typedef struct {
grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
size_t read_slice_size);
#endif /* GRPC_INTERNAL_CORE_IOMGR_ENDPOINT_PAIR_H */
#endif /* GRPC_INTERNAL_CORE_IOMGR_ENDPOINT_PAIR_H */

@ -52,21 +52,26 @@ static void create_sockets(SOCKET sv[2]) {
SOCKADDR_IN addr;
int addr_len = sizeof(addr);
lst_sock = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, 0, WSA_FLAG_OVERLAPPED);
lst_sock = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, 0,
WSA_FLAG_OVERLAPPED);
GPR_ASSERT(lst_sock != INVALID_SOCKET);
memset(&addr, 0, sizeof(addr));
addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
addr.sin_family = AF_INET;
GPR_ASSERT(bind(lst_sock, (struct sockaddr*)&addr, sizeof(addr)) != SOCKET_ERROR);
GPR_ASSERT(bind(lst_sock, (struct sockaddr *)&addr, sizeof(addr)) !=
SOCKET_ERROR);
GPR_ASSERT(listen(lst_sock, SOMAXCONN) != SOCKET_ERROR);
GPR_ASSERT(getsockname(lst_sock, (struct sockaddr*)&addr, &addr_len) != SOCKET_ERROR);
GPR_ASSERT(getsockname(lst_sock, (struct sockaddr *)&addr, &addr_len) !=
SOCKET_ERROR);
cli_sock = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, 0, WSA_FLAG_OVERLAPPED);
cli_sock = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, 0,
WSA_FLAG_OVERLAPPED);
GPR_ASSERT(cli_sock != INVALID_SOCKET);
GPR_ASSERT(WSAConnect(cli_sock, (struct sockaddr*)&addr, addr_len, NULL, NULL, NULL, NULL) == 0);
svr_sock = accept(lst_sock, (struct sockaddr*)&addr, &addr_len);
GPR_ASSERT(WSAConnect(cli_sock, (struct sockaddr *)&addr, addr_len, NULL,
NULL, NULL, NULL) == 0);
svr_sock = accept(lst_sock, (struct sockaddr *)&addr, &addr_len);
GPR_ASSERT(svr_sock != INVALID_SOCKET);
closesocket(lst_sock);
@ -77,7 +82,8 @@ static void create_sockets(SOCKET sv[2]) {
sv[0] = svr_sock;
}
grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name, size_t read_slice_size) {
grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
size_t read_slice_size) {
SOCKET sv[2];
grpc_endpoint_pair p;
create_sockets(sv);

@ -65,18 +65,17 @@ static void do_iocp_work() {
LPOVERLAPPED overlapped;
grpc_winsocket *socket;
grpc_winsocket_callback_info *info;
void(*f)(void *, int) = NULL;
void (*f)(void *, int) = NULL;
void *opaque = NULL;
success = GetQueuedCompletionStatus(g_iocp, &bytes,
&completion_key, &overlapped,
INFINITE);
success = GetQueuedCompletionStatus(g_iocp, &bytes, &completion_key,
&overlapped, INFINITE);
/* success = 0 and overlapped = NULL means the deadline got attained.
Which is impossible. since our wait time is +inf */
GPR_ASSERT(success || overlapped);
GPR_ASSERT(completion_key && overlapped);
if (overlapped == &g_iocp_custom_overlap) {
gpr_atm_full_fetch_add(&g_custom_events, -1);
if (completion_key == (ULONG_PTR) &g_iocp_kick_token) {
if (completion_key == (ULONG_PTR)&g_iocp_kick_token) {
/* We were awoken from a kick. */
return;
}
@ -84,7 +83,7 @@ static void do_iocp_work() {
abort();
}
socket = (grpc_winsocket*) completion_key;
socket = (grpc_winsocket *)completion_key;
if (overlapped == &socket->write_info.overlapped) {
info = &socket->write_info;
} else if (overlapped == &socket->read_info.overlapped) {
@ -121,8 +120,7 @@ static void do_iocp_work() {
}
static void iocp_loop(void *p) {
while (gpr_atm_acq_load(&g_orphans) ||
gpr_atm_acq_load(&g_custom_events) ||
while (gpr_atm_acq_load(&g_orphans) || gpr_atm_acq_load(&g_custom_events) ||
!gpr_event_get(&g_shutdown_iocp)) {
grpc_maybe_call_delayed_callbacks(NULL, 1);
do_iocp_work();
@ -134,8 +132,8 @@ static void iocp_loop(void *p) {
void grpc_iocp_init(void) {
gpr_thd_id id;
g_iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE,
NULL, (ULONG_PTR)NULL, 0);
g_iocp =
CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, (ULONG_PTR)NULL, 0);
GPR_ASSERT(g_iocp);
gpr_event_init(&g_iocp_done);
@ -147,8 +145,7 @@ void grpc_iocp_kick(void) {
BOOL success;
gpr_atm_full_fetch_add(&g_custom_events, 1);
success = PostQueuedCompletionStatus(g_iocp, 0,
(ULONG_PTR) &g_iocp_kick_token,
success = PostQueuedCompletionStatus(g_iocp, 0, (ULONG_PTR)&g_iocp_kick_token,
&g_iocp_custom_overlap);
GPR_ASSERT(success);
}
@ -165,8 +162,8 @@ void grpc_iocp_shutdown(void) {
void grpc_iocp_add_socket(grpc_winsocket *socket) {
HANDLE ret;
if (socket->added_to_iocp) return;
ret = CreateIoCompletionPort((HANDLE)socket->socket,
g_iocp, (gpr_uintptr) socket, 0);
ret = CreateIoCompletionPort((HANDLE)socket->socket, g_iocp,
(gpr_uintptr)socket, 0);
if (!ret) {
char *utf8_message = gpr_format_message(WSAGetLastError());
gpr_log(GPR_ERROR, "Unable to add socket to iocp: %s", utf8_message);
@ -189,7 +186,7 @@ void grpc_iocp_socket_orphan(grpc_winsocket *socket) {
the callback now.
-) The IOCP hasn't completed yet, and we're queuing it for later. */
static void socket_notify_on_iocp(grpc_winsocket *socket,
void(*cb)(void *, int), void *opaque,
void (*cb)(void *, int), void *opaque,
grpc_winsocket_callback_info *info) {
int run_now = 0;
GPR_ASSERT(!info->cb);
@ -206,13 +203,13 @@ static void socket_notify_on_iocp(grpc_winsocket *socket,
}
void grpc_socket_notify_on_write(grpc_winsocket *socket,
void(*cb)(void *, int), void *opaque) {
void (*cb)(void *, int), void *opaque) {
socket_notify_on_iocp(socket, cb, opaque, &socket->write_info);
}
void grpc_socket_notify_on_read(grpc_winsocket *socket,
void(*cb)(void *, int), void *opaque) {
void grpc_socket_notify_on_read(grpc_winsocket *socket, void (*cb)(void *, int),
void *opaque) {
socket_notify_on_iocp(socket, cb, opaque, &socket->read_info);
}
#endif /* GPR_WINSOCK_SOCKET */
#endif /* GPR_WINSOCK_SOCKET */

@ -44,10 +44,10 @@ void grpc_iocp_shutdown(void);
void grpc_iocp_add_socket(grpc_winsocket *);
void grpc_iocp_socket_orphan(grpc_winsocket *);
void grpc_socket_notify_on_write(grpc_winsocket *, void(*cb)(void *, int success),
void *opaque);
void grpc_socket_notify_on_write(grpc_winsocket *,
void (*cb)(void *, int success), void *opaque);
void grpc_socket_notify_on_read(grpc_winsocket *, void(*cb)(void *, int success),
void *opaque);
void grpc_socket_notify_on_read(grpc_winsocket *,
void (*cb)(void *, int success), void *opaque);
#endif /* GRPC_INTERNAL_CORE_IOMGR_IOCP_WINDOWS_H */
#endif /* GRPC_INTERNAL_CORE_IOMGR_IOCP_WINDOWS_H */

@ -77,4 +77,4 @@ void grpc_iomgr_add_callback(grpc_iomgr_closure *closure);
argument. */
void grpc_iomgr_add_delayed_callback(grpc_iomgr_closure *iocb, int success);
#endif /* GRPC_INTERNAL_CORE_IOMGR_IOMGR_H */
#endif /* GRPC_INTERNAL_CORE_IOMGR_IOMGR_H */

@ -52,4 +52,4 @@ void grpc_iomgr_unregister_object(grpc_iomgr_object *obj);
void grpc_iomgr_platform_init(void);
void grpc_iomgr_platform_shutdown(void);
#endif /* GRPC_INTERNAL_CORE_IOMGR_IOMGR_INTERNAL_H */
#endif /* GRPC_INTERNAL_CORE_IOMGR_IOMGR_INTERNAL_H */

@ -51,4 +51,4 @@ void grpc_iomgr_platform_shutdown(void) {
grpc_fd_global_shutdown();
}
#endif /* GRPC_POSIX_SOCKET */
#endif /* GRPC_POSIX_SOCKET */

@ -39,4 +39,4 @@
void grpc_pollset_global_init(void);
void grpc_pollset_global_shutdown(void);
#endif /* GRPC_INTERNAL_CORE_IOMGR_IOMGR_POSIX_H */
#endif /* GRPC_INTERNAL_CORE_IOMGR_IOMGR_POSIX_H */

@ -68,4 +68,4 @@ void grpc_iomgr_platform_shutdown(void) {
winsock_shutdown();
}
#endif /* GRPC_WINSOCK_SOCKET */
#endif /* GRPC_WINSOCK_SOCKET */

@ -234,8 +234,7 @@ static void multipoll_with_epoll_pollset_destroy(grpc_pollset *pollset) {
}
static const grpc_pollset_vtable multipoll_with_epoll_pollset = {
multipoll_with_epoll_pollset_add_fd,
multipoll_with_epoll_pollset_del_fd,
multipoll_with_epoll_pollset_add_fd, multipoll_with_epoll_pollset_del_fd,
multipoll_with_epoll_pollset_maybe_work,
multipoll_with_epoll_pollset_finish_shutdown,
multipoll_with_epoll_pollset_destroy};

@ -74,7 +74,7 @@ static void multipoll_with_poll_pollset_add_fd(grpc_pollset *pollset,
}
h->fds[h->fd_count++] = fd;
GRPC_FD_REF(fd, "multipoller");
exit:
exit:
if (and_unlock_pollset) {
gpr_mu_unlock(&pollset->mu);
}
@ -202,8 +202,7 @@ static void multipoll_with_poll_pollset_destroy(grpc_pollset *pollset) {
}
static const grpc_pollset_vtable multipoll_with_poll_pollset = {
multipoll_with_poll_pollset_add_fd,
multipoll_with_poll_pollset_del_fd,
multipoll_with_poll_pollset_add_fd, multipoll_with_poll_pollset_del_fd,
multipoll_with_poll_pollset_maybe_work,
multipoll_with_poll_pollset_finish_shutdown,
multipoll_with_poll_pollset_destroy};

@ -140,10 +140,10 @@ void grpc_pollset_init(grpc_pollset *pollset) {
void grpc_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
gpr_mu_lock(&pollset->mu);
pollset->vtable->add_fd(pollset, fd, 1);
/* the following (enabled only in debug) will reacquire and then release
our lock - meaning that if the unlocking flag passed to del_fd above is
not respected, the code will deadlock (in a way that we have a chance of
debugging) */
/* the following (enabled only in debug) will reacquire and then release
our lock - meaning that if the unlocking flag passed to del_fd above is
not respected, the code will deadlock (in a way that we have a chance of
debugging) */
#ifndef NDEBUG
gpr_mu_lock(&pollset->mu);
gpr_mu_unlock(&pollset->mu);
@ -153,10 +153,10 @@ void grpc_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
void grpc_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd) {
gpr_mu_lock(&pollset->mu);
pollset->vtable->del_fd(pollset, fd, 1);
/* the following (enabled only in debug) will reacquire and then release
our lock - meaning that if the unlocking flag passed to del_fd above is
not respected, the code will deadlock (in a way that we have a chance of
debugging) */
/* the following (enabled only in debug) will reacquire and then release
our lock - meaning that if the unlocking flag passed to del_fd above is
not respected, the code will deadlock (in a way that we have a chance of
debugging) */
#ifndef NDEBUG
gpr_mu_lock(&pollset->mu);
gpr_mu_unlock(&pollset->mu);

@ -102,7 +102,8 @@ void grpc_kick_drain(grpc_pollset *p);
- longer than a millisecond polls are rounded up to the next nearest
millisecond to avoid spinning
- infinite timeouts are converted to -1 */
int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline, gpr_timespec now);
int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline,
gpr_timespec now);
/* turn a pollset into a multipoller: platform specific */
typedef void (*grpc_platform_become_multipoller_type)(grpc_pollset *pollset,

@ -56,8 +56,7 @@ static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) {
grpc_pollset_worker *w = p->root_worker.next;
remove_worker(p, w);
return w;
}
else {
} else {
return NULL;
}
}
@ -100,7 +99,8 @@ void grpc_pollset_destroy(grpc_pollset *pollset) {
gpr_mu_destroy(&pollset->mu);
}
int grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker, gpr_timespec deadline) {
int grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker,
gpr_timespec deadline) {
gpr_timespec now;
int added_worker = 0;
now = gpr_now(GPR_CLOCK_MONOTONIC);
@ -134,8 +134,8 @@ void grpc_pollset_kick(grpc_pollset *p, grpc_pollset_worker *specific_worker) {
if (specific_worker != NULL) {
if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
for (specific_worker = p->root_worker.next;
specific_worker != &p->root_worker;
specific_worker = specific_worker->next) {
specific_worker != &p->root_worker;
specific_worker = specific_worker->next) {
gpr_cv_signal(&specific_worker->cv);
}
p->kicked_without_pollers = 1;

@ -66,4 +66,4 @@ void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addresses);
grpc_resolved_addresses *grpc_blocking_resolve_address(
const char *addr, const char *default_port);
#endif /* GRPC_INTERNAL_CORE_IOMGR_RESOLVE_ADDRESS_H */
#endif /* GRPC_INTERNAL_CORE_IOMGR_RESOLVE_ADDRESS_H */

@ -105,10 +105,7 @@ grpc_resolved_addresses *grpc_blocking_resolve_address(
s = getaddrinfo(host, port, &hints, &result);
if (s != 0) {
/* Retry if well-known service name is recognized */
char *svc[][2] = {
{"http", "80"},
{"https", "443"}
};
char *svc[][2] = {{"http", "80"}, {"https", "443"}};
int i;
for (i = 0; i < (int)(sizeof(svc) / sizeof(svc[0])); i++) {
if (strcmp(port, svc[i][0]) == 0) {

@ -44,4 +44,4 @@
#include "src/core/iomgr/sockaddr_posix.h"
#endif
#endif /* GRPC_INTERNAL_CORE_IOMGR_SOCKADDR_H */
#endif /* GRPC_INTERNAL_CORE_IOMGR_SOCKADDR_H */

@ -41,4 +41,4 @@
#include <netdb.h>
#include <unistd.h>
#endif /* GRPC_INTERNAL_CORE_IOMGR_SOCKADDR_POSIX_H */
#endif /* GRPC_INTERNAL_CORE_IOMGR_SOCKADDR_POSIX_H */

@ -206,7 +206,8 @@ int grpc_sockaddr_get_port(const struct sockaddr *addr) {
case AF_UNIX:
return 1;
default:
gpr_log(GPR_ERROR, "Unknown socket family %d in grpc_sockaddr_get_port", addr->sa_family);
gpr_log(GPR_ERROR, "Unknown socket family %d in grpc_sockaddr_get_port",
addr->sa_family);
return 0;
}
}
@@ -220,7 +221,8 @@ int grpc_sockaddr_set_port(const struct sockaddr *addr, int port) {
((struct sockaddr_in6 *)addr)->sin6_port = htons(port);
return 1;
default:
gpr_log(GPR_ERROR, "Unknown socket family %d in grpc_sockaddr_set_port", addr->sa_family);
gpr_log(GPR_ERROR, "Unknown socket family %d in grpc_sockaddr_set_port",
addr->sa_family);
return 0;
}
}

@@ -86,4 +86,4 @@ int grpc_sockaddr_to_string(char **out, const struct sockaddr *addr,
char *grpc_sockaddr_to_uri(const struct sockaddr *addr);
#endif /* GRPC_INTERNAL_CORE_IOMGR_SOCKADDR_UTILS_H */
#endif /* GRPC_INTERNAL_CORE_IOMGR_SOCKADDR_UTILS_H */

@@ -43,4 +43,4 @@
const char *inet_ntop(int af, const void *src, char *dst, socklen_t size);
#endif
#endif /* GRPC_INTERNAL_CORE_IOMGR_SOCKADDR_WIN32_H */
#endif /* GRPC_INTERNAL_CORE_IOMGR_SOCKADDR_WIN32_H */

@@ -110,4 +110,4 @@ extern int grpc_forbid_dualstack_sockets_for_testing;
int grpc_create_dualstack_socket(const struct sockaddr *addr, int type,
int protocol, grpc_dualstack_mode *dsmode);
#endif /* GRPC_INTERNAL_CORE_IOMGR_SOCKET_UTILS_POSIX_H */
#endif /* GRPC_INTERNAL_CORE_IOMGR_SOCKET_UTILS_POSIX_H */

@@ -106,4 +106,4 @@ void grpc_winsocket_destroy(grpc_winsocket *winsocket) {
gpr_free(winsocket);
}
#endif /* GPR_WINSOCK_SOCKET */
#endif /* GPR_WINSOCK_SOCKET */

@@ -54,7 +54,7 @@ typedef struct grpc_winsocket_callback_info {
OVERLAPPED overlapped;
/* The callback information for the pending operation. May be empty if the
caller hasn't registered a callback yet. */
void(*cb)(void *opaque, int success);
void (*cb)(void *opaque, int success);
void *opaque;
/* A boolean to describe if the IO Completion Port got a notification for
that operation. This will happen if the operation completed before the
@@ -118,4 +118,4 @@ void grpc_winsocket_orphan(grpc_winsocket *socket);
or by grpc_winsocket_orphan if there's no pending operation. */
void grpc_winsocket_destroy(grpc_winsocket *socket);
#endif /* GRPC_INTERNAL_CORE_IOMGR_SOCKET_WINDOWS_H */
#endif /* GRPC_INTERNAL_CORE_IOMGR_SOCKET_WINDOWS_H */
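
The comment in grpc_winsocket_callback_info above points at a race that IOCP code has to live with: the completion notification and the caller's callback registration can arrive in either order, and whichever side comes second is the one that must actually invoke the callback. The sketch below shows that handoff in a platform-neutral way; the types and names are hypothetical, pthreads stands in for gpr_mu, and initialization is omitted.

#include <pthread.h>
#include <stddef.h>

/* Hypothetical per-operation state. */
typedef struct {
  pthread_mutex_t mu;
  void (*cb)(void *opaque, int success); /* set by the caller, if any */
  void *opaque;
  int completed; /* set once the completion notification arrived */
  int success;
} pending_op;

/* Runs on the completion (IOCP-style) thread. */
static void op_on_completion(pending_op *op, int success) {
  void (*cb)(void *, int) = NULL;
  void *opaque = NULL;
  pthread_mutex_lock(&op->mu);
  op->completed = 1;
  op->success = success;
  if (op->cb != NULL) { cb = op->cb; opaque = op->opaque; op->cb = NULL; }
  pthread_mutex_unlock(&op->mu);
  if (cb != NULL) cb(opaque, success); /* registration happened first */
}

/* Runs when the caller registers its callback; may be before or after the
   completion notification. */
static void op_notify(pending_op *op, void (*cb)(void *, int), void *opaque) {
  int fire = 0, success = 0;
  pthread_mutex_lock(&op->mu);
  if (op->completed) { fire = 1; success = op->success; }
  else { op->cb = cb; op->opaque = opaque; }
  pthread_mutex_unlock(&op->mu);
  if (fire) cb(opaque, success); /* completion beat the registration */
}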

@@ -41,7 +41,7 @@
/* Asynchronously connect to an address (specified as (addr, len)), and call
cb with arg and the completed connection when done (or call cb with arg and
NULL on failure).
NULL on failure).
interested_parties points to a set of pollsets that would be interested
in this connection being established (in order to continue their work) */
void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *tcp),

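The contract in the comment above is that the connect callback fires exactly once with either the established endpoint or NULL, so the caller distinguishes success from failure by the pointer alone. Below is a minimal callback written against that contract; the on_connected name and connect_state struct are illustrative, and the include path assumes the declaration lives in src/core/iomgr/tcp_client.h as the hunk suggests.

#include <grpc/support/log.h>
#include "src/core/iomgr/tcp_client.h"

typedef struct {
  int connected; /* whatever state the caller needs to resume its work */
} connect_state;

static void on_connected(void *arg, grpc_endpoint *tcp) {
  connect_state *state = (connect_state *)arg;
  if (tcp == NULL) {
    /* Failure: no endpoint was established. */
    gpr_log(GPR_ERROR, "tcp connect failed");
    state->connected = 0;
    return;
  }
  /* Success: the endpoint is ready to be handed to a transport (or shut
     down if the caller no longer needs it). */
  state->connected = 1;
}
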
@@ -264,7 +264,8 @@ void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *ep),
ac->write_closure.cb_arg = ac;
gpr_mu_lock(&ac->mu);
grpc_alarm_init(&ac->alarm, gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
grpc_alarm_init(&ac->alarm,
gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
tc_on_alarm, ac, gpr_now(GPR_CLOCK_MONOTONIC));
grpc_fd_notify_on_write(ac->fd, &ac->write_closure);
gpr_mu_unlock(&ac->mu);

@@ -572,7 +572,8 @@ static void grpc_tcp_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) {
grpc_pollset_add_fd(pollset, tcp->em_fd);
}
static void grpc_tcp_add_to_pollset_set(grpc_endpoint *ep, grpc_pollset_set *pollset_set) {
static void grpc_tcp_add_to_pollset_set(grpc_endpoint *ep,
grpc_pollset_set *pollset_set) {
grpc_tcp *tcp = (grpc_tcp *)ep;
grpc_pollset_set_add_fd(pollset_set, tcp->em_fd);
}

@@ -56,4 +56,4 @@ extern int grpc_tcp_trace;
grpc_endpoint *grpc_tcp_create(grpc_fd *fd, size_t read_slice_size,
const char *peer_string);
#endif /* GRPC_INTERNAL_CORE_IOMGR_TCP_POSIX_H */
#endif /* GRPC_INTERNAL_CORE_IOMGR_TCP_POSIX_H */

@@ -79,7 +79,8 @@ struct grpc_tcp_server {
/* active port count: how many ports are actually still listening */
int active_ports;
/* number of iomgr callbacks that have been explicitly scheduled during shutdown */
/* number of iomgr callbacks that have been explicitly scheduled during
* shutdown */
int iomgr_callbacks_pending;
/* all listening ports */
@@ -292,7 +293,7 @@ static void on_accept(void *arg, int from_iocp) {
and act accordingly. */
transfered_bytes = 0;
wsa_success = WSAGetOverlappedResult(sock, &info->overlapped,
&transfered_bytes, FALSE, &flags);
&transfered_bytes, FALSE, &flags);
if (!wsa_success) {
if (sp->shutting_down) {
/* During the shutdown case, we ARE expecting an error. So that's well,
@@ -309,16 +310,15 @@ static void on_accept(void *arg, int from_iocp) {
if (!sp->shutting_down) {
peer_name_string = NULL;
err = setsockopt(sock, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
(char *)&sp->socket->socket,
sizeof(sp->socket->socket));
(char *)&sp->socket->socket, sizeof(sp->socket->socket));
if (err) {
char *utf8_message = gpr_format_message(WSAGetLastError());
gpr_log(GPR_ERROR, "setsockopt error: %s", utf8_message);
gpr_free(utf8_message);
}
err = getpeername(sock, (struct sockaddr*)&peer_name, &peer_name_len);
err = getpeername(sock, (struct sockaddr *)&peer_name, &peer_name_len);
if (!err) {
peer_name_string = grpc_sockaddr_to_uri((struct sockaddr*)&peer_name);
peer_name_string = grpc_sockaddr_to_uri((struct sockaddr *)&peer_name);
} else {
char *utf8_message = gpr_format_message(WSAGetLastError());
gpr_log(GPR_ERROR, "getpeername error: %s", utf8_message);

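One detail worth calling out in the on_accept hunk above: a socket produced by AcceptEx is not fully usable until SO_UPDATE_ACCEPT_CONTEXT has been set with the listening socket, which is why that setsockopt call precedes the getpeername that fetches the peer address. The sketch below strips that sequence down to its two calls; it is Windows-only, and gRPC's gpr_format_message logging is replaced with plain stderr output.

#include <winsock2.h>
#include <ws2tcpip.h>
#include <mswsock.h>
#include <stdio.h>

/* accepted: the socket returned through AcceptEx; listener: the listening
   socket that accepted it. */
static void finish_accept(SOCKET accepted, SOCKET listener) {
  struct sockaddr_storage peer;
  int peer_len = sizeof(peer);
  if (setsockopt(accepted, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
                 (const char *)&listener, sizeof(listener)) != 0) {
    fprintf(stderr, "setsockopt error: %d\n", WSAGetLastError());
    return;
  }
  /* Only after the accept context is updated does getpeername report the
     remote address of the new connection. */
  if (getpeername(accepted, (struct sockaddr *)&peer, &peer_len) != 0) {
    fprintf(stderr, "getpeername error: %d\n", WSAGetLastError());
  }
}
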
@@ -55,24 +55,22 @@ static int set_non_block(SOCKET sock) {
int status;
unsigned long param = 1;
DWORD ret;
status = WSAIoctl(sock, FIONBIO, &param, sizeof(param), NULL, 0, &ret,
NULL, NULL);
status =
WSAIoctl(sock, FIONBIO, &param, sizeof(param), NULL, 0, &ret, NULL, NULL);
return status == 0;
}
static int set_dualstack(SOCKET sock) {
int status;
unsigned long param = 0;
status = setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY,
(const char *) &param, sizeof(param));
status = setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, (const char *)&param,
sizeof(param));
return status == 0;
}
int grpc_tcp_prepare_socket(SOCKET sock) {
if (!set_non_block(sock))
return 0;
if (!set_dualstack(sock))
return 0;
if (!set_non_block(sock)) return 0;
if (!set_dualstack(sock)) return 0;
return 1;
}
@@ -100,9 +98,7 @@ typedef struct grpc_tcp {
char *peer_string;
} grpc_tcp;
static void tcp_ref(grpc_tcp *tcp) {
gpr_ref(&tcp->refcount);
}
static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
static void tcp_unref(grpc_tcp *tcp) {
if (gpr_unref(&tcp->refcount)) {
@@ -116,7 +112,7 @@ static void tcp_unref(grpc_tcp *tcp) {
/* Asynchronous callback from the IOCP, or the background thread. */
static void on_read(void *tcpp, int from_iocp) {
grpc_tcp *tcp = (grpc_tcp *) tcpp;
grpc_tcp *tcp = (grpc_tcp *)tcpp;
grpc_winsocket *socket = tcp->socket;
gpr_slice sub;
gpr_slice *slice = NULL;
@@ -175,9 +171,9 @@ static void on_read(void *tcpp, int from_iocp) {
cb(opaque, slice, nslices, status);
}
static void win_notify_on_read(grpc_endpoint *ep,
grpc_endpoint_read_cb cb, void *arg) {
grpc_tcp *tcp = (grpc_tcp *) ep;
static void win_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb,
void *arg) {
grpc_tcp *tcp = (grpc_tcp *)ep;
grpc_winsocket *handle = tcp->socket;
grpc_winsocket_callback_info *info = &handle->read_info;
int status;
@@ -201,8 +197,8 @@ static void win_notify_on_read(grpc_endpoint *ep,
buffer.buf = (char *)GPR_SLICE_START_PTR(tcp->read_slice);
/* First let's try a synchronous, non-blocking read. */
status = WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags,
NULL, NULL);
status =
WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags, NULL, NULL);
info->wsa_error = status == 0 ? 0 : WSAGetLastError();
/* Did we get data immediately ? Yay. */
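
The hunk above is the fast path of the read: a plain WSARecv with no OVERLAPPED on the non-blocking socket either returns data immediately, avoiding a round trip through the completion port, or fails with WSAEWOULDBLOCK, after which an overlapped receive is posted and the IOCP reports the result later. The sketch below compresses that two-step pattern; it is Windows-only, and buffer setup, refcounting, and result delivery are omitted.

#include <winsock2.h>

/* Returns 1 if the outcome (data or a hard error) is known synchronously,
   0 if an overlapped receive was posted for the IOCP to complete. */
static int try_read(SOCKET s, WSABUF *buf, WSAOVERLAPPED *overlapped) {
  DWORD bytes = 0, flags = 0;
  int status = WSARecv(s, buf, 1, &bytes, &flags, NULL, NULL);
  if (status == 0) return 1;                          /* data right away */
  if (WSAGetLastError() != WSAEWOULDBLOCK) return 1;  /* hard error */
  /* Nothing available yet: queue an overlapped read and let the completion
     port invoke the read callback when it finishes. */
  flags = 0;
  status = WSARecv(s, buf, 1, &bytes, &flags, overlapped, NULL);
  if (status != 0 && WSAGetLastError() != WSA_IO_PENDING) return 1;
  return 0;
}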
@@ -232,7 +228,7 @@ static void win_notify_on_read(grpc_endpoint *ep,
/* Asynchronous callback from the IOCP, or the background thread. */
static void on_write(void *tcpp, int from_iocp) {
grpc_tcp *tcp = (grpc_tcp *) tcpp;
grpc_tcp *tcp = (grpc_tcp *)tcpp;
grpc_winsocket *handle = tcp->socket;
grpc_winsocket_callback_info *info = &handle->write_info;
grpc_endpoint_cb_status status = GRPC_ENDPOINT_CB_OK;
@@ -286,7 +282,7 @@ static grpc_endpoint_write_status win_write(grpc_endpoint *ep,
gpr_slice *slices, size_t nslices,
grpc_endpoint_write_cb cb,
void *arg) {
grpc_tcp *tcp = (grpc_tcp *) ep;
grpc_tcp *tcp = (grpc_tcp *)ep;
grpc_winsocket *socket = tcp->socket;
grpc_winsocket_callback_info *info = &socket->write_info;
unsigned i;
@@ -309,7 +305,7 @@ static grpc_endpoint_write_status win_write(grpc_endpoint *ep,
gpr_slice_buffer_addn(&tcp->write_slices, slices, nslices);
if (tcp->write_slices.count > GPR_ARRAY_SIZE(local_buffers)) {
buffers = (WSABUF *) gpr_malloc(sizeof(WSABUF) * tcp->write_slices.count);
buffers = (WSABUF *)gpr_malloc(sizeof(WSABUF) * tcp->write_slices.count);
allocated = buffers;
}
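
The local_buffers/allocated pairing above is a small-buffer optimization: the common write fits into a fixed stack array of WSABUFs, and gpr_malloc is reached only when a call carries more slices than that array holds. The same idea in isolation is sketched below with generic types; the struct, names, and threshold are illustrative.

#include <stdlib.h>
#include <string.h>

#define LOCAL_BUFFERS 16

typedef struct { void *ptr; size_t len; } iovec_like;

/* Gathers `count` entries into one array, using the stack when possible. */
static void write_vector(const iovec_like *slices, size_t count) {
  iovec_like local[LOCAL_BUFFERS];
  iovec_like *bufs = local;
  iovec_like *allocated = NULL;
  if (count > LOCAL_BUFFERS) {
    allocated = malloc(sizeof(*allocated) * count);
    if (allocated == NULL) return;
    bufs = allocated;
  }
  memcpy(bufs, slices, sizeof(*bufs) * count);
  /* ... hand bufs/count to the scatter-gather write here ... */
  free(allocated); /* no-op when the stack array was used */
}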
@@ -370,15 +366,15 @@ static grpc_endpoint_write_status win_write(grpc_endpoint *ep,
static void win_add_to_pollset(grpc_endpoint *ep, grpc_pollset *ps) {
grpc_tcp *tcp;
(void) ps;
tcp = (grpc_tcp *) ep;
(void)ps;
tcp = (grpc_tcp *)ep;
grpc_iocp_add_socket(tcp->socket);
}
static void win_add_to_pollset_set(grpc_endpoint *ep, grpc_pollset_set *pss) {
grpc_tcp *tcp;
(void) pss;
tcp = (grpc_tcp *) ep;
(void)pss;
tcp = (grpc_tcp *)ep;
grpc_iocp_add_socket(tcp->socket);
}
@@ -389,7 +385,7 @@ static void win_add_to_pollset_set(grpc_endpoint *ep, grpc_pollset_set *pss) {
callback will happen from another thread, so we need to protect against
concurrent access of the data structure in that regard. */
static void win_shutdown(grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *) ep;
grpc_tcp *tcp = (grpc_tcp *)ep;
int extra_refs = 0;
gpr_mu_lock(&tcp->mu);
/* At that point, what may happen is that we're already inside the IOCP
@@ -401,7 +397,7 @@ static void win_shutdown(grpc_endpoint *ep) {
}
static void win_destroy(grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *) ep;
grpc_tcp *tcp = (grpc_tcp *)ep;
tcp_unref(tcp);
}
@@ -410,13 +406,12 @@ static char *win_get_peer(grpc_endpoint *ep) {
return gpr_strdup(tcp->peer_string);
}
static grpc_endpoint_vtable vtable = {win_notify_on_read, win_write,
win_add_to_pollset, win_add_to_pollset_set,
win_shutdown, win_destroy,
win_get_peer};
static grpc_endpoint_vtable vtable = {
win_notify_on_read, win_write, win_add_to_pollset, win_add_to_pollset_set,
win_shutdown, win_destroy, win_get_peer};
grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string) {
grpc_tcp *tcp = (grpc_tcp *) gpr_malloc(sizeof(grpc_tcp));
grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
memset(tcp, 0, sizeof(grpc_tcp));
tcp->base.vtable = &vtable;
tcp->socket = socket;
@@ -427,4 +422,4 @@ grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string) {
return &tcp->base;
}
#endif /* GPR_WINSOCK_SOCKET */
#endif /* GPR_WINSOCK_SOCKET */

@@ -54,4 +54,4 @@ grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string);
int grpc_tcp_prepare_socket(SOCKET sock);
#endif /* GRPC_INTERNAL_CORE_IOMGR_TCP_WINDOWS_H */
#endif /* GRPC_INTERNAL_CORE_IOMGR_TCP_WINDOWS_H */

@@ -85,4 +85,4 @@ void grpc_time_averaged_stats_add_sample(grpc_time_averaged_stats *stats,
value. */
double grpc_time_averaged_stats_update_average(grpc_time_averaged_stats *stats);
#endif /* GRPC_INTERNAL_CORE_IOMGR_TIME_AVERAGED_STATS_H */
#endif /* GRPC_INTERNAL_CORE_IOMGR_TIME_AVERAGED_STATS_H */
