Merge branch 'master' of github.com:grpc/grpc into decompression

pull/2135/head
David Garcia Quintas 10 years ago
commit e75d4c8531
Files changed (changed line count in parentheses):
  1. BUILD (24)
  2. Makefile (225)
  3. build.json (55)
  4. gRPC.podspec (39)
  5. include/grpc++/async_unary_call.h (56)
  6. include/grpc++/byte_buffer.h (29)
  7. include/grpc++/client_context.h (10)
  8. include/grpc++/completion_queue.h (30)
  9. include/grpc++/config.h (43)
  10. include/grpc++/config_protobuf.h (72)
  11. include/grpc++/impl/call.h (427)
  12. include/grpc++/impl/client_unary_call.h (26)
  13. include/grpc++/impl/proto_utils.h (76)
  14. include/grpc++/impl/rpc_service_method.h (111)
  15. include/grpc++/impl/serialization_traits.h (68)
  16. include/grpc++/impl/service_type.h (41)
  17. include/grpc++/server.h (126)
  18. include/grpc++/server_context.h (16)
  19. include/grpc++/stream.h (466)
  20. include/grpc/byte_buffer.h (3)
  21. include/grpc/byte_buffer_reader.h (2)
  22. include/grpc/compression.h (2)
  23. include/grpc/grpc.h (65)
  24. include/grpc/grpc_security.h (5)
  25. include/grpc/support/port_platform.h (26)
  26. include/grpc/support/slice.h (6)
  27. include/grpc/support/tls_pthread.h (5)
  28. src/compiler/config.h (7)
  29. src/compiler/cpp_generator.cc (194)
  30. src/core/channel/channel_stack.c (6)
  31. src/core/channel/child_channel.c (7)
  32. src/core/channel/client_channel.c (45)
  33. src/core/channel/client_setup.c (95)
  34. src/core/channel/client_setup.h (12)
  35. src/core/compression/message_compress.h (2)
  36. src/core/httpcli/httpcli.c (38)
  37. src/core/httpcli/httpcli.h (38)
  38. src/core/iomgr/fd_posix.c (64)
  39. src/core/iomgr/fd_posix.h (22)
  40. src/core/iomgr/iomgr.c (40)
  41. src/core/iomgr/pollset.h (11)
  42. src/core/iomgr/pollset_kick_posix.c (41)
  43. src/core/iomgr/pollset_kick_posix.h (46)
  44. src/core/iomgr/pollset_multipoller_with_epoll.c (23)
  45. src/core/iomgr/pollset_multipoller_with_poll_posix.c (58)
  46. src/core/iomgr/pollset_posix.c (282)
  47. src/core/iomgr/pollset_posix.h (19)
  48. src/core/iomgr/pollset_set.h (49)
  49. src/core/iomgr/pollset_set_posix.c (125)
  50. src/core/iomgr/pollset_set_posix.h (55)
  51. src/core/iomgr/pollset_set_windows.c (25)
  52. src/core/iomgr/pollset_set_windows.h (17)
  53. src/core/iomgr/pollset_windows.c (9)
  54. src/core/iomgr/pollset_windows.h (7)
  55. src/core/iomgr/tcp_client.h (12)
  56. src/core/iomgr/tcp_client_posix.c (26)
  57. src/core/iomgr/tcp_client_windows.c (25)
  58. src/core/iomgr/tcp_posix.c (7)
  59. src/core/iomgr/tcp_server_posix.c (89)
  60. src/core/json/json.h (2)
  61. src/core/json/json_reader.c (12)
  62. src/core/json/json_string.c (22)
  63. src/core/json/json_writer.c (14)
  64. src/core/security/client_auth_filter.c (30)
  65. src/core/security/credentials.c (64)
  66. src/core/security/credentials.h (11)
  67. src/core/security/google_default_credentials.c (33)
  68. src/core/security/security_connector.c (32)
  69. src/core/security/security_connector.h (3)
  70. src/core/security/server_secure_chttp2.c (16)
  71. src/core/support/cmdline.c (6)
  72. src/core/support/cpu_linux.c (8)
  73. src/core/support/file.c (3)
  74. src/core/support/histogram.c (6)
  75. src/core/support/host_port.c (4)
  76. src/core/support/log_win32.c (17)
  77. src/core/support/murmur_hash.c (14)
  78. src/core/support/slice.c (28)
  79. src/core/support/slice_buffer.c (8)
  80. src/core/support/string.c (6)
  81. src/core/support/subprocess_posix.c (4)
  82. src/core/support/time.c (16)
  83. src/core/support/time_posix.c (2)
  84. src/core/surface/byte_buffer_reader.c (8)
  85. src/core/surface/call.c (141)
  86. src/core/surface/call.h (25)
  87. src/core/surface/channel.c (20)
  88. src/core/surface/channel.h (13)
  89. src/core/surface/channel_create.c (16)
  90. src/core/surface/completion_queue.c (61)
  91. src/core/surface/completion_queue.h (15)
  92. src/core/surface/lame_client.c (9)
  93. src/core/surface/secure_channel_create.c (18)
  94. src/core/surface/server.c (274)
  95. src/core/transport/chttp2_transport.c (93)
  96. src/core/transport/stream_op.c (58)
  97. src/core/transport/stream_op.h (47)
  98. src/core/transport/transport.c (13)
  99. src/core/transport/transport.h (14)
  100. src/core/tsi/ssl_transport_security.c (130)
Some files were not shown because too many files have changed in this diff.

BUILD (24)

@@ -2,6 +2,8 @@
# This currently builds C and C++ code.
# This file has been automatically generated from a template file.
# Please look at the templates directory instead.
# This file can be regenerated from the template by running
# tools/buildgen/generate_projects.sh
# Copyright 2015, Google Inc.
# All rights reserved.
@@ -170,10 +172,11 @@ cc_library(
"src/core/iomgr/iomgr_internal.h",
"src/core/iomgr/iomgr_posix.h",
"src/core/iomgr/pollset.h",
"src/core/iomgr/pollset_kick.h",
"src/core/iomgr/pollset_kick_posix.h",
"src/core/iomgr/pollset_kick_windows.h",
"src/core/iomgr/pollset_posix.h",
"src/core/iomgr/pollset_set.h",
"src/core/iomgr/pollset_set_posix.h",
"src/core/iomgr/pollset_set_windows.h",
"src/core/iomgr/pollset_windows.h",
"src/core/iomgr/resolve_address.h",
"src/core/iomgr/sockaddr.h",
@@ -275,10 +278,12 @@ cc_library(
"src/core/iomgr/iomgr.c",
"src/core/iomgr/iomgr_posix.c",
"src/core/iomgr/iomgr_windows.c",
"src/core/iomgr/pollset_kick.c",
"src/core/iomgr/pollset_kick_posix.c",
"src/core/iomgr/pollset_multipoller_with_epoll.c",
"src/core/iomgr/pollset_multipoller_with_poll_posix.c",
"src/core/iomgr/pollset_posix.c",
"src/core/iomgr/pollset_set_posix.c",
"src/core/iomgr/pollset_set_windows.c",
"src/core/iomgr/pollset_windows.c",
"src/core/iomgr/resolve_address_posix.c",
"src/core/iomgr/resolve_address_windows.c",
@@ -395,10 +400,11 @@ cc_library(
"src/core/iomgr/iomgr_internal.h",
"src/core/iomgr/iomgr_posix.h",
"src/core/iomgr/pollset.h",
"src/core/iomgr/pollset_kick.h",
"src/core/iomgr/pollset_kick_posix.h",
"src/core/iomgr/pollset_kick_windows.h",
"src/core/iomgr/pollset_posix.h",
"src/core/iomgr/pollset_set.h",
"src/core/iomgr/pollset_set_posix.h",
"src/core/iomgr/pollset_set_windows.h",
"src/core/iomgr/pollset_windows.h",
"src/core/iomgr/resolve_address.h",
"src/core/iomgr/sockaddr.h",
@@ -478,10 +484,12 @@ cc_library(
"src/core/iomgr/iomgr.c",
"src/core/iomgr/iomgr_posix.c",
"src/core/iomgr/iomgr_windows.c",
"src/core/iomgr/pollset_kick.c",
"src/core/iomgr/pollset_kick_posix.c",
"src/core/iomgr/pollset_multipoller_with_epoll.c",
"src/core/iomgr/pollset_multipoller_with_poll_posix.c",
"src/core/iomgr/pollset_posix.c",
"src/core/iomgr/pollset_set_posix.c",
"src/core/iomgr/pollset_set_windows.c",
"src/core/iomgr/pollset_windows.c",
"src/core/iomgr/resolve_address_posix.c",
"src/core/iomgr/resolve_address_windows.c",
@@ -580,7 +588,6 @@ cc_library(
"src/cpp/client/channel.cc",
"src/cpp/client/channel_arguments.cc",
"src/cpp/client/client_context.cc",
"src/cpp/client/client_unary_call.cc",
"src/cpp/client/create_channel.cc",
"src/cpp/client/credentials.cc",
"src/cpp/client/generic_stub.cc",
@@ -621,6 +628,7 @@ cc_library(
"include/grpc++/impl/internal_stub.h",
"include/grpc++/impl/rpc_method.h",
"include/grpc++/impl/rpc_service_method.h",
"include/grpc++/impl/serialization_traits.h",
"include/grpc++/impl/service_type.h",
"include/grpc++/impl/sync.h",
"include/grpc++/impl/sync_cxx11.h",
@@ -660,7 +668,6 @@ cc_library(
"src/cpp/client/channel.cc",
"src/cpp/client/channel_arguments.cc",
"src/cpp/client/client_context.cc",
"src/cpp/client/client_unary_call.cc",
"src/cpp/client/create_channel.cc",
"src/cpp/client/credentials.cc",
"src/cpp/client/generic_stub.cc",
@@ -701,6 +708,7 @@ cc_library(
"include/grpc++/impl/internal_stub.h",
"include/grpc++/impl/rpc_method.h",
"include/grpc++/impl/rpc_service_method.h",
"include/grpc++/impl/serialization_traits.h",
"include/grpc++/impl/service_type.h",
"include/grpc++/impl/sync.h",
"include/grpc++/impl/sync_cxx11.h",

Makefile (225): file diff suppressed because one or more lines are too long.

build.json (55)

@@ -45,6 +45,7 @@
"include/grpc++/impl/internal_stub.h",
"include/grpc++/impl/rpc_method.h",
"include/grpc++/impl/rpc_service_method.h",
"include/grpc++/impl/serialization_traits.h",
"include/grpc++/impl/service_type.h",
"include/grpc++/impl/sync.h",
"include/grpc++/impl/sync_cxx11.h",
@@ -72,7 +73,6 @@
"src/cpp/client/channel.cc",
"src/cpp/client/channel_arguments.cc",
"src/cpp/client/client_context.cc",
"src/cpp/client/client_unary_call.cc",
"src/cpp/client/create_channel.cc",
"src/cpp/client/credentials.cc",
"src/cpp/client/generic_stub.cc",
@@ -132,10 +132,11 @@
"src/core/iomgr/iomgr_internal.h",
"src/core/iomgr/iomgr_posix.h",
"src/core/iomgr/pollset.h",
"src/core/iomgr/pollset_kick.h",
"src/core/iomgr/pollset_kick_posix.h",
"src/core/iomgr/pollset_kick_windows.h",
"src/core/iomgr/pollset_posix.h",
"src/core/iomgr/pollset_set.h",
"src/core/iomgr/pollset_set_posix.h",
"src/core/iomgr/pollset_set_windows.h",
"src/core/iomgr/pollset_windows.h",
"src/core/iomgr/resolve_address.h",
"src/core/iomgr/sockaddr.h",
@@ -215,10 +216,12 @@
"src/core/iomgr/iomgr.c",
"src/core/iomgr/iomgr_posix.c",
"src/core/iomgr/iomgr_windows.c",
"src/core/iomgr/pollset_kick.c",
"src/core/iomgr/pollset_kick_posix.c",
"src/core/iomgr/pollset_multipoller_with_epoll.c",
"src/core/iomgr/pollset_multipoller_with_poll_posix.c",
"src/core/iomgr/pollset_posix.c",
"src/core/iomgr/pollset_set_posix.c",
"src/core/iomgr/pollset_set_windows.c",
"src/core/iomgr/pollset_windows.c",
"src/core/iomgr/resolve_address_posix.c",
"src/core/iomgr/resolve_address_windows.c",
@@ -1283,6 +1286,20 @@
"gpr"
]
},
{
"name": "grpc_security_connector_test",
"build": "test",
"language": "c",
"src": [
"test/core/security/security_connector_test.c"
],
"deps": [
"grpc_test_util",
"grpc",
"gpr_test_util",
"gpr"
]
},
{
"name": "grpc_stream_op_test",
"build": "test",
@@ -1814,6 +1831,36 @@
"gpr"
]
},
{
"name": "cxx_byte_buffer_test",
"build": "test",
"language": "c++",
"src": [
"test/cpp/util/byte_buffer_test.cc"
],
"deps": [
"grpc_test_util",
"grpc++",
"grpc",
"gpr_test_util",
"gpr"
]
},
{
"name": "cxx_slice_test",
"build": "test",
"language": "c++",
"src": [
"test/cpp/util/slice_test.cc"
],
"deps": [
"grpc_test_util",
"grpc++",
"grpc",
"gpr_test_util",
"gpr"
]
},
{
"name": "cxx_time_test",
"build": "test",

gRPC.podspec (39): file diff suppressed because one or more lines are too long.

include/grpc++/async_unary_call.h (56)

@@ -51,47 +51,50 @@ class ClientAsyncResponseReaderInterface {
virtual ~ClientAsyncResponseReaderInterface() {}
virtual void ReadInitialMetadata(void* tag) = 0;
virtual void Finish(R* msg, Status* status, void* tag) = 0;
};
template <class R>
class ClientAsyncResponseReader GRPC_FINAL
: public ClientAsyncResponseReaderInterface<R> {
public:
template <class W>
ClientAsyncResponseReader(ChannelInterface* channel, CompletionQueue* cq,
const RpcMethod& method, ClientContext* context,
const grpc::protobuf::Message& request)
const W& request)
: context_(context), call_(channel->CreateCall(method, context, cq)) {
init_buf_.AddSendInitialMetadata(&context->send_initial_metadata_);
init_buf_.AddSendMessage(request);
init_buf_.AddClientSendClose();
init_buf_.SendInitialMetadata(context->send_initial_metadata_);
// TODO(ctiller): don't assert
GPR_ASSERT(init_buf_.SendMessage(request).ok());
init_buf_.ClientSendClose();
call_.PerformOps(&init_buf_);
}
void ReadInitialMetadata(void* tag) {
GPR_ASSERT(!context_->initial_metadata_received_);
meta_buf_.Reset(tag);
meta_buf_.AddRecvInitialMetadata(context_);
meta_buf_.set_output_tag(tag);
meta_buf_.RecvInitialMetadata(context_);
call_.PerformOps(&meta_buf_);
}
void Finish(R* msg, Status* status, void* tag) {
finish_buf_.Reset(tag);
finish_buf_.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
finish_buf_.AddRecvInitialMetadata(context_);
finish_buf_.RecvInitialMetadata(context_);
}
finish_buf_.AddRecvMessage(msg);
finish_buf_.AddClientRecvStatus(context_, status);
finish_buf_.RecvMessage(msg);
finish_buf_.ClientRecvStatus(context_, status);
call_.PerformOps(&finish_buf_);
}
private:
ClientContext* context_;
Call call_;
SneakyCallOpBuffer init_buf_;
CallOpBuffer meta_buf_;
CallOpBuffer finish_buf_;
SneakyCallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpClientSendClose> init_buf_;
CallOpSet<CallOpRecvInitialMetadata> meta_buf_;
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>,
CallOpClientRecvStatus> finish_buf_;
};
template <class W>
@@ -104,34 +107,36 @@ class ServerAsyncResponseWriter GRPC_FINAL
void SendInitialMetadata(void* tag) GRPC_OVERRIDE {
GPR_ASSERT(!ctx_->sent_initial_metadata_);
meta_buf_.Reset(tag);
meta_buf_.AddSendInitialMetadata(&ctx_->initial_metadata_);
meta_buf_.set_output_tag(tag);
meta_buf_.SendInitialMetadata(ctx_->initial_metadata_);
ctx_->sent_initial_metadata_ = true;
call_.PerformOps(&meta_buf_);
}
void Finish(const W& msg, const Status& status, void* tag) {
finish_buf_.Reset(tag);
finish_buf_.set_output_tag(tag);
if (!ctx_->sent_initial_metadata_) {
finish_buf_.AddSendInitialMetadata(&ctx_->initial_metadata_);
finish_buf_.SendInitialMetadata(ctx_->initial_metadata_);
ctx_->sent_initial_metadata_ = true;
}
// The response is dropped if the status is not OK.
if (status.ok()) {
finish_buf_.AddSendMessage(msg);
finish_buf_.ServerSendStatus(
ctx_->trailing_metadata_, finish_buf_.SendMessage(msg));
} else {
finish_buf_.ServerSendStatus(ctx_->trailing_metadata_, status);
}
finish_buf_.AddServerSendStatus(&ctx_->trailing_metadata_, status);
call_.PerformOps(&finish_buf_);
}
void FinishWithError(const Status& status, void* tag) {
GPR_ASSERT(!status.ok());
finish_buf_.Reset(tag);
finish_buf_.set_output_tag(tag);
if (!ctx_->sent_initial_metadata_) {
finish_buf_.AddSendInitialMetadata(&ctx_->initial_metadata_);
finish_buf_.SendInitialMetadata(ctx_->initial_metadata_);
ctx_->sent_initial_metadata_ = true;
}
finish_buf_.AddServerSendStatus(&ctx_->trailing_metadata_, status);
finish_buf_.ServerSendStatus(ctx_->trailing_metadata_, status);
call_.PerformOps(&finish_buf_);
}
@@ -140,8 +145,9 @@ class ServerAsyncResponseWriter GRPC_FINAL
Call call_;
ServerContext* ctx_;
CallOpBuffer meta_buf_;
CallOpBuffer finish_buf_;
CallOpSet<CallOpSendInitialMetadata> meta_buf_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpServerSendStatus> finish_buf_;
};
} // namespace grpc

include/grpc++/byte_buffer.h (29)

@@ -39,6 +39,8 @@
#include <grpc/support/log.h>
#include <grpc++/config.h>
#include <grpc++/slice.h>
#include <grpc++/status.h>
#include <grpc++/impl/serialization_traits.h>
#include <vector>
@@ -48,7 +50,7 @@ class ByteBuffer GRPC_FINAL {
public:
ByteBuffer() : buffer_(nullptr) {}
ByteBuffer(Slice* slices, size_t nslices);
ByteBuffer(const Slice* slices, size_t nslices);
~ByteBuffer() {
if (buffer_) {
@@ -56,13 +58,16 @@ class ByteBuffer GRPC_FINAL {
}
}
void Dump(std::vector<Slice>* slices);
void Dump(std::vector<Slice>* slices) const;
void Clear();
size_t Length();
size_t Length() const;
private:
friend class CallOpBuffer;
friend class SerializationTraits<ByteBuffer, void>;
ByteBuffer(const ByteBuffer&);
ByteBuffer& operator=(const ByteBuffer&);
// takes ownership
void set_buffer(grpc_byte_buffer* buf) {
@@ -78,6 +83,22 @@
grpc_byte_buffer* buffer_;
};
template <>
class SerializationTraits<ByteBuffer, void> {
public:
static Status Deserialize(grpc_byte_buffer* byte_buffer, ByteBuffer* dest,
int max_message_size) {
dest->set_buffer(byte_buffer);
return Status::OK;
}
static Status Serialize(const ByteBuffer& source, grpc_byte_buffer** buffer,
bool* own_buffer) {
*buffer = source.buffer();
*own_buffer = false;
return Status::OK;
}
};
} // namespace grpc
#endif // GRPCXX_BYTE_BUFFER_H

include/grpc++/client_context.h (10)

@@ -49,7 +49,6 @@ struct grpc_completion_queue;
namespace grpc {
class CallOpBuffer;
class ChannelInterface;
class CompletionQueue;
class Credentials;
@@ -115,7 +114,8 @@ class ClientContext {
ClientContext(const ClientContext&);
ClientContext& operator=(const ClientContext&);
friend class CallOpBuffer;
friend class CallOpClientRecvStatus;
friend class CallOpRecvInitialMetadata;
friend class Channel;
template <class R>
friend class ::grpc::ClientReader;
@@ -131,6 +131,12 @@
friend class ::grpc::ClientAsyncReaderWriter;
template <class R>
friend class ::grpc::ClientAsyncResponseReader;
template <class InputMessage, class OutputMessage>
friend Status BlockingUnaryCall(ChannelInterface* channel,
const RpcMethod& method,
ClientContext* context,
const InputMessage& request,
OutputMessage* result);
grpc_call* call() { return call_; }
void set_call(grpc_call* call,

include/grpc++/completion_queue.h (30)

@@ -35,7 +35,6 @@
#define GRPCXX_COMPLETION_QUEUE_H
#include <grpc/support/time.h>
#include <grpc++/impl/client_unary_call.h>
#include <grpc++/impl/grpc_library.h>
#include <grpc++/time.h>
@@ -55,11 +54,23 @@ template <class W>
class ServerWriter;
template <class R, class W>
class ServerReaderWriter;
template <class ServiceType, class RequestType, class ResponseType>
class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
class ClientStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
class ServerStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
class BidiStreamingHandler;
class ChannelInterface;
class ClientContext;
class CompletionQueue;
class RpcMethod;
class Server;
class ServerBuilder;
class ServerContext;
class Status;
class CompletionQueueTag {
public:
@@ -84,7 +95,7 @@ class CompletionQueue : public GrpcLibrary {
// Nonblocking (until deadline) read from queue.
// Cannot rely on result of tag or ok if return is TIMEOUT
template<typename T>
template <typename T>
NextStatus AsyncNext(void** tag, bool* ok, const T& deadline) {
TimePoint<T> deadline_tp(deadline);
return AsyncNextInternal(tag, ok, deadline_tp.raw_time());
@@ -118,13 +129,22 @@ class CompletionQueue : public GrpcLibrary {
friend class ::grpc::ServerWriter;
template <class R, class W>
friend class ::grpc::ServerReaderWriter;
template <class ServiceType, class RequestType, class ResponseType>
friend class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ClientStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ServerStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class BidiStreamingHandler;
friend class ::grpc::Server;
friend class ::grpc::ServerContext;
template <class InputMessage, class OutputMessage>
friend Status BlockingUnaryCall(ChannelInterface* channel,
const RpcMethod& method,
ClientContext* context,
const grpc::protobuf::Message& request,
grpc::protobuf::Message* result);
const InputMessage& request,
OutputMessage* result);
NextStatus AsyncNextInternal(void** tag, bool* ok, gpr_timespec deadline);

include/grpc++/config.h (43)

@@ -77,31 +77,6 @@
#define GRPC_OVERRIDE override
#endif
#ifndef GRPC_CUSTOM_PROTOBUF_INT64
#include <google/protobuf/stubs/common.h>
#define GRPC_CUSTOM_PROTOBUF_INT64 ::google::protobuf::int64
#endif
#ifndef GRPC_CUSTOM_MESSAGE
#include <google/protobuf/message.h>
#define GRPC_CUSTOM_MESSAGE ::google::protobuf::Message
#endif
#ifndef GRPC_CUSTOM_STRING
#include <string>
#define GRPC_CUSTOM_STRING std::string
#endif
#ifndef GRPC_CUSTOM_ZEROCOPYOUTPUTSTREAM
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/io/zero_copy_stream.h>
#define GRPC_CUSTOM_ZEROCOPYOUTPUTSTREAM \
::google::protobuf::io::ZeroCopyOutputStream
#define GRPC_CUSTOM_ZEROCOPYINPUTSTREAM \
::google::protobuf::io::ZeroCopyInputStream
#define GRPC_CUSTOM_CODEDINPUTSTREAM ::google::protobuf::io::CodedInputStream
#endif
#ifdef GRPC_CXX0X_NO_NULLPTR
#include <memory>
const class {
@@ -125,23 +100,15 @@ const class {
} nullptr = {};
#endif
#ifndef GRPC_CUSTOM_STRING
#include <string>
#define GRPC_CUSTOM_STRING std::string
#endif
namespace grpc {
typedef GRPC_CUSTOM_STRING string;
namespace protobuf {
typedef GRPC_CUSTOM_MESSAGE Message;
typedef GRPC_CUSTOM_PROTOBUF_INT64 int64;
namespace io {
typedef GRPC_CUSTOM_ZEROCOPYOUTPUTSTREAM ZeroCopyOutputStream;
typedef GRPC_CUSTOM_ZEROCOPYINPUTSTREAM ZeroCopyInputStream;
typedef GRPC_CUSTOM_CODEDINPUTSTREAM CodedInputStream;
} // namespace io
} // namespace protobuf
} // namespace grpc
#endif // GRPCXX_CONFIG_H

include/grpc++/config_protobuf.h (72, new file)

@@ -0,0 +1,72 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPCXX_CONFIG_PROTOBUF_H
#define GRPCXX_CONFIG_PROTOBUF_H
#ifndef GRPC_CUSTOM_PROTOBUF_INT64
#include <google/protobuf/stubs/common.h>
#define GRPC_CUSTOM_PROTOBUF_INT64 ::google::protobuf::int64
#endif
#ifndef GRPC_CUSTOM_MESSAGE
#include <google/protobuf/message.h>
#define GRPC_CUSTOM_MESSAGE ::google::protobuf::Message
#endif
#ifndef GRPC_CUSTOM_ZEROCOPYOUTPUTSTREAM
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/io/zero_copy_stream.h>
#define GRPC_CUSTOM_ZEROCOPYOUTPUTSTREAM \
::google::protobuf::io::ZeroCopyOutputStream
#define GRPC_CUSTOM_ZEROCOPYINPUTSTREAM \
::google::protobuf::io::ZeroCopyInputStream
#define GRPC_CUSTOM_CODEDINPUTSTREAM ::google::protobuf::io::CodedInputStream
#endif
namespace grpc {
namespace protobuf {
typedef GRPC_CUSTOM_MESSAGE Message;
typedef GRPC_CUSTOM_PROTOBUF_INT64 int64;
namespace io {
typedef GRPC_CUSTOM_ZEROCOPYOUTPUTSTREAM ZeroCopyOutputStream;
typedef GRPC_CUSTOM_ZEROCOPYINPUTSTREAM ZeroCopyInputStream;
typedef GRPC_CUSTOM_CODEDINPUTSTREAM CodedInputStream;
} // namespace io
} // namespace protobuf
} // namespace grpc
#endif // GRPCXX_CONFIG_PROTOBUF_H
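
Every definition in this new header is wrapped in #ifndef, which makes the GRPC_CUSTOM_* macros override points: a build that wants a different protobuf implementation can define them before the header is pulled in, e.g. via -D flags or a wrapper header. A minimal sketch of that override, assuming a hypothetical my_proto library (the int64 and stream macros follow the same pattern):

// Hypothetical: ::my_proto::Message is not a real library type, just a
// stand-in for whatever message base class your build substitutes.
#define GRPC_CUSTOM_MESSAGE ::my_proto::Message
#include <grpc++/config_protobuf.h>
// grpc::protobuf::Message now aliases ::my_proto::Message throughout
// any C++ surface compiled against this header.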

include/grpc++/impl/call.h (427)

@@ -34,14 +34,18 @@
#ifndef GRPCXX_IMPL_CALL_H
#define GRPCXX_IMPL_CALL_H
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc++/client_context.h>
#include <grpc++/completion_queue.h>
#include <grpc++/config.h>
#include <grpc++/status.h>
#include <grpc++/impl/serialization_traits.h>
#include <memory>
#include <map>
#include <string.h>
struct grpc_call;
struct grpc_op;
@@ -50,84 +54,383 @@ namespace grpc {
class ByteBuffer;
class Call;
class CallOpBuffer : public CompletionQueueTag {
void FillMetadataMap(grpc_metadata_array* arr,
std::multimap<grpc::string, grpc::string>* metadata);
grpc_metadata* FillMetadataArray(
const std::multimap<grpc::string, grpc::string>& metadata);
/// Default argument for CallOpSet. I is unused by the class, but can be
/// used for generating multiple names for the same thing.
template <int I>
class CallNoOp {
protected:
void AddOp(grpc_op* ops, size_t* nops) {}
void FinishOp(bool* status, int max_message_size) {}
};
class CallOpSendInitialMetadata {
public:
CallOpBuffer();
~CallOpBuffer();
void Reset(void* next_return_tag);
// Does not take ownership.
void AddSendInitialMetadata(
std::multimap<grpc::string, grpc::string>* metadata);
void AddSendInitialMetadata(ClientContext* ctx);
void AddRecvInitialMetadata(ClientContext* ctx);
void AddSendMessage(const grpc::protobuf::Message& message);
void AddSendMessage(const ByteBuffer& message);
void AddRecvMessage(grpc::protobuf::Message* message);
void AddRecvMessage(ByteBuffer* message);
void AddClientSendClose();
void AddClientRecvStatus(ClientContext* ctx, Status* status);
void AddServerSendStatus(std::multimap<grpc::string, grpc::string>* metadata,
const Status& status);
void AddServerRecvClose(bool* cancelled);
// INTERNAL API:
// Convert to an array of grpc_op elements
void FillOps(grpc_op* ops, size_t* nops);
// Called by completion queue just prior to returning from Next() or Pluck()
bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE;
CallOpSendInitialMetadata() : send_(false) {}
void set_max_message_size(int max_message_size) {
max_message_size_ = max_message_size;
void SendInitialMetadata(
const std::multimap<grpc::string, grpc::string>& metadata) {
send_ = true;
initial_metadata_count_ = metadata.size();
initial_metadata_ = FillMetadataArray(metadata);
}
bool got_message;
protected:
void AddOp(grpc_op* ops, size_t* nops) {
if (!send_) return;
grpc_op* op = &ops[(*nops)++];
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->flags = 0;
op->data.send_initial_metadata.count = initial_metadata_count_;
op->data.send_initial_metadata.metadata = initial_metadata_;
}
void FinishOp(bool* status, int max_message_size) {
if (!send_) return;
gpr_free(initial_metadata_);
send_ = false;
}
private:
void* return_tag_;
// Send initial metadata
bool send_initial_metadata_;
bool send_;
size_t initial_metadata_count_;
grpc_metadata* initial_metadata_;
// Recv initial metadata
std::multimap<grpc::string, grpc::string>* recv_initial_metadata_;
grpc_metadata_array recv_initial_metadata_arr_;
// Send message
const grpc::protobuf::Message* send_message_;
const ByteBuffer* send_message_buffer_;
};
class CallOpSendMessage {
public:
CallOpSendMessage() : send_buf_(nullptr), own_buf_(false) {}
template <class M>
Status SendMessage(const M& message) GRPC_MUST_USE_RESULT;
protected:
void AddOp(grpc_op* ops, size_t* nops) {
if (send_buf_ == nullptr) return;
grpc_op* op = &ops[(*nops)++];
op->op = GRPC_OP_SEND_MESSAGE;
op->flags = 0;
op->data.send_message = send_buf_;
}
void FinishOp(bool* status, int max_message_size) {
if (own_buf_) grpc_byte_buffer_destroy(send_buf_);
send_buf_ = nullptr;
}
private:
grpc_byte_buffer* send_buf_;
// Recv message
grpc::protobuf::Message* recv_message_;
ByteBuffer* recv_message_buffer_;
bool own_buf_;
};
template <class M>
Status CallOpSendMessage::SendMessage(const M& message) {
return SerializationTraits<M>::Serialize(message, &send_buf_, &own_buf_);
}
template <class R>
class CallOpRecvMessage {
public:
CallOpRecvMessage() : got_message(false), message_(nullptr) {}
void RecvMessage(R* message) { message_ = message; }
bool got_message;
protected:
void AddOp(grpc_op* ops, size_t* nops) {
if (message_ == nullptr) return;
grpc_op* op = &ops[(*nops)++];
op->op = GRPC_OP_RECV_MESSAGE;
op->flags = 0;
op->data.recv_message = &recv_buf_;
}
void FinishOp(bool* status, int max_message_size) {
if (message_ == nullptr) return;
if (recv_buf_) {
if (*status) {
got_message = true;
*status = SerializationTraits<R>::Deserialize(recv_buf_, message_,
max_message_size)
.ok();
} else {
got_message = false;
grpc_byte_buffer_destroy(recv_buf_);
}
} else {
got_message = false;
*status = false;
}
message_ = nullptr;
}
private:
R* message_;
grpc_byte_buffer* recv_buf_;
int max_message_size_;
// Client send close
bool client_send_close_;
// Client recv status
};
class CallOpGenericRecvMessage {
public:
CallOpGenericRecvMessage() : got_message(false) {}
template <class R>
void RecvMessage(R* message) {
deserialize_ = [message](grpc_byte_buffer* buf,
int max_message_size) -> Status {
return SerializationTraits<R>::Deserialize(buf, message,
max_message_size);
};
}
bool got_message;
protected:
void AddOp(grpc_op* ops, size_t* nops) {
if (!deserialize_) return;
grpc_op* op = &ops[(*nops)++];
op->op = GRPC_OP_RECV_MESSAGE;
op->flags = 0;
op->data.recv_message = &recv_buf_;
}
void FinishOp(bool* status, int max_message_size) {
if (!deserialize_) return;
if (recv_buf_) {
if (*status) {
got_message = true;
*status = deserialize_(recv_buf_, max_message_size).ok();
} else {
got_message = false;
grpc_byte_buffer_destroy(recv_buf_);
}
} else {
got_message = false;
*status = false;
}
deserialize_ = DeserializeFunc();
}
private:
typedef std::function<Status(grpc_byte_buffer*, int)> DeserializeFunc;
DeserializeFunc deserialize_;
grpc_byte_buffer* recv_buf_;
};
class CallOpClientSendClose {
public:
CallOpClientSendClose() : send_(false) {}
void ClientSendClose() { send_ = true; }
protected:
void AddOp(grpc_op* ops, size_t* nops) {
if (!send_) return;
grpc_op* op = &ops[(*nops)++];
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
}
void FinishOp(bool* status, int max_message_size) { send_ = false; }
private:
bool send_;
};
class CallOpServerSendStatus {
public:
CallOpServerSendStatus() : send_status_available_(false) {}
void ServerSendStatus(
const std::multimap<grpc::string, grpc::string>& trailing_metadata,
const Status& status) {
trailing_metadata_count_ = trailing_metadata.size();
trailing_metadata_ = FillMetadataArray(trailing_metadata);
send_status_available_ = true;
send_status_code_ = static_cast<grpc_status_code>(status.error_code());
send_status_details_ = status.error_message();
}
protected:
void AddOp(grpc_op* ops, size_t* nops) {
if (!send_status_available_) return;
grpc_op* op = &ops[(*nops)++];
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count =
trailing_metadata_count_;
op->data.send_status_from_server.trailing_metadata = trailing_metadata_;
op->data.send_status_from_server.status = send_status_code_;
op->data.send_status_from_server.status_details =
send_status_details_.empty() ? nullptr : send_status_details_.c_str();
op->flags = 0;
}
void FinishOp(bool* status, int max_message_size) {
if (!send_status_available_) return;
gpr_free(trailing_metadata_);
send_status_available_ = false;
}
private:
bool send_status_available_;
grpc_status_code send_status_code_;
grpc::string send_status_details_;
size_t trailing_metadata_count_;
grpc_metadata* trailing_metadata_;
};
class CallOpRecvInitialMetadata {
public:
CallOpRecvInitialMetadata() : recv_initial_metadata_(nullptr) {}
void RecvInitialMetadata(ClientContext* context) {
context->initial_metadata_received_ = true;
recv_initial_metadata_ = &context->recv_initial_metadata_;
}
protected:
void AddOp(grpc_op* ops, size_t* nops) {
if (!recv_initial_metadata_) return;
memset(&recv_initial_metadata_arr_, 0, sizeof(recv_initial_metadata_arr_));
grpc_op* op = &ops[(*nops)++];
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &recv_initial_metadata_arr_;
op->flags = 0;
}
void FinishOp(bool* status, int max_message_size) {
if (recv_initial_metadata_ == nullptr) return;
FillMetadataMap(&recv_initial_metadata_arr_, recv_initial_metadata_);
recv_initial_metadata_ = nullptr;
}
private:
std::multimap<grpc::string, grpc::string>* recv_initial_metadata_;
grpc_metadata_array recv_initial_metadata_arr_;
};
class CallOpClientRecvStatus {
public:
CallOpClientRecvStatus() : recv_status_(nullptr) {}
void ClientRecvStatus(ClientContext* context, Status* status) {
recv_trailing_metadata_ = &context->trailing_metadata_;
recv_status_ = status;
}
protected:
void AddOp(grpc_op* ops, size_t* nops) {
if (recv_status_ == nullptr) return;
memset(&recv_trailing_metadata_arr_, 0,
sizeof(recv_trailing_metadata_arr_));
status_details_ = nullptr;
status_details_capacity_ = 0;
grpc_op* op = &ops[(*nops)++];
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata =
&recv_trailing_metadata_arr_;
op->data.recv_status_on_client.status = &status_code_;
op->data.recv_status_on_client.status_details = &status_details_;
op->data.recv_status_on_client.status_details_capacity =
&status_details_capacity_;
op->flags = 0;
}
void FinishOp(bool* status, int max_message_size) {
if (recv_status_ == nullptr) return;
FillMetadataMap(&recv_trailing_metadata_arr_, recv_trailing_metadata_);
*recv_status_ = Status(
static_cast<StatusCode>(status_code_),
status_details_ ? grpc::string(status_details_) : grpc::string());
gpr_free(status_details_);
recv_status_ = nullptr;
}
private:
std::multimap<grpc::string, grpc::string>* recv_trailing_metadata_;
Status* recv_status_;
grpc_metadata_array recv_trailing_metadata_arr_;
grpc_status_code status_code_;
char* status_details_;
size_t status_details_capacity_;
// Server send status
bool send_status_available_;
grpc_status_code send_status_code_;
grpc::string send_status_details_;
size_t trailing_metadata_count_;
grpc_metadata* trailing_metadata_;
int cancelled_buf_;
bool* recv_closed_;
};
// SneakyCallOpBuffer does not post completions to the completion queue
class SneakyCallOpBuffer GRPC_FINAL : public CallOpBuffer {
/// An abstract collection of call ops, used to generate the
/// grpc_call_op structure to pass down to the lower layers,
/// and as it is-a CompletionQueueTag, also massages the final
/// completion into the correct form for consumption in the C++
/// API.
class CallOpSetInterface : public CompletionQueueTag {
public:
CallOpSetInterface() : max_message_size_(0) {}
/// Fills in grpc_op, starting from ops[*nops] and moving
/// upwards.
virtual void FillOps(grpc_op* ops, size_t* nops) = 0;
void set_max_message_size(int max_message_size) {
max_message_size_ = max_message_size;
}
protected:
int max_message_size_;
};
/// Primary implementation of CallOpSetInterface.
/// Since we cannot use variadic templates, we declare slots up to
/// the maximum count of ops we'll need in a set. We leverage the
/// empty base class optimization to slim this class (especially
/// when many slots are unused). To avoid duplicate base classes,
/// the template parameter for CallNoOp is varied by argument position.
template <class Op1 = CallNoOp<1>, class Op2 = CallNoOp<2>,
class Op3 = CallNoOp<3>, class Op4 = CallNoOp<4>,
class Op5 = CallNoOp<5>, class Op6 = CallNoOp<6>>
class CallOpSet : public CallOpSetInterface,
public Op1,
public Op2,
public Op3,
public Op4,
public Op5,
public Op6 {
public:
CallOpSet() : return_tag_(this) {}
void FillOps(grpc_op* ops, size_t* nops) GRPC_OVERRIDE {
this->Op1::AddOp(ops, nops);
this->Op2::AddOp(ops, nops);
this->Op3::AddOp(ops, nops);
this->Op4::AddOp(ops, nops);
this->Op5::AddOp(ops, nops);
this->Op6::AddOp(ops, nops);
}
bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE {
this->Op1::FinishOp(status, max_message_size_);
this->Op2::FinishOp(status, max_message_size_);
this->Op3::FinishOp(status, max_message_size_);
this->Op4::FinishOp(status, max_message_size_);
this->Op5::FinishOp(status, max_message_size_);
this->Op6::FinishOp(status, max_message_size_);
*tag = return_tag_;
return true;
}
void set_output_tag(void* return_tag) { return_tag_ = return_tag; }
private:
void* return_tag_;
};
/// A CallOpSet that does not post completions to the completion queue.
///
/// Allows hiding some completions that the C core must generate from
/// C++ users.
template <class Op1 = CallNoOp<1>, class Op2 = CallNoOp<2>,
class Op3 = CallNoOp<3>, class Op4 = CallNoOp<4>,
class Op5 = CallNoOp<5>, class Op6 = CallNoOp<6>>
class SneakyCallOpSet GRPC_FINAL
: public CallOpSet<Op1, Op2, Op3, Op4, Op5, Op6> {
public:
bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE {
return CallOpBuffer::FinalizeResult(tag, status) && false;
typedef CallOpSet<Op1, Op2, Op3, Op4, Op5, Op6> Base;
return Base::FinalizeResult(tag, status) && false;
}
};
@@ -135,7 +438,7 @@ class SneakyCallOpBuffer GRPC_FINAL : public CallOpBuffer {
class CallHook {
public:
virtual ~CallHook() {}
virtual void PerformOpsOnCall(CallOpBuffer* ops, Call* call) = 0;
virtual void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) = 0;
};
// Straightforward wrapping of the C call object
@@ -146,7 +449,7 @@ class Call GRPC_FINAL {
Call(grpc_call* call, CallHook* call_hook_, CompletionQueue* cq,
int max_message_size);
void PerformOps(CallOpBuffer* buffer);
void PerformOps(CallOpSetInterface* ops);
grpc_call* call() { return call_; }
CompletionQueue* cq() { return cq_; }
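
The CallNoOp/CallOpSet comments earlier in this file describe a trick worth seeing in isolation: unused op slots are distinct empty base classes, so they add no storage and their AddOp/FinishOp calls compile away. A standalone sketch of the pattern follows; the names are illustrative, not gRPC's.

#include <cstdio>

template <int I>
class NoOp {
 protected:
  void AddOp() {}  // empty slot: nothing to add
};

class OpA {
 protected:
  void AddOp() { std::printf("A\n"); }
};

class OpB {
 protected:
  void AddOp() { std::printf("B\n"); }
};

// Varying NoOp's int parameter by position keeps the default bases
// distinct types, which is what lets them all be inherited at once
// and lets the empty base class optimization apply.
template <class Op1 = NoOp<1>, class Op2 = NoOp<2>, class Op3 = NoOp<3>>
class OpSet : public Op1, public Op2, public Op3 {
 public:
  void FillOps() {
    this->Op1::AddOp();
    this->Op2::AddOp();
    this->Op3::AddOp();
  }
};

int main() {
  OpSet<OpA, OpB> two_ops;  // third slot stays the empty default
  two_ops.FillOps();        // prints "A" then "B"
  // With every slot empty, the set typically collapses to a single byte.
  std::printf("%zu\n", sizeof(OpSet<>));
  return 0;
}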

include/grpc++/impl/client_unary_call.h (26)

@@ -37,6 +37,8 @@
#include <grpc++/config.h>
#include <grpc++/status.h>
#include <grpc++/impl/call.h>
namespace grpc {
class ChannelInterface;
@@ -45,10 +47,28 @@ class CompletionQueue;
class RpcMethod;
// Wrapper that performs a blocking unary call
template <class InputMessage, class OutputMessage>
Status BlockingUnaryCall(ChannelInterface* channel, const RpcMethod& method,
ClientContext* context,
const grpc::protobuf::Message& request,
grpc::protobuf::Message* result);
ClientContext* context, const InputMessage& request,
OutputMessage* result) {
CompletionQueue cq;
Call call(channel->CreateCall(method, context, &cq));
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpRecvInitialMetadata, CallOpRecvMessage<OutputMessage>,
CallOpClientSendClose, CallOpClientRecvStatus> ops;
Status status = ops.SendMessage(request);
if (!status.ok()) {
return status;
}
ops.SendInitialMetadata(context->send_initial_metadata_);
ops.RecvInitialMetadata(context);
ops.RecvMessage(result);
ops.ClientSendClose();
ops.ClientRecvStatus(context, &status);
call.PerformOps(&ops);
GPR_ASSERT((cq.Pluck(&ops) && ops.got_message) || !status.ok());
return status;
}
} // namespace grpc
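
With the request and response now template parameters, generated stubs can pass concrete message types straight through to BlockingUnaryCall. As an illustration only (the service, method, and member names below are hypothetical; the real emitted code comes from src/compiler/cpp_generator.cc, also updated in this diff), a unary stub method reduces to:

::grpc::Status Greeter::Stub::SayHello(::grpc::ClientContext* context,
                                       const HelloRequest& request,
                                       HelloReply* response) {
  // Blocks until the server responds or the RPC fails; serialization
  // errors surface through the returned Status.
  return ::grpc::BlockingUnaryCall(channel(), rpcmethod_SayHello_, context,
                                   request, response);
}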

include/grpc++/impl/proto_utils.h (76, new file)

@@ -0,0 +1,76 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_INTERNAL_CPP_PROTO_PROTO_UTILS_H
#define GRPC_INTERNAL_CPP_PROTO_PROTO_UTILS_H
#include <type_traits>
#include <grpc/grpc.h>
#include <grpc++/impl/serialization_traits.h>
#include <grpc++/config_protobuf.h>
#include <grpc++/status.h>
namespace grpc {
// Serialize the msg into a buffer created inside the function. The caller
// should destroy the returned buffer when done with it. If serialization
// fails, a non-OK Status is returned and the buffer is left unchanged.
Status SerializeProto(const grpc::protobuf::Message& msg,
grpc_byte_buffer** buffer);
// The caller keeps ownership of buffer and msg.
Status DeserializeProto(grpc_byte_buffer* buffer, grpc::protobuf::Message* msg,
int max_message_size);
template <class T>
class SerializationTraits<T, typename std::enable_if<std::is_base_of<
grpc::protobuf::Message, T>::value>::type> {
public:
static Status Serialize(const grpc::protobuf::Message& msg,
grpc_byte_buffer** buffer, bool* own_buffer) {
*own_buffer = true;
return SerializeProto(msg, buffer);
}
static Status Deserialize(grpc_byte_buffer* buffer,
grpc::protobuf::Message* msg,
int max_message_size) {
auto status = DeserializeProto(buffer, msg, max_message_size);
grpc_byte_buffer_destroy(buffer);
return status;
}
};
} // namespace grpc
#endif // GRPC_INTERNAL_CPP_PROTO_PROTO_UTILS_H
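
The partial specialization above uses std::enable_if so that one traits class covers every type derived from grpc::protobuf::Message. A self-contained sketch of that dispatch technique, with stand-in types (nothing here is gRPC API):

#include <cstdio>
#include <string>
#include <type_traits>

struct Status {  // stand-in for grpc::Status
  bool ok;
};

template <class T, class Enable = void>
class SerializationTraits;  // primary template: intentionally undefined

struct MessageBase {  // stand-in for grpc::protobuf::Message
  virtual ~MessageBase() {}
  virtual std::string bytes() const = 0;
};

// One specialization catches the whole family of MessageBase-derived
// types; anything else still hits the undefined primary template and
// fails to compile, which is the intended behavior.
template <class T>
class SerializationTraits<
    T, typename std::enable_if<std::is_base_of<MessageBase, T>::value>::type> {
 public:
  static Status Serialize(const T& msg, std::string* buffer) {
    *buffer = msg.bytes();
    return Status{true};
  }
};

struct Hello : MessageBase {
  std::string bytes() const override { return "hello"; }
};

int main() {
  std::string buf;
  Status s = SerializationTraits<Hello>::Serialize(Hello(), &buf);
  std::printf("%d %s\n", static_cast<int>(s.ok), buf.c_str());
  return 0;
}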

include/grpc++/impl/rpc_service_method.h (111)

@@ -55,16 +55,19 @@ class MethodHandler {
public:
virtual ~MethodHandler() {}
struct HandlerParameter {
HandlerParameter(Call* c, ServerContext* context,
const grpc::protobuf::Message* req,
grpc::protobuf::Message* resp)
: call(c), server_context(context), request(req), response(resp) {}
HandlerParameter(Call* c, ServerContext* context, grpc_byte_buffer* req,
int max_size)
: call(c),
server_context(context),
request(req),
max_message_size(max_size) {}
Call* call;
ServerContext* server_context;
const grpc::protobuf::Message* request;
grpc::protobuf::Message* response;
// Handler required to grpc_byte_buffer_destroy this
grpc_byte_buffer* request;
int max_message_size;
};
virtual Status RunHandler(const HandlerParameter& param) = 0;
virtual void RunHandler(const HandlerParameter& param) = 0;
};
// A wrapper class of an application provided rpc method handler.
@@ -77,11 +80,25 @@ class RpcMethodHandler : public MethodHandler {
ServiceType* service)
: func_(func), service_(service) {}
Status RunHandler(const HandlerParameter& param) GRPC_FINAL {
// Invoke application function, cast proto messages to their actual types.
return func_(service_, param.server_context,
dynamic_cast<const RequestType*>(param.request),
dynamic_cast<ResponseType*>(param.response));
void RunHandler(const HandlerParameter& param) GRPC_FINAL {
RequestType req;
Status status = SerializationTraits<RequestType>::Deserialize(
param.request, &req, param.max_message_size);
ResponseType rsp;
if (status.ok()) {
status = func_(service_, param.server_context, &req, &rsp);
}
GPR_ASSERT(!param.server_context->sent_initial_metadata_);
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpServerSendStatus> ops;
ops.SendInitialMetadata(param.server_context->initial_metadata_);
if (status.ok()) {
status = ops.SendMessage(rsp);
}
ops.ServerSendStatus(param.server_context->trailing_metadata_, status);
param.call->PerformOps(&ops);
param.call->cq()->Pluck(&ops);
}
private:
@@ -102,10 +119,21 @@ class ClientStreamingHandler : public MethodHandler {
ServiceType* service)
: func_(func), service_(service) {}
Status RunHandler(const HandlerParameter& param) GRPC_FINAL {
void RunHandler(const HandlerParameter& param) GRPC_FINAL {
ServerReader<RequestType> reader(param.call, param.server_context);
return func_(service_, param.server_context, &reader,
dynamic_cast<ResponseType*>(param.response));
ResponseType rsp;
Status status = func_(service_, param.server_context, &reader, &rsp);
GPR_ASSERT(!param.server_context->sent_initial_metadata_);
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpServerSendStatus> ops;
ops.SendInitialMetadata(param.server_context->initial_metadata_);
if (status.ok()) {
status = ops.SendMessage(rsp);
}
ops.ServerSendStatus(param.server_context->trailing_metadata_, status);
param.call->PerformOps(&ops);
param.call->cq()->Pluck(&ops);
}
private:
@@ -124,10 +152,23 @@ class ServerStreamingHandler : public MethodHandler {
ServiceType* service)
: func_(func), service_(service) {}
Status RunHandler(const HandlerParameter& param) GRPC_FINAL {
ServerWriter<ResponseType> writer(param.call, param.server_context);
return func_(service_, param.server_context,
dynamic_cast<const RequestType*>(param.request), &writer);
void RunHandler(const HandlerParameter& param) GRPC_FINAL {
RequestType req;
Status status = SerializationTraits<RequestType>::Deserialize(
param.request, &req, param.max_message_size);
if (status.ok()) {
ServerWriter<ResponseType> writer(param.call, param.server_context);
status = func_(service_, param.server_context, &req, &writer);
}
CallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> ops;
if (!param.server_context->sent_initial_metadata_) {
ops.SendInitialMetadata(param.server_context->initial_metadata_);
}
ops.ServerSendStatus(param.server_context->trailing_metadata_, status);
param.call->PerformOps(&ops);
param.call->cq()->Pluck(&ops);
}
private:
@@ -147,10 +188,18 @@ class BidiStreamingHandler : public MethodHandler {
ServiceType* service)
: func_(func), service_(service) {}
Status RunHandler(const HandlerParameter& param) GRPC_FINAL {
void RunHandler(const HandlerParameter& param) GRPC_FINAL {
ServerReaderWriter<ResponseType, RequestType> stream(param.call,
param.server_context);
return func_(service_, param.server_context, &stream);
Status status = func_(service_, param.server_context, &stream);
CallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> ops;
if (!param.server_context->sent_initial_metadata_) {
ops.SendInitialMetadata(param.server_context->initial_metadata_);
}
ops.ServerSendStatus(param.server_context->trailing_metadata_, status);
param.call->PerformOps(&ops);
param.call->cq()->Pluck(&ops);
}
private:
@@ -162,29 +211,15 @@ class BidiStreamingHandler : public MethodHandler {
// Server side rpc method class
class RpcServiceMethod : public RpcMethod {
public:
// Takes ownership of the handler and two prototype objects.
// Takes ownership of the handler
RpcServiceMethod(const char* name, RpcMethod::RpcType type,
MethodHandler* handler,
grpc::protobuf::Message* request_prototype,
grpc::protobuf::Message* response_prototype)
: RpcMethod(name, type, nullptr),
handler_(handler),
request_prototype_(request_prototype),
response_prototype_(response_prototype) {}
MethodHandler* handler)
: RpcMethod(name, type, nullptr), handler_(handler) {}
MethodHandler* handler() { return handler_.get(); }
grpc::protobuf::Message* AllocateRequestProto() {
return request_prototype_->New();
}
grpc::protobuf::Message* AllocateResponseProto() {
return response_prototype_->New();
}
private:
std::unique_ptr<MethodHandler> handler_;
std::unique_ptr<grpc::protobuf::Message> request_prototype_;
std::unique_ptr<grpc::protobuf::Message> response_prototype_;
};
// This class contains all the method information for an rpc service. It is

include/grpc++/impl/serialization_traits.h (68, new file)

@@ -0,0 +1,68 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPCXX_IMPL_SERIALIZATION_TRAITS_H
#define GRPCXX_IMPL_SERIALIZATION_TRAITS_H
namespace grpc {
/// Defines how to serialize and deserialize some type.
///
/// Used for hooking different message serialization API's into GRPC.
/// Each SerializationTraits implementation must provide the following
/// functions:
/// static Status Serialize(const Message& msg,
/// grpc_byte_buffer** buffer,
/// bool* own_buffer);
/// static Status Deserialize(grpc_byte_buffer* buffer,
/// Message* msg,
/// int max_message_size);
///
/// Serialize is required to convert message to a grpc_byte_buffer, and
/// to store a pointer to that byte buffer at *buffer. *own_buffer should
/// be set to true if the caller owns said byte buffer, or false if
/// ownership is retained elsewhere.
///
/// Deserialize is required to convert buffer into the message stored at
/// msg. max_message_size is passed in as a bound on the maximum number of
/// message bytes Deserialize should accept.
///
/// Both functions return a Status, allowing them to explain what went
/// wrong if required.
template <class Message,
class UnusedButHereForPartialTemplateSpecialization = void>
class SerializationTraits;
} // namespace grpc
#endif // GRPCXX_IMPL_SERIALIZATION_TRAITS_H
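
A minimal sketch of a conforming specialization, using a mock buffer type in place of grpc_byte_buffer (all names here are stand-ins, not gRPC API). It shows the two ownership rules the comment block spells out: *own_buffer on the serialize side, and buffer consumption on the deserialize side.

#include <cstdio>
#include <string>

struct MockByteBuffer { std::string data; };  // stand-in for grpc_byte_buffer
struct Status {
  bool ok;
};

template <class Message, class Unused = void>
class SerializationTraits;

template <>
class SerializationTraits<std::string, void> {
 public:
  static Status Serialize(const std::string& msg, MockByteBuffer** buffer,
                          bool* own_buffer) {
    *buffer = new MockByteBuffer{msg};
    *own_buffer = true;  // a fresh buffer: the caller must free it
    return Status{true};
  }
  static Status Deserialize(MockByteBuffer* buffer, std::string* msg,
                            int max_message_size) {
    bool fits = static_cast<int>(buffer->data.size()) <= max_message_size;
    if (fits) *msg = buffer->data;
    delete buffer;  // this implementation consumes the buffer either way
    return Status{fits};
  }
};

int main() {
  MockByteBuffer* buf = nullptr;
  bool own = false;
  SerializationTraits<std::string>::Serialize("ping", &buf, &own);
  std::string out;
  Status s = SerializationTraits<std::string>::Deserialize(buf, &out, 1024);
  std::printf("%d %s\n", static_cast<int>(s.ok), out.c_str());
  return 0;
}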

include/grpc++/impl/service_type.h (41)

@@ -35,6 +35,8 @@
#define GRPCXX_IMPL_SERVICE_TYPE_H
#include <grpc++/config.h>
#include <grpc++/impl/serialization_traits.h>
#include <grpc++/server.h>
#include <grpc++/status.h>
namespace grpc {
@@ -65,20 +67,8 @@ class ServerAsyncStreamingInterface {
class AsynchronousService {
public:
// this is Server, but in disguise to avoid a link dependency
class DispatchImpl {
public:
virtual void RequestAsyncCall(void* registered_method,
ServerContext* context,
::grpc::protobuf::Message* request,
ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq,
void* tag) = 0;
};
AsynchronousService(const char** method_names, size_t method_count)
: dispatch_impl_(nullptr),
: server_(nullptr),
method_names_(method_names),
method_count_(method_count),
request_args_(nullptr) {}
@@ -86,42 +76,43 @@ class AsynchronousService {
~AsynchronousService() { delete[] request_args_; }
protected:
void RequestAsyncUnary(int index, ServerContext* context,
grpc::protobuf::Message* request,
template <class Message>
void RequestAsyncUnary(int index, ServerContext* context, Message* request,
ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag) {
dispatch_impl_->RequestAsyncCall(request_args_[index], context, request,
stream, call_cq, notification_cq, tag);
server_->RequestAsyncCall(request_args_[index], context, stream, call_cq,
notification_cq, tag, request);
}
void RequestClientStreaming(int index, ServerContext* context,
ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq,
void* tag) {
dispatch_impl_->RequestAsyncCall(request_args_[index], context, nullptr,
stream, call_cq, notification_cq, tag);
server_->RequestAsyncCall(request_args_[index], context, stream, call_cq,
notification_cq, tag);
}
template <class Message>
void RequestServerStreaming(int index, ServerContext* context,
grpc::protobuf::Message* request,
Message* request,
ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq,
void* tag) {
dispatch_impl_->RequestAsyncCall(request_args_[index], context, request,
stream, call_cq, notification_cq, tag);
server_->RequestAsyncCall(request_args_[index], context, stream, call_cq,
notification_cq, tag, request);
}
void RequestBidiStreaming(int index, ServerContext* context,
ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag) {
dispatch_impl_->RequestAsyncCall(request_args_[index], context, nullptr,
stream, call_cq, notification_cq, tag);
server_->RequestAsyncCall(request_args_[index], context, stream, call_cq,
notification_cq, tag);
}
private:
friend class Server;
DispatchImpl* dispatch_impl_;
Server* server_;
const char** const method_names_;
size_t method_count_;
void** request_args_;

include/grpc++/server.h (126)

@@ -41,25 +41,24 @@
#include <grpc++/config.h>
#include <grpc++/impl/call.h>
#include <grpc++/impl/grpc_library.h>
#include <grpc++/impl/service_type.h>
#include <grpc++/impl/sync.h>
#include <grpc++/status.h>
struct grpc_server;
namespace grpc {
class AsynchronousService;
class GenericServerContext;
class AsyncGenericService;
class RpcService;
class RpcServiceMethod;
class ServerAsyncStreamingInterface;
class ServerCredentials;
class ThreadPoolInterface;
// Currently it only supports handling rpcs in a single thread.
class Server GRPC_FINAL : public GrpcLibrary,
private CallHook,
private AsynchronousService::DispatchImpl {
class Server GRPC_FINAL : public GrpcLibrary, private CallHook {
public:
~Server();
@@ -73,6 +72,7 @@ class Server GRPC_FINAL : public GrpcLibrary,
private:
friend class AsyncGenericService;
friend class AsynchronousService;
friend class ServerBuilder;
class SyncRequest;
@@ -96,21 +96,123 @@ class Server GRPC_FINAL : public GrpcLibrary,
void RunRpc();
void ScheduleCallback();
void PerformOpsOnCall(CallOpBuffer* ops, Call* call) GRPC_OVERRIDE;
void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) GRPC_OVERRIDE;
class BaseAsyncRequest : public CompletionQueueTag {
public:
BaseAsyncRequest(Server* server, ServerContext* context,
ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq, void* tag);
virtual ~BaseAsyncRequest();
bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE;
protected:
Server* const server_;
ServerContext* const context_;
ServerAsyncStreamingInterface* const stream_;
CompletionQueue* const call_cq_;
void* const tag_;
grpc_call* call_;
grpc_metadata_array initial_metadata_array_;
};
class RegisteredAsyncRequest : public BaseAsyncRequest {
public:
RegisteredAsyncRequest(Server* server, ServerContext* context,
ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq, void* tag);
// uses BaseAsyncRequest::FinalizeResult
protected:
void IssueRequest(void* registered_method, grpc_byte_buffer** payload,
ServerCompletionQueue* notification_cq);
};
class NoPayloadAsyncRequest GRPC_FINAL : public RegisteredAsyncRequest {
public:
NoPayloadAsyncRequest(void* registered_method, Server* server,
ServerContext* context,
ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag)
: RegisteredAsyncRequest(server, context, stream, call_cq, tag) {
IssueRequest(registered_method, nullptr, notification_cq);
}
// uses RegisteredAsyncRequest::FinalizeResult
};
template <class Message>
class PayloadAsyncRequest GRPC_FINAL : public RegisteredAsyncRequest {
public:
PayloadAsyncRequest(void* registered_method, Server* server,
ServerContext* context,
ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag,
Message* request)
: RegisteredAsyncRequest(server, context, stream, call_cq, tag),
request_(request) {
IssueRequest(registered_method, &payload_, notification_cq);
}
bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE {
bool serialization_status =
*status && payload_ &&
SerializationTraits<Message>::Deserialize(payload_, request_,
server_->max_message_size_)
.ok();
bool ret = RegisteredAsyncRequest::FinalizeResult(tag, status);
*status = serialization_status && *status;
return ret;
}
private:
grpc_byte_buffer* payload_;
Message* const request_;
};
class GenericAsyncRequest GRPC_FINAL : public BaseAsyncRequest {
public:
GenericAsyncRequest(Server* server, GenericServerContext* context,
ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag);
bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE;
private:
grpc_call_details call_details_;
};
template <class Message>
void RequestAsyncCall(void* registered_method, ServerContext* context,
ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag,
Message* message) {
new PayloadAsyncRequest<Message>(registered_method, this, context, stream,
call_cq, notification_cq, tag, message);
}
// DispatchImpl
void RequestAsyncCall(void* registered_method, ServerContext* context,
grpc::protobuf::Message* request,
ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq,
void* tag) GRPC_OVERRIDE;
ServerCompletionQueue* notification_cq, void* tag) {
new NoPayloadAsyncRequest(registered_method, this, context, stream, call_cq,
notification_cq, tag);
}
void RequestAsyncGenericCall(GenericServerContext* context,
ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq,
void* tag) {
new GenericAsyncRequest(this, context, stream, call_cq, notification_cq,
tag);
}
const int max_message_size_;
@ -133,8 +235,6 @@ class Server GRPC_FINAL : public GrpcLibrary,
ThreadPoolInterface* thread_pool_;
// Whether the thread pool is created and owned by the server.
bool thread_pool_owned_;
private:
Server() : max_message_size_(-1), server_(NULL) { abort(); }
};
} // namespace grpc

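For orientation, a minimal sketch of how the new registered-method request path is driven. EchoRequest/EchoReply, the tag value, and the direct call to RequestAsyncCall (normally reached only through generated service code, which is why the handlers are friends) are illustrative assumptions, not part of this patch:

grpc::ServerContext ctx;
EchoRequest request;  // hypothetical message type
grpc::ServerAsyncResponseWriter<EchoReply> responder(&ctx);
// Queues a PayloadAsyncRequest<EchoRequest>: the incoming byte buffer is
// deserialized into `request` inside FinalizeResult, and a deserialization
// failure is surfaced through the completion's status.
server->RequestAsyncCall(registered_method, &ctx, &responder, call_cq,
                         notification_cq, tag, &request);
void* got_tag;
bool ok;
while (notification_cq->Next(&got_tag, &ok)) {
  if (got_tag == tag && ok) break;  // `request` is now populated
}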
@ -60,6 +60,14 @@ template <class W>
class ServerWriter;
template <class R, class W>
class ServerReaderWriter;
template <class ServiceType, class RequestType, class ResponseType>
class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
class ClientStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
class ServerStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
class BidiStreamingHandler;
class Call;
class CallOpBuffer;
@ -105,6 +113,14 @@ class ServerContext {
friend class ::grpc::ServerWriter;
template <class R, class W>
friend class ::grpc::ServerReaderWriter;
template <class ServiceType, class RequestType, class ResponseType>
friend class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ClientStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ServerStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class BidiStreamingHandler;
// Prevent copying.
ServerContext(const ServerContext&);

@ -93,15 +93,18 @@ template <class R>
class ClientReader GRPC_FINAL : public ClientReaderInterface<R> {
public:
// Blocking create a stream and write the first request out.
template <class W>
ClientReader(ChannelInterface* channel, const RpcMethod& method,
ClientContext* context, const W& request)
: context_(context), call_(channel->CreateCall(method, context, &cq_)) {
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpClientSendClose> ops;
ops.SendInitialMetadata(context->send_initial_metadata_);
// TODO(ctiller): don't assert
GPR_ASSERT(ops.SendMessage(request).ok());
ops.ClientSendClose();
call_.PerformOps(&ops);
cq_.Pluck(&ops);
}
// Blocking wait for initial metadata from server. The received metadata
@ -111,28 +114,28 @@ class ClientReader GRPC_FINAL : public ClientReaderInterface<R> {
void WaitForInitialMetadata() {
GPR_ASSERT(!context_->initial_metadata_received_);
CallOpSet<CallOpRecvInitialMetadata> ops;
ops.RecvInitialMetadata(context_);
call_.PerformOps(&ops);
cq_.Pluck(&ops); // status ignored
}
bool Read(R* msg) GRPC_OVERRIDE {
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> ops;
if (!context_->initial_metadata_received_) {
ops.RecvInitialMetadata(context_);
}
ops.RecvMessage(msg);
call_.PerformOps(&ops);
return cq_.Pluck(&ops) && ops.got_message;
}
Status Finish() GRPC_OVERRIDE {
CallOpSet<CallOpClientRecvStatus> ops;
Status status;
ops.ClientRecvStatus(context_, &status);
call_.PerformOps(&ops);
GPR_ASSERT(cq_.Pluck(&ops));
return status;
}
@ -150,48 +153,49 @@ class ClientWriterInterface : public ClientStreamingInterface,
};
template <class W>
class ClientWriter : public ClientWriterInterface<W> {
public:
// Blocking create a stream.
template <class R>
ClientWriter(ChannelInterface* channel, const RpcMethod& method,
ClientContext* context, R* response)
: context_(context), call_(channel->CreateCall(method, context, &cq_)) {
finish_ops_.RecvMessage(response);
CallOpSet<CallOpSendInitialMetadata> ops;
ops.SendInitialMetadata(context->send_initial_metadata_);
call_.PerformOps(&ops);
cq_.Pluck(&ops);
}
bool Write(const W& msg) GRPC_OVERRIDE {
CallOpSet<CallOpSendMessage> ops;
if (!ops.SendMessage(msg).ok()) {
return false;
}
call_.PerformOps(&ops);
return cq_.Pluck(&ops);
}
bool WritesDone() GRPC_OVERRIDE {
CallOpSet<CallOpClientSendClose> ops;
ops.ClientSendClose();
call_.PerformOps(&ops);
return cq_.Pluck(&ops);
}
// Read the final response and wait for the final status.
Status Finish() GRPC_OVERRIDE {
Status status;
finish_ops_.ClientRecvStatus(context_, &status);
call_.PerformOps(&finish_ops_);
GPR_ASSERT(cq_.Pluck(&finish_ops_));
return status;
}
private:
ClientContext* context_;
CallOpSet<CallOpGenericRecvMessage, CallOpClientRecvStatus> finish_ops_;
CompletionQueue cq_;
Call call_;
};
@ -213,10 +217,10 @@ class ClientReaderWriter GRPC_FINAL : public ClientReaderWriterInterface<W, R> {
ClientReaderWriter(ChannelInterface* channel, const RpcMethod& method,
ClientContext* context)
: context_(context), call_(channel->CreateCall(method, context, &cq_)) {
CallOpSet<CallOpSendInitialMetadata> ops;
ops.SendInitialMetadata(context->send_initial_metadata_);
call_.PerformOps(&ops);
cq_.Pluck(&ops);
}
// Blocking wait for initial metadata from server. The received metadata
@ -226,42 +230,42 @@ class ClientReaderWriter GRPC_FINAL : public ClientReaderWriterInterface<W, R> {
void WaitForInitialMetadata() {
GPR_ASSERT(!context_->initial_metadata_received_);
CallOpSet<CallOpRecvInitialMetadata> ops;
ops.RecvInitialMetadata(context_);
call_.PerformOps(&ops);
cq_.Pluck(&ops); // status ignored
}
bool Read(R* msg) GRPC_OVERRIDE {
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> ops;
if (!context_->initial_metadata_received_) {
ops.RecvInitialMetadata(context_);
}
ops.RecvMessage(msg);
call_.PerformOps(&ops);
return cq_.Pluck(&ops) && ops.got_message;
}
bool Write(const W& msg) GRPC_OVERRIDE {
CallOpSet<CallOpSendMessage> ops;
if (!ops.SendMessage(msg).ok()) return false;
call_.PerformOps(&ops);
return cq_.Pluck(&ops);
}
bool WritesDone() GRPC_OVERRIDE {
CallOpSet<CallOpClientSendClose> ops;
ops.ClientSendClose();
call_.PerformOps(&ops);
return cq_.Pluck(&ops);
}
Status Finish() GRPC_OVERRIDE {
CallOpSet<CallOpClientRecvStatus> ops;
Status status;
ops.ClientRecvStatus(context_, &status);
call_.PerformOps(&ops);
GPR_ASSERT(cq_.Pluck(&ops));
return status;
}
@ -279,18 +283,18 @@ class ServerReader GRPC_FINAL : public ReaderInterface<R> {
void SendInitialMetadata() {
GPR_ASSERT(!ctx_->sent_initial_metadata_);
CallOpSet<CallOpSendInitialMetadata> ops;
ops.SendInitialMetadata(ctx_->initial_metadata_);
ctx_->sent_initial_metadata_ = true;
call_->PerformOps(&ops);
call_->cq()->Pluck(&ops);
}
bool Read(R* msg) GRPC_OVERRIDE {
CallOpSet<CallOpRecvMessage<R>> ops;
ops.RecvMessage(msg);
call_->PerformOps(&ops);
return call_->cq()->Pluck(&ops) && ops.got_message;
}
private:
@ -306,22 +310,24 @@ class ServerWriter GRPC_FINAL : public WriterInterface<W> {
void SendInitialMetadata() {
GPR_ASSERT(!ctx_->sent_initial_metadata_);
CallOpSet<CallOpSendInitialMetadata> ops;
ops.SendInitialMetadata(ctx_->initial_metadata_);
ctx_->sent_initial_metadata_ = true;
call_->PerformOps(&ops);
call_->cq()->Pluck(&ops);
}
bool Write(const W& msg) GRPC_OVERRIDE {
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage> ops;
if (!ops.SendMessage(msg).ok()) {
return false;
}
if (!ctx_->sent_initial_metadata_) {
ops.SendInitialMetadata(ctx_->initial_metadata_);
ctx_->sent_initial_metadata_ = true;
}
call_->PerformOps(&ops);
return call_->cq()->Pluck(&ops);
}
private:
@ -339,29 +345,31 @@ class ServerReaderWriter GRPC_FINAL : public WriterInterface<W>,
void SendInitialMetadata() {
GPR_ASSERT(!ctx_->sent_initial_metadata_);
CallOpSet<CallOpSendInitialMetadata> ops;
ops.SendInitialMetadata(ctx_->initial_metadata_);
ctx_->sent_initial_metadata_ = true;
call_->PerformOps(&ops);
call_->cq()->Pluck(&ops);
}
bool Read(R* msg) GRPC_OVERRIDE {
CallOpSet<CallOpRecvMessage<R>> ops;
ops.RecvMessage(msg);
call_->PerformOps(&ops);
return call_->cq()->Pluck(&ops) && ops.got_message;
}
bool Write(const W& msg) GRPC_OVERRIDE {
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage> ops;
if (!ops.SendMessage(msg).ok()) {
return false;
}
if (!ctx_->sent_initial_metadata_) {
ops.SendInitialMetadata(ctx_->initial_metadata_);
ctx_->sent_initial_metadata_ = true;
}
call_->PerformOps(&ops);
return call_->cq()->Pluck(&ops);
}
private:
@ -400,57 +408,59 @@ class AsyncWriterInterface {
template <class R>
class ClientAsyncReaderInterface : public ClientAsyncStreamingInterface,
public AsyncReaderInterface<R> {};
template <class R>
class ClientAsyncReader GRPC_FINAL : public ClientAsyncReaderInterface<R> {
public:
// Create a stream and write the first request out.
template <class W>
ClientAsyncReader(ChannelInterface* channel, CompletionQueue* cq,
const RpcMethod& method, ClientContext* context,
const W& request, void* tag)
: context_(context), call_(channel->CreateCall(method, context, cq)) {
init_ops_.set_output_tag(tag);
init_ops_.SendInitialMetadata(context->send_initial_metadata_);
// TODO(ctiller): don't assert
GPR_ASSERT(init_ops_.SendMessage(request).ok());
init_ops_.ClientSendClose();
call_.PerformOps(&init_ops_);
}
void ReadInitialMetadata(void* tag) GRPC_OVERRIDE {
GPR_ASSERT(!context_->initial_metadata_received_);
meta_ops_.set_output_tag(tag);
meta_ops_.RecvInitialMetadata(context_);
call_.PerformOps(&meta_ops_);
}
void Read(R* msg, void* tag) GRPC_OVERRIDE {
read_ops_.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
read_ops_.RecvInitialMetadata(context_);
}
read_ops_.RecvMessage(msg);
call_.PerformOps(&read_ops_);
}
void Finish(Status* status, void* tag) GRPC_OVERRIDE {
finish_ops_.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
finish_ops_.RecvInitialMetadata(context_);
}
finish_ops_.ClientRecvStatus(context_, status);
call_.PerformOps(&finish_ops_);
}
private:
ClientContext* context_;
Call call_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, CallOpClientSendClose>
init_ops_;
CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> read_ops_;
CallOpSet<CallOpRecvInitialMetadata, CallOpClientRecvStatus> finish_ops_;
};
template <class W>
@ -463,56 +473,57 @@ class ClientAsyncWriterInterface : public ClientAsyncStreamingInterface,
template <class W>
class ClientAsyncWriter GRPC_FINAL : public ClientAsyncWriterInterface<W> {
public:
template <class R>
ClientAsyncWriter(ChannelInterface* channel, CompletionQueue* cq,
const RpcMethod& method, ClientContext* context,
R* response, void* tag)
: context_(context), call_(channel->CreateCall(method, context, cq)) {
finish_ops_.RecvMessage(response);
init_ops_.set_output_tag(tag);
init_ops_.SendInitialMetadata(context->send_initial_metadata_);
call_.PerformOps(&init_ops_);
}
void ReadInitialMetadata(void* tag) GRPC_OVERRIDE {
GPR_ASSERT(!context_->initial_metadata_received_);
meta_ops_.set_output_tag(tag);
meta_ops_.RecvInitialMetadata(context_);
call_.PerformOps(&meta_ops_);
}
void Write(const W& msg, void* tag) GRPC_OVERRIDE {
write_ops_.set_output_tag(tag);
// TODO(ctiller): don't assert
GPR_ASSERT(write_ops_.SendMessage(msg).ok());
call_.PerformOps(&write_ops_);
}
void WritesDone(void* tag) GRPC_OVERRIDE {
writes_done_ops_.set_output_tag(tag);
writes_done_ops_.ClientSendClose();
call_.PerformOps(&writes_done_ops_);
}
void Finish(Status* status, void* tag) GRPC_OVERRIDE {
finish_ops_.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
finish_ops_.RecvInitialMetadata(context_);
}
finish_ops_.ClientRecvStatus(context_, status);
call_.PerformOps(&finish_ops_);
}
private:
ClientContext* context_;
Call call_;
CallOpSet<CallOpSendInitialMetadata> init_ops_;
CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
CallOpSet<CallOpSendMessage> write_ops_;
CallOpSet<CallOpClientSendClose> writes_done_ops_;
CallOpSet<CallOpRecvInitialMetadata, CallOpGenericRecvMessage,
CallOpClientRecvStatus> finish_ops_;
};
// Client-side interface for bi-directional streaming.
@ -532,58 +543,59 @@ class ClientAsyncReaderWriter GRPC_FINAL
const RpcMethod& method, ClientContext* context,
void* tag)
: context_(context), call_(channel->CreateCall(method, context, cq)) {
init_ops_.set_output_tag(tag);
init_ops_.SendInitialMetadata(context->send_initial_metadata_);
call_.PerformOps(&init_ops_);
}
void ReadInitialMetadata(void* tag) GRPC_OVERRIDE {
GPR_ASSERT(!context_->initial_metadata_received_);
meta_ops_.set_output_tag(tag);
meta_ops_.RecvInitialMetadata(context_);
call_.PerformOps(&meta_ops_);
}
void Read(R* msg, void* tag) GRPC_OVERRIDE {
read_ops_.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
read_ops_.RecvInitialMetadata(context_);
}
read_ops_.RecvMessage(msg);
call_.PerformOps(&read_ops_);
}
void Write(const W& msg, void* tag) GRPC_OVERRIDE {
write_ops_.set_output_tag(tag);
// TODO(ctiller): don't assert
GPR_ASSERT(write_ops_.SendMessage(msg).ok());
call_.PerformOps(&write_ops_);
}
void WritesDone(void* tag) GRPC_OVERRIDE {
writes_done_ops_.set_output_tag(tag);
writes_done_ops_.ClientSendClose();
call_.PerformOps(&writes_done_ops_);
}
void Finish(Status* status, void* tag) GRPC_OVERRIDE {
finish_ops_.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
finish_ops_.RecvInitialMetadata(context_);
}
finish_ops_.ClientRecvStatus(context_, status);
call_.PerformOps(&finish_ops_);
}
private:
ClientContext* context_;
Call call_;
CallOpSet<CallOpSendInitialMetadata> init_ops_;
CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> read_ops_;
CallOpSet<CallOpSendMessage> write_ops_;
CallOpSet<CallOpClientSendClose> writes_done_ops_;
CallOpSet<CallOpRecvInitialMetadata, CallOpClientRecvStatus> finish_ops_;
};
template <class W, class R>
@ -596,41 +608,44 @@ class ServerAsyncReader GRPC_FINAL : public ServerAsyncStreamingInterface,
void SendInitialMetadata(void* tag) GRPC_OVERRIDE {
GPR_ASSERT(!ctx_->sent_initial_metadata_);
meta_ops_.set_output_tag(tag);
meta_ops_.SendInitialMetadata(ctx_->initial_metadata_);
ctx_->sent_initial_metadata_ = true;
call_.PerformOps(&meta_ops_);
}
void Read(R* msg, void* tag) GRPC_OVERRIDE {
read_ops_.set_output_tag(tag);
read_ops_.RecvMessage(msg);
call_.PerformOps(&read_ops_);
}
void Finish(const W& msg, const Status& status, void* tag) {
finish_ops_.set_output_tag(tag);
if (!ctx_->sent_initial_metadata_) {
finish_ops_.SendInitialMetadata(ctx_->initial_metadata_);
ctx_->sent_initial_metadata_ = true;
}
// The response is dropped if the status is not OK.
if (status.ok()) {
finish_ops_.ServerSendStatus(
ctx_->trailing_metadata_,
finish_ops_.SendMessage(msg));
} else {
finish_ops_.ServerSendStatus(ctx_->trailing_metadata_, status);
}
call_.PerformOps(&finish_ops_);
}
void FinishWithError(const Status& status, void* tag) {
GPR_ASSERT(!status.ok());
finish_ops_.set_output_tag(tag);
if (!ctx_->sent_initial_metadata_) {
finish_ops_.SendInitialMetadata(ctx_->initial_metadata_);
ctx_->sent_initial_metadata_ = true;
}
finish_ops_.ServerSendStatus(ctx_->trailing_metadata_, status);
call_.PerformOps(&finish_ops_);
}
private:
@ -638,9 +653,10 @@ class ServerAsyncReader GRPC_FINAL : public ServerAsyncStreamingInterface,
Call call_;
ServerContext* ctx_;
CallOpSet<CallOpSendInitialMetadata> meta_ops_;
CallOpSet<CallOpRecvMessage<R>> read_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
CallOpServerSendStatus> finish_ops_;
};
template <class W>
@ -653,30 +669,31 @@ class ServerAsyncWriter GRPC_FINAL : public ServerAsyncStreamingInterface,
void SendInitialMetadata(void* tag) GRPC_OVERRIDE {
GPR_ASSERT(!ctx_->sent_initial_metadata_);
meta_ops_.set_output_tag(tag);
meta_ops_.SendInitialMetadata(ctx_->initial_metadata_);
ctx_->sent_initial_metadata_ = true;
call_.PerformOps(&meta_ops_);
}
void Write(const W& msg, void* tag) GRPC_OVERRIDE {
write_ops_.set_output_tag(tag);
if (!ctx_->sent_initial_metadata_) {
write_ops_.SendInitialMetadata(ctx_->initial_metadata_);
ctx_->sent_initial_metadata_ = true;
}
// TODO(ctiller): don't assert
GPR_ASSERT(write_ops_.SendMessage(msg).ok());
call_.PerformOps(&write_ops_);
}
void Finish(const Status& status, void* tag) {
finish_ops_.set_output_tag(tag);
if (!ctx_->sent_initial_metadata_) {
finish_ops_.SendInitialMetadata(ctx_->initial_metadata_);
ctx_->sent_initial_metadata_ = true;
}
finish_ops_.ServerSendStatus(ctx_->trailing_metadata_, status);
call_.PerformOps(&finish_ops_);
}
private:
@ -684,9 +701,9 @@ class ServerAsyncWriter GRPC_FINAL : public ServerAsyncStreamingInterface,
Call call_;
ServerContext* ctx_;
CallOpSet<CallOpSendInitialMetadata> meta_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage> write_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> finish_ops_;
};
// Server-side interface for bi-directional streaming.
@ -701,36 +718,37 @@ class ServerAsyncReaderWriter GRPC_FINAL : public ServerAsyncStreamingInterface,
void SendInitialMetadata(void* tag) GRPC_OVERRIDE {
GPR_ASSERT(!ctx_->sent_initial_metadata_);
meta_ops_.set_output_tag(tag);
meta_ops_.SendInitialMetadata(ctx_->initial_metadata_);
ctx_->sent_initial_metadata_ = true;
call_.PerformOps(&meta_ops_);
}
void Read(R* msg, void* tag) GRPC_OVERRIDE {
read_ops_.set_output_tag(tag);
read_ops_.RecvMessage(msg);
call_.PerformOps(&read_ops_);
}
void Write(const W& msg, void* tag) GRPC_OVERRIDE {
write_ops_.set_output_tag(tag);
if (!ctx_->sent_initial_metadata_) {
write_ops_.SendInitialMetadata(ctx_->initial_metadata_);
ctx_->sent_initial_metadata_ = true;
}
// TODO(ctiller): don't assert
GPR_ASSERT(write_ops_.SendMessage(msg).ok());
call_.PerformOps(&write_ops_);
}
void Finish(const Status& status, void* tag) {
finish_ops_.set_output_tag(tag);
if (!ctx_->sent_initial_metadata_) {
finish_ops_.SendInitialMetadata(ctx_->initial_metadata_);
ctx_->sent_initial_metadata_ = true;
}
finish_ops_.ServerSendStatus(ctx_->trailing_metadata_, status);
call_.PerformOps(&finish_ops_);
}
private:
@ -738,10 +756,10 @@ class ServerAsyncReaderWriter GRPC_FINAL : public ServerAsyncStreamingInterface,
Call call_;
ServerContext* ctx_;
CallOpSet<CallOpSendInitialMetadata> meta_ops_;
CallOpSet<CallOpRecvMessage<R>> read_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage> write_ops_;
CallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> finish_ops_;
};
} // namespace grpc

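The CallOpSet migration above leaves the blocking surface unchanged, so application code is unaffected. A usage sketch for the server-streaming reader; the stub, method name, and message types are hypothetical:

grpc::ClientContext context;
RouteRequest request;  // hypothetical request message
std::unique_ptr<grpc::ClientReader<RouteNote>> reader(
    stub->ListNotes(&context, request));  // ctor sends metadata + request + close
RouteNote note;
while (reader->Read(&note)) {
  // each Read() issues one CallOpSet and Plucks its completion
}
grpc::Status status = reader->Finish();  // CallOpClientRecvStatus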
@ -85,7 +85,6 @@ size_t grpc_byte_buffer_length(grpc_byte_buffer *bb);
/** Destroys \a byte_buffer deallocating all its memory. */
void grpc_byte_buffer_destroy(grpc_byte_buffer *byte_buffer);
/** Reader for byte buffers. Iterates over slices in the byte buffer */
struct grpc_byte_buffer_reader;
typedef struct grpc_byte_buffer_reader grpc_byte_buffer_reader;
@ -111,4 +110,4 @@ grpc_byte_buffer *grpc_raw_byte_buffer_from_reader(
}
#endif
#endif /* GRPC_BYTE_BUFFER_H */

@ -55,4 +55,4 @@ struct grpc_byte_buffer_reader {
}
#endif
#endif /* GRPC_BYTE_BUFFER_READER_H */

@ -59,4 +59,4 @@ const char *grpc_compression_algorithm_name(
grpc_compression_algorithm grpc_compression_algorithm_for_level(
grpc_compression_level level);
#endif /* GRPC_COMPRESSION_H */

@ -145,7 +145,10 @@ typedef enum grpc_call_error {
/* the flags value was illegal for this call */
GRPC_CALL_ERROR_INVALID_FLAGS,
/* invalid metadata was passed to this call */
GRPC_CALL_ERROR_INVALID_METADATA,
/* completion queue for notification has not been registered with the server
*/
GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE
} grpc_call_error;
/* Write Flags: */
@ -253,7 +256,7 @@ typedef enum {
no arguments) */
typedef struct grpc_op {
grpc_op_type op;
gpr_uint32 flags; /**< Write flags bitset for grpc_begin_messages */
union {
struct {
size_t count;
@ -338,7 +341,7 @@ void grpc_shutdown(void);
grpc_completion_queue *grpc_completion_queue_create(void);
/** Blocks until an event is available, the completion queue is being shut down,
or deadline is reached.
Returns a grpc_event with type GRPC_QUEUE_TIMEOUT on timeout,
otherwise a grpc_event describing the event that occurred.
@ -349,7 +352,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cq,
gpr_timespec deadline);
/** Blocks until an event with tag 'tag' is available, the completion queue is
being shutdown or deadline is reached.
Returns a grpc_event with type GRPC_QUEUE_TIMEOUT on timeout,
otherwise a grpc_event describing the event that occurred.
@ -372,10 +375,9 @@ void grpc_completion_queue_shutdown(grpc_completion_queue *cq);
drained and no threads are executing grpc_completion_queue_next */
void grpc_completion_queue_destroy(grpc_completion_queue *cq);
/* Create a call given a grpc_channel, in order to call 'method'. All
completions are sent to 'completion_queue'. 'method' and 'host' need only
live through the invocation of this function. */
grpc_call *grpc_channel_create_call(grpc_channel *channel,
grpc_completion_queue *completion_queue,
const char *method, const char *host,
@ -394,7 +396,11 @@ grpc_call *grpc_channel_create_registered_call(
completion of type 'tag' to the completion queue bound to the call.
The order of ops specified in the batch has no significance.
Only one operation of each type can be active at once in any given
batch.
THREAD SAFETY: access to grpc_call_start_batch in multi-threaded environment
needs to be synchronized. As an optimization, you may synchronize batches
containing just send operations independently from batches containing just
receive operations. */
grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
size_t nops, void *tag);
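The batch-level thread-safety rule above is easy to get wrong. A minimal sketch of one conforming arrangement; the lock names and wrapper functions are illustrative, not library code:

#include <mutex>
#include <grpc/grpc.h>
#include <grpc/support/log.h>

// Send-only batches and receive-only batches may be synchronized
// independently; everything else must share one lock.
std::mutex g_send_mu;
std::mutex g_recv_mu;

void start_send_batch(grpc_call* call, const grpc_op* ops, size_t nops,
                      void* tag) {
  std::lock_guard<std::mutex> lock(g_send_mu);  // serializes send-only batches
  GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(call, ops, nops, tag));
}

void start_recv_batch(grpc_call* call, const grpc_op* ops, size_t nops,
                      void* tag) {
  std::lock_guard<std::mutex> lock(g_recv_mu);  // serializes recv-only batches
  GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(call, ops, nops, tag));
}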
@ -412,17 +418,6 @@ grpc_channel *grpc_lame_client_channel_create(void);
/* Close and destroy a grpc channel */
void grpc_channel_destroy(grpc_channel *channel);
/* THREAD-SAFETY for grpc_call
The following functions are thread-compatible for any given call:
grpc_call_add_metadata
grpc_call_invoke
grpc_call_start_write
grpc_call_writes_done
grpc_call_start_read
grpc_call_destroy
The function grpc_call_cancel is thread-safe, and can be called at any
point before grpc_call_destroy is called. */
/* Error handling for grpc_call
Most grpc_call functions return a grpc_error. If the error is not GRPC_OK
then the operation failed due to some unsatisfied precondition.
@ -430,7 +425,10 @@ void grpc_channel_destroy(grpc_channel *channel);
has been made. */
/* Called by clients to cancel an RPC on the server.
Can be called multiple times, from any thread.
THREAD-SAFETY grpc_call_cancel and grpc_call_cancel_with_status
are thread-safe, and can be called at any point before grpc_call_destroy
is called.*/
grpc_call_error grpc_call_cancel(grpc_call *call);
/* Called by clients to cancel an RPC on the server.
@ -443,10 +441,13 @@ grpc_call_error grpc_call_cancel_with_status(grpc_call *call,
grpc_status_code status,
const char *description);
/* Destroy a call.
THREAD SAFETY: grpc_call_destroy is thread-compatible */
void grpc_call_destroy(grpc_call *call);
/* Request notification of a new call. 'cq_for_notification' must
have been registered to the server via grpc_server_register_completion_queue.
*/
grpc_call_error grpc_server_request_call(
grpc_server *server, grpc_call **call, grpc_call_details *details,
grpc_metadata_array *request_metadata,
@ -463,7 +464,9 @@ grpc_call_error grpc_server_request_call(
void *grpc_server_register_method(grpc_server *server, const char *method,
const char *host);
/* Request notification of a new pre-registered call. 'cq_for_notification' must
have been registered to the server via grpc_server_register_completion_queue.
*/
grpc_call_error grpc_server_request_registered_call(
grpc_server *server, void *registered_method, grpc_call **call,
gpr_timespec *deadline, grpc_metadata_array *request_metadata,
@ -477,9 +480,10 @@ grpc_call_error grpc_server_request_registered_call(
through the invocation of this function. */
grpc_server *grpc_server_create(const grpc_channel_args *args);
/* Register a completion queue with the server. Must be done for any
notification completion queue that is passed to grpc_server_request_*_call
and to grpc_server_shutdown_and_notify. Must be performed prior to
grpc_server_start. */
void grpc_server_register_completion_queue(grpc_server *server,
grpc_completion_queue *cq);
@ -496,7 +500,8 @@ void grpc_server_start(grpc_server *server);
Existing calls will be allowed to complete.
Send a GRPC_OP_COMPLETE event when there are no more calls being serviced.
Shutdown is idempotent, and all tags will be notified at once if multiple
grpc_server_shutdown_and_notify calls are made. 'cq' must have been
registered to this server via grpc_server_register_completion_queue. */
void grpc_server_shutdown_and_notify(grpc_server *server,
grpc_completion_queue *cq, void *tag);
@ -514,9 +519,9 @@ void grpc_server_destroy(grpc_server *server);
Tracers (usually controlled by the environment variable GRPC_TRACE)
allow printf-style debugging on GRPC internals, and are useful for
tracking down problems in the field.
Use of this function is not strictly thread-safe, but the
thread-safety issues raised by it should not be of concern. */
int grpc_tracer_set_enabled(const char *name, int enabled);

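Taken together, the registration rules spelled out above dictate a specific server lifecycle. A hedged end-to-end sketch; error handling and the actual call-servicing loop are elided, and the tag value is arbitrary:

#include <grpc/grpc.h>

void run_server(void) {
  grpc_completion_queue* cq = grpc_completion_queue_create();
  grpc_server* server = grpc_server_create(NULL);
  // Must precede grpc_server_start, and covers both the
  // grpc_server_request_*_call notification queue and the shutdown queue.
  grpc_server_register_completion_queue(server, cq);
  grpc_server_start(server);
  /* ... request calls with grpc_server_request_call(server, ..., cq, tag)
     and service them via grpc_completion_queue_next ... */
  void* shutdown_tag = (void*)0xdead;
  grpc_server_shutdown_and_notify(server, cq, shutdown_tag);  // cq is registered
  grpc_completion_queue_pluck(cq, shutdown_tag, gpr_inf_future);
  grpc_server_destroy(server);
  grpc_completion_queue_shutdown(cq);
  while (grpc_completion_queue_next(cq, gpr_inf_future).type !=
         GRPC_QUEUE_SHUTDOWN) {
    // drain remaining events
  }
  grpc_completion_queue_destroy(cq);
}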
@ -195,8 +195,7 @@ grpc_call_error grpc_call_set_credentials(grpc_call *call,
/* TODO(jboeuf): Define some well-known property names. */
#define GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME "transport_security_type"
#define GRPC_FAKE_TRANSPORT_SECURITY_TYPE "fake"
#define GRPC_SSL_TRANSPORT_SECURITY_TYPE "ssl"
@ -251,4 +250,4 @@ const grpc_auth_context *grpc_call_auth_context(grpc_call *call);
}
#endif
#endif /* GRPC_GRPC_SECURITY_H */

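For context on the property-name macro condensed above, a sketch of reading the negotiated security type off a call. It assumes the auth-property iterator API from this same header (not shown in the hunk); treat the exact signatures as assumptions:

#include <string.h>
#include <grpc/grpc_security.h>

// Returns 1 if the call negotiated SSL transport security, 0 otherwise.
int call_is_ssl(grpc_call* call) {
  const grpc_auth_context* ctx = grpc_call_auth_context(call);
  if (ctx == NULL) return 0;
  grpc_auth_property_iterator it = grpc_auth_context_find_properties_by_name(
      ctx, GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME);
  const grpc_auth_property* prop = grpc_auth_property_iterator_next(&it);
  return prop != NULL &&
         strncmp(prop->value, GRPC_SSL_TRANSPORT_SECURITY_TYPE,
                 prop->value_length) == 0;
}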
@ -223,7 +223,9 @@
#endif
/* Validate platform combinations */
#if defined(GPR_GCC_ATOMIC) + defined(GPR_GCC_SYNC) + \
defined(GPR_WIN32_ATOMIC) != \
1
#error Must define exactly one of GPR_GCC_ATOMIC, GPR_GCC_SYNC, GPR_WIN32_ATOMIC
#endif
@ -231,7 +233,9 @@
#error Must define exactly one of GPR_ARCH_32, GPR_ARCH_64
#endif
#if defined(GPR_CPU_LINUX) + defined(GPR_CPU_POSIX) + defined(GPR_WIN32) + \
defined(GPR_CPU_IPHONE) + defined(GPR_CPU_CUSTOM) != \
1
#error Must define exactly one of GPR_CPU_LINUX, GPR_CPU_POSIX, GPR_WIN32, GPR_CPU_IPHONE, GPR_CPU_CUSTOM
#endif
@ -239,11 +243,15 @@
#error Must define GPR_POSIX_SOCKET to use GPR_POSIX_MULTIPOLL_WITH_POLL
#endif
#if defined(GPR_POSIX_SOCKET) + defined(GPR_WINSOCK_SOCKET) + \
defined(GPR_CUSTOM_SOCKET) != \
1
#error Must define exactly one of GPR_POSIX_SOCKET, GPR_WINSOCK_SOCKET, GPR_CUSTOM_SOCKET
#endif
#if defined(GPR_MSVC_TLS) + defined(GPR_GCC_TLS) + defined(GPR_PTHREAD_TLS) + \
defined(GPR_CUSTOM_TLS) != \
1
#error Must define exactly one of GPR_MSVC_TLS, GPR_GCC_TLS, GPR_PTHREAD_TLS, GPR_CUSTOM_TLS
#endif
@ -266,4 +274,12 @@ typedef uintptr_t gpr_uintptr;
power of two */
#define GPR_MAX_ALIGNMENT 16
#ifndef GRPC_MUST_USE_RESULT
#ifdef __GNUC__
#define GRPC_MUST_USE_RESULT __attribute__((warn_unused_result))
#else
#define GRPC_MUST_USE_RESULT
#endif
#endif
#endif /* GRPC_SUPPORT_PORT_PLATFORM_H */

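A usage sketch for the new GRPC_MUST_USE_RESULT macro; try_reserve is a hypothetical function, used only to show the warning behavior:

#include <stddef.h>

// Annotated so GCC-family compilers warn if the result is dropped;
// on other compilers the macro expands to nothing.
GRPC_MUST_USE_RESULT int try_reserve(size_t n);

void demo(void) {
  try_reserve(64);                            // warns: result ignored
  if (try_reserve(64) == 0) { /* handle failure */ }
}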
@ -97,8 +97,8 @@ typedef struct gpr_slice {
((slice).refcount ? (slice).data.refcounted.length \
: (slice).data.inlined.length)
#define GPR_SLICE_SET_LENGTH(slice, newlen) \
((slice).refcount ? ((slice).data.refcounted.length = (size_t)(newlen)) \
: ((slice).data.inlined.length = (gpr_uint8)(newlen)))
#define GPR_SLICE_END_PTR(slice) \
GPR_SLICE_START_PTR(slice) + GPR_SLICE_LENGTH(slice)
#define GPR_SLICE_IS_EMPTY(slice) (GPR_SLICE_LENGTH(slice) == 0)
@ -176,4 +176,4 @@ int gpr_slice_str_cmp(gpr_slice a, const char *b);
}
#endif
#endif /* GRPC_SUPPORT_SLICE_H */

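Why the new casts help: a refcounted slice stores its length as a size_t while an inlined slice stores it in a byte-sized field, so an uncasted assignment triggered implicit-conversion warnings. A small sketch, assuming a heap-backed (refcounted) slice:

#include <grpc/support/log.h>
#include <grpc/support/slice.h>

void shrink_slice(void) {
  gpr_slice s = gpr_slice_malloc(64);  // large enough to be refcounted
  GPR_SLICE_SET_LENGTH(s, 32);         // macro now casts to (size_t)
  GPR_ASSERT(GPR_SLICE_LENGTH(s) == 32);
  gpr_slice_unref(s);
}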
@ -34,7 +34,7 @@
#ifndef GRPC_SUPPORT_TLS_PTHREAD_H
#define GRPC_SUPPORT_TLS_PTHREAD_H
#include <grpc/support/log.h> /* for GPR_ASSERT */
#include <pthread.h>
/* Thread local storage based on pthread library calls.
@ -44,8 +44,7 @@ struct gpr_pthread_thread_local {
pthread_key_t key;
};
#define GPR_TLS_DECL(name) static struct gpr_pthread_thread_local name = {0}
#define gpr_tls_init(tls) GPR_ASSERT(0 == pthread_key_create(&(tls)->key, NULL))
#define gpr_tls_destroy(tls) pthread_key_delete((tls)->key)

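A usage sketch for the condensed GPR_TLS_DECL form, assuming the pthread variant is the one selected; gpr_tls_set/gpr_tls_get belong to the same TLS abstraction. Init/destroy are process-wide, set/get are per-thread:

#include <grpc/support/tls_pthread.h>

GPR_TLS_DECL(g_worker_id);

void library_init(void) { gpr_tls_init(&g_worker_id); }      // once, at startup
void library_shutdown(void) { gpr_tls_destroy(&g_worker_id); }

void worker_entry(gpr_intptr id) {
  gpr_tls_set(&g_worker_id, id);               // visible only to this thread
  gpr_intptr me = gpr_tls_get(&g_worker_id);
  (void)me;
}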
@ -35,6 +35,7 @@
#define SRC_COMPILER_CONFIG_H
#include <grpc++/config.h>
#include <grpc++/config_protobuf.h>
#ifndef GRPC_CUSTOM_DESCRIPTOR
#include <google/protobuf/descriptor.h>
@ -48,7 +49,8 @@
#ifndef GRPC_CUSTOM_CODEGENERATOR
#include <google/protobuf/compiler/code_generator.h>
#define GRPC_CUSTOM_CODEGENERATOR ::google::protobuf::compiler::CodeGenerator
#define GRPC_CUSTOM_GENERATORCONTEXT \
::google::protobuf::compiler::GeneratorContext
#endif
#ifndef GRPC_CUSTOM_PRINTER
@ -57,7 +59,8 @@
#include <google/protobuf/io/zero_copy_stream_impl_lite.h>
#define GRPC_CUSTOM_PRINTER ::google::protobuf::io::Printer
#define GRPC_CUSTOM_CODEDOUTPUTSTREAM ::google::protobuf::io::CodedOutputStream
#define GRPC_CUSTOM_STRINGOUTPUTSTREAM \
::google::protobuf::io::StringOutputStream
#endif
#ifndef GRPC_CUSTOM_PLUGINMAIN

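A sketch of what these GRPC_CUSTOM_* indirections buy: a downstream build can pre-define them to retarget the compiler plugin at a vendored protobuf before including this header. The ::my::pb namespace is hypothetical:

#define GRPC_CUSTOM_CODEGENERATOR ::my::pb::compiler::CodeGenerator
#define GRPC_CUSTOM_GENERATORCONTEXT ::my::pb::compiler::GeneratorContext
#include "src/compiler/config.h"  // other names still default to ::google::protobuf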
@ -97,7 +97,8 @@ grpc::string GetHeaderPrologue(const grpc::protobuf::FileDescriptor *file,
vars["filename_base"] = grpc_generator::StripProto(file->name());
printer.Print(vars, "// Generated by the gRPC protobuf plugin.\n");
printer.Print(vars, "// If you make any local change, they will be lost.\n");
printer.Print(vars,
"// If you make any local change, they will be lost.\n");
printer.Print(vars, "// source: $filename$\n");
printer.Print(vars, "#ifndef GRPC_$filename_identifier$__INCLUDED\n");
printer.Print(vars, "#define GRPC_$filename_identifier$__INCLUDED\n");
@ -113,6 +114,7 @@ grpc::string GetHeaderIncludes(const grpc::protobuf::FileDescriptor *file,
grpc::string temp =
"#include <grpc++/impl/internal_stub.h>\n"
"#include <grpc++/impl/rpc_method.h>\n"
"#include <grpc++/impl/proto_utils.h>\n"
"#include <grpc++/impl/service_type.h>\n"
"#include <grpc++/async_unary_call.h>\n"
"#include <grpc++/status.h>\n"
@ -141,10 +143,10 @@ grpc::string GetHeaderIncludes(const grpc::protobuf::FileDescriptor *file,
return temp;
}
void PrintHeaderClientMethodInterfaces(
grpc::protobuf::io::Printer *printer,
const grpc::protobuf::MethodDescriptor *method,
std::map<grpc::string, grpc::string> *vars, bool is_public) {
(*vars)["Method"] = method->name();
(*vars)["Request"] =
grpc_cpp_generator::ClassName(method->input_type(), true);
@ -157,19 +159,17 @@ void PrintHeaderClientMethodInterfaces(grpc::protobuf::io::Printer *printer,
*vars,
"virtual ::grpc::Status $Method$(::grpc::ClientContext* context, "
"const $Request$& request, $Response$* response) = 0;\n");
printer->Print(*vars,
"std::unique_ptr< "
"::grpc::ClientAsyncResponseReaderInterface< $Response$>> "
"Async$Method$(::grpc::ClientContext* context, "
"const $Request$& request, "
"::grpc::CompletionQueue* cq) {\n");
printer->Indent();
printer->Print(*vars,
"return std::unique_ptr< "
"::grpc::ClientAsyncResponseReaderInterface< $Response$>>("
"Async$Method$Raw(context, request, cq));\n");
printer->Outdent();
printer->Print("}\n");
} else if (ClientOnlyStreaming(method)) {
@ -188,14 +188,14 @@ void PrintHeaderClientMethodInterfaces(grpc::protobuf::io::Printer *printer,
printer->Print(
*vars,
"std::unique_ptr< ::grpc::ClientAsyncWriterInterface< $Request$>>"
" Async$Method$(::grpc::ClientContext* context, $Response$* response, "
" Async$Method$(::grpc::ClientContext* context, $Response$* "
"response, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Indent();
printer->Print(*vars,
"return std::unique_ptr< "
"::grpc::ClientAsyncWriterInterface< $Request$>>("
"Async$Method$Raw(context, response, cq, tag));\n");
printer->Outdent();
printer->Print("}\n");
} else if (ServerOnlyStreaming(method)) {
@ -218,18 +218,17 @@ void PrintHeaderClientMethodInterfaces(grpc::protobuf::io::Printer *printer,
"::grpc::ClientContext* context, const $Request$& request, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Indent();
printer->Print(*vars,
"return std::unique_ptr< "
"::grpc::ClientAsyncReaderInterface< $Response$>>("
"Async$Method$Raw(context, request, cq, tag));\n");
printer->Outdent();
printer->Print("}\n");
} else if (BidiStreaming(method)) {
printer->Print(*vars,
"std::unique_ptr< ::grpc::ClientReaderWriterInterface< "
"$Request$, $Response$>> "
"$Method$(::grpc::ClientContext* context) {\n");
printer->Indent();
printer->Print(
*vars,
@ -267,12 +266,11 @@ void PrintHeaderClientMethodInterfaces(grpc::protobuf::io::Printer *printer,
"virtual ::grpc::ClientWriterInterface< $Request$>*"
" $Method$Raw("
"::grpc::ClientContext* context, $Response$* response) = 0;\n");
printer->Print(*vars,
"virtual ::grpc::ClientAsyncWriterInterface< $Request$>*"
" Async$Method$Raw(::grpc::ClientContext* context, "
"$Response$* response, "
"::grpc::CompletionQueue* cq, void* tag) = 0;\n");
} else if (ServerOnlyStreaming(method)) {
printer->Print(
*vars,
@ -285,16 +283,15 @@ void PrintHeaderClientMethodInterfaces(grpc::protobuf::io::Printer *printer,
"::grpc::ClientContext* context, const $Request$& request, "
"::grpc::CompletionQueue* cq, void* tag) = 0;\n");
} else if (BidiStreaming(method)) {
printer->Print(*vars,
"virtual ::grpc::ClientReaderWriterInterface< $Request$, "
"$Response$>* "
"$Method$Raw(::grpc::ClientContext* context) = 0;\n");
printer->Print(*vars,
"virtual ::grpc::ClientAsyncReaderWriterInterface< "
"$Request$, $Response$>* "
"Async$Method$Raw(::grpc::ClientContext* context, "
"::grpc::CompletionQueue* cq, void* tag) = 0;\n");
}
}
}
@ -321,11 +318,10 @@ void PrintHeaderClientMethod(grpc::protobuf::io::Printer *printer,
"const $Request$& request, "
"::grpc::CompletionQueue* cq) {\n");
printer->Indent();
printer->Print(*vars,
"return std::unique_ptr< "
"::grpc::ClientAsyncResponseReader< $Response$>>("
"Async$Method$Raw(context, request, cq));\n");
printer->Outdent();
printer->Print("}\n");
} else if (ClientOnlyStreaming(method)) {
@ -335,17 +331,16 @@ void PrintHeaderClientMethod(grpc::protobuf::io::Printer *printer,
" $Method$("
"::grpc::ClientContext* context, $Response$* response) {\n");
printer->Indent();
printer->Print(*vars,
"return std::unique_ptr< ::grpc::ClientWriter< $Request$>>"
"($Method$Raw(context, response));\n");
printer->Outdent();
printer->Print("}\n");
printer->Print(*vars,
"std::unique_ptr< ::grpc::ClientAsyncWriter< $Request$>>"
" Async$Method$(::grpc::ClientContext* context, "
"$Response$* response, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Indent();
printer->Print(
*vars,
@ -385,53 +380,47 @@ void PrintHeaderClientMethod(grpc::protobuf::io::Printer *printer,
"std::unique_ptr< ::grpc::ClientReaderWriter< $Request$, $Response$>>"
" $Method$(::grpc::ClientContext* context) {\n");
printer->Indent();
printer->Print(*vars,
"return std::unique_ptr< "
"::grpc::ClientReaderWriter< $Request$, $Response$>>("
"$Method$Raw(context));\n");
printer->Outdent();
printer->Print("}\n");
printer->Print(*vars,
"std::unique_ptr< ::grpc::ClientAsyncReaderWriter< "
"$Request$, $Response$>> "
"Async$Method$(::grpc::ClientContext* context, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Indent();
printer->Print(*vars,
"return std::unique_ptr< "
"::grpc::ClientAsyncReaderWriter< $Request$, $Response$>>("
"Async$Method$Raw(context, cq, tag));\n");
printer->Outdent();
printer->Print("}\n");
}
} else {
if (NoStreaming(method)) {
printer->Print(*vars,
"::grpc::ClientAsyncResponseReader< $Response$>* "
"Async$Method$Raw(::grpc::ClientContext* context, "
"const $Request$& request, "
"::grpc::CompletionQueue* cq) GRPC_OVERRIDE;\n");
} else if (ClientOnlyStreaming(method)) {
printer->Print(*vars,
"::grpc::ClientWriter< $Request$>* $Method$Raw("
"::grpc::ClientContext* context, $Response$* response) "
"GRPC_OVERRIDE;\n");
printer->Print(
*vars,
"::grpc::ClientAsyncWriter< $Request$>* Async$Method$Raw("
"::grpc::ClientContext* context, $Response$* response, "
"::grpc::CompletionQueue* cq, void* tag) GRPC_OVERRIDE;\n");
} else if (ServerOnlyStreaming(method)) {
printer->Print(*vars,
"::grpc::ClientReader< $Response$>* $Method$Raw("
"::grpc::ClientContext* context, const $Request$& request)"
" GRPC_OVERRIDE;\n");
printer->Print(
*vars,
"::grpc::ClientAsyncReader< $Response$>* Async$Method$Raw("
@ -629,7 +618,7 @@ grpc::string GetHeaderServices(const grpc::protobuf::FileDescriptor *file,
const Parameters &params) {
grpc::string output;
{
// Scope the output stream so it closes and finalizes output to the string.
grpc::protobuf::io::StringOutputStream output_stream(&output);
grpc::protobuf::io::Printer printer(&output_stream, '$');
std::map<grpc::string, grpc::string> vars;
@ -693,7 +682,8 @@ grpc::string GetSourcePrologue(const grpc::protobuf::FileDescriptor *file,
vars["filename_base"] = grpc_generator::StripProto(file->name());
printer.Print(vars, "// Generated by the gRPC protobuf plugin.\n");
printer.Print(vars, "// If you make any local change, they will be lost.\n");
printer.Print(vars,
"// If you make any local change, they will be lost.\n");
printer.Print(vars, "// source: $filename$\n\n");
printer.Print(vars, "#include \"$filename_base$.pb.h\"\n");
printer.Print(vars, "#include \"$filename_base$.grpc.pb.h\"\n");
@ -1056,8 +1046,7 @@ void PrintSourceService(grpc::protobuf::io::Printer *printer,
" new ::grpc::RpcMethodHandler< $ns$$Service$::Service, "
"$Request$, "
"$Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this),\n"
" new $Request$, new $Response$));\n");
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
} else if (ClientOnlyStreaming(method)) {
printer->Print(
*vars,
@ -1066,8 +1055,7 @@ void PrintSourceService(grpc::protobuf::io::Printer *printer,
" ::grpc::RpcMethod::CLIENT_STREAMING,\n"
" new ::grpc::ClientStreamingHandler< "
"$ns$$Service$::Service, $Request$, $Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this),\n"
" new $Request$, new $Response$));\n");
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
} else if (ServerOnlyStreaming(method)) {
printer->Print(
*vars,
@ -1076,8 +1064,7 @@ void PrintSourceService(grpc::protobuf::io::Printer *printer,
" ::grpc::RpcMethod::SERVER_STREAMING,\n"
" new ::grpc::ServerStreamingHandler< "
"$ns$$Service$::Service, $Request$, $Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this),\n"
" new $Request$, new $Response$));\n");
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
} else if (BidiStreaming(method)) {
printer->Print(
*vars,
@ -1086,8 +1073,7 @@ void PrintSourceService(grpc::protobuf::io::Printer *printer,
" ::grpc::RpcMethod::BIDI_STREAMING,\n"
" new ::grpc::BidiStreamingHandler< "
"$ns$$Service$::Service, $Request$, $Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this),\n"
" new $Request$, new $Response$));\n");
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
}
}
printer->Print("return service_;\n");

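A sketch of the generated registration after this change; the service and message names are hypothetical. The trailing "new $Request$, new $Response$" prototype arguments are gone, since serialization is now handled per call through the SerializationTraits path rather than via prototype messages:

service_->AddMethod(new ::grpc::RpcServiceMethod(
    "/my.pkg.Echo/Echo", ::grpc::RpcMethod::NORMAL_RPC,
    new ::grpc::RpcMethodHandler<my::pkg::Echo::Service, EchoRequest,
                                 EchoResponse>(
        std::mem_fn(&my::pkg::Echo::Service::Echo), this)));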
@ -211,9 +211,3 @@ void grpc_call_element_send_cancel(grpc_call_element *cur_elem) {
op.cancel_with_status = GRPC_STATUS_CANCELLED;
grpc_call_next_op(cur_elem, &op);
}
void grpc_call_element_recv_status(grpc_call_element *cur_elem,
grpc_status_code status,
const char *message) {
abort();
}

@ -157,9 +157,10 @@ static void lb_destroy_channel_elem(grpc_channel_element *elem) {
}
const grpc_channel_filter grpc_child_channel_top_filter = {
lb_start_transport_op, lb_channel_op,
sizeof(lb_call_data), lb_init_call_elem, lb_destroy_call_elem,
sizeof(lb_channel_data), lb_init_channel_elem, lb_destroy_channel_elem,
"child-channel",
};
/* grpc_child_channel proper */

@ -39,6 +39,7 @@
#include "src/core/channel/child_channel.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/iomgr/iomgr.h"
#include "src/core/iomgr/pollset_set.h"
#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@ -101,10 +102,17 @@ struct call_data {
static int prepare_activate(grpc_call_element *elem,
grpc_child_channel *on_child) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
if (calld->state == CALL_CANCELLED) return 0;
/* no more access to calld->s.waiting allowed */
GPR_ASSERT(calld->state == CALL_WAITING);
if (calld->s.waiting_op.bind_pollset) {
grpc_transport_setup_del_interested_party(chand->transport_setup,
calld->s.waiting_op.bind_pollset);
}
calld->state = CALL_ACTIVE;
/* create a child call */
@ -131,7 +139,11 @@ static void remove_waiting_child(channel_data *chand, call_data *calld) {
size_t new_count;
size_t i;
for (i = 0, new_count = 0; i < chand->waiting_child_count; i++) {
if (chand->waiting_children[i] == calld) {
grpc_transport_setup_del_interested_party(
chand->transport_setup, calld->s.waiting_op.bind_pollset);
continue;
}
chand->waiting_children[new_count++] = chand->waiting_children[i];
}
GPR_ASSERT(new_count == chand->waiting_child_count - 1 ||
@ -166,6 +178,9 @@ static void handle_op_after_cancellation(grpc_call_element *elem,
*op->recv_state = GRPC_STREAM_CLOSED;
op->on_done_recv(op->recv_user_data, 1);
}
if (op->on_consumed) {
op->on_consumed(op->on_consumed_user_data, 0);
}
}
static void cc_start_transport_op(grpc_call_element *elem,
@ -191,6 +206,7 @@ static void cc_start_transport_op(grpc_call_element *elem,
handle_op_after_cancellation(elem, op);
} else {
calld->state = CALL_WAITING;
calld->s.waiting_op.bind_pollset = NULL;
if (chand->active_child) {
/* channel is connected - use the connected stack */
if (prepare_activate(elem, chand->active_child)) {
@ -222,6 +238,8 @@ static void cc_start_transport_op(grpc_call_element *elem,
}
calld->s.waiting_op = *op;
chand->waiting_children[chand->waiting_child_count++] = calld;
grpc_transport_setup_add_interested_party(chand->transport_setup,
op->bind_pollset);
gpr_mu_unlock(&chand->mu);
/* finally initiate transport setup if needed */
@ -257,6 +275,9 @@ static void cc_start_transport_op(grpc_call_element *elem,
calld->s.waiting_op.recv_user_data = op->recv_user_data;
}
gpr_mu_unlock(&chand->mu);
if (op->on_consumed) {
op->on_consumed(op->on_consumed_user_data, 0);
}
}
break;
case CALL_CANCELLED:
@ -365,12 +386,24 @@ static void init_call_elem(grpc_call_element *elem,
/* Destructor for call_data */
static void destroy_call_elem(grpc_call_element *elem) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
/* if the call got activated, we need to destroy the child stack also, and
remove it from the in-flight requests tracked by the child_entry we
picked */
gpr_mu_lock(&chand->mu);
switch (calld->state) {
case CALL_ACTIVE:
gpr_mu_unlock(&chand->mu);
grpc_child_call_destroy(calld->s.active.child_call);
break;
case CALL_WAITING:
remove_waiting_child(chand, calld);
gpr_mu_unlock(&chand->mu);
break;
default:
gpr_mu_unlock(&chand->mu);
break;
}
}
@ -416,9 +449,9 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
}
const grpc_channel_filter grpc_client_channel_filter = {
cc_start_transport_op, channel_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, "client-channel",
};
grpc_transport_setup_result grpc_client_channel_transport_setup_complete(

@ -56,6 +56,9 @@ struct grpc_client_setup {
gpr_cv cv;
grpc_client_setup_request *active_request;
int refs;
/** The set of pollsets that are currently interested in this
connection being established */
grpc_pollset_set interested_parties;
};
struct grpc_client_setup_request {
@ -68,14 +71,22 @@ gpr_timespec grpc_client_setup_request_deadline(grpc_client_setup_request *r) {
return r->deadline;
}
grpc_pollset_set *grpc_client_setup_get_interested_parties(
grpc_client_setup_request *r) {
return &r->setup->interested_parties;
}
static void destroy_setup(grpc_client_setup *s) {
gpr_mu_destroy(&s->mu);
gpr_cv_destroy(&s->cv);
s->done(s->user_data);
grpc_channel_args_destroy(s->args);
grpc_pollset_set_destroy(&s->interested_parties);
gpr_free(s);
}
static void destroy_request(grpc_client_setup_request *r) { gpr_free(r); }
/* initiate handshaking */
static void setup_initiate(grpc_transport_setup *sp) {
grpc_client_setup *s = (grpc_client_setup *)sp;
@ -83,8 +94,7 @@ static void setup_initiate(grpc_transport_setup *sp) {
int in_alarm = 0;
r->setup = s;
r->deadline = gpr_time_add(gpr_now(), gpr_time_from_seconds(60));
gpr_mu_lock(&s->mu);
GPR_ASSERT(s->refs > 0);
@ -104,10 +114,30 @@ static void setup_initiate(grpc_transport_setup *sp) {
if (!in_alarm) {
s->initiate(s->user_data, r);
} else {
gpr_free(r);
destroy_request(r);
}
}
/** implementation of add_interested_party for setup vtable */
static void setup_add_interested_party(grpc_transport_setup *sp,
grpc_pollset *pollset) {
grpc_client_setup *s = (grpc_client_setup *)sp;
gpr_mu_lock(&s->mu);
grpc_pollset_set_add_pollset(&s->interested_parties, pollset);
gpr_mu_unlock(&s->mu);
}
/** implementation of del_interested_party for setup vtable */
static void setup_del_interested_party(grpc_transport_setup *sp,
grpc_pollset *pollset) {
grpc_client_setup *s = (grpc_client_setup *)sp;
gpr_mu_lock(&s->mu);
grpc_pollset_set_del_pollset(&s->interested_parties, pollset);
gpr_mu_unlock(&s->mu);
}
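A minimal sketch of how these vtable entries are exercised; the wrapper name matches the call made from client_channel.c above, and the helper itself is illustrative only.
/* Illustrative helper: registering a waiting call's pollset with the
   in-flight connection attempt. The wrapper dispatches through
   setup_vtable to setup_add_interested_party above. */
static void example_track_waiting_call(grpc_transport_setup *setup,
                                       grpc_pollset *call_pollset) {
  grpc_transport_setup_add_interested_party(setup, call_pollset);
}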
/* cancel handshaking: cancel all requests, and shutdown (the caller promises
not to initiate again) */
static void setup_cancel(grpc_transport_setup *sp) {
@ -137,7 +167,8 @@ static void setup_cancel(grpc_transport_setup *sp) {
}
}
int grpc_client_setup_cb_begin(grpc_client_setup_request *r) {
int grpc_client_setup_cb_begin(grpc_client_setup_request *r,
const char *reason) {
gpr_mu_lock(&r->setup->mu);
if (r->setup->cancelled) {
gpr_mu_unlock(&r->setup->mu);
@ -148,7 +179,8 @@ int grpc_client_setup_cb_begin(grpc_client_setup_request *r) {
return 1;
}
void grpc_client_setup_cb_end(grpc_client_setup_request *r) {
void grpc_client_setup_cb_end(grpc_client_setup_request *r,
const char *reason) {
gpr_mu_lock(&r->setup->mu);
r->setup->in_cb--;
if (r->setup->cancelled) gpr_cv_signal(&r->setup->cv);
@ -156,8 +188,9 @@ void grpc_client_setup_cb_end(grpc_client_setup_request *r) {
}
/* vtable for transport setup */
static const grpc_transport_setup_vtable setup_vtable = {setup_initiate,
setup_cancel};
static const grpc_transport_setup_vtable setup_vtable = {
setup_initiate, setup_add_interested_party, setup_del_interested_party,
setup_cancel};
void grpc_client_setup_create_and_attach(
grpc_channel_stack *newly_minted_channel, const grpc_channel_args *args,
@ -180,42 +213,44 @@ void grpc_client_setup_create_and_attach(
s->in_alarm = 0;
s->in_cb = 0;
s->cancelled = 0;
grpc_pollset_set_init(&s->interested_parties);
grpc_client_channel_set_transport_setup(newly_minted_channel, &s->base);
}
int grpc_client_setup_request_should_continue(grpc_client_setup_request *r) {
int grpc_client_setup_request_should_continue(grpc_client_setup_request *r,
const char *reason) {
int result;
if (gpr_time_cmp(gpr_now(), r->deadline) > 0) {
return 0;
result = 0;
} else {
gpr_mu_lock(&r->setup->mu);
result = r->setup->active_request == r;
gpr_mu_unlock(&r->setup->mu);
}
gpr_mu_lock(&r->setup->mu);
result = r->setup->active_request == r;
gpr_mu_unlock(&r->setup->mu);
return result;
}
static void backoff_alarm_done(void *arg /* grpc_client_setup */, int success) {
grpc_client_setup *s = arg;
grpc_client_setup_request *r = gpr_malloc(sizeof(grpc_client_setup_request));
r->setup = s;
/* TODO(klempner): Set this to something useful */
r->deadline = gpr_inf_future;
static void backoff_alarm_done(void *arg /* grpc_client_setup_request */,
int success) {
grpc_client_setup_request *r = arg;
grpc_client_setup *s = r->setup;
/* Handle status cancelled? */
gpr_mu_lock(&s->mu);
s->active_request = r;
s->in_alarm = 0;
if (!success) {
if (s->active_request != NULL || !success) {
if (0 == --s->refs) {
gpr_mu_unlock(&s->mu);
destroy_setup(s);
gpr_free(r);
destroy_request(r);
return;
} else {
gpr_mu_unlock(&s->mu);
destroy_request(r);
return;
}
}
s->active_request = r;
gpr_mu_unlock(&s->mu);
s->initiate(s->user_data, r);
}
@ -231,16 +266,12 @@ void grpc_client_setup_request_finish(grpc_client_setup_request *r,
} else {
retry = 0;
}
if (!retry && 0 == --s->refs) {
gpr_mu_unlock(&s->mu);
destroy_setup(s);
gpr_free(r);
return;
}
gpr_free(r);
if (retry) {
destroy_request(r);
} else if (retry) {
/* TODO(klempner): Replace these values with further consideration. 2x is
probably too aggressive of a backoff. */
gpr_timespec max_backoff = gpr_time_from_minutes(2);
@ -248,15 +279,17 @@ void grpc_client_setup_request_finish(grpc_client_setup_request *r,
gpr_timespec deadline = gpr_time_add(s->current_backoff_interval, now);
GPR_ASSERT(!s->in_alarm);
s->in_alarm = 1;
grpc_alarm_init(&s->backoff_alarm, deadline, backoff_alarm_done, s, now);
grpc_alarm_init(&s->backoff_alarm, deadline, backoff_alarm_done, r, now);
s->current_backoff_interval =
gpr_time_add(s->current_backoff_interval, s->current_backoff_interval);
if (gpr_time_cmp(s->current_backoff_interval, max_backoff) > 0) {
s->current_backoff_interval = max_backoff;
}
gpr_mu_unlock(&s->mu);
} else {
gpr_mu_unlock(&s->mu);
destroy_request(r);
}
gpr_mu_unlock(&s->mu);
}
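A standalone sketch of the doubling-with-cap policy coded above, using only the gpr_time helpers already called in this file; the initial interval is set elsewhere and the 1s value below is an assumption for illustration.
/* Sketch: the next backoff interval doubles until it hits the two-minute
   cap used above. Assuming a 1s initial interval the progression is
   1s, 2s, 4s, ..., 64s, 120s, 120s, ... */
static gpr_timespec example_next_backoff(gpr_timespec current) {
  gpr_timespec max_backoff = gpr_time_from_minutes(2);
  gpr_timespec doubled = gpr_time_add(current, current);
  return gpr_time_cmp(doubled, max_backoff) > 0 ? max_backoff : doubled;
}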
const grpc_channel_args *grpc_client_setup_get_channel_args(

@ -52,7 +52,8 @@ void grpc_client_setup_create_and_attach(
/* Check that r is the active request: needs to be performed at each callback.
If this races, we'll have two connection attempts running at once and the
old one will get cleaned up in due course, which is fine. */
int grpc_client_setup_request_should_continue(grpc_client_setup_request *r);
int grpc_client_setup_request_should_continue(grpc_client_setup_request *r,
const char *reason);
void grpc_client_setup_request_finish(grpc_client_setup_request *r,
int was_successful);
const grpc_channel_args *grpc_client_setup_get_channel_args(
@ -61,13 +62,16 @@ const grpc_channel_args *grpc_client_setup_get_channel_args(
/* Call before calling back into the setup listener, and call only if
this function returns 1. If it returns 1, also promise to call
grpc_client_setup_cb_end */
int grpc_client_setup_cb_begin(grpc_client_setup_request *r);
void grpc_client_setup_cb_end(grpc_client_setup_request *r);
int grpc_client_setup_cb_begin(grpc_client_setup_request *r,
const char *reason);
void grpc_client_setup_cb_end(grpc_client_setup_request *r, const char *reason);
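A minimal sketch of the begin/end contract these declarations describe; notify_listener is a hypothetical stand-in for the listener callback.
/* Sketch: only call into the listener if cb_begin grants permission, and
   always pair it with cb_end. notify_listener is hypothetical. */
static void notify_listener(grpc_client_setup_request *r);
static void example_notify(grpc_client_setup_request *r) {
  if (grpc_client_setup_cb_begin(r, "example_notify")) {
    notify_listener(r);
    grpc_client_setup_cb_end(r, "example_notify");
  }
}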
/* Get the deadline for a request passed in to initiate. Implementations should
make a best effort to honor this deadline. */
gpr_timespec grpc_client_setup_request_deadline(grpc_client_setup_request *r);
grpc_pollset_set *grpc_client_setup_get_interested_parties(
grpc_client_setup_request *r);
grpc_mdctx *grpc_client_setup_get_mdctx(grpc_client_setup_request *r);
#endif /* GRPC_INTERNAL_CORE_CHANNEL_CLIENT_SETUP_H */
#endif /* GRPC_INTERNAL_CORE_CHANNEL_CLIENT_SETUP_H */

@ -49,4 +49,4 @@ int grpc_msg_compress(grpc_compression_algorithm algorithm,
int grpc_msg_decompress(grpc_compression_algorithm algorithm,
gpr_slice_buffer *input, gpr_slice_buffer *output);
#endif /* GRPC_INTERNAL_CORE_COMPRESSION_MESSAGE_COMPRESS_H */
#endif /* GRPC_INTERNAL_CORE_COMPRESSION_MESSAGE_COMPRESS_H */

@ -60,14 +60,26 @@ typedef struct {
int use_ssl;
grpc_httpcli_response_cb on_response;
void *user_data;
grpc_httpcli_context *context;
grpc_pollset *pollset;
grpc_iomgr_object iomgr_obj;
} internal_request;
static grpc_httpcli_get_override g_get_override = NULL;
static grpc_httpcli_post_override g_post_override = NULL;
void grpc_httpcli_context_init(grpc_httpcli_context *context) {
grpc_pollset_set_init(&context->pollset_set);
}
void grpc_httpcli_context_destroy(grpc_httpcli_context *context) {
grpc_pollset_set_destroy(&context->pollset_set);
}
static void next_address(internal_request *req);
static void finish(internal_request *req, int success) {
grpc_pollset_set_del_pollset(&req->context->pollset_set, req->pollset);
req->on_response(req->user_data, success ? &req->parser.r : NULL);
grpc_httpcli_parser_destroy(&req->parser);
if (req->addresses != NULL) {
@ -78,6 +90,7 @@ static void finish(internal_request *req, int success) {
}
gpr_slice_unref(req->request_text);
gpr_free(req->host);
grpc_iomgr_unregister_object(&req->iomgr_obj);
gpr_free(req);
}
@ -198,8 +211,9 @@ static void next_address(internal_request *req) {
return;
}
addr = &req->addresses->addrs[req->next_address++];
grpc_tcp_client_connect(on_connected, req, (struct sockaddr *)&addr->addr,
addr->len, req->deadline);
grpc_tcp_client_connect(on_connected, req, &req->context->pollset_set,
(struct sockaddr *)&addr->addr, addr->len,
req->deadline);
}
static void on_resolved(void *arg, grpc_resolved_addresses *addresses) {
@ -213,10 +227,12 @@ static void on_resolved(void *arg, grpc_resolved_addresses *addresses) {
next_address(req);
}
void grpc_httpcli_get(const grpc_httpcli_request *request,
void grpc_httpcli_get(grpc_httpcli_context *context, grpc_pollset *pollset,
const grpc_httpcli_request *request,
gpr_timespec deadline,
grpc_httpcli_response_cb on_response, void *user_data) {
internal_request *req;
char *name;
if (g_get_override &&
g_get_override(request, deadline, on_response, user_data)) {
return;
@ -229,19 +245,27 @@ void grpc_httpcli_get(const grpc_httpcli_request *request,
req->user_data = user_data;
req->deadline = deadline;
req->use_ssl = request->use_ssl;
req->context = context;
req->pollset = pollset;
gpr_asprintf(&name, "HTTP:GET:%s:%s", request->host, request->path);
grpc_iomgr_register_object(&req->iomgr_obj, name);
gpr_free(name);
if (req->use_ssl) {
req->host = gpr_strdup(request->host);
}
grpc_pollset_set_add_pollset(&req->context->pollset_set, req->pollset);
grpc_resolve_address(request->host, req->use_ssl ? "https" : "http",
on_resolved, req);
}
void grpc_httpcli_post(const grpc_httpcli_request *request,
void grpc_httpcli_post(grpc_httpcli_context *context, grpc_pollset *pollset,
const grpc_httpcli_request *request,
const char *body_bytes, size_t body_size,
gpr_timespec deadline,
grpc_httpcli_response_cb on_response, void *user_data) {
internal_request *req;
char *name;
if (g_post_override && g_post_override(request, body_bytes, body_size,
deadline, on_response, user_data)) {
return;
@ -255,10 +279,16 @@ void grpc_httpcli_post(const grpc_httpcli_request *request,
req->user_data = user_data;
req->deadline = deadline;
req->use_ssl = request->use_ssl;
req->context = context;
req->pollset = pollset;
gpr_asprintf(&name, "HTTP:GET:%s:%s", request->host, request->path);
grpc_iomgr_register_object(&req->iomgr_obj, name);
gpr_free(name);
if (req->use_ssl) {
req->host = gpr_strdup(request->host);
}
grpc_pollset_set_add_pollset(&req->context->pollset_set, req->pollset);
grpc_resolve_address(request->host, req->use_ssl ? "https" : "http",
on_resolved, req);
}

@ -38,6 +38,8 @@
#include <grpc/support/time.h>
#include "src/core/iomgr/pollset_set.h"
/* User agent this library reports */
#define GRPC_HTTPCLI_USER_AGENT "grpc-httpcli/0.0"
/* Maximum length of a header string of the form 'Key: Value\r\n' */
@ -49,6 +51,13 @@ typedef struct grpc_httpcli_header {
char *value;
} grpc_httpcli_header;
/* Tracks in-progress http requests
TODO(ctiller): allow caching and capturing multiple requests for the
same content and combining them */
typedef struct grpc_httpcli_context {
grpc_pollset_set pollset_set;
} grpc_httpcli_context;
/* A request */
typedef struct grpc_httpcli_request {
/* The host name to connect to */
@ -80,7 +89,14 @@ typedef struct grpc_httpcli_response {
typedef void (*grpc_httpcli_response_cb)(void *user_data,
const grpc_httpcli_response *response);
void grpc_httpcli_context_init(grpc_httpcli_context *context);
void grpc_httpcli_context_destroy(grpc_httpcli_context *context);
/* Asynchronously perform an HTTP GET.
'context' specifies the http context under which to do the get
'pollset' indicates a grpc_pollset that is interested in the result
of the get - work on this pollset may be used to progress the get
operation
'request' contains request parameters - these are caller owned and can be
destroyed once the call returns
'deadline' contains a deadline for the request (or gpr_inf_future)
@ -88,14 +104,28 @@ typedef void (*grpc_httpcli_response_cb)(void *user_data,
lifetime of the request
'on_response' is a callback to report results to (and 'user_data' is a user
supplied pointer to pass to said call) */
void grpc_httpcli_get(const grpc_httpcli_request *request,
void grpc_httpcli_get(grpc_httpcli_context *context, grpc_pollset *pollset,
const grpc_httpcli_request *request,
gpr_timespec deadline,
grpc_httpcli_response_cb on_response, void *user_data);
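A usage sketch for the new context/pollset-aware signature; the host, path, callback name, and the response fields touched here are illustrative assumptions, not part of this change.
/* Sketch: issuing a GET through an initialized context, with a pollset
   that may be used to drive the request. on_weather is hypothetical. */
#include <string.h>

static void on_weather(void *user_data,
                       const grpc_httpcli_response *response) {
  if (response != NULL && response->status == 200) {
    /* consume response->body / response->body_length here */
  }
}

static void example_get(grpc_httpcli_context *ctx, grpc_pollset *pollset) {
  grpc_httpcli_request req;
  memset(&req, 0, sizeof(req));
  req.host = "example.org";
  req.path = "/weather";
  grpc_httpcli_get(ctx, pollset, &req,
                   gpr_time_add(gpr_now(), gpr_time_from_seconds(15)),
                   on_weather, NULL /* user_data */);
}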
/* Asynchronously perform an HTTP POST.
When there is no body, pass in NULL as body_bytes.
'context' specifies the http context under which to do the post
'pollset' indicates a grpc_pollset that is interested in the result
of the post - work on this pollset may be used to progress the post
operation
'request' contains request parameters - these are caller owned and can be
destroyed once the call returns
'body_bytes' and 'body_size' specify the payload for the post.
When there is no body, pass in NULL as body_bytes.
'deadline' contains a deadline for the request (or gpr_inf_future)
'em' points to a caller owned event manager that must be alive for the
lifetime of the request
'on_response' is a callback to report results to (and 'user_data' is a user
supplied pointer to pass to said call)
Does not support ?var1=val1&var2=val2 in the path. */
void grpc_httpcli_post(const grpc_httpcli_request *request,
void grpc_httpcli_post(grpc_httpcli_context *context, grpc_pollset *pollset,
const grpc_httpcli_request *request,
const char *body_bytes, size_t body_size,
gpr_timespec deadline,
grpc_httpcli_response_cb on_response, void *user_data);
@ -115,4 +145,4 @@ typedef int (*grpc_httpcli_post_override)(const grpc_httpcli_request *request,
void grpc_httpcli_set_override(grpc_httpcli_get_override get,
grpc_httpcli_post_override post);
#endif /* GRPC_INTERNAL_CORE_HTTPCLI_HTTPCLI_H */
#endif /* GRPC_INTERNAL_CORE_HTTPCLI_HTTPCLI_H */

@ -109,16 +109,40 @@ static void destroy(grpc_fd *fd) {
gpr_free(fd);
}
#ifdef GRPC_FD_REF_COUNT_DEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
gpr_log(GPR_DEBUG, "FD %d %p ref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
gpr_atm_no_barrier_load(&fd->refst),
gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(fd, n, reason) unref_by(fd, n)
static void ref_by(grpc_fd *fd, int n) {
#endif
GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}
#ifdef GRPC_FD_REF_COUNT_DEBUG
static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
gpr_atm old;
gpr_log(GPR_DEBUG, "FD %d %p unref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
gpr_atm_no_barrier_load(&fd->refst),
gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
#else
static void unref_by(grpc_fd *fd, int n) {
gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
gpr_atm old;
#endif
old = gpr_atm_full_fetch_add(&fd->refst, -n);
if (old == n) {
grpc_iomgr_add_callback(&fd->on_done_closure);
freelist_fd(fd);
if (fd->on_done_closure) {
grpc_iomgr_add_callback(fd->on_done_closure);
}
grpc_iomgr_unregister_object(&fd->iomgr_object);
freelist_fd(fd);
} else {
GPR_ASSERT(old > n);
}
@ -135,12 +159,9 @@ void grpc_fd_global_shutdown(void) {
gpr_mu_destroy(&fd_freelist_mu);
}
static void do_nothing(void *ignored, int success) {}
grpc_fd *grpc_fd_create(int fd, const char *name) {
grpc_fd *r = alloc_fd(fd);
grpc_iomgr_register_object(&r->iomgr_object, name);
grpc_pollset_add_fd(grpc_backup_pollset(), r);
return r;
}
@ -178,24 +199,35 @@ static void wake_all_watchers_locked(grpc_fd *fd) {
}
}
void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_cb_func on_done, void *user_data) {
grpc_iomgr_closure_init(&fd->on_done_closure, on_done ? on_done : do_nothing,
user_data);
void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_closure *on_done,
const char *reason) {
fd->on_done_closure = on_done;
shutdown(fd->fd, SHUT_RDWR);
ref_by(fd, 1); /* remove active status, but keep referenced */
REF_BY(fd, 1, reason); /* remove active status, but keep referenced */
gpr_mu_lock(&fd->watcher_mu);
wake_all_watchers_locked(fd);
gpr_mu_unlock(&fd->watcher_mu);
unref_by(fd, 2); /* drop the reference */
UNREF_BY(fd, 2, reason); /* drop the reference */
}
/* increment refcount by two to avoid changing the orphan bit */
#ifdef GRPC_FD_REF_COUNT_DEBUG
void grpc_fd_ref(grpc_fd *fd, const char *reason, const char *file, int line) {
ref_by(fd, 2, reason, file, line);
}
void grpc_fd_unref(grpc_fd *fd, const char *reason, const char *file,
int line) {
unref_by(fd, 2, reason, file, line);
}
#else
void grpc_fd_ref(grpc_fd *fd) { ref_by(fd, 2); }
void grpc_fd_unref(grpc_fd *fd) { unref_by(fd, 2); }
#endif
static void process_callback(grpc_iomgr_closure *closure, int success,
int allow_synchronous_callback) {
int allow_synchronous_callback) {
if (allow_synchronous_callback) {
closure->cb(closure->cb_arg, success);
} else {
@ -235,7 +267,7 @@ static void notify_on(grpc_fd *fd, gpr_atm *st, grpc_iomgr_closure *closure,
GPR_ASSERT(gpr_atm_no_barrier_load(st) == READY);
gpr_atm_rel_store(st, NOT_READY);
process_callback(closure, !gpr_atm_acq_load(&fd->shutdown),
allow_synchronous_callback);
allow_synchronous_callback);
return;
default: /* WAITING */
/* upcallptr was set to a different closure. This is an error! */
@ -279,7 +311,7 @@ static void set_ready(grpc_fd *fd, gpr_atm *st,
/* only one set_ready can be active at once (but there may be a racing
notify_on) */
int success;
grpc_iomgr_closure* closure;
grpc_iomgr_closure *closure;
size_t ncb = 0;
gpr_mu_lock(&fd->set_state_mu);
@ -319,7 +351,7 @@ gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
gpr_uint32 mask = 0;
/* keep track of pollers that have requested our events, in case they change
*/
grpc_fd_ref(fd);
GRPC_FD_REF(fd, "poll");
gpr_mu_lock(&fd->watcher_mu);
/* if there is nobody polling for read, but we need to, then start doing so */
@ -374,7 +406,7 @@ void grpc_fd_end_poll(grpc_fd_watcher *watcher, int got_read, int got_write) {
}
gpr_mu_unlock(&fd->watcher_mu);
grpc_fd_unref(fd);
GRPC_FD_UNREF(fd, "poll");
}
void grpc_fd_become_readable(grpc_fd *fd, int allow_synchronous_callback) {

@ -62,12 +62,12 @@ struct grpc_fd {
gpr_atm shutdown;
/* The watcher list.
The following watcher related fields are protected by watcher_mu.
An fd_watcher is an ephemeral object created when an fd wants to
begin polling, and destroyed after the poll.
It denotes the fd's interest in whether to read poll or write poll
or both or neither on this fd.
@ -93,7 +93,7 @@ struct grpc_fd {
struct grpc_fd *freelist_next;
grpc_iomgr_closure on_done_closure;
grpc_iomgr_closure *on_done_closure;
grpc_iomgr_closure *shutdown_closures[2];
grpc_iomgr_object iomgr_object;
@ -109,7 +109,8 @@ grpc_fd *grpc_fd_create(int fd, const char *name);
If on_done is NULL, no callback will be made.
Requires: *fd initialized; no outstanding notify_on_read or
notify_on_write. */
void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_cb_func on_done, void *user_data);
void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_closure *on_done,
const char *reason);
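A small sketch of the updated closure-based signature; on_fd_closed is a hypothetical callback, and the closure is assumed to outlive the operation.
/* Sketch: orphaning an fd with a completion callback under the new
   signature. The closure must stay alive until it is invoked. */
static void on_fd_closed(void *user_data, int success) { /* hypothetical */ }

static void example_orphan(grpc_fd *fd, grpc_iomgr_closure *closure,
                           void *user_data) {
  grpc_iomgr_closure_init(closure, on_fd_closed, user_data);
  grpc_fd_orphan(fd, closure, "example");
}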
/* Begin polling on an fd.
Registers that the given pollset is interested in this fd - so that if read
@ -159,10 +160,19 @@ void grpc_fd_become_readable(grpc_fd *fd, int allow_synchronous_callback);
void grpc_fd_become_writable(grpc_fd *fd, int allow_synchronous_callback);
/* Reference counting for fds */
#ifdef GRPC_FD_REF_COUNT_DEBUG
void grpc_fd_ref(grpc_fd *fd, const char *reason, const char *file, int line);
void grpc_fd_unref(grpc_fd *fd, const char *reason, const char *file, int line);
#define GRPC_FD_REF(fd, reason) grpc_fd_ref(fd, reason, __FILE__, __LINE__)
#define GRPC_FD_UNREF(fd, reason) grpc_fd_unref(fd, reason, __FILE__, __LINE__)
#else
void grpc_fd_ref(grpc_fd *fd);
void grpc_fd_unref(grpc_fd *fd);
#define GRPC_FD_REF(fd, reason) grpc_fd_ref(fd)
#define GRPC_FD_UNREF(fd, reason) grpc_fd_unref(fd)
#endif
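Call sites go through the macros so the bookkeeping arguments compile away when GRPC_FD_REF_COUNT_DEBUG is undefined; a short sketch:
/* Sketch: ref/unref reasons should pair up so the debug log lines
   ("FD ... ref/unref ...") stay readable in debug builds. */
static void example_hold_fd(grpc_fd *fd) {
  GRPC_FD_REF(fd, "example");
  /* ... use fd ... */
  GRPC_FD_UNREF(fd, "example");
}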
void grpc_fd_global_init(void);
void grpc_fd_global_shutdown(void);
#endif /* GRPC_INTERNAL_CORE_IOMGR_FD_POSIX_H */
#endif /* GRPC_INTERNAL_CORE_IOMGR_FD_POSIX_H */

@ -112,13 +112,20 @@ void grpc_iomgr_shutdown(void) {
gpr_timespec shutdown_deadline =
gpr_time_add(gpr_now(), gpr_time_from_seconds(10));
gpr_mu_lock(&g_mu);
g_shutdown = 1;
while (g_cbs_head || g_root_object.next != &g_root_object) {
size_t nobjs = count_objects();
gpr_log(GPR_DEBUG, "Waiting for %d iomgr objects to be destroyed%s", nobjs,
g_cbs_head ? " and executing final callbacks" : "");
while (g_cbs_head != NULL || g_root_object.next != &g_root_object) {
if (g_cbs_head != NULL && g_root_object.next != &g_root_object) {
gpr_log(GPR_DEBUG,
"Waiting for %d iomgr objects to be destroyed and executing "
"final callbacks",
count_objects());
} else if (g_cbs_head != NULL) {
gpr_log(GPR_DEBUG, "Executing final iomgr callbacks");
} else {
gpr_log(GPR_DEBUG, "Waiting for %d iomgr objects to be destroyed",
count_objects());
}
if (g_cbs_head) {
do {
closure = g_cbs_head;
@ -131,10 +138,14 @@ void grpc_iomgr_shutdown(void) {
} while (g_cbs_head);
continue;
}
if (nobjs > 0) {
if (grpc_alarm_check(&g_mu, gpr_inf_future, NULL)) {
gpr_log(GPR_DEBUG, "got late alarm");
continue;
}
if (g_root_object.next != &g_root_object) {
int timeout = 0;
gpr_timespec short_deadline = gpr_time_add(gpr_now(),
gpr_time_from_millis(100));
gpr_timespec short_deadline =
gpr_time_add(gpr_now(), gpr_time_from_millis(100));
while (gpr_cv_wait(&g_rcv, &g_mu, short_deadline) && g_cbs_head == NULL) {
if (gpr_time_cmp(gpr_now(), shutdown_deadline) > 0) {
timeout = 1;
@ -158,15 +169,16 @@ void grpc_iomgr_shutdown(void) {
grpc_kick_poller();
gpr_event_wait(&g_background_callback_executor_done, gpr_inf_future);
grpc_iomgr_platform_shutdown();
grpc_alarm_list_shutdown();
grpc_iomgr_platform_shutdown();
gpr_mu_destroy(&g_mu);
gpr_cv_destroy(&g_rcv);
}
void grpc_iomgr_register_object(grpc_iomgr_object *obj, const char *name) {
obj->name = gpr_strdup(name);
gpr_mu_lock(&g_mu);
obj->name = gpr_strdup(name);
obj->next = &g_root_object;
obj->prev = obj->next->prev;
obj->next->prev = obj->prev->next = obj;
@ -174,15 +186,14 @@ void grpc_iomgr_register_object(grpc_iomgr_object *obj, const char *name) {
}
void grpc_iomgr_unregister_object(grpc_iomgr_object *obj) {
gpr_free(obj->name);
gpr_mu_lock(&g_mu);
obj->next->prev = obj->prev;
obj->prev->next = obj->next;
gpr_free(obj->name);
gpr_cv_signal(&g_rcv);
gpr_mu_unlock(&g_mu);
}
void grpc_iomgr_closure_init(grpc_iomgr_closure *closure, grpc_iomgr_cb_func cb,
void *cb_arg) {
closure->cb = cb;
@ -200,15 +211,16 @@ void grpc_iomgr_add_delayed_callback(grpc_iomgr_closure *closure, int success) {
g_cbs_tail->next = closure;
g_cbs_tail = closure;
}
if (g_shutdown) {
gpr_cv_signal(&g_rcv);
}
gpr_mu_unlock(&g_mu);
}
void grpc_iomgr_add_callback(grpc_iomgr_closure *closure) {
grpc_iomgr_add_delayed_callback(closure, 1 /* GPR_TRUE */);
}
int grpc_maybe_call_delayed_callbacks(gpr_mu *drop_mu, int success) {
int n = 0;
gpr_mu *retake_mu = NULL;

@ -52,23 +52,24 @@
#include "src/core/iomgr/pollset_windows.h"
#endif
void grpc_pollset_init(grpc_pollset *pollset);
void grpc_pollset_shutdown(grpc_pollset *pollset,
void (*shutdown_done)(void *arg),
void *shutdown_done_arg);
void grpc_pollset_destroy(grpc_pollset *pollset);
/* Do some work on a pollset.
May involve invoking asynchronous callbacks, or actually polling file
descriptors.
Requires GRPC_POLLSET_MU(pollset) locked.
May unlock GRPC_POLLSET_MU(pollset) during its execution. */
May unlock GRPC_POLLSET_MU(pollset) during its execution.
Returns true if some work has been done, and false if the deadline
was reached. */
int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline);
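A sketch of the calling convention implied by the comment above: the caller holds the pollset mutex and loops until its own completion flag is set or the deadline passes. The driver function itself is illustrative.
/* Sketch: drive a pollset until *done becomes non-zero. grpc_pollset_work
   is entered and left with GRPC_POLLSET_MU(pollset) held. */
static void example_drive(grpc_pollset *pollset, int *done,
                          gpr_timespec deadline) {
  gpr_mu_lock(GRPC_POLLSET_MU(pollset));
  while (!*done) {
    if (!grpc_pollset_work(pollset, deadline)) break; /* deadline reached */
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(pollset));
}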
/* Break a pollset out of polling work
/* Break one polling thread out of polling work for this pollset.
Requires GRPC_POLLSET_MU(pollset) locked. */
void grpc_pollset_kick(grpc_pollset *pollset);
#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_H */
#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_H */

@ -34,7 +34,7 @@
#include <grpc/support/port_platform.h>
#ifdef GPR_POSIX_SOCKET
#include "src/core/iomgr/pollset_kick.h"
#include "src/core/iomgr/pollset_kick_posix.h"
#include <errno.h>
#include <string.h>
@ -73,7 +73,7 @@ static grpc_kick_fd_info *allocate_wfd(void) {
return info;
}
static void destroy_wfd(grpc_kick_fd_info* wfd) {
static void destroy_wfd(grpc_kick_fd_info *wfd) {
grpc_wakeup_fd_destroy(&wfd->wakeup_fd);
gpr_free(wfd);
}
@ -96,41 +96,49 @@ static void free_wfd(grpc_kick_fd_info *fd_info) {
void grpc_pollset_kick_init(grpc_pollset_kick_state *kick_state) {
gpr_mu_init(&kick_state->mu);
kick_state->kicked = 0;
kick_state->fd_info = NULL;
kick_state->fd_list.next = kick_state->fd_list.prev = &kick_state->fd_list;
}
void grpc_pollset_kick_destroy(grpc_pollset_kick_state *kick_state) {
gpr_mu_destroy(&kick_state->mu);
GPR_ASSERT(kick_state->fd_info == NULL);
GPR_ASSERT(kick_state->fd_list.next == &kick_state->fd_list);
}
int grpc_pollset_kick_pre_poll(grpc_pollset_kick_state *kick_state) {
grpc_kick_fd_info *grpc_pollset_kick_pre_poll(
grpc_pollset_kick_state *kick_state) {
grpc_kick_fd_info *fd_info;
gpr_mu_lock(&kick_state->mu);
if (kick_state->kicked) {
kick_state->kicked = 0;
gpr_mu_unlock(&kick_state->mu);
return -1;
return NULL;
}
kick_state->fd_info = allocate_wfd();
fd_info = allocate_wfd();
fd_info->next = &kick_state->fd_list;
fd_info->prev = fd_info->next->prev;
fd_info->next->prev = fd_info->prev->next = fd_info;
gpr_mu_unlock(&kick_state->mu);
return GRPC_WAKEUP_FD_GET_READ_FD(&kick_state->fd_info->wakeup_fd);
return fd_info;
}
void grpc_pollset_kick_consume(grpc_pollset_kick_state *kick_state) {
grpc_wakeup_fd_consume_wakeup(&kick_state->fd_info->wakeup_fd);
void grpc_pollset_kick_consume(grpc_pollset_kick_state *kick_state,
grpc_kick_fd_info *fd_info) {
grpc_wakeup_fd_consume_wakeup(&fd_info->wakeup_fd);
}
void grpc_pollset_kick_post_poll(grpc_pollset_kick_state *kick_state) {
void grpc_pollset_kick_post_poll(grpc_pollset_kick_state *kick_state,
grpc_kick_fd_info *fd_info) {
gpr_mu_lock(&kick_state->mu);
free_wfd(kick_state->fd_info);
kick_state->fd_info = NULL;
fd_info->next->prev = fd_info->prev;
fd_info->prev->next = fd_info->next;
free_wfd(fd_info);
gpr_mu_unlock(&kick_state->mu);
}
void grpc_pollset_kick_kick(grpc_pollset_kick_state *kick_state) {
gpr_mu_lock(&kick_state->mu);
if (kick_state->fd_info != NULL) {
grpc_wakeup_fd_wakeup(&kick_state->fd_info->wakeup_fd);
if (kick_state->fd_list.next != &kick_state->fd_list) {
grpc_wakeup_fd_wakeup(&kick_state->fd_list.next->wakeup_fd);
} else {
kick_state->kicked = 1;
}
@ -157,5 +165,4 @@ void grpc_pollset_kick_global_destroy(void) {
gpr_mu_destroy(&fd_freelist_mu);
}
#endif /* GPR_POSIX_SOCKET */
#endif /* GPR_POSIX_SOCKET */

@ -37,15 +37,57 @@
#include "src/core/iomgr/wakeup_fd_posix.h"
#include <grpc/support/sync.h>
/* pollset kicking allows breaking a thread out of polling work for
a given pollset.
Writing a byte to a pipe is used as a posix-ly portable base
mechanism, and eventfds are utilized on Linux for better performance. */
typedef struct grpc_kick_fd_info {
grpc_wakeup_fd_info wakeup_fd;
/* used for polling list and free list */
struct grpc_kick_fd_info *next;
/* only used when polling */
struct grpc_kick_fd_info *prev;
} grpc_kick_fd_info;
typedef struct grpc_pollset_kick_state {
gpr_mu mu;
int kicked;
struct grpc_kick_fd_info *fd_info;
struct grpc_kick_fd_info fd_list;
} grpc_pollset_kick_state;
#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_KICK_POSIX_H */
#define GRPC_POLLSET_KICK_GET_FD(kick_fd_info) \
GRPC_WAKEUP_FD_GET_READ_FD(&(kick_fd_info)->wakeup_fd)
/* This is an abstraction around the typical pipe mechanism for waking up a
thread sitting in a poll() style call. */
void grpc_pollset_kick_global_init(void);
void grpc_pollset_kick_global_destroy(void);
void grpc_pollset_kick_init(grpc_pollset_kick_state *kick_state);
void grpc_pollset_kick_destroy(grpc_pollset_kick_state *kick_state);
/* Guarantees a pure posix implementation rather than a specialized one, if
* applicable. Intended for testing. */
void grpc_pollset_kick_global_init_fallback_fd(void);
/* Must be called before entering poll(). If return value is NULL, this consumed
an existing kick. Otherwise the return value is an FD to add to the poll set.
*/
grpc_kick_fd_info *grpc_pollset_kick_pre_poll(
grpc_pollset_kick_state *kick_state);
/* Consume an existing kick. Must be called after poll returns that the fd was
readable, and before calling kick_post_poll. */
void grpc_pollset_kick_consume(grpc_pollset_kick_state *kick_state,
grpc_kick_fd_info *fd_info);
/* Must be called after pre_poll, and after consume if applicable */
void grpc_pollset_kick_post_poll(grpc_pollset_kick_state *kick_state,
grpc_kick_fd_info *fd_info);
/* Actually kick */
void grpc_pollset_kick_kick(grpc_pollset_kick_state *kick_state);
#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_KICK_POSIX_H */
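A condensed sketch of the pre_poll/consume/post_poll sequence these declarations describe, mirroring the call sites in pollset_posix.c; error handling is elided.
/* Sketch: the kick fd participates in poll() alongside the real fds. */
#include <poll.h>

static void example_poll_once(grpc_pollset_kick_state *kick_state,
                              int timeout_ms) {
  struct pollfd pfd;
  grpc_kick_fd_info *kfd = grpc_pollset_kick_pre_poll(kick_state);
  if (kfd == NULL) return; /* an outstanding kick was consumed instead */
  pfd.fd = GRPC_POLLSET_KICK_GET_FD(kfd);
  pfd.events = POLLIN;
  pfd.revents = 0;
  if (poll(&pfd, 1, timeout_ms) > 0 && (pfd.revents & POLLIN)) {
    grpc_pollset_kick_consume(kick_state, kfd); /* drain the wakeup fd */
  }
  grpc_pollset_kick_post_poll(kick_state, kfd); /* return fd to free list */
}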

@ -97,14 +97,7 @@ static int multipoll_with_epoll_pollset_maybe_work(
* here.
*/
if (gpr_time_cmp(deadline, gpr_inf_future) == 0) {
timeout_ms = -1;
} else {
timeout_ms = gpr_time_to_millis(gpr_time_sub(deadline, now));
if (timeout_ms <= 0) {
return 1;
}
}
timeout_ms = grpc_poll_deadline_to_millis_timeout(deadline, now);
pollset->counter += 1;
gpr_mu_unlock(&pollset->mu);
@ -140,13 +133,12 @@ static int multipoll_with_epoll_pollset_maybe_work(
gpr_mu_lock(&pollset->mu);
pollset->counter -= 1;
/* TODO(klempner): This should signal once per event rather than broadcast,
* although it probably doesn't matter because threads will generally be
* blocked in epoll_wait rather than being blocked on the cv. */
gpr_cv_broadcast(&pollset->cv);
return 1;
}
static void multipoll_with_epoll_pollset_finish_shutdown(
grpc_pollset *pollset) {}
static void multipoll_with_epoll_pollset_destroy(grpc_pollset *pollset) {
pollset_hdr *h = pollset->data.ptr;
grpc_wakeup_fd_destroy(&h->wakeup_fd);
@ -160,8 +152,11 @@ static void epoll_kick(grpc_pollset *pollset) {
}
static const grpc_pollset_vtable multipoll_with_epoll_pollset = {
multipoll_with_epoll_pollset_add_fd, multipoll_with_epoll_pollset_del_fd,
multipoll_with_epoll_pollset_maybe_work, epoll_kick,
multipoll_with_epoll_pollset_add_fd,
multipoll_with_epoll_pollset_del_fd,
multipoll_with_epoll_pollset_maybe_work,
epoll_kick,
multipoll_with_epoll_pollset_finish_shutdown,
multipoll_with_epoll_pollset_destroy};
static void epoll_become_multipoller(grpc_pollset *pollset, grpc_fd **fds,

@ -78,7 +78,7 @@ static void multipoll_with_poll_pollset_add_fd(grpc_pollset *pollset,
h->fds = gpr_realloc(h->fds, sizeof(grpc_fd *) * h->fd_capacity);
}
h->fds[h->fd_count++] = fd;
grpc_fd_ref(fd);
GRPC_FD_REF(fd, "multipoller");
}
static void multipoll_with_poll_pollset_del_fd(grpc_pollset *pollset,
@ -90,7 +90,7 @@ static void multipoll_with_poll_pollset_del_fd(grpc_pollset *pollset,
h->dels = gpr_realloc(h->dels, sizeof(grpc_fd *) * h->del_capacity);
}
h->dels[h->del_count++] = fd;
grpc_fd_ref(fd);
GRPC_FD_REF(fd, "multipoller_del");
}
static void end_polling(grpc_pollset *pollset) {
@ -110,19 +110,10 @@ static int multipoll_with_poll_pollset_maybe_work(
int r;
size_t i, np, nf, nd;
pollset_hdr *h;
grpc_kick_fd_info *kfd;
if (pollset->counter) {
return 0;
}
h = pollset->data.ptr;
if (gpr_time_cmp(deadline, gpr_inf_future) == 0) {
timeout = -1;
} else {
timeout = gpr_time_to_millis(gpr_time_sub(deadline, now));
if (timeout <= 0) {
return 1;
}
}
timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
if (h->pfd_capacity < h->fd_count + 1) {
h->pfd_capacity = GPR_MAX(h->pfd_capacity * 3 / 2, h->fd_count + 1);
gpr_free(h->pfds);
@ -132,11 +123,12 @@ static int multipoll_with_poll_pollset_maybe_work(
}
nf = 0;
np = 1;
h->pfds[0].fd = grpc_pollset_kick_pre_poll(&pollset->kick_state);
if (h->pfds[0].fd < 0) {
kfd = grpc_pollset_kick_pre_poll(&pollset->kick_state);
if (kfd == NULL) {
/* Already kicked */
return 1;
}
h->pfds[0].fd = GRPC_POLLSET_KICK_GET_FD(kfd);
h->pfds[0].events = POLLIN;
h->pfds[0].revents = POLLOUT;
for (i = 0; i < h->fd_count; i++) {
@ -145,7 +137,7 @@ static int multipoll_with_poll_pollset_maybe_work(
if (h->fds[i] == h->dels[nd]) remove = 1;
}
if (remove) {
grpc_fd_unref(h->fds[i]);
GRPC_FD_UNREF(h->fds[i], "multipoller");
} else {
h->fds[nf++] = h->fds[i];
h->watchers[np].fd = h->fds[i];
@ -157,14 +149,14 @@ static int multipoll_with_poll_pollset_maybe_work(
h->pfd_count = np;
h->fd_count = nf;
for (nd = 0; nd < h->del_count; nd++) {
grpc_fd_unref(h->dels[nd]);
GRPC_FD_UNREF(h->dels[nd], "multipoller_del");
}
h->del_count = 0;
if (h->pfd_count == 0) {
end_polling(pollset);
return 0;
}
pollset->counter = 1;
pollset->counter++;
gpr_mu_unlock(&pollset->mu);
for (i = 1; i < np; i++) {
@ -184,7 +176,7 @@ static int multipoll_with_poll_pollset_maybe_work(
/* do nothing */
} else {
if (h->pfds[0].revents & POLLIN) {
grpc_pollset_kick_consume(&pollset->kick_state);
grpc_pollset_kick_consume(&pollset->kick_state, kfd);
}
for (i = 1; i < np; i++) {
if (h->pfds[i].revents & (POLLIN | POLLHUP | POLLERR)) {
@ -195,11 +187,11 @@ static int multipoll_with_poll_pollset_maybe_work(
}
}
}
grpc_pollset_kick_post_poll(&pollset->kick_state);
grpc_pollset_kick_post_poll(&pollset->kick_state, kfd);
gpr_mu_lock(&pollset->mu);
pollset->counter = 0;
gpr_cv_broadcast(&pollset->cv);
pollset->counter--;
return 1;
}
@ -207,16 +199,23 @@ static void multipoll_with_poll_pollset_kick(grpc_pollset *p) {
grpc_pollset_force_kick(p);
}
static void multipoll_with_poll_pollset_destroy(grpc_pollset *pollset) {
static void multipoll_with_poll_pollset_finish_shutdown(grpc_pollset *pollset) {
size_t i;
pollset_hdr *h = pollset->data.ptr;
GPR_ASSERT(pollset->counter == 0);
for (i = 0; i < h->fd_count; i++) {
grpc_fd_unref(h->fds[i]);
GRPC_FD_UNREF(h->fds[i], "multipoller");
}
for (i = 0; i < h->del_count; i++) {
grpc_fd_unref(h->dels[i]);
GRPC_FD_UNREF(h->dels[i], "multipoller_del");
}
h->fd_count = 0;
h->del_count = 0;
}
static void multipoll_with_poll_pollset_destroy(grpc_pollset *pollset) {
pollset_hdr *h = pollset->data.ptr;
multipoll_with_poll_pollset_finish_shutdown(pollset);
gpr_free(h->pfds);
gpr_free(h->watchers);
gpr_free(h->fds);
@ -225,8 +224,11 @@ static void multipoll_with_poll_pollset_destroy(grpc_pollset *pollset) {
}
static const grpc_pollset_vtable multipoll_with_poll_pollset = {
multipoll_with_poll_pollset_add_fd, multipoll_with_poll_pollset_del_fd,
multipoll_with_poll_pollset_maybe_work, multipoll_with_poll_pollset_kick,
multipoll_with_poll_pollset_add_fd,
multipoll_with_poll_pollset_del_fd,
multipoll_with_poll_pollset_maybe_work,
multipoll_with_poll_pollset_kick,
multipoll_with_poll_pollset_finish_shutdown,
multipoll_with_poll_pollset_destroy};
void grpc_poll_become_multipoller(grpc_pollset *pollset, grpc_fd **fds,
@ -247,7 +249,7 @@ void grpc_poll_become_multipoller(grpc_pollset *pollset, grpc_fd **fds,
h->dels = NULL;
for (i = 0; i < nfds; i++) {
h->fds[i] = fds[i];
grpc_fd_ref(fds[i]);
GRPC_FD_REF(fds[i], "multipoller");
}
}

@ -54,31 +54,8 @@
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>
static grpc_pollset g_backup_pollset;
static int g_shutdown_backup_poller;
static gpr_event g_backup_poller_done;
static gpr_event g_backup_pollset_shutdown_done;
GPR_TLS_DECL(g_current_thread_poller);
static void backup_poller(void *p) {
gpr_timespec delta = gpr_time_from_millis(100);
gpr_timespec last_poll = gpr_now();
gpr_mu_lock(&g_backup_pollset.mu);
while (g_shutdown_backup_poller == 0) {
gpr_timespec next_poll = gpr_time_add(last_poll, delta);
grpc_pollset_work(&g_backup_pollset, gpr_time_add(gpr_now(), gpr_time_from_seconds(1)));
gpr_mu_unlock(&g_backup_pollset.mu);
gpr_sleep_until(next_poll);
gpr_mu_lock(&g_backup_pollset.mu);
last_poll = next_poll;
}
gpr_mu_unlock(&g_backup_pollset.mu);
gpr_event_set(&g_backup_poller_done, (void *)1);
}
void grpc_pollset_kick(grpc_pollset *p) {
if (gpr_tls_get(&g_current_thread_poller) != (gpr_intptr)p && p->counter) {
p->vtable->kick(p);
@ -99,44 +76,14 @@ static void kick_using_pollset_kick(grpc_pollset *p) {
/* global state management */
grpc_pollset *grpc_backup_pollset(void) { return &g_backup_pollset; }
void grpc_pollset_global_init(void) {
gpr_thd_id id;
gpr_tls_init(&g_current_thread_poller);
/* Initialize kick fd state */
grpc_pollset_kick_global_init();
/* initialize the backup pollset */
grpc_pollset_init(&g_backup_pollset);
/* start the backup poller thread */
g_shutdown_backup_poller = 0;
gpr_event_init(&g_backup_poller_done);
gpr_event_init(&g_backup_pollset_shutdown_done);
gpr_thd_new(&id, backup_poller, NULL, NULL);
}
static void on_backup_pollset_shutdown_done(void *arg) {
gpr_event_set(&g_backup_pollset_shutdown_done, (void *)1);
}
void grpc_pollset_global_shutdown(void) {
/* terminate the backup poller thread */
gpr_mu_lock(&g_backup_pollset.mu);
g_shutdown_backup_poller = 1;
gpr_mu_unlock(&g_backup_pollset.mu);
gpr_event_wait(&g_backup_poller_done, gpr_inf_future);
grpc_pollset_shutdown(&g_backup_pollset, on_backup_pollset_shutdown_done,
NULL);
gpr_event_wait(&g_backup_pollset_shutdown_done, gpr_inf_future);
/* destroy the backup pollset */
grpc_pollset_destroy(&g_backup_pollset);
/* destroy the kick pipes */
grpc_pollset_kick_global_destroy();
@ -145,32 +92,34 @@ void grpc_pollset_global_shutdown(void) {
/* main interface */
static void become_empty_pollset(grpc_pollset *pollset);
static void become_unary_pollset(grpc_pollset *pollset, grpc_fd *fd);
static void become_basic_pollset(grpc_pollset *pollset, grpc_fd *fd_or_null);
void grpc_pollset_init(grpc_pollset *pollset) {
gpr_mu_init(&pollset->mu);
gpr_cv_init(&pollset->cv);
grpc_pollset_kick_init(&pollset->kick_state);
pollset->in_flight_cbs = 0;
pollset->shutting_down = 0;
become_empty_pollset(pollset);
pollset->called_shutdown = 0;
become_basic_pollset(pollset, NULL);
}
void grpc_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
gpr_mu_lock(&pollset->mu);
pollset->vtable->add_fd(pollset, fd);
gpr_cv_broadcast(&pollset->cv);
gpr_mu_unlock(&pollset->mu);
}
void grpc_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd) {
gpr_mu_lock(&pollset->mu);
pollset->vtable->del_fd(pollset, fd);
gpr_cv_broadcast(&pollset->cv);
gpr_mu_unlock(&pollset->mu);
}
static void finish_shutdown(grpc_pollset *pollset) {
pollset->vtable->finish_shutdown(pollset);
pollset->shutdown_done_cb(pollset->shutdown_done_arg);
}
int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline) {
/* pollset->mu already held */
gpr_timespec now = gpr_now();
@ -193,9 +142,10 @@ int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline) {
if (pollset->shutting_down) {
if (pollset->counter > 0) {
grpc_pollset_kick(pollset);
} else if (pollset->in_flight_cbs == 0) {
} else if (!pollset->called_shutdown && pollset->in_flight_cbs == 0) {
pollset->called_shutdown = 1;
gpr_mu_unlock(&pollset->mu);
pollset->shutdown_done_cb(pollset->shutdown_done_arg);
finish_shutdown(pollset);
/* Continuing to access pollset here is safe -- it is the caller's
* responsibility to not destroy when it has outstanding calls to
* grpc_pollset_work.
@ -209,21 +159,24 @@ int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline) {
void grpc_pollset_shutdown(grpc_pollset *pollset,
void (*shutdown_done)(void *arg),
void *shutdown_done_arg) {
int in_flight_cbs;
int counter;
int call_shutdown = 0;
gpr_mu_lock(&pollset->mu);
GPR_ASSERT(!pollset->shutting_down);
pollset->shutting_down = 1;
in_flight_cbs = pollset->in_flight_cbs;
counter = pollset->counter;
if (!pollset->called_shutdown && pollset->in_flight_cbs == 0 &&
pollset->counter == 0) {
pollset->called_shutdown = 1;
call_shutdown = 1;
}
pollset->shutdown_done_cb = shutdown_done;
pollset->shutdown_done_arg = shutdown_done_arg;
if (counter > 0) {
if (pollset->counter > 0) {
grpc_pollset_kick(pollset);
}
gpr_mu_unlock(&pollset->mu);
if (in_flight_cbs == 0 && counter == 0) {
shutdown_done(shutdown_done_arg);
if (call_shutdown) {
finish_shutdown(pollset);
}
}
@ -233,41 +186,29 @@ void grpc_pollset_destroy(grpc_pollset *pollset) {
pollset->vtable->destroy(pollset);
grpc_pollset_kick_destroy(&pollset->kick_state);
gpr_mu_destroy(&pollset->mu);
gpr_cv_destroy(&pollset->cv);
}
/*
* empty_pollset - a vtable that provides polling for NO file descriptors
*/
static void empty_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
become_unary_pollset(pollset, fd);
}
static void empty_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd) {}
static int empty_pollset_maybe_work(grpc_pollset *pollset,
gpr_timespec deadline, gpr_timespec now,
int allow_synchronous_callback) {
return 0;
}
static void empty_pollset_destroy(grpc_pollset *pollset) {}
static const grpc_pollset_vtable empty_pollset = {
empty_pollset_add_fd, empty_pollset_del_fd, empty_pollset_maybe_work,
kick_using_pollset_kick, empty_pollset_destroy};
static void become_empty_pollset(grpc_pollset *pollset) {
pollset->vtable = &empty_pollset;
int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline, gpr_timespec now) {
gpr_timespec timeout;
static const int max_spin_polling_us = 10;
if (gpr_time_cmp(deadline, gpr_inf_future) == 0) {
return -1;
}
if (gpr_time_cmp(
deadline,
gpr_time_add(now, gpr_time_from_micros(max_spin_polling_us))) <= 0) {
return 0;
}
timeout = gpr_time_sub(deadline, now);
/* round up to a whole millisecond so short waits do not busy-spin */
return gpr_time_to_millis(
gpr_time_add(timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1)));
}
/*
* unary_poll_pollset - a vtable that provides polling for one file descriptor
* via poll()
* basic_pollset - a vtable that provides polling for zero or one file
* descriptor via poll()
*/
typedef struct grpc_unary_promote_args {
const grpc_pollset_vtable *original_vtable;
grpc_pollset *pollset;
@ -275,7 +216,7 @@ typedef struct grpc_unary_promote_args {
grpc_iomgr_closure promotion_closure;
} grpc_unary_promote_args;
static void unary_poll_do_promote(void *args, int success) {
static void basic_do_promote(void *args, int success) {
grpc_unary_promote_args *up_args = args;
const grpc_pollset_vtable *original_vtable = up_args->original_vtable;
grpc_pollset *pollset = up_args->pollset;
@ -293,7 +234,7 @@ static void unary_poll_do_promote(void *args, int success) {
gpr_mu_lock(&pollset->mu);
/* First we need to ensure that nobody is polling concurrently */
while (pollset->counter != 0) {
if (pollset->counter != 0) {
grpc_pollset_kick(pollset);
grpc_iomgr_add_callback(&up_args->promotion_closure);
gpr_mu_unlock(&pollset->mu);
@ -321,33 +262,33 @@ static void unary_poll_do_promote(void *args, int success) {
fds[0] = pollset->data.ptr;
fds[1] = fd;
if (!grpc_fd_is_orphaned(fds[0])) {
if (fds[0] && !grpc_fd_is_orphaned(fds[0])) {
grpc_platform_become_multipoller(pollset, fds, GPR_ARRAY_SIZE(fds));
grpc_fd_unref(fds[0]);
GRPC_FD_UNREF(fds[0], "basicpoll");
} else {
/* old fd is orphaned and we haven't cleaned it up until now, so remain a
* unary poller */
/* Note that it is possible that fds[1] is also orphaned at this point.
* That's okay, we'll correct it at the next add or poll. */
grpc_fd_unref(fds[0]);
if (fds[0]) GRPC_FD_UNREF(fds[0], "basicpoll");
pollset->data.ptr = fd;
grpc_fd_ref(fd);
GRPC_FD_REF(fd, "basicpoll");
}
}
gpr_cv_broadcast(&pollset->cv);
gpr_mu_unlock(&pollset->mu);
if (do_shutdown_cb) {
pollset->shutdown_done_cb(pollset->shutdown_done_arg);
}
/* Matching ref in unary_poll_pollset_add_fd */
grpc_fd_unref(fd);
/* Matching ref in basic_pollset_add_fd */
GRPC_FD_UNREF(fd, "basicpoll_add");
}
static void unary_poll_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
static void basic_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
grpc_unary_promote_args *up_args;
GPR_ASSERT(fd);
if (fd == pollset->data.ptr) return;
if (!pollset->counter) {
@ -358,92 +299,100 @@ static void unary_poll_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
fds[0] = pollset->data.ptr;
fds[1] = fd;
if (!grpc_fd_is_orphaned(fds[0])) {
if (fds[0] == NULL) {
pollset->data.ptr = fd;
GRPC_FD_REF(fd, "basicpoll");
} else if (!grpc_fd_is_orphaned(fds[0])) {
grpc_platform_become_multipoller(pollset, fds, GPR_ARRAY_SIZE(fds));
grpc_fd_unref(fds[0]);
GRPC_FD_UNREF(fds[0], "basicpoll");
} else {
/* old fd is orphaned and we haven't cleaned it up until now, so remain a
* unary poller */
grpc_fd_unref(fds[0]);
GRPC_FD_UNREF(fds[0], "basicpoll");
pollset->data.ptr = fd;
grpc_fd_ref(fd);
GRPC_FD_REF(fd, "basicpoll");
}
return;
}
/* Now we need to promote. This needs to happen when we're not polling. Since
* this may be called from poll, the wait needs to happen asynchronously. */
grpc_fd_ref(fd);
GRPC_FD_REF(fd, "basicpoll_add");
pollset->in_flight_cbs++;
up_args = gpr_malloc(sizeof(*up_args));
up_args->pollset = pollset;
up_args->fd = fd;
up_args->original_vtable = pollset->vtable;
up_args->promotion_closure.cb = unary_poll_do_promote;
up_args->promotion_closure.cb = basic_do_promote;
up_args->promotion_closure.cb_arg = up_args;
grpc_iomgr_add_callback(&up_args->promotion_closure);
grpc_pollset_kick(pollset);
}
static void unary_poll_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd) {
static void basic_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd) {
GPR_ASSERT(fd);
if (fd == pollset->data.ptr) {
grpc_fd_unref(pollset->data.ptr);
become_empty_pollset(pollset);
GRPC_FD_UNREF(pollset->data.ptr, "basicpoll");
pollset->data.ptr = NULL;
}
}
static int unary_poll_pollset_maybe_work(grpc_pollset *pollset,
gpr_timespec deadline,
gpr_timespec now,
int allow_synchronous_callback) {
static int basic_pollset_maybe_work(grpc_pollset *pollset,
gpr_timespec deadline, gpr_timespec now,
int allow_synchronous_callback) {
struct pollfd pfd[2];
grpc_fd *fd;
grpc_fd_watcher fd_watcher;
grpc_kick_fd_info *kfd;
int timeout;
int r;
int nfds;
if (pollset->counter) {
return 0;
}
if (pollset->in_flight_cbs) {
/* Give do_promote priority so we don't starve it out */
return 0;
gpr_mu_unlock(&pollset->mu);
gpr_mu_lock(&pollset->mu);
return 1;
}
fd = pollset->data.ptr;
if (grpc_fd_is_orphaned(fd)) {
grpc_fd_unref(fd);
become_empty_pollset(pollset);
return 0;
if (fd && grpc_fd_is_orphaned(fd)) {
GRPC_FD_UNREF(fd, "basicpoll");
fd = pollset->data.ptr = NULL;
}
if (gpr_time_cmp(deadline, gpr_inf_future) == 0) {
timeout = -1;
} else {
timeout = gpr_time_to_millis(gpr_time_sub(deadline, now));
if (timeout <= 0) {
return 1;
}
}
pfd[0].fd = grpc_pollset_kick_pre_poll(&pollset->kick_state);
if (pfd[0].fd < 0) {
timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
kfd = grpc_pollset_kick_pre_poll(&pollset->kick_state);
if (kfd == NULL) {
/* Already kicked */
return 1;
}
pfd[0].fd = GRPC_POLLSET_KICK_GET_FD(kfd);
pfd[0].events = POLLIN;
pfd[0].revents = 0;
pfd[1].fd = fd->fd;
pfd[1].revents = 0;
pollset->counter = 1;
gpr_mu_unlock(&pollset->mu);
pfd[1].events = grpc_fd_begin_poll(fd, pollset, POLLIN, POLLOUT, &fd_watcher);
nfds = 1;
pollset->counter++;
if (fd) {
pfd[1].fd = fd->fd;
pfd[1].revents = 0;
gpr_mu_unlock(&pollset->mu);
pfd[1].events =
grpc_fd_begin_poll(fd, pollset, POLLIN, POLLOUT, &fd_watcher);
if (pfd[1].events != 0) {
nfds++;
}
} else {
gpr_mu_unlock(&pollset->mu);
}
/* poll fd count (argument 2) is shortened by one if we have no events
to poll on - such that it only includes the kicker */
r = poll(pfd, GPR_ARRAY_SIZE(pfd) - (pfd[1].events == 0), timeout);
r = poll(pfd, nfds, timeout);
GRPC_TIMER_MARK(GRPC_PTAG_POLL_FINISHED, r);
grpc_fd_end_poll(&fd_watcher, pfd[1].revents & POLLIN, pfd[1].revents & POLLOUT);
if (fd) {
grpc_fd_end_poll(&fd_watcher, pfd[1].revents & POLLIN,
pfd[1].revents & POLLOUT);
}
if (r < 0) {
if (errno != EINTR) {
@ -453,39 +402,44 @@ static int unary_poll_pollset_maybe_work(grpc_pollset *pollset,
/* do nothing */
} else {
if (pfd[0].revents & POLLIN) {
grpc_pollset_kick_consume(&pollset->kick_state);
}
if (pfd[1].revents & (POLLIN | POLLHUP | POLLERR)) {
grpc_fd_become_readable(fd, allow_synchronous_callback);
grpc_pollset_kick_consume(&pollset->kick_state, kfd);
}
if (pfd[1].revents & (POLLOUT | POLLHUP | POLLERR)) {
grpc_fd_become_writable(fd, allow_synchronous_callback);
if (nfds > 1) {
if (pfd[1].revents & (POLLIN | POLLHUP | POLLERR)) {
grpc_fd_become_readable(fd, allow_synchronous_callback);
}
if (pfd[1].revents & (POLLOUT | POLLHUP | POLLERR)) {
grpc_fd_become_writable(fd, allow_synchronous_callback);
}
}
}
grpc_pollset_kick_post_poll(&pollset->kick_state);
grpc_pollset_kick_post_poll(&pollset->kick_state, kfd);
gpr_mu_lock(&pollset->mu);
pollset->counter = 0;
gpr_cv_broadcast(&pollset->cv);
pollset->counter--;
return 1;
}
static void unary_poll_pollset_destroy(grpc_pollset *pollset) {
static void basic_pollset_destroy(grpc_pollset *pollset) {
GPR_ASSERT(pollset->counter == 0);
grpc_fd_unref(pollset->data.ptr);
if (pollset->data.ptr != NULL) {
GRPC_FD_UNREF(pollset->data.ptr, "basicpoll");
pollset->data.ptr = NULL;
}
}
static const grpc_pollset_vtable unary_poll_pollset = {
unary_poll_pollset_add_fd, unary_poll_pollset_del_fd,
unary_poll_pollset_maybe_work, kick_using_pollset_kick,
unary_poll_pollset_destroy};
static const grpc_pollset_vtable basic_pollset = {
basic_pollset_add_fd, basic_pollset_del_fd, basic_pollset_maybe_work,
kick_using_pollset_kick, basic_pollset_destroy, basic_pollset_destroy};
static void become_unary_pollset(grpc_pollset *pollset, grpc_fd *fd) {
pollset->vtable = &unary_poll_pollset;
static void become_basic_pollset(grpc_pollset *pollset, grpc_fd *fd_or_null) {
pollset->vtable = &basic_pollset;
pollset->counter = 0;
pollset->data.ptr = fd;
grpc_fd_ref(fd);
pollset->data.ptr = fd_or_null;
if (fd_or_null) {
GRPC_FD_REF(fd_or_null, "basicpoll");
}
}
#endif /* GPR_POSIX_POLLSET */

@ -36,7 +36,7 @@
#include <grpc/support/sync.h>
#include "src/core/iomgr/pollset_kick.h"
#include "src/core/iomgr/pollset_kick_posix.h"
typedef struct grpc_pollset_vtable grpc_pollset_vtable;
@ -52,11 +52,11 @@ typedef struct grpc_pollset {
few fds, and an epoll() based implementation for many fds */
const grpc_pollset_vtable *vtable;
gpr_mu mu;
gpr_cv cv;
grpc_pollset_kick_state kick_state;
int counter;
int in_flight_cbs;
int shutting_down;
int called_shutdown;
void (*shutdown_done_cb)(void *arg);
void *shutdown_done_arg;
union {
@ -71,11 +71,11 @@ struct grpc_pollset_vtable {
int (*maybe_work)(grpc_pollset *pollset, gpr_timespec deadline,
gpr_timespec now, int allow_synchronous_callback);
void (*kick)(grpc_pollset *pollset);
void (*finish_shutdown)(grpc_pollset *pollset);
void (*destroy)(grpc_pollset *pollset);
};
#define GRPC_POLLSET_MU(pollset) (&(pollset)->mu)
#define GRPC_POLLSET_CV(pollset) (&(pollset)->cv)
/* Add an fd to a pollset */
void grpc_pollset_add_fd(grpc_pollset *pollset, struct grpc_fd *fd);
@ -94,11 +94,14 @@ int grpc_kick_read_fd(grpc_pollset *p);
/* Call after polling has been kicked to leave the kicked state */
void grpc_kick_drain(grpc_pollset *p);
/* All fds get added to a backup pollset to ensure that progress is made
regardless of applications listening to events. Relying on this is slow
however (the backup pollset only listens every 100ms or so) - so it's not
to be relied on. */
grpc_pollset *grpc_backup_pollset(void);
/* Convert a timespec to milliseconds:
- very small or negative poll times are clamped to zero to do a
non-blocking poll (which becomes spin polling)
- other small values are rounded up to one millisecond
- polls longer than a millisecond are rounded up to the next whole
millisecond to avoid spinning (worked examples follow the declaration)
- infinite timeouts are converted to -1 */
int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline, gpr_timespec now);
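Worked examples of the conversion rules above, assuming `now` is the current time (the 10us spin threshold comes from the implementation in pollset_posix.c):
/* deadline == gpr_inf_future  -> -1  (block indefinitely)
   deadline <= now + 10us      ->  0  (non-blocking, i.e. spin poll)
   deadline == now + 1.5ms     ->  2  (rounded up to a whole millisecond)
   deadline == now + 250ms     -> 250 */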
/* turn a pollset into a multipoller: platform specific */
typedef void (*grpc_platform_become_multipoller_type)(grpc_pollset *pollset,

@ -31,44 +31,29 @@
*
*/
#ifndef GRPC_INTERNAL_CORE_IOMGR_POLLSET_KICK_H
#define GRPC_INTERNAL_CORE_IOMGR_POLLSET_KICK_H
#ifndef GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_H
#define GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_H
#include <grpc/support/port_platform.h>
#include "src/core/iomgr/pollset.h"
/* A grpc_pollset_set is a set of pollsets that are interested in an
action. Adding a pollset to a pollset_set automatically adds any
fds (etc.) that have been registered with the pollset_set to that
pollset. Registering an fd automatically adds it to all current pollsets. */
#ifdef GPR_POSIX_SOCKET
#include "src/core/iomgr/pollset_kick_posix.h"
#include "src/core/iomgr/pollset_set_posix.h"
#endif
#ifdef GPR_WIN32
#include "src/core/iomgr/pollset_kick_windows.h"
#include "src/core/iomgr/pollset_set_windows.h"
#endif
/* This is an abstraction around the typical pipe mechanism for waking up a
thread sitting in a poll() style call. */
void grpc_pollset_kick_global_init(void);
void grpc_pollset_kick_global_destroy(void);
void grpc_pollset_kick_init(grpc_pollset_kick_state *kick_state);
void grpc_pollset_kick_destroy(grpc_pollset_kick_state *kick_state);
/* Guarantees a pure posix implementation rather than a specialized one, if
* applicable. Intended for testing. */
void grpc_pollset_kick_global_init_fallback_fd(void);
/* Must be called before entering poll(). If return value is -1, this consumed
an existing kick. Otherwise the return value is an FD to add to the poll set.
*/
int grpc_pollset_kick_pre_poll(grpc_pollset_kick_state *kick_state);
/* Consume an existing kick. Must be called after poll returns that the fd was
readable, and before calling kick_post_poll. */
void grpc_pollset_kick_consume(grpc_pollset_kick_state *kick_state);
/* Must be called after pre_poll, and after consume if applicable */
void grpc_pollset_kick_post_poll(grpc_pollset_kick_state *kick_state);
void grpc_pollset_kick_kick(grpc_pollset_kick_state *kick_state);
void grpc_pollset_set_init(grpc_pollset_set *pollset_set);
void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set);
void grpc_pollset_set_add_pollset(grpc_pollset_set *pollset_set,
grpc_pollset *pollset);
void grpc_pollset_set_del_pollset(grpc_pollset_set *pollset_set,
grpc_pollset *pollset);
#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_KICK_H */
#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_H */
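A usage sketch tying this generic interface to the posix implementation below; grpc_pollset_set_add_fd/del_fd are declared in the platform-specific header, and the helper itself is illustrative.
/* Sketch: every fd registered with the set is polled by every member
   pollset, regardless of registration order. */
static void example_pollset_set(grpc_pollset_set *set, grpc_pollset *ps,
                                grpc_fd *fd) {
  grpc_pollset_set_init(set);
  grpc_pollset_set_add_fd(set, fd);      /* fd will reach later pollsets too */
  grpc_pollset_set_add_pollset(set, ps); /* ps now polls fd */
  grpc_pollset_set_del_pollset(set, ps);
  grpc_pollset_set_destroy(set);         /* drops the set's fd refs */
}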

@ -0,0 +1,125 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <grpc/support/port_platform.h>
#ifdef GPR_POSIX_SOCKET
#include <stdlib.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/useful.h>
#include "src/core/iomgr/pollset_set.h"
void grpc_pollset_set_init(grpc_pollset_set *pollset_set) {
memset(pollset_set, 0, sizeof(*pollset_set));
gpr_mu_init(&pollset_set->mu);
}
void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set) {
size_t i;
gpr_mu_destroy(&pollset_set->mu);
for (i = 0; i < pollset_set->fd_count; i++) {
GRPC_FD_UNREF(pollset_set->fds[i], "pollset");
}
gpr_free(pollset_set->pollsets);
gpr_free(pollset_set->fds);
}
void grpc_pollset_set_add_pollset(grpc_pollset_set *pollset_set,
grpc_pollset *pollset) {
size_t i;
gpr_mu_lock(&pollset_set->mu);
if (pollset_set->pollset_count == pollset_set->pollset_capacity) {
pollset_set->pollset_capacity =
GPR_MAX(8, 2 * pollset_set->pollset_capacity);
pollset_set->pollsets =
gpr_realloc(pollset_set->pollsets, pollset_set->pollset_capacity *
sizeof(*pollset_set->pollsets));
}
pollset_set->pollsets[pollset_set->pollset_count++] = pollset;
for (i = 0; i < pollset_set->fd_count; i++) {
grpc_pollset_add_fd(pollset, pollset_set->fds[i]);
}
gpr_mu_unlock(&pollset_set->mu);
}
void grpc_pollset_set_del_pollset(grpc_pollset_set *pollset_set,
grpc_pollset *pollset) {
size_t i;
gpr_mu_lock(&pollset_set->mu);
for (i = 0; i < pollset_set->pollset_count; i++) {
if (pollset_set->pollsets[i] == pollset) {
pollset_set->pollset_count--;
GPR_SWAP(grpc_pollset *, pollset_set->pollsets[i],
pollset_set->pollsets[pollset_set->pollset_count]);
break;
}
}
gpr_mu_unlock(&pollset_set->mu);
}
void grpc_pollset_set_add_fd(grpc_pollset_set *pollset_set, grpc_fd *fd) {
size_t i;
gpr_mu_lock(&pollset_set->mu);
if (pollset_set->fd_count == pollset_set->fd_capacity) {
pollset_set->fd_capacity = GPR_MAX(8, 2 * pollset_set->fd_capacity);
pollset_set->fds = gpr_realloc(
pollset_set->fds, pollset_set->fd_capacity * sizeof(*pollset_set->fds));
}
GRPC_FD_REF(fd, "pollset_set");
pollset_set->fds[pollset_set->fd_count++] = fd;
for (i = 0; i < pollset_set->pollset_count; i++) {
grpc_pollset_add_fd(pollset_set->pollsets[i], fd);
}
gpr_mu_unlock(&pollset_set->mu);
}
void grpc_pollset_set_del_fd(grpc_pollset_set *pollset_set, grpc_fd *fd) {
size_t i;
gpr_mu_lock(&pollset_set->mu);
for (i = 0; i < pollset_set->fd_count; i++) {
if (pollset_set->fds[i] == fd) {
pollset_set->fd_count--;
GPR_SWAP(grpc_fd *, pollset_set->fds[i],
pollset_set->fds[pollset_set->fd_count]);
GRPC_FD_UNREF(fd, "pollset_set");
break;
}
}
gpr_mu_unlock(&pollset_set->mu);
}
#endif /* GPR_POSIX_SOCKET */
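
The implementation above leans on two small idioms worth naming: geometric array growth (GPR_MAX(8, 2 * capacity)) and swap-with-last for O(1) unordered removal. The same pattern in isolation, as a minimal sketch with illustrative names (not part of the gRPC API):

#include <stdlib.h>

typedef struct {
  size_t count;
  size_t capacity;
  int *items;
} int_bag;

static void bag_add(int_bag *b, int v) {
  if (b->count == b->capacity) {
    /* grow geometrically, starting at 8 -- mirrors GPR_MAX(8, 2 * cap) */
    b->capacity = b->capacity ? 2 * b->capacity : 8;
    b->items = realloc(b->items, b->capacity * sizeof(*b->items));
  }
  b->items[b->count++] = v;
}

static void bag_del(int_bag *b, int v) {
  size_t i;
  for (i = 0; i < b->count; i++) {
    if (b->items[i] == v) {
      /* order is irrelevant, so swap with the last element: O(1) removal */
      b->count--;
      b->items[i] = b->items[b->count];
      break;
    }
  }
}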

@ -0,0 +1,55 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_POSIX_H
#define GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_POSIX_H
#include "src/core/iomgr/fd_posix.h"
#include "src/core/iomgr/pollset_posix.h"
typedef struct grpc_pollset_set {
gpr_mu mu;
size_t pollset_count;
size_t pollset_capacity;
grpc_pollset **pollsets;
size_t fd_count;
size_t fd_capacity;
grpc_fd **fds;
} grpc_pollset_set;
void grpc_pollset_set_add_fd(grpc_pollset_set *pollset_set, grpc_fd *fd);
void grpc_pollset_set_del_fd(grpc_pollset_set *pollset_set, grpc_fd *fd);
#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_POSIX_H */

@ -31,25 +31,20 @@
 *
 */
#ifndef GRPC_INTERNAL_CPP_PROTO_PROTO_UTILS_H
#define GRPC_INTERNAL_CPP_PROTO_PROTO_UTILS_H
#include <grpc++/config.h>
struct grpc_byte_buffer;
namespace grpc {
// Serialize the msg into a buffer created inside the function. The caller
// should destroy the returned buffer when done with it. If serialization fails,
// false is returned and buffer is left unchanged.
bool SerializeProto(const grpc::protobuf::Message& msg,
                    grpc_byte_buffer** buffer);
// The caller keeps ownership of buffer and msg.
bool DeserializeProto(grpc_byte_buffer* buffer, grpc::protobuf::Message* msg,
                      int max_message_size);
}  // namespace grpc
#endif  // GRPC_INTERNAL_CPP_PROTO_PROTO_UTILS_H

#include <grpc/support/port_platform.h>
#ifdef GPR_WINSOCK_SOCKET
#include "src/core/iomgr/pollset_set.h"
void grpc_pollset_set_init(grpc_pollset_set *pollset_set) {}
void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set) {}
void grpc_pollset_set_add_pollset(grpc_pollset_set *pollset_set,
                                  grpc_pollset *pollset) {}
void grpc_pollset_set_del_pollset(grpc_pollset_set *pollset_set,
                                  grpc_pollset *pollset) {}
#endif /* GPR_WINSOCK_SOCKET */

@ -31,18 +31,9 @@
*
*/
#ifndef GRPC_INTERNAL_CORE_IOMGR_POLLSET_KICK_WINDOWS_H
#define GRPC_INTERNAL_CORE_IOMGR_POLLSET_KICK_WINDOWS_H
#ifndef GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_WINDOWS_H
#define GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_WINDOWS_H
#include <grpc/support/sync.h>
typedef struct grpc_pollset_set { void *unused; } grpc_pollset_set;
/* There isn't really any such thing as a pollset under Windows, due to the
nature of the IO completion ports. */
struct grpc_kick_fd_info;
typedef struct grpc_pollset_kick_state {
int unused;
} grpc_pollset_kick_state;
#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_KICK_WINDOWS_H */
#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_WINDOWS_H */

@ -74,9 +74,12 @@ int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline) {
if (grpc_alarm_check(NULL, now, &deadline)) {
return 1 /* GPR_TRUE */;
}
return 0 /* GPR_FALSE */;
gpr_cv_wait(&pollset->cv, &pollset->mu, deadline);
return 1 /* GPR_TRUE */;
}
void grpc_pollset_kick(grpc_pollset *p) { }
void grpc_pollset_kick(grpc_pollset *p) {
gpr_cv_signal(&p->cv);
}
#endif /* GPR_WINSOCK_SOCKET */

@ -37,13 +37,11 @@
#include <windows.h>
#include <grpc/support/sync.h>
#include "src/core/iomgr/pollset_kick.h"
#include "src/core/iomgr/socket_windows.h"
/* There isn't really any such thing as a pollset under Windows, due to the
nature of the IO completion ports. A Windows "pollset" is merely a mutex
and a condition variable, as this is the minimal set of features we need
implemented for the rest of grpc. But we won't use them directly. */
and a condition variable, used to synchronize with the IOCP. */
typedef struct grpc_pollset {
gpr_mu mu;
@ -51,6 +49,5 @@ typedef struct grpc_pollset {
} grpc_pollset;
#define GRPC_POLLSET_MU(pollset) (&(pollset)->mu)
#define GRPC_POLLSET_CV(pollset) (&(pollset)->cv)
#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_WINDOWS_H */

@ -35,14 +35,18 @@
#define GRPC_INTERNAL_CORE_IOMGR_TCP_CLIENT_H
#include "src/core/iomgr/endpoint.h"
#include "src/core/iomgr/pollset_set.h"
#include "src/core/iomgr/sockaddr.h"
#include <grpc/support/time.h>
/* Asynchronously connect to an address (specified as (addr, len)), and call
cb with arg and the completed connection when done (or call cb with arg and
NULL on failure) */
NULL on failure).
interested_parties points to a set of pollsets that would be interested
in this connection being established (in order to continue their work) */
void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *tcp),
void *arg, const struct sockaddr *addr,
int addr_len, gpr_timespec deadline);
void *arg, grpc_pollset_set *interested_parties,
const struct sockaddr *addr, int addr_len,
gpr_timespec deadline);
#endif /* GRPC_INTERNAL_CORE_IOMGR_TCP_CLIENT_H */
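
A minimal caller sketch for the revised signature; only the grpc_* and gpr_* calls come from the headers above, the rest is illustrative:

static void on_connected(void *arg, grpc_endpoint *tcp) {
  if (tcp == NULL) {
    /* NULL signals connect failure or deadline expiry */
    return;
  }
  /* hand the live endpoint to the transport here */
}

static void start_connect(grpc_pollset_set *interested_parties,
                          const struct sockaddr *addr, int addr_len) {
  /* 20 seconds is an arbitrary deadline for the sketch */
  gpr_timespec deadline = gpr_time_add(gpr_now(), gpr_time_from_seconds(20));
  grpc_tcp_client_connect(on_connected, NULL /* arg */, interested_parties,
                          addr, addr_len, deadline);
}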

@ -113,8 +113,6 @@ static void on_writable(void *acp, int success) {
void (*cb)(void *arg, grpc_endpoint *tcp) = ac->cb;
void *cb_arg = ac->cb_arg;
grpc_alarm_cancel(&ac->alarm);
if (success) {
do {
so_error_size = sizeof(so_error);
@ -167,26 +165,30 @@ static void on_writable(void *acp, int success) {
finish:
gpr_mu_lock(&ac->mu);
if (!ep) {
grpc_fd_orphan(ac->fd, NULL, NULL);
grpc_fd_orphan(ac->fd, NULL, "tcp_client_orphan");
}
done = (--ac->refs == 0);
gpr_mu_unlock(&ac->mu);
if (done) {
gpr_mu_destroy(&ac->mu);
gpr_free(ac);
} else {
grpc_alarm_cancel(&ac->alarm);
}
cb(cb_arg, ep);
}
void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *ep),
void *arg, const struct sockaddr *addr,
int addr_len, gpr_timespec deadline) {
void *arg, grpc_pollset_set *interested_parties,
const struct sockaddr *addr, int addr_len,
gpr_timespec deadline) {
int fd;
grpc_dualstack_mode dsmode;
int err;
async_connect *ac;
struct sockaddr_in6 addr6_v4mapped;
struct sockaddr_in addr4_copy;
grpc_fd *fdobj;
char *name;
char *addr_str;
@ -218,31 +220,35 @@ void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *ep),
grpc_sockaddr_to_string(&addr_str, addr, 1);
gpr_asprintf(&name, "tcp-client:%s", addr_str);
fdobj = grpc_fd_create(fd, name);
if (err >= 0) {
gpr_log(GPR_DEBUG, "instant connect");
cb(arg, grpc_tcp_create(grpc_fd_create(fd, name),
GRPC_TCP_DEFAULT_READ_SLICE_SIZE));
cb(arg, grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE));
goto done;
}
if (errno != EWOULDBLOCK && errno != EINPROGRESS) {
gpr_log(GPR_ERROR, "connect error to '%s': %s", addr_str, strerror(errno));
close(fd);
grpc_fd_orphan(fdobj, NULL, "tcp_client_connect_error");
cb(arg, NULL);
goto done;
}
grpc_pollset_set_add_fd(interested_parties, fdobj);
ac = gpr_malloc(sizeof(async_connect));
ac->cb = cb;
ac->cb_arg = arg;
ac->fd = grpc_fd_create(fd, name);
ac->fd = fdobj;
gpr_mu_init(&ac->mu);
ac->refs = 2;
ac->write_closure.cb = on_writable;
ac->write_closure.cb_arg = ac;
gpr_mu_lock(&ac->mu);
grpc_alarm_init(&ac->alarm, deadline, on_alarm, ac, gpr_now());
grpc_fd_notify_on_write(ac->fd, &ac->write_closure);
gpr_mu_unlock(&ac->mu);
done:
gpr_free(name);

@ -52,7 +52,7 @@
#include "src/core/iomgr/socket_windows.h"
typedef struct {
void(*cb)(void *arg, grpc_endpoint *tcp);
void (*cb)(void *arg, grpc_endpoint *tcp);
void *cb_arg;
gpr_mu mu;
grpc_winsocket *socket;
@ -86,7 +86,7 @@ static void on_connect(void *acp, int from_iocp) {
SOCKET sock = ac->socket->socket;
grpc_endpoint *ep = NULL;
grpc_winsocket_callback_info *info = &ac->socket->write_info;
void(*cb)(void *arg, grpc_endpoint *tcp) = ac->cb;
void (*cb)(void *arg, grpc_endpoint *tcp) = ac->cb;
void *cb_arg = ac->cb_arg;
int aborted;
@ -99,8 +99,7 @@ static void on_connect(void *acp, int from_iocp) {
DWORD transfered_bytes = 0;
DWORD flags;
BOOL wsa_success = WSAGetOverlappedResult(sock, &info->overlapped,
&transfered_bytes, FALSE,
&flags);
&transfered_bytes, FALSE, &flags);
info->outstanding = 0;
GPR_ASSERT(transfered_bytes == 0);
if (!wsa_success) {
@ -138,9 +137,10 @@ static void on_connect(void *acp, int from_iocp) {
/* Tries to issue one async connection, then schedules both an IOCP
notification request for the connection, and one timeout alert. */
void grpc_tcp_client_connect(void(*cb)(void *arg, grpc_endpoint *tcp),
void *arg, const struct sockaddr *addr,
int addr_len, gpr_timespec deadline) {
void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *tcp),
void *arg, grpc_pollset_set *interested_parties,
const struct sockaddr *addr, int addr_len,
gpr_timespec deadline) {
SOCKET sock = INVALID_SOCKET;
BOOL success;
int status;
@ -175,9 +175,9 @@ void grpc_tcp_client_connect(void(*cb)(void *arg, grpc_endpoint *tcp),
/* Grab the function pointer for ConnectEx for that specific socket.
It may change depending on the interface. */
status = WSAIoctl(sock, SIO_GET_EXTENSION_FUNCTION_POINTER,
&guid, sizeof(guid), &ConnectEx, sizeof(ConnectEx),
&ioctl_num_bytes, NULL, NULL);
status =
WSAIoctl(sock, SIO_GET_EXTENSION_FUNCTION_POINTER, &guid, sizeof(guid),
&ConnectEx, sizeof(ConnectEx), &ioctl_num_bytes, NULL, NULL);
if (status != 0) {
message = "Unable to retrieve ConnectEx pointer: %s";
@ -186,8 +186,7 @@ void grpc_tcp_client_connect(void(*cb)(void *arg, grpc_endpoint *tcp),
grpc_sockaddr_make_wildcard6(0, &local_address);
status = bind(sock, (struct sockaddr *) &local_address,
sizeof(local_address));
status = bind(sock, (struct sockaddr *)&local_address, sizeof(local_address));
if (status != 0) {
message = "Unable to bind socket: %s";
goto failure;
@ -233,4 +232,4 @@ failure:
cb(arg, NULL);
}
#endif /* GPR_WINSOCK_SOCKET */

@ -266,7 +266,7 @@ typedef struct {
grpc_endpoint base;
grpc_fd *em_fd;
int fd;
int iov_size; /* Number of slices to allocate per read attempt */
int finished_edge;
size_t slice_size;
gpr_refcount refcount;
@ -295,7 +295,7 @@ static void grpc_tcp_shutdown(grpc_endpoint *ep) {
static void grpc_tcp_unref(grpc_tcp *tcp) {
int refcount_zero = gpr_unref(&tcp->refcount);
if (refcount_zero) {
grpc_fd_orphan(tcp->em_fd, NULL, NULL);
grpc_fd_orphan(tcp->em_fd, NULL, "tcp_unref_orphan");
gpr_free(tcp);
}
}
@ -412,8 +412,7 @@ static void grpc_tcp_continue_read(grpc_tcp *tcp) {
++tcp->iov_size;
}
GPR_ASSERT(slice_state_has_available(&read_state));
slice_state_transfer_ownership(&read_state, &final_slices,
&final_nslices);
slice_state_transfer_ownership(&read_state, &final_slices, &final_nslices);
call_read_cb(tcp, final_slices, final_nslices, GRPC_ENDPOINT_CB_OK);
slice_state_destroy(&read_state);
grpc_tcp_unref(tcp);

@ -85,6 +85,7 @@ typedef struct {
} addr;
int addr_len;
grpc_iomgr_closure read_closure;
grpc_iomgr_closure destroyed_closure;
} server_port;
static void unlink_if_unix_domain_socket(const struct sockaddr_un *un) {
@ -101,13 +102,15 @@ struct grpc_tcp_server {
void *cb_arg;
gpr_mu mu;
gpr_cv cv;
/* active port count: how many ports are actually still listening */
size_t active_ports;
/* destroyed port count: how many ports are completely destroyed */
size_t destroyed_ports;
/* is this server shutting down? (boolean) */
int shutdown;
/* all listening ports */
server_port *ports;
size_t nports;
@ -116,14 +119,19 @@ struct grpc_tcp_server {
/* shutdown callback */
void (*shutdown_complete)(void *);
void *shutdown_complete_arg;
/* all pollsets interested in new connections */
grpc_pollset **pollsets;
/* number of pollsets in the pollsets array */
size_t pollset_count;
};
grpc_tcp_server *grpc_tcp_server_create(void) {
grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
gpr_mu_init(&s->mu);
gpr_cv_init(&s->cv);
s->active_ports = 0;
s->destroyed_ports = 0;
s->shutdown = 0;
s->cb = NULL;
s->cb_arg = NULL;
s->ports = gpr_malloc(sizeof(server_port) * INIT_PORT_CAP);
@ -136,7 +144,6 @@ static void finish_shutdown(grpc_tcp_server *s) {
s->shutdown_complete(s->shutdown_complete_arg);
gpr_mu_destroy(&s->mu);
gpr_cv_destroy(&s->cv);
gpr_free(s->ports);
gpr_free(s);
@ -156,40 +163,60 @@ static void destroyed_port(void *server, int success) {
static void dont_care_about_shutdown_completion(void *ignored) {}
/* called when all listening endpoints have been shutdown, so no further
events will be received on them - at this point it's safe to destroy
things */
static void deactivated_all_ports(grpc_tcp_server *s) {
size_t i;
/* delete ALL the things */
gpr_mu_lock(&s->mu);
if (!s->shutdown) {
gpr_mu_unlock(&s->mu);
return;
}
if (s->nports) {
for (i = 0; i < s->nports; i++) {
server_port *sp = &s->ports[i];
if (sp->addr.sockaddr.sa_family == AF_UNIX) {
unlink_if_unix_domain_socket(&sp->addr.un);
}
sp->destroyed_closure.cb = destroyed_port;
sp->destroyed_closure.cb_arg = s;
grpc_fd_orphan(sp->emfd, &sp->destroyed_closure, "tcp_listener_shutdown");
}
gpr_mu_unlock(&s->mu);
} else {
gpr_mu_unlock(&s->mu);
finish_shutdown(s);
}
}
void grpc_tcp_server_destroy(
grpc_tcp_server *s, void (*shutdown_complete)(void *shutdown_complete_arg),
void *shutdown_complete_arg) {
size_t i;
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->shutdown);
s->shutdown = 1;
s->shutdown_complete = shutdown_complete
? shutdown_complete
: dont_care_about_shutdown_completion;
s->shutdown_complete_arg = shutdown_complete_arg;
/* shutdown all fd's */
for (i = 0; i < s->nports; i++) {
grpc_fd_shutdown(s->ports[i].emfd);
}
/* wait while that happens */
/* TODO(ctiller): make this asynchronous also */
while (s->active_ports) {
gpr_cv_wait(&s->cv, &s->mu, gpr_inf_future);
}
/* delete ALL the things */
if (s->nports) {
if (s->active_ports) {
for (i = 0; i < s->nports; i++) {
server_port *sp = &s->ports[i];
if (sp->addr.sockaddr.sa_family == AF_UNIX) {
unlink_if_unix_domain_socket(&sp->addr.un);
}
grpc_fd_orphan(sp->emfd, destroyed_port, s);
grpc_fd_shutdown(s->ports[i].emfd);
}
gpr_mu_unlock(&s->mu);
} else {
gpr_mu_unlock(&s->mu);
finish_shutdown(s);
deactivated_all_ports(s);
}
}
@ -274,6 +301,8 @@ error:
/* event manager callback when reads are ready */
static void on_read(void *arg, int success) {
server_port *sp = arg;
grpc_fd *fdobj;
size_t i;
if (!success) {
goto error;
@ -306,12 +335,18 @@ static void on_read(void *arg, int success) {
grpc_sockaddr_to_string(&addr_str, (struct sockaddr *)&addr, 1);
gpr_asprintf(&name, "tcp-server-connection:%s", addr_str);
fdobj = grpc_fd_create(fd, name);
/* TODO(ctiller): revise this when we have server-side sharding
of channels -- we certainly should not be automatically adding every
incoming channel to every pollset owned by the server */
for (i = 0; i < sp->server->pollset_count; i++) {
grpc_pollset_add_fd(sp->server->pollsets[i], fdobj);
}
sp->server->cb(sp->server->cb_arg,
grpc_tcp_create(grpc_fd_create(fd, name),
GRPC_TCP_DEFAULT_READ_SLICE_SIZE));
grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE));
gpr_free(name);
gpr_free(addr_str);
}
abort();
@ -319,9 +354,11 @@ static void on_read(void *arg, int success) {
error:
gpr_mu_lock(&sp->server->mu);
if (0 == --sp->server->active_ports) {
gpr_cv_broadcast(&sp->server->cv);
gpr_mu_unlock(&sp->server->mu);
deactivated_all_ports(sp->server);
} else {
gpr_mu_unlock(&sp->server->mu);
}
gpr_mu_unlock(&sp->server->mu);
}
static int add_socket_to_server(grpc_tcp_server *s, int fd,
@ -452,6 +489,8 @@ void grpc_tcp_server_start(grpc_tcp_server *s, grpc_pollset **pollsets,
GPR_ASSERT(s->active_ports == 0);
s->cb = cb;
s->cb_arg = cb_arg;
s->pollsets = pollsets;
s->pollset_count = pollset_count;
for (i = 0; i < s->nports; i++) {
for (j = 0; j < pollset_count; j++) {
grpc_pollset_add_fd(pollsets[j], s->ports[i].emfd);
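
The shutdown path is now fully event-driven: grpc_fd_shutdown makes each listener's on_read fire with success == 0, the last deactivation calls deactivated_all_ports, and each fd orphan completion counts destroyed_ports up until finish_shutdown may run. A runnable toy model of the two counters (all names here are illustrative):

#include <stdio.h>

typedef struct {
  int active_ports;    /* listeners still receiving callbacks */
  int destroyed_ports; /* listeners whose fd is fully torn down */
  int nports;
} toy_server;

static void finish_shutdown(toy_server *s) { printf("all resources freed\n"); }

static void destroyed_port(toy_server *s) {
  if (++s->destroyed_ports == s->nports) finish_shutdown(s);
}

static void deactivated_all_ports(toy_server *s) {
  int i;
  /* phase 2: orphan each fd; the event manager later runs destroyed_port */
  for (i = 0; i < s->nports; i++) destroyed_port(s);
}

static void on_read_failed(toy_server *s) {
  /* phase 1: grpc_fd_shutdown made on_read fire with success == 0 */
  if (--s->active_ports == 0) deactivated_all_ports(s);
}

int main(void) {
  toy_server s = {2, 0, 2};
  on_read_failed(&s);
  on_read_failed(&s); /* the last deactivation drives the rest */
  return 0;
}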

@ -60,7 +60,7 @@ typedef struct grpc_json {
* strings in the tree. The input stream's UTF-8 isn't validated,
* as in, what you input is what you get as an output.
*
* All the keys and values in the grpc_json_t objects will be strings
* All the keys and values in the grpc_json objects will be strings
* pointing at your input buffer.
*
* Delete the allocated tree afterward using grpc_json_destroy().

@ -151,7 +151,7 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
case GRPC_JSON_STATE_VALUE_NUMBER_WITH_DECIMAL:
case GRPC_JSON_STATE_VALUE_NUMBER_ZERO:
case GRPC_JSON_STATE_VALUE_NUMBER_EPM:
success = json_reader_set_number(reader);
success = (gpr_uint32)json_reader_set_number(reader);
if (!success) return GRPC_JSON_PARSE_ERROR;
json_reader_string_clear(reader);
reader->state = GRPC_JSON_STATE_VALUE_END;
@ -177,7 +177,7 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
case GRPC_JSON_STATE_VALUE_NUMBER_WITH_DECIMAL:
case GRPC_JSON_STATE_VALUE_NUMBER_ZERO:
case GRPC_JSON_STATE_VALUE_NUMBER_EPM:
success = json_reader_set_number(reader);
success = (gpr_uint32)json_reader_set_number(reader);
if (!success) return GRPC_JSON_PARSE_ERROR;
json_reader_string_clear(reader);
reader->state = GRPC_JSON_STATE_VALUE_END;
@ -410,8 +410,8 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
} else {
return GRPC_JSON_PARSE_ERROR;
}
reader->unicode_char <<= 4;
reader->unicode_char |= c;
reader->unicode_char = (gpr_uint16)(reader->unicode_char << 4);
reader->unicode_char = (gpr_uint16)(reader->unicode_char | c);
switch (reader->state) {
case GRPC_JSON_STATE_STRING_ESCAPE_U1:
@ -438,8 +438,8 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
if (reader->unicode_high_surrogate == 0)
return GRPC_JSON_PARSE_ERROR;
utf32 = 0x10000;
utf32 += (reader->unicode_high_surrogate - 0xd800) * 0x400;
utf32 += reader->unicode_char - 0xdc00;
utf32 += (gpr_uint32)((reader->unicode_high_surrogate - 0xd800) * 0x400);
utf32 += (gpr_uint32)(reader->unicode_char - 0xdc00);
json_reader_string_add_utf32(reader, utf32);
reader->unicode_high_surrogate = 0;
} else {

@ -83,7 +83,7 @@ static void json_writer_output_check(void* userdata, size_t needed) {
if (state->free_space >= needed) return;
needed -= state->free_space;
/* Round up by 256 bytes. */
needed = (needed + 0xff) & ~0xff;
needed = (needed + 0xff) & ~0xffU;
state->output = gpr_realloc(state->output, state->allocated + needed);
state->free_space += needed;
state->allocated += needed;
@ -128,7 +128,7 @@ static void json_reader_string_add_char(void* userdata, gpr_uint32 c) {
json_reader_userdata* state = userdata;
GPR_ASSERT(state->string_ptr < state->input);
GPR_ASSERT(c <= 0xff);
*state->string_ptr++ = (char)c;
*state->string_ptr++ = (gpr_uint8)c;
}
/* We are converting a UTF-32 character into UTF-8 here,
@ -138,22 +138,22 @@ static void json_reader_string_add_utf32(void* userdata, gpr_uint32 c) {
if (c <= 0x7f) {
json_reader_string_add_char(userdata, c);
} else if (c <= 0x7ff) {
int b1 = 0xc0 | ((c >> 6) & 0x1f);
int b2 = 0x80 | (c & 0x3f);
gpr_uint32 b1 = 0xc0 | ((c >> 6) & 0x1f);
gpr_uint32 b2 = 0x80 | (c & 0x3f);
json_reader_string_add_char(userdata, b1);
json_reader_string_add_char(userdata, b2);
} else if (c <= 0xffff) {
int b1 = 0xe0 | ((c >> 12) & 0x0f);
int b2 = 0x80 | ((c >> 6) & 0x3f);
int b3 = 0x80 | (c & 0x3f);
gpr_uint32 b1 = 0xe0 | ((c >> 12) & 0x0f);
gpr_uint32 b2 = 0x80 | ((c >> 6) & 0x3f);
gpr_uint32 b3 = 0x80 | (c & 0x3f);
json_reader_string_add_char(userdata, b1);
json_reader_string_add_char(userdata, b2);
json_reader_string_add_char(userdata, b3);
} else if (c <= 0x1fffff) {
int b1 = 0xf0 | ((c >> 18) & 0x07);
int b2 = 0x80 | ((c >> 12) & 0x3f);
int b3 = 0x80 | ((c >> 6) & 0x3f);
int b4 = 0x80 | (c & 0x3f);
gpr_uint32 b1 = 0xf0 | ((c >> 18) & 0x07);
gpr_uint32 b2 = 0x80 | ((c >> 12) & 0x3f);
gpr_uint32 b3 = 0x80 | ((c >> 6) & 0x3f);
gpr_uint32 b4 = 0x80 | (c & 0x3f);
json_reader_string_add_char(userdata, b1);
json_reader_string_add_char(userdata, b2);
json_reader_string_add_char(userdata, b3);
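
A quick sanity check on the three-byte branch above, encoding U+20AC (the euro sign):

/* c = 0x20AC = 0010 0000 1010 1100:
     b1 = 0xe0 | ((c >> 12) & 0x0f) = 0xe0 | 0x02 = 0xe2
     b2 = 0x80 | ((c >> 6)  & 0x3f) = 0x80 | 0x02 = 0x82
     b3 = 0x80 |  (c        & 0x3f) = 0x80 | 0x2c = 0xac
   giving E2 82 AC, the canonical UTF-8 encoding of the euro sign. */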

@ -66,7 +66,7 @@ static void json_writer_output_indent(
"                "
"                ";
unsigned spaces = writer->depth * writer->indent;
unsigned spaces = (unsigned)(writer->depth * writer->indent);
if (writer->indent == 0) return;
@ -78,7 +78,7 @@ static void json_writer_output_indent(
while (spaces >= (sizeof(spacesstr) - 1)) {
json_writer_output_string_with_len(writer, spacesstr,
sizeof(spacesstr) - 1);
spaces -= (sizeof(spacesstr) - 1);
spaces -= (unsigned)(sizeof(spacesstr) - 1);
}
if (spaces == 0) return;
@ -119,7 +119,7 @@ static void json_writer_escape_string(grpc_json_writer* writer,
break;
} else if ((c >= 32) && (c <= 126)) {
if ((c == '\\') || (c == '"')) json_writer_output_char(writer, '\\');
json_writer_output_char(writer, c);
json_writer_output_char(writer, (char)c);
} else if ((c < 32) || (c == 127)) {
switch (c) {
case '\b':
@ -160,7 +160,7 @@ static void json_writer_escape_string(grpc_json_writer* writer,
}
for (i = 0; i < extra; i++) {
utf32 <<= 6;
c = *string++;
c = (gpr_uint8)(*string++);
/* Break out and bail on any invalid UTF-8 sequence, including \0. */
if ((c & 0xc0) != 0x80) {
valid = 0;
@ -193,10 +193,10 @@ static void json_writer_escape_string(grpc_json_writer* writer,
* That range is exactly 20 bits.
*/
utf32 -= 0x10000;
json_writer_escape_utf16(writer, 0xd800 | (utf32 >> 10));
json_writer_escape_utf16(writer, 0xdc00 | (utf32 & 0x3ff));
json_writer_escape_utf16(writer, (gpr_uint16)(0xd800 | (utf32 >> 10)));
json_writer_escape_utf16(writer, (gpr_uint16)(0xdc00 | (utf32 & 0x3ff)));
} else {
json_writer_escape_utf16(writer, utf32);
json_writer_escape_utf16(writer, (gpr_uint16)utf32);
}
}
}
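
A worked instance of the surrogate split, for U+1F600 (a code point above the BMP):

/* utf32 = 0x1F600; utf32 -= 0x10000   -> 0x0F600 (fits in 20 bits)
   high  = 0xd800 | (0x0F600 >> 10)    -> 0xd800 | 0x03d = 0xd83d
   low   = 0xdc00 | (0x0F600 & 0x3ff)  -> 0xdc00 | 0x200 = 0xde00
   so the writer emits \ud83d\ude00, the JSON escape for U+1F600. */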

@ -53,6 +53,11 @@ typedef struct {
grpc_credentials *creds;
grpc_mdstr *host;
grpc_mdstr *method;
/* pollset bound to this call; if we need to make external
network requests, they should be done under this pollset
so that work can progress when this call wants work to
progress */
grpc_pollset *pollset;
grpc_transport_op op;
size_t op_md_idx;
int sent_initial_metadata;
@ -161,8 +166,9 @@ static void send_security_metadata(grpc_call_element *elem,
service_url =
build_service_url(chand->security_connector->base.url_scheme, calld);
calld->op = *op; /* Copy op (originates from the caller's stack). */
grpc_credentials_get_request_metadata(calld->creds, service_url,
on_credentials_metadata, elem);
GPR_ASSERT(calld->pollset);
grpc_credentials_get_request_metadata(
calld->creds, calld->pollset, service_url, on_credentials_metadata, elem);
gpr_free(service_url);
}
@ -196,6 +202,10 @@ static void auth_start_transport_op(grpc_call_element *elem,
/* TODO(jboeuf): write the call auth context. */
if (op->bind_pollset) {
calld->pollset = op->bind_pollset;
}
if (op->send_ops && !calld->sent_initial_metadata) {
size_t nops = op->send_ops->nops;
grpc_stream_op *ops = op->send_ops->ops;
@ -258,6 +268,7 @@ static void init_call_elem(grpc_call_element *elem,
calld->creds = NULL;
calld->host = NULL;
calld->method = NULL;
calld->pollset = NULL;
calld->sent_initial_metadata = 0;
GPR_ASSERT(!initial_op || !initial_op->send_ops);
@ -296,13 +307,10 @@ static void init_channel_elem(grpc_channel_element *elem,
chand->security_connector =
(grpc_channel_security_connector *)grpc_security_connector_ref(sc);
chand->md_ctx = metadata_context;
chand->authority_string =
grpc_mdstr_from_string(chand->md_ctx, ":authority");
chand->authority_string = grpc_mdstr_from_string(chand->md_ctx, ":authority");
chand->path_string = grpc_mdstr_from_string(chand->md_ctx, ":path");
chand->error_msg_key =
grpc_mdstr_from_string(chand->md_ctx, "grpc-message");
chand->status_key =
grpc_mdstr_from_string(chand->md_ctx, "grpc-status");
chand->error_msg_key = grpc_mdstr_from_string(chand->md_ctx, "grpc-message");
chand->status_key = grpc_mdstr_from_string(chand->md_ctx, "grpc-status");
}
/* Destructor for channel data */
@ -326,6 +334,6 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
}
const grpc_channel_filter grpc_client_auth_filter = {
auth_start_transport_op, channel_op, sizeof(call_data), init_call_elem,
destroy_call_elem, sizeof(channel_data), init_channel_elem,
destroy_channel_elem, "client-auth"};
auth_start_transport_op, channel_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, "client-auth"};
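
For readers tracking the positional initializer just reformatted, the same nine slots annotated; the slot meanings are inferred from the member order of grpc_channel_filter at this revision, so treat this as a reading aid rather than a new definition:

const grpc_channel_filter grpc_client_auth_filter = {
    auth_start_transport_op, /* hook for outgoing transport ops */
    channel_op,              /* hook for channel-level ops */
    sizeof(call_data),       /* per-call state size */
    init_call_elem,          /* per-call constructor */
    destroy_call_elem,       /* per-call destructor */
    sizeof(channel_data),    /* per-channel state size */
    init_channel_elem,       /* per-channel constructor */
    destroy_channel_elem,    /* per-channel destructor */
    "client-auth"};          /* filter name */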

@ -106,6 +106,7 @@ int grpc_credentials_has_request_metadata_only(grpc_credentials *creds) {
}
void grpc_credentials_get_request_metadata(grpc_credentials *creds,
grpc_pollset *pollset,
const char *service_url,
grpc_credentials_metadata_cb cb,
void *user_data) {
@ -116,7 +117,8 @@ void grpc_credentials_get_request_metadata(grpc_credentials *creds,
}
return;
}
creds->vtable->get_request_metadata(creds, service_url, cb, user_data);
creds->vtable->get_request_metadata(creds, pollset, service_url, cb,
user_data);
}
grpc_security_status grpc_credentials_create_security_connector(
@ -191,9 +193,7 @@ static void ssl_server_destroy(grpc_server_credentials *creds) {
gpr_free(creds);
}
static int ssl_has_request_metadata(const grpc_credentials *creds) {
return 0;
}
static int ssl_has_request_metadata(const grpc_credentials *creds) { return 0; }
static int ssl_has_request_metadata_only(const grpc_credentials *creds) {
return 0;
@ -368,8 +368,8 @@ static int jwt_has_request_metadata_only(const grpc_credentials *creds) {
return 1;
}
static void jwt_get_request_metadata(grpc_credentials *creds,
grpc_pollset *pollset,
const char *service_url,
grpc_credentials_metadata_cb cb,
void *user_data) {
@ -450,6 +450,8 @@ grpc_credentials *grpc_jwt_credentials_create(const char *json_key,
from an http service. */
typedef void (*grpc_fetch_oauth2_func)(grpc_credentials_metadata_request *req,
grpc_httpcli_context *http_context,
grpc_pollset *pollset,
grpc_httpcli_response_cb response_cb,
gpr_timespec deadline);
@ -458,6 +460,8 @@ typedef struct {
gpr_mu mu;
grpc_credentials_md_store *access_token_md;
gpr_timespec token_expiration;
grpc_httpcli_context httpcli_context;
grpc_pollset_set pollset_set;
grpc_fetch_oauth2_func fetch_func;
} grpc_oauth2_token_fetcher_credentials;
@ -466,6 +470,7 @@ static void oauth2_token_fetcher_destroy(grpc_credentials *creds) {
(grpc_oauth2_token_fetcher_credentials *)creds;
grpc_credentials_md_store_unref(c->access_token_md);
gpr_mu_destroy(&c->mu);
grpc_httpcli_context_destroy(&c->httpcli_context);
gpr_free(c);
}
@ -481,8 +486,8 @@ static int oauth2_token_fetcher_has_request_metadata_only(
grpc_credentials_status
grpc_oauth2_token_fetcher_credentials_parse_server_response(
const grpc_httpcli_response *response,
grpc_credentials_md_store **token_md, gpr_timespec *token_lifetime) {
const grpc_httpcli_response *response, grpc_credentials_md_store **token_md,
gpr_timespec *token_lifetime) {
char *null_terminated_body = NULL;
char *new_access_token = NULL;
grpc_credentials_status status = GRPC_CREDENTIALS_OK;
@ -593,7 +598,7 @@ static void on_oauth2_token_fetcher_http_response(
}
static void oauth2_token_fetcher_get_request_metadata(
grpc_credentials *creds, const char *service_url,
grpc_credentials *creds, grpc_pollset *pollset, const char *service_url,
grpc_credentials_metadata_cb cb, void *user_data) {
grpc_oauth2_token_fetcher_credentials *c =
(grpc_oauth2_token_fetcher_credentials *)creds;
@ -605,7 +610,8 @@ static void oauth2_token_fetcher_get_request_metadata(
if (c->access_token_md != NULL &&
(gpr_time_cmp(gpr_time_sub(c->token_expiration, gpr_now()),
refresh_threshold) > 0)) {
cached_access_token_md = grpc_credentials_md_store_ref(c->access_token_md);
cached_access_token_md =
grpc_credentials_md_store_ref(c->access_token_md);
}
gpr_mu_unlock(&c->mu);
}
@ -616,7 +622,7 @@ static void oauth2_token_fetcher_get_request_metadata(
} else {
c->fetch_func(
grpc_credentials_metadata_request_create(creds, cb, user_data),
on_oauth2_token_fetcher_http_response,
&c->httpcli_context, pollset, on_oauth2_token_fetcher_http_response,
gpr_time_add(gpr_now(), refresh_threshold));
}
}
@ -629,6 +635,7 @@ static void init_oauth2_token_fetcher(grpc_oauth2_token_fetcher_credentials *c,
gpr_mu_init(&c->mu);
c->token_expiration = gpr_inf_past;
c->fetch_func = fetch_func;
grpc_pollset_set_init(&c->pollset_set);
}
/* -- ComputeEngine credentials. -- */
@ -640,6 +647,7 @@ static grpc_credentials_vtable compute_engine_vtable = {
static void compute_engine_fetch_oauth2(
grpc_credentials_metadata_request *metadata_req,
grpc_httpcli_context *httpcli_context, grpc_pollset *pollset,
grpc_httpcli_response_cb response_cb, gpr_timespec deadline) {
grpc_httpcli_header header = {"Metadata-Flavor", "Google"};
grpc_httpcli_request request;
@ -648,7 +656,8 @@ static void compute_engine_fetch_oauth2(
request.path = GRPC_COMPUTE_ENGINE_METADATA_TOKEN_PATH;
request.hdr_count = 1;
request.hdrs = &header;
grpc_httpcli_get(&request, deadline, response_cb, metadata_req);
grpc_httpcli_get(httpcli_context, pollset, &request, deadline, response_cb,
metadata_req);
}
grpc_credentials *grpc_compute_engine_credentials_create(void) {
@ -683,6 +692,7 @@ static grpc_credentials_vtable service_account_vtable = {
static void service_account_fetch_oauth2(
grpc_credentials_metadata_request *metadata_req,
grpc_httpcli_context *httpcli_context, grpc_pollset *pollset,
grpc_httpcli_response_cb response_cb, gpr_timespec deadline) {
grpc_service_account_credentials *c =
(grpc_service_account_credentials *)metadata_req->creds;
@ -708,8 +718,8 @@ static void service_account_fetch_oauth2(
request.hdr_count = 1;
request.hdrs = &header;
request.use_ssl = 1;
grpc_httpcli_post(&request, body, strlen(body), deadline, response_cb,
metadata_req);
grpc_httpcli_post(httpcli_context, pollset, &request, body, strlen(body),
deadline, response_cb, metadata_req);
gpr_free(body);
gpr_free(jwt);
}
@ -743,8 +753,7 @@ typedef struct {
} grpc_refresh_token_credentials;
static void refresh_token_destroy(grpc_credentials *creds) {
grpc_refresh_token_credentials *c =
(grpc_refresh_token_credentials *)creds;
grpc_refresh_token_credentials *c = (grpc_refresh_token_credentials *)creds;
grpc_auth_refresh_token_destruct(&c->refresh_token);
oauth2_token_fetcher_destroy(&c->base.base);
}
@ -756,6 +765,7 @@ static grpc_credentials_vtable refresh_token_vtable = {
static void refresh_token_fetch_oauth2(
grpc_credentials_metadata_request *metadata_req,
grpc_httpcli_context *httpcli_context, grpc_pollset *pollset,
grpc_httpcli_response_cb response_cb, gpr_timespec deadline) {
grpc_refresh_token_credentials *c =
(grpc_refresh_token_credentials *)metadata_req->creds;
@ -772,8 +782,8 @@ static void refresh_token_fetch_oauth2(
request.hdr_count = 1;
request.hdrs = &header;
request.use_ssl = 1;
grpc_httpcli_post(&request, body, strlen(body), deadline, response_cb,
metadata_req);
grpc_httpcli_post(httpcli_context, pollset, &request, body, strlen(body),
deadline, response_cb, metadata_req);
gpr_free(body);
}
@ -784,8 +794,7 @@ grpc_credentials *grpc_refresh_token_credentials_create(
grpc_auth_refresh_token_create_from_string(json_refresh_token);
if (!grpc_auth_refresh_token_is_valid(&refresh_token)) {
gpr_log(GPR_ERROR,
"Invalid input for refresh token credentials creation");
gpr_log(GPR_ERROR, "Invalid input for refresh token credentials creation");
return NULL;
}
c = gpr_malloc(sizeof(grpc_refresh_token_credentials));
@ -830,6 +839,7 @@ void on_simulated_token_fetch_done(void *user_data, int success) {
}
static void fake_oauth2_get_request_metadata(grpc_credentials *creds,
grpc_pollset *pollset,
const char *service_url,
grpc_credentials_metadata_cb cb,
void *user_data) {
@ -888,8 +898,7 @@ static int fake_transport_security_has_request_metadata_only(
return 0;
}
static grpc_security_status
fake_transport_security_create_security_connector(
static grpc_security_status fake_transport_security_create_security_connector(
grpc_credentials *c, const char *target, const grpc_channel_args *args,
grpc_credentials *request_metadata_creds,
grpc_channel_security_connector **sc, grpc_channel_args **new_args) {
@ -947,6 +956,7 @@ typedef struct {
grpc_credentials_md_store *md_elems;
char *service_url;
void *user_data;
grpc_pollset *pollset;
grpc_credentials_metadata_cb cb;
} grpc_composite_credentials_metadata_context;
@ -1015,7 +1025,8 @@ static void composite_metadata_cb(void *user_data,
grpc_credentials *inner_creds =
ctx->composite_creds->inner.creds_array[ctx->creds_index++];
if (grpc_credentials_has_request_metadata(inner_creds)) {
grpc_credentials_get_request_metadata(inner_creds, ctx->service_url,
grpc_credentials_get_request_metadata(inner_creds, ctx->pollset,
ctx->service_url,
composite_metadata_cb, ctx);
return;
}
@ -1028,6 +1039,7 @@ static void composite_metadata_cb(void *user_data,
}
static void composite_get_request_metadata(grpc_credentials *creds,
grpc_pollset *pollset,
const char *service_url,
grpc_credentials_metadata_cb cb,
void *user_data) {
@ -1043,11 +1055,12 @@ static void composite_get_request_metadata(grpc_credentials *creds,
ctx->user_data = user_data;
ctx->cb = cb;
ctx->composite_creds = c;
ctx->pollset = pollset;
ctx->md_elems = grpc_credentials_md_store_create(c->inner.num_creds);
while (ctx->creds_index < c->inner.num_creds) {
grpc_credentials *inner_creds = c->inner.creds_array[ctx->creds_index++];
if (grpc_credentials_has_request_metadata(inner_creds)) {
grpc_credentials_get_request_metadata(inner_creds, service_url,
grpc_credentials_get_request_metadata(inner_creds, pollset, service_url,
composite_metadata_cb, ctx);
return;
}
@ -1178,15 +1191,14 @@ static void iam_destroy(grpc_credentials *creds) {
gpr_free(c);
}
static int iam_has_request_metadata(const grpc_credentials *creds) {
return 1;
}
static int iam_has_request_metadata(const grpc_credentials *creds) { return 1; }
static int iam_has_request_metadata_only(const grpc_credentials *creds) {
return 1;
}
static void iam_get_request_metadata(grpc_credentials *creds,
grpc_pollset *pollset,
const char *service_url,
grpc_credentials_metadata_cb cb,
void *user_data) {

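A minimal sketch of the new metadata flow: callers must now pass the pollset the call is bound to, so any HTTP fetch the credentials perform can make progress while the caller polls. The callback shape is assumed from the grpc_credentials_metadata_cb typedef elsewhere in credentials.h; the helper names are illustrative:

static void on_metadata(void *user_data, grpc_credentials_md *md_elems,
                        size_t num_md, grpc_credentials_status status) {
  /* on GRPC_CREDENTIALS_OK, attach md_elems to the call's initial metadata */
}

static void fetch_call_metadata(grpc_credentials *creds, grpc_pollset *pollset,
                                const char *service_url) {
  grpc_credentials_get_request_metadata(creds, pollset, service_url,
                                        on_metadata, NULL /* user_data */);
}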
@ -108,7 +108,6 @@ grpc_credentials_md_store *grpc_credentials_md_store_ref(
grpc_credentials_md_store *store);
void grpc_credentials_md_store_unref(grpc_credentials_md_store *store);
/* --- grpc_credentials. --- */
/* It is the caller's responsibility to gpr_free the result if not NULL. */
@ -123,7 +122,7 @@ typedef struct {
void (*destroy)(grpc_credentials *c);
int (*has_request_metadata)(const grpc_credentials *c);
int (*has_request_metadata_only)(const grpc_credentials *c);
void (*get_request_metadata)(grpc_credentials *c,
void (*get_request_metadata)(grpc_credentials *c, grpc_pollset *pollset,
const char *service_url,
grpc_credentials_metadata_cb cb,
void *user_data);
@ -131,7 +130,6 @@ typedef struct {
grpc_credentials *c, const char *target, const grpc_channel_args *args,
grpc_credentials *request_metadata_creds,
grpc_channel_security_connector **sc, grpc_channel_args **new_args);
} grpc_credentials_vtable;
struct grpc_credentials {
@ -145,6 +143,7 @@ void grpc_credentials_unref(grpc_credentials *creds);
int grpc_credentials_has_request_metadata(grpc_credentials *creds);
int grpc_credentials_has_request_metadata_only(grpc_credentials *creds);
void grpc_credentials_get_request_metadata(grpc_credentials *creds,
grpc_pollset *pollset,
const char *service_url,
grpc_credentials_metadata_cb cb,
void *user_data);
@ -177,8 +176,8 @@ grpc_credentials *grpc_credentials_contains_type(
/* Exposed for testing only. */
grpc_credentials_status
grpc_oauth2_token_fetcher_credentials_parse_server_response(
const struct grpc_httpcli_response *response, grpc_credentials_md_store **token_md,
gpr_timespec *token_lifetime);
const struct grpc_httpcli_response *response,
grpc_credentials_md_store **token_md, gpr_timespec *token_lifetime);
/* Simulates an oauth2 token fetch with the specified value for testing. */
grpc_credentials *grpc_fake_oauth2_credentials_create(
@ -200,4 +199,4 @@ struct grpc_server_credentials {
grpc_security_status grpc_server_credentials_create_security_connector(
grpc_server_credentials *creds, grpc_security_connector **sc);
#endif /* GRPC_INTERNAL_CORE_SECURITY_CREDENTIALS_H */

@ -55,13 +55,10 @@ static int compute_engine_detection_done = 0;
static gpr_mu g_mu;
static gpr_once g_once = GPR_ONCE_INIT;
static void init_default_credentials(void) {
gpr_mu_init(&g_mu);
}
static void init_default_credentials(void) { gpr_mu_init(&g_mu); }
typedef struct {
gpr_cv cv;
gpr_mu mu;
grpc_pollset pollset;
int is_done;
int success;
} compute_engine_detector;
@ -82,22 +79,22 @@ static void on_compute_engine_detection_http_response(
}
}
}
gpr_mu_lock(&detector->mu);
gpr_mu_lock(GRPC_POLLSET_MU(&detector->pollset));
detector->is_done = 1;
gpr_mu_unlock(&detector->mu);
gpr_cv_signal(&detector->cv);
grpc_pollset_kick(&detector->pollset);
gpr_mu_unlock(GRPC_POLLSET_MU(&detector->pollset));
}
static int is_stack_running_on_compute_engine(void) {
compute_engine_detector detector;
grpc_httpcli_request request;
grpc_httpcli_context context;
/* The http call is local. If it takes more than one sec, it is for sure not
on compute engine. */
gpr_timespec max_detection_delay = {1, 0};
gpr_mu_init(&detector.mu);
gpr_cv_init(&detector.cv);
grpc_pollset_init(&detector.pollset);
detector.is_done = 0;
detector.success = 0;
@ -105,19 +102,23 @@ static int is_stack_running_on_compute_engine(void) {
request.host = GRPC_COMPUTE_ENGINE_DETECTION_HOST;
request.path = "/";
grpc_httpcli_get(&request, gpr_time_add(gpr_now(), max_detection_delay),
grpc_httpcli_context_init(&context);
grpc_httpcli_get(&context, &detector.pollset, &request,
gpr_time_add(gpr_now(), max_detection_delay),
on_compute_engine_detection_http_response, &detector);
/* Block until we get the response. This is not ideal but this should only be
called once for the lifetime of the process by the default credentials. */
gpr_mu_lock(&detector.mu);
gpr_mu_lock(GRPC_POLLSET_MU(&detector.pollset));
while (!detector.is_done) {
gpr_cv_wait(&detector.cv, &detector.mu, gpr_inf_future);
grpc_pollset_work(&detector.pollset, gpr_inf_future);
}
gpr_mu_unlock(&detector.mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&detector.pollset));
grpc_httpcli_context_destroy(&context);
grpc_pollset_destroy(&detector.pollset);
gpr_mu_destroy(&detector.mu);
gpr_cv_destroy(&detector.cv);
return detector.success;
}
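
The waiting idiom the detector adopts, in isolation: block inside grpc_pollset_work under the pollset's own mutex until a completion callback flips a flag and kicks the pollset. A sketch; `done` would be set by the HTTP response callback as above:

static void wait_until_done(grpc_pollset *pollset, int *done) {
  gpr_mu_lock(GRPC_POLLSET_MU(pollset));
  while (!*done) {
    grpc_pollset_work(pollset, gpr_inf_future);
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(pollset));
}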

@ -386,29 +386,13 @@ static int ssl_host_matches_name(const tsi_peer *peer, const char *peer_name) {
return r;
}
static grpc_auth_context *tsi_ssl_peer_to_auth_context(const tsi_peer *peer) {
/* We bet that iterating over a handful of properties twice will be faster
   than having to realloc on average. */
size_t auth_prop_count = 1; /* for transport_security_type. */
grpc_auth_context *tsi_ssl_peer_to_auth_context(const tsi_peer *peer) {
size_t i;
const char *peer_identity_property_name = NULL;
grpc_auth_context *ctx = NULL;
for (i = 0; i < peer->property_count; i++) {
const tsi_peer_property *prop = &peer->properties[i];
if (prop->name == NULL) continue;
if (strcmp(prop->name, TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY) == 0) {
auth_prop_count++;
/* If there is no subject alt name, have the CN as the identity. */
if (peer_identity_property_name == NULL) {
peer_identity_property_name = prop->name;
}
} else if (strcmp(prop->name,
TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY) == 0) {
auth_prop_count++;
peer_identity_property_name = prop->name;
}
}
ctx = grpc_auth_context_create(NULL, auth_prop_count);
/* The caller has checked the certificate type property. */
GPR_ASSERT(peer->property_count >= 1);
ctx = grpc_auth_context_create(NULL, peer->property_count);
ctx->properties[0] = grpc_auth_property_init_from_cstring(
GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME,
GRPC_SSL_TRANSPORT_SECURITY_TYPE);
@ -417,15 +401,19 @@ static grpc_auth_context *tsi_ssl_peer_to_auth_context(const tsi_peer *peer) {
const tsi_peer_property *prop = &peer->properties[i];
if (prop->name == NULL) continue;
if (strcmp(prop->name, TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY) == 0) {
/* If there is no subject alt name, have the CN as the identity. */
if (ctx->peer_identity_property_name == NULL) {
ctx->peer_identity_property_name = GRPC_X509_CN_PROPERTY_NAME;
}
ctx->properties[ctx->property_count++] = grpc_auth_property_init(
GRPC_X509_CN_PROPERTY_NAME, prop->value.data, prop->value.length);
} else if (strcmp(prop->name,
TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY) == 0) {
ctx->peer_identity_property_name = GRPC_X509_SAN_PROPERTY_NAME;
ctx->properties[ctx->property_count++] = grpc_auth_property_init(
GRPC_X509_SAN_PROPERTY_NAME, prop->value.data, prop->value.length);
}
}
GPR_ASSERT(auth_prop_count == ctx->property_count);
return ctx;
}

@ -203,4 +203,7 @@ grpc_security_status grpc_ssl_server_security_connector_create(
const tsi_peer_property *tsi_peer_get_property_by_name(
const tsi_peer *peer, const char *name);
/* Exposed for testing only. */
grpc_auth_context *tsi_ssl_peer_to_auth_context(const tsi_peer *peer);
#endif /* GRPC_INTERNAL_CORE_SECURITY_SECURITY_CONNECTOR_H */

@ -66,6 +66,10 @@ static void state_ref(grpc_server_secure_state *state) {
static void state_unref(grpc_server_secure_state *state) {
if (gpr_unref(&state->refcount)) {
/* ensure all threads have unlocked */
gpr_mu_lock(&state->mu);
gpr_mu_unlock(&state->mu);
/* clean up */
grpc_security_connector_unref(state->sc);
gpr_free(state);
}
@ -124,16 +128,22 @@ static void start(grpc_server *server, void *statep, grpc_pollset **pollsets,
grpc_tcp_server_start(state->tcp, pollsets, pollset_count, on_accept, state);
}
static void destroy_done(void *statep) {
grpc_server_secure_state *state = statep;
grpc_server_listener_destroy_done(state->server);
state_unref(state);
}
/* Server callback: destroy the tcp listener (so we don't generate further
callbacks) */
static void destroy(grpc_server *server, void *statep) {
grpc_server_secure_state *state = statep;
grpc_tcp_server *tcp;
gpr_mu_lock(&state->mu);
state->is_shutdown = 1;
grpc_tcp_server_destroy(state->tcp, grpc_server_listener_destroy_done,
server);
tcp = state->tcp;
gpr_mu_unlock(&state->mu);
state_unref(state);
grpc_tcp_server_destroy(tcp, destroy_done, state);
}
int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,

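The lock/unlock pair added to state_unref is a barrier, not a critical section: acquiring the mutex cannot succeed until every thread still inside an on_accept critical section has released it, after which freeing is safe. The idiom in isolation (illustrative helper, not part of the source):

#include <grpc/support/alloc.h>
#include <grpc/support/sync.h>

static void drain_then_free(gpr_mu *mu, void *obj) {
  gpr_mu_lock(mu);   /* waits out any thread still holding mu */
  gpr_mu_unlock(mu); /* nothing to protect; the acquire was the point */
  gpr_free(obj);
}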
@ -228,7 +228,7 @@ static void value_state(gpr_cmdline *cl, char *arg) {
cl->cur_arg->name);
print_usage_and_die(cl);
}
*(int *)cl->cur_arg->value = intval;
*(int *)cl->cur_arg->value = (int)intval;
break;
case ARGTYPE_BOOL:
if (0 == strcmp(arg, "1") || 0 == strcmp(arg, "true")) {
@ -287,8 +287,8 @@ static void normal_state(gpr_cmdline *cl, char *arg) {
eq = strchr(arg, '=');
if (eq != NULL) {
/* copy the string into a temp buffer and extract the name */
tmp = arg_name = gpr_malloc(eq - arg + 1);
memcpy(arg_name, arg, eq - arg);
tmp = arg_name = gpr_malloc((size_t)(eq - arg + 1));
memcpy(arg_name, arg, (size_t)(eq - arg));
arg_name[eq - arg] = 0;
} else {
arg_name = arg;

@ -51,7 +51,9 @@
static int ncpus = 0;
static void init_num_cpus() {
ncpus = sysconf(_SC_NPROCESSORS_ONLN);
/* This must be signed. sysconf returns -1 when the number cannot be
determined */
ncpus = (int)sysconf(_SC_NPROCESSORS_ONLN);
if (ncpus < 1) {
gpr_log(GPR_ERROR, "Cannot determine number of CPUs: assuming 1");
ncpus = 1;
@ -61,7 +63,7 @@ static void init_num_cpus() {
unsigned gpr_cpu_num_cores(void) {
static gpr_once once = GPR_ONCE_INIT;
gpr_once_init(&once, init_num_cpus);
return ncpus;
return (unsigned)ncpus;
}
unsigned gpr_cpu_current_cpu(void) {
@ -70,7 +72,7 @@ unsigned gpr_cpu_current_cpu(void) {
gpr_log(GPR_ERROR, "Error determining current CPU: %s\n", strerror(errno));
return 0;
}
return cpu;
return (unsigned)cpu;
}
#endif /* GPR_CPU_LINUX */

@ -58,7 +58,8 @@ gpr_slice gpr_load_file(const char *filename, int add_null_terminator,
goto end;
}
fseek(file, 0, SEEK_END);
contents_size = ftell(file);
/* Converting to size_t on the assumption that it will not fail */
contents_size = (size_t)ftell(file);
fseek(file, 0, SEEK_SET);
contents = gpr_malloc(contents_size + (add_null_terminator ? 1 : 0));
bytes_read = fread(contents, 1, contents_size, file);

@ -189,12 +189,12 @@ static double threshold_for_count_below(gpr_histogram *h, double count_below) {
break;
}
}
return (bucket_start(h, lower_idx) + bucket_start(h, upper_idx)) / 2.0;
return (bucket_start(h, (double)lower_idx) +
        bucket_start(h, (double)upper_idx)) /
       2.0;
} else {
/* treat values as uniform throughout the bucket, and find where this value
should lie */
lower_bound = bucket_start(h, lower_idx);
upper_bound = bucket_start(h, lower_idx + 1);
lower_bound = bucket_start(h, (double)lower_idx);
upper_bound = bucket_start(h, (double)(lower_idx + 1));
return GPR_CLAMP(upper_bound - (upper_bound - lower_bound) *
(count_so_far - count_below) /
h->buckets[lower_idx],

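A worked example of the interpolation, with assumed numbers: bucket [10, 20) holds h->buckets[lower_idx] = 5 samples and count_so_far = 8 samples lie below 20; to place count_below = 6:

/* 20 - (20 - 10) * (8 - 6) / 5 = 20 - 4 = 16
   i.e. under the uniform-within-bucket assumption, 6 of the 8 samples
   counted so far fall below the value 16. */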
@ -76,7 +76,7 @@ void gpr_split_host_port(const char *name, char **host, char **port) {
return;
}
host_start = name + 1;
host_len = rbracket - host_start;
host_len = (size_t)(rbracket - host_start);
if (memchr(host_start, ':', host_len) == NULL) {
/* Require all bracketed hosts to contain a colon, because a hostname or
IPv4 address should never use brackets. */
@ -87,7 +87,7 @@ void gpr_split_host_port(const char *name, char **host, char **port) {
if (colon != NULL && strchr(colon + 1, ':') == NULL) {
/* Exactly 1 colon. Split into host:port. */
host_start = name;
host_len = colon - name;
host_len = (size_t)(colon - name);
port_start = colon + 1;
} else {
/* 0 or 2+ colons. Bare hostname or IPv6 literal. */
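
A usage sketch for gpr_split_host_port (inputs illustrative); the caller owns the out-strings and must gpr_free whichever are non-NULL:

#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>

static void split_examples(void) {
  char *host = NULL;
  char *port = NULL;
  gpr_split_host_port("[::1]:50051", &host, &port);
  /* host -> "::1", port -> "50051" */
  gpr_free(host);
  gpr_free(port);
  gpr_split_host_port("::1", &host, &port);
  /* host -> "::1", port -> NULL: a bare IPv6 literal carries no port */
  gpr_free(host);
}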

@ -94,23 +94,22 @@ void gpr_default_log(gpr_log_func_args *args) {
fprintf(stderr, "%s%s.%09u %5lu %s:%d] %s\n",
gpr_log_severity_string(args->severity), time_buffer,
(int)(now.tv_nsec), GetCurrentThreadId(),
args->file, args->line, args->message);
(int)(now.tv_nsec), GetCurrentThreadId(), args->file, args->line,
args->message);
}
char *gpr_format_message(DWORD messageid) {
LPTSTR tmessage;
char *message;
DWORD status = FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS,
NULL, messageid,
MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
(LPTSTR)(&tmessage), 0, NULL);
DWORD status = FormatMessage(
FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS,
NULL, messageid, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
(LPTSTR)(&tmessage), 0, NULL);
if (status == 0) return gpr_strdup("Unable to retrieve error string");
message = gpr_tchar_to_char(tmessage);
LocalFree(tmessage);
return message;
}
#endif /* GPR_WIN32 */

@ -48,7 +48,7 @@
gpr_uint32 gpr_murmur_hash3(const void *key, size_t len, gpr_uint32 seed) {
const gpr_uint8 *data = (const gpr_uint8 *)key;
const int nblocks = len / 4;
const size_t nblocks = len / 4;
int i;
gpr_uint32 h1 = seed;
@ -57,11 +57,11 @@ gpr_uint32 gpr_murmur_hash3(const void *key, size_t len, gpr_uint32 seed) {
const gpr_uint32 c1 = 0xcc9e2d51;
const gpr_uint32 c2 = 0x1b873593;
const gpr_uint32 *blocks = (const uint32_t *)(data + nblocks * 4);
const uint8_t *tail = (const uint8_t *)(data + nblocks * 4);
const gpr_uint32 *blocks = ((const gpr_uint32 *)key) + nblocks;
const gpr_uint8 *tail = (const gpr_uint8 *)(data + nblocks * 4);
/* body */
for (i = -nblocks; i; i++) {
for (i = -(int)nblocks; i; i++) {
k1 = GETBLOCK32(blocks, i);
k1 *= c1;
@ -78,9 +78,9 @@ gpr_uint32 gpr_murmur_hash3(const void *key, size_t len, gpr_uint32 seed) {
/* tail */
switch (len & 3) {
case 3:
k1 ^= tail[2] << 16;
k1 ^= ((gpr_uint32)tail[2]) << 16;
case 2:
k1 ^= tail[1] << 8;
k1 ^= ((gpr_uint32)tail[1]) << 8;
case 1:
k1 ^= tail[0];
k1 *= c1;
@ -90,7 +90,7 @@ gpr_uint32 gpr_murmur_hash3(const void *key, size_t len, gpr_uint32 seed) {
};
/* finalization */
h1 ^= len;
h1 ^= (gpr_uint32)len;
FMIX32(h1);
return h1;
}

@ -194,7 +194,7 @@ gpr_slice gpr_slice_malloc(size_t length) {
} else {
/* small slice: just inline the data */
slice.refcount = NULL;
slice.data.inlined.length = length;
slice.data.inlined.length = (gpr_uint8)length;
}
return slice;
}
@ -202,11 +202,11 @@ gpr_slice gpr_slice_malloc(size_t length) {
gpr_slice gpr_slice_sub_no_ref(gpr_slice source, size_t begin, size_t end) {
gpr_slice subset;
GPR_ASSERT(end >= begin);
if (source.refcount) {
/* Enforce preconditions */
GPR_ASSERT(source.data.refcounted.length >= begin);
GPR_ASSERT(source.data.refcounted.length >= end);
GPR_ASSERT(end >= begin);
/* Build the result */
subset.refcount = source.refcount;
@ -214,8 +214,10 @@ gpr_slice gpr_slice_sub_no_ref(gpr_slice source, size_t begin, size_t end) {
subset.data.refcounted.bytes = source.data.refcounted.bytes + begin;
subset.data.refcounted.length = end - begin;
} else {
/* Enforce preconditions */
GPR_ASSERT(source.data.inlined.length >= end);
subset.refcount = NULL;
subset.data.inlined.length = end - begin;
subset.data.inlined.length = (gpr_uint8)(end - begin);
memcpy(subset.data.inlined.bytes, source.data.inlined.bytes + begin,
end - begin);
}
@ -227,7 +229,7 @@ gpr_slice gpr_slice_sub(gpr_slice source, size_t begin, size_t end) {
if (end - begin <= sizeof(subset.data.inlined.bytes)) {
subset.refcount = NULL;
subset.data.inlined.length = end - begin;
subset.data.inlined.length = (gpr_uint8)(end - begin);
memcpy(subset.data.inlined.bytes, GPR_SLICE_START_PTR(source) + begin,
end - begin);
} else {
@ -245,17 +247,17 @@ gpr_slice gpr_slice_split_tail(gpr_slice *source, size_t split) {
/* inlined data, copy it out */
GPR_ASSERT(source->data.inlined.length >= split);
tail.refcount = NULL;
tail.data.inlined.length = source->data.inlined.length - split;
tail.data.inlined.length = (gpr_uint8)(source->data.inlined.length - split);
memcpy(tail.data.inlined.bytes, source->data.inlined.bytes + split,
tail.data.inlined.length);
source->data.inlined.length = split;
source->data.inlined.length = (gpr_uint8)split;
} else {
size_t tail_length = source->data.refcounted.length - split;
GPR_ASSERT(source->data.refcounted.length >= split);
if (tail_length < sizeof(tail.data.inlined.bytes)) {
/* Copy out the bytes - it'll be cheaper than refcounting */
tail.refcount = NULL;
tail.data.inlined.length = tail_length;
tail.data.inlined.length = (gpr_uint8)tail_length;
memcpy(tail.data.inlined.bytes, source->data.refcounted.bytes + split,
tail_length);
} else {
@ -280,16 +282,16 @@ gpr_slice gpr_slice_split_head(gpr_slice *source, size_t split) {
GPR_ASSERT(source->data.inlined.length >= split);
head.refcount = NULL;
head.data.inlined.length = split;
head.data.inlined.length = (gpr_uint8)split;
memcpy(head.data.inlined.bytes, source->data.inlined.bytes, split);
source->data.inlined.length -= split;
source->data.inlined.length = (gpr_uint8)(source->data.inlined.length - split);
memmove(source->data.inlined.bytes, source->data.inlined.bytes + split,
source->data.inlined.length);
} else if (split < sizeof(head.data.inlined.bytes)) {
GPR_ASSERT(source->data.refcounted.length >= split);
head.refcount = NULL;
head.data.inlined.length = split;
head.data.inlined.length = (gpr_uint8)split;
memcpy(head.data.inlined.bytes, source->data.refcounted.bytes, split);
source->data.refcounted.bytes += split;
source->data.refcounted.length -= split;
@ -311,7 +313,7 @@ gpr_slice gpr_slice_split_head(gpr_slice *source, size_t split) {
}
int gpr_slice_cmp(gpr_slice a, gpr_slice b) {
int d = GPR_SLICE_LENGTH(a) - GPR_SLICE_LENGTH(b);
int d = (int)(GPR_SLICE_LENGTH(a) - GPR_SLICE_LENGTH(b));
if (d != 0) return d;
return memcmp(GPR_SLICE_START_PTR(a), GPR_SLICE_START_PTR(b),
GPR_SLICE_LENGTH(a));
@ -319,7 +321,7 @@ int gpr_slice_cmp(gpr_slice a, gpr_slice b) {
int gpr_slice_str_cmp(gpr_slice a, const char *b) {
size_t b_length = strlen(b);
int d = GPR_SLICE_LENGTH(a) - b_length;
int d = (int)(GPR_SLICE_LENGTH(a) - b_length);
if (d != 0) return d;
return memcmp(GPR_SLICE_START_PTR(a), b, b_length);
}

@ -81,7 +81,7 @@ gpr_uint8 *gpr_slice_buffer_tiny_add(gpr_slice_buffer *sb, unsigned n) {
if ((back->data.inlined.length + n) > sizeof(back->data.inlined.bytes))
goto add_new;
out = back->data.inlined.bytes + back->data.inlined.length;
back->data.inlined.length += n;
back->data.inlined.length = (gpr_uint8)(back->data.inlined.length + n);
return out;
add_new:
@ -89,7 +89,7 @@ add_new:
back = &sb->slices[sb->count];
sb->count++;
back->refcount = NULL;
back->data.inlined.length = n;
back->data.inlined.length = (gpr_uint8)n;
return back->data.inlined.bytes;
}
@ -116,7 +116,7 @@ void gpr_slice_buffer_add(gpr_slice_buffer *sb, gpr_slice s) {
GPR_SLICE_INLINED_SIZE) {
memcpy(back->data.inlined.bytes + back->data.inlined.length,
s.data.inlined.bytes, s.data.inlined.length);
back->data.inlined.length += s.data.inlined.length;
back->data.inlined.length =
    (gpr_uint8)(back->data.inlined.length + s.data.inlined.length);
} else {
size_t cp1 = GPR_SLICE_INLINED_SIZE - back->data.inlined.length;
memcpy(back->data.inlined.bytes + back->data.inlined.length,
@ -126,7 +126,7 @@ void gpr_slice_buffer_add(gpr_slice_buffer *sb, gpr_slice s) {
back = &sb->slices[n];
sb->count = n + 1;
back->refcount = NULL;
back->data.inlined.length = s.data.inlined.length - cp1;
back->data.inlined.length = (gpr_uint8)(s.data.inlined.length - cp1);
memcpy(back->data.inlined.bytes, s.data.inlined.bytes + cp1,
s.data.inlined.length - cp1);
}

@ -94,7 +94,7 @@ char *gpr_hexdump(const char *buf, size_t len, gpr_uint32 flags) {
if (len) hexout_append(&out, ' ');
hexout_append(&out, '\'');
for (cur = beg; cur != end; ++cur) {
hexout_append(&out, isprint(*cur) ? *cur : '.');
hexout_append(&out, isprint(*cur) ? *(char*)cur : '.');
}
hexout_append(&out, '\'');
}
@ -113,7 +113,7 @@ int gpr_parse_bytes_to_uint32(const char *buf, size_t len, gpr_uint32 *result) {
for (i = 0; i < len; i++) {
if (buf[i] < '0' || buf[i] > '9') return 0; /* bad char */
new = 10 * out + (buf[i] - '0');
new = 10 * out + (gpr_uint32)(buf[i] - '0');
if (new < out) return 0; /* overflow */
out = new;
}
@ -143,7 +143,7 @@ int gpr_ltoa(long value, char *string) {
if (neg) value = -value;
while (value) {
string[i++] = '0' + value % 10;
string[i++] = (char)('0' + value % 10);
value /= 10;
}
if (neg) string[i++] = '-';

@ -66,8 +66,8 @@ gpr_subprocess *gpr_subprocess_create(int argc, const char **argv) {
if (pid == -1) {
return NULL;
} else if (pid == 0) {
exec_args = gpr_malloc((argc + 1) * sizeof(char *));
memcpy(exec_args, argv, argc * sizeof(char *));
exec_args = gpr_malloc(((size_t)argc + 1) * sizeof(char *));
memcpy(exec_args, argv, (size_t)argc * sizeof(char *));
exec_args[argc] = NULL;
execv(exec_args[0], exec_args);
/* if we reach here, an error has occurred */

@ -86,11 +86,11 @@ gpr_timespec gpr_time_from_nanos(long ns) {
result = gpr_inf_past;
} else if (ns >= 0) {
result.tv_sec = ns / GPR_NS_PER_SEC;
result.tv_nsec = ns - result.tv_sec * GPR_NS_PER_SEC;
result.tv_nsec = (int)(ns - result.tv_sec * GPR_NS_PER_SEC);
} else {
/* Calculation carefully formulated to avoid any possible under/overflow. */
result.tv_sec = (-(999999999 - (ns + GPR_NS_PER_SEC)) / GPR_NS_PER_SEC) - 1;
result.tv_nsec = ns - result.tv_sec * GPR_NS_PER_SEC;
result.tv_nsec = (int)(ns - result.tv_sec * GPR_NS_PER_SEC);
}
return result;
}
@ -103,11 +103,11 @@ gpr_timespec gpr_time_from_micros(long us) {
result = gpr_inf_past;
} else if (us >= 0) {
result.tv_sec = us / 1000000;
result.tv_nsec = (us - result.tv_sec * 1000000) * 1000;
result.tv_nsec = (int)((us - result.tv_sec * 1000000) * 1000);
} else {
/* Calculation carefully formulated to avoid any possible under/overflow. */
result.tv_sec = (-(999999 - (us + 1000000)) / 1000000) - 1;
result.tv_nsec = (us - result.tv_sec * 1000000) * 1000;
result.tv_nsec = (int)((us - result.tv_sec * 1000000) * 1000);
}
return result;
}
@ -120,11 +120,11 @@ gpr_timespec gpr_time_from_millis(long ms) {
result = gpr_inf_past;
} else if (ms >= 0) {
result.tv_sec = ms / 1000;
result.tv_nsec = (ms - result.tv_sec * 1000) * 1000000;
result.tv_nsec = (int)((ms - result.tv_sec * 1000) * 1000000);
} else {
/* Calculation carefully formulated to avoid any possible under/overflow. */
result.tv_sec = (-(999 - (ms + 1000)) / 1000) - 1;
result.tv_nsec = (ms - result.tv_sec * 1000) * 1000000;
result.tv_nsec = (int)((ms - result.tv_sec * 1000) * 1000000);
}
return result;
}
@ -245,10 +245,10 @@ gpr_int32 gpr_time_to_millis(gpr_timespec t) {
care?) */
return -2147483647;
} else {
return t.tv_sec * GPR_MS_PER_SEC + t.tv_nsec / GPR_NS_PER_MS;
return (gpr_int32)(t.tv_sec * GPR_MS_PER_SEC + t.tv_nsec / GPR_NS_PER_MS);
}
}
double gpr_timespec_to_micros(gpr_timespec t) {
return t.tv_sec * GPR_US_PER_SEC + t.tv_nsec * 1e-3;
return (double)t.tv_sec * GPR_US_PER_SEC + t.tv_nsec * 1e-3;
}

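The time conversions keep their carefully balanced negative branch and only add (int) casts on the final tv_nsec store. As a quick check of that branch, take ns = -1 in gpr_time_from_nanos: tv_sec evaluates to -1 and tv_nsec to 999999999, i.e. floor division plus a non-negative remainder, with no intermediate leaving the range of long. A compilable check of the arithmetic:

    #include <stdio.h>

    #define GPR_NS_PER_SEC 1000000000

    int main(void) {
      long ns = -1; /* one nanosecond before the epoch */
      long tv_sec;
      int tv_nsec;
      /* negative branch of gpr_time_from_nanos: a floor division written
         so that no intermediate value can leave the range of long */
      tv_sec = (-(999999999 - (ns + GPR_NS_PER_SEC)) / GPR_NS_PER_SEC) - 1;
      /* remainder is always in [0, 10^9), so the (int) cast is safe */
      tv_nsec = (int)(ns - tv_sec * GPR_NS_PER_SEC);
      printf("%ld s + %d ns\n", tv_sec, tv_nsec); /* -1 s + 999999999 ns */
      return 0;
    }
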
@ -51,7 +51,7 @@ static struct timespec timespec_from_gpr(gpr_timespec gts) {
static gpr_timespec gpr_from_timespec(struct timespec ts) {
gpr_timespec rv;
rv.tv_sec = ts.tv_sec;
rv.tv_nsec = ts.tv_nsec;
rv.tv_nsec = (int)ts.tv_nsec;
return rv;
}

@ -64,11 +64,11 @@ void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
grpc_msg_decompress(reader->buffer_in->data.raw.compression,
&reader->buffer_in->data.raw.slice_buffer,
&decompressed_slices_buffer);
reader->buffer_out = grpc_raw_byte_buffer_create(
decompressed_slices_buffer.slices,
decompressed_slices_buffer.count);
reader->buffer_out =
grpc_raw_byte_buffer_create(decompressed_slices_buffer.slices,
decompressed_slices_buffer.count);
gpr_slice_buffer_destroy(&decompressed_slices_buffer);
} else { /* not compressed, use the input buffer as output */
} else { /* not compressed, use the input buffer as output */
reader->buffer_out = reader->buffer_in;
}
reader->current.index = 0;

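With this change grpc_byte_buffer_reader_init transparently decompresses: a compressed input is expanded into a temporary slice buffer and repacked into a fresh buffer_out, while an uncompressed input is aliased directly. A usage sketch against this era's reader API (it assumes the gRPC headers are available; grpc_byte_buffer_reader_next and grpc_byte_buffer_reader_destroy are the companion calls from byte_buffer_reader.h, and error handling is elided):

    #include <grpc/byte_buffer.h>
    #include <grpc/byte_buffer_reader.h>
    #include <grpc/support/slice.h>

    /* sum the decompressed length of a byte buffer; the reader hides
       whether the payload was compressed on the wire */
    static size_t total_length(grpc_byte_buffer *bb) {
      grpc_byte_buffer_reader reader;
      gpr_slice slice;
      size_t len = 0;
      grpc_byte_buffer_reader_init(&reader, bb); /* decompresses if needed */
      while (grpc_byte_buffer_reader_next(&reader, &slice)) {
        len += GPR_SLICE_LENGTH(slice);
        gpr_slice_unref(slice);
      }
      grpc_byte_buffer_reader_destroy(&reader);
      return len;
    }
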
@ -100,6 +100,8 @@ typedef enum {
/* Status came from 'the wire' - or somewhere below the surface
layer */
STATUS_FROM_WIRE,
/* Status came from the server sending status */
STATUS_FROM_SERVER_STATUS,
STATUS_SOURCE_COUNT
} status_source;
@ -153,9 +155,13 @@ struct grpc_call {
gpr_uint8 num_completed_requests;
/* are we currently reading a message? */
gpr_uint8 reading_message;
/* have we bound a pollset yet? */
gpr_uint8 bound_pollset;
/* flags with bits corresponding to write states allowing us to determine
what was sent */
gpr_uint16 last_send_contains;
/* cancel with this status on the next outgoing transport op */
grpc_status_code cancel_with_status;
/* Active ioreqs.
request_set and request_data contain one element per active ioreq
@ -252,8 +258,10 @@ static void execute_op(grpc_call *call, grpc_transport_op *op);
static void recv_metadata(grpc_call *call, grpc_metadata_batch *metadata);
static void finish_read_ops(grpc_call *call);
static grpc_call_error cancel_with_status(grpc_call *c, grpc_status_code status,
const char *description,
gpr_uint8 locked);
const char *description);
static void lock(grpc_call *call);
static void unlock(grpc_call *call);
grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq,
const void *server_transport_data,
@ -270,6 +278,9 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq,
gpr_mu_init(&call->mu);
call->channel = channel;
call->cq = cq;
if (cq) {
GRPC_CQ_INTERNAL_REF(cq, "bind");
}
call->is_client = server_transport_data == NULL;
for (i = 0; i < GRPC_IOREQ_OP_COUNT; i++) {
call->request_set[i] = REQSET_EMPTY;
@ -286,7 +297,7 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq,
}
call->send_initial_metadata_count = add_initial_metadata_count;
call->send_deadline = send_deadline;
grpc_channel_internal_ref(channel);
GRPC_CHANNEL_INTERNAL_REF(channel, "call");
call->metadata_context = grpc_channel_get_metadata_context(channel);
grpc_sopb_init(&call->send_ops);
grpc_sopb_init(&call->recv_ops);
@ -316,7 +327,12 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq,
void grpc_call_set_completion_queue(grpc_call *call,
grpc_completion_queue *cq) {
lock(call);
call->cq = cq;
if (cq) {
GRPC_CQ_INTERNAL_REF(cq, "bind");
}
unlock(call);
}
grpc_completion_queue *grpc_call_get_completion_queue(grpc_call *call) {
@ -337,7 +353,7 @@ static void destroy_call(void *call, int ignored_success) {
size_t i;
grpc_call *c = call;
grpc_call_stack_destroy(CALL_STACK_FROM_CALL(c));
grpc_channel_internal_unref(c->channel);
GRPC_CHANNEL_INTERNAL_UNREF(c->channel, "call");
gpr_mu_destroy(&c->mu);
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
if (c->status[i].details) {
@ -363,6 +379,9 @@ static void destroy_call(void *call, int ignored_success) {
grpc_sopb_destroy(&c->recv_ops);
grpc_bbq_destroy(&c->incoming_queue);
gpr_slice_buffer_destroy(&c->incoming_message);
if (c->cq) {
GRPC_CQ_INTERNAL_UNREF(c->cq, "bind");
}
gpr_free(c);
}
@ -387,6 +406,8 @@ void grpc_call_internal_unref(grpc_call *c, int allow_immediate_deletion) {
static void set_status_code(grpc_call *call, status_source source,
gpr_uint32 status) {
if (call->status[source].is_set) return;
call->status[source].is_set = 1;
call->status[source].code = status;
@ -420,6 +441,7 @@ static void lock(grpc_call *call) { gpr_mu_lock(&call->mu); }
static int need_more_data(grpc_call *call) {
if (call->read_state == READ_STATE_STREAM_CLOSED) return 0;
/* TODO(ctiller): this needs some serious cleanup */
return is_op_live(call, GRPC_IOREQ_RECV_INITIAL_METADATA) ||
(is_op_live(call, GRPC_IOREQ_RECV_MESSAGE) &&
grpc_bbq_empty(&call->incoming_queue)) ||
@ -428,7 +450,8 @@ static int need_more_data(grpc_call *call) {
is_op_live(call, GRPC_IOREQ_RECV_STATUS_DETAILS) ||
(is_op_live(call, GRPC_IOREQ_RECV_CLOSE) &&
grpc_bbq_empty(&call->incoming_queue)) ||
(call->write_state == WRITE_STATE_INITIAL && !call->is_client);
(call->write_state == WRITE_STATE_INITIAL && !call->is_client) ||
(call->cancel_with_status != GRPC_STATUS_OK);
}
static void unlock(grpc_call *call) {
@ -440,6 +463,10 @@ static void unlock(grpc_call *call) {
memset(&op, 0, sizeof(op));
op.cancel_with_status = call->cancel_with_status;
start_op = op.cancel_with_status != GRPC_STATUS_OK;
call->cancel_with_status = GRPC_STATUS_OK; /* reset */
if (!call->receiving && need_more_data(call)) {
op.recv_ops = &call->recv_ops;
op.recv_state = &call->recv_state;
@ -458,6 +485,12 @@ static void unlock(grpc_call *call) {
}
}
if (!call->bound_pollset && call->cq && (!call->is_client || start_op)) {
call->bound_pollset = 1;
op.bind_pollset = grpc_cq_pollset(call->cq);
start_op = 1;
}
if (!call->completing && call->num_completed_requests != 0) {
completing_requests = call->num_completed_requests;
memcpy(completed_requests, call->completed_requests,
@ -560,10 +593,18 @@ static void finish_live_ioreq_op(grpc_call *call, grpc_ioreq_op op,
call->write_state = WRITE_STATE_WRITE_CLOSED;
}
break;
case GRPC_IOREQ_SEND_STATUS:
if (call->request_data[GRPC_IOREQ_SEND_STATUS].send_status.details !=
NULL) {
grpc_mdstr_unref(
call->request_data[GRPC_IOREQ_SEND_STATUS].send_status.details);
call->request_data[GRPC_IOREQ_SEND_STATUS].send_status.details =
NULL;
}
break;
case GRPC_IOREQ_RECV_CLOSE:
case GRPC_IOREQ_SEND_INITIAL_METADATA:
case GRPC_IOREQ_SEND_TRAILING_METADATA:
case GRPC_IOREQ_SEND_STATUS:
case GRPC_IOREQ_SEND_CLOSE:
break;
case GRPC_IOREQ_RECV_STATUS:
@ -673,7 +714,7 @@ static int begin_message(grpc_call *call, grpc_begin_message msg) {
gpr_asprintf(
&message, "Message terminated early; read %d bytes, expected %d",
(int)call->incoming_message.length, (int)call->incoming_message_length);
cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT, message, 1);
cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT, message);
gpr_free(message);
return 0;
}
@ -686,7 +727,7 @@ static int begin_message(grpc_call *call, grpc_begin_message msg) {
gpr_asprintf(
&message, "Invalid compression algorithm (%s) for compressed message.",
grpc_compression_algorithm_name(call->compression_algorithm));
cancel_with_status(call, GRPC_STATUS_FAILED_PRECONDITION, message, 1);
cancel_with_status(call, GRPC_STATUS_FAILED_PRECONDITION, message);
}
/* stash away parameters, and prepare for incoming slices */
if (msg.length > grpc_channel_get_max_message_length(call->channel)) {
@ -695,7 +736,7 @@ static int begin_message(grpc_call *call, grpc_begin_message msg) {
&message,
"Maximum message length of %d exceeded by a message of length %d",
grpc_channel_get_max_message_length(call->channel), msg.length);
cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT, message, 1);
cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT, message);
gpr_free(message);
return 0;
} else if (msg.length > 0) {
@ -717,7 +758,7 @@ static int add_slice_to_message(grpc_call *call, gpr_slice slice) {
/* we have to be reading a message to know what to do here */
if (!call->reading_message) {
cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT,
"Received payload data while not reading a message", 1);
"Received payload data while not reading a message");
return 0;
}
/* append the slice to the incoming buffer */
@ -728,7 +769,7 @@ static int add_slice_to_message(grpc_call *call, gpr_slice slice) {
gpr_asprintf(
&message, "Receiving message overflow; read %d bytes, expected %d",
(int)call->incoming_message.length, (int)call->incoming_message_length);
cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT, message, 1);
cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT, message);
gpr_free(message);
return 0;
} else if (call->incoming_message.length == call->incoming_message_length) {
@ -870,7 +911,6 @@ static int fill_send_ops(grpc_call *call, grpc_transport_op *op) {
}
grpc_sopb_add_metadata(&call->send_ops, mdb);
op->send_ops = &call->send_ops;
op->bind_pollset = grpc_cq_pollset(call->cq);
call->last_send_contains |= 1 << GRPC_IOREQ_SEND_INITIAL_METADATA;
call->send_initial_metadata_count = 0;
/* fall through intended */
@ -909,8 +949,9 @@ static int fill_send_ops(grpc_call *call, grpc_transport_op *op) {
call->metadata_context,
grpc_mdstr_ref(
grpc_channel_get_message_string(call->channel)),
grpc_mdstr_from_string(call->metadata_context,
data.send_status.details)));
data.send_status.details));
call->request_data[GRPC_IOREQ_SEND_STATUS].send_status.details =
NULL;
}
grpc_sopb_add_metadata(&call->send_ops, mdb);
}
@ -1010,6 +1051,14 @@ static grpc_call_error start_ioreq(grpc_call *call, const grpc_ioreq *reqs,
GRPC_CALL_ERROR_INVALID_METADATA);
}
}
if (op == GRPC_IOREQ_SEND_STATUS) {
set_status_code(call, STATUS_FROM_SERVER_STATUS,
reqs[i].data.send_status.code);
if (reqs[i].data.send_status.details) {
set_status_details(call, STATUS_FROM_SERVER_STATUS,
grpc_mdstr_ref(reqs[i].data.send_status.details));
}
}
have_ops |= 1u << op;
call->request_data[op] = data;
@ -1060,35 +1109,43 @@ grpc_call_error grpc_call_cancel(grpc_call *call) {
grpc_call_error grpc_call_cancel_with_status(grpc_call *c,
grpc_status_code status,
const char *description) {
return cancel_with_status(c, status, description, 0);
grpc_call_error r;
lock(c);
r = cancel_with_status(c, status, description);
unlock(c);
return r;
}
static grpc_call_error cancel_with_status(grpc_call *c, grpc_status_code status,
const char *description,
gpr_uint8 locked) {
grpc_transport_op op;
const char *description) {
grpc_mdstr *details =
description ? grpc_mdstr_from_string(c->metadata_context, description)
: NULL;
memset(&op, 0, sizeof(op));
op.cancel_with_status = status;
if (locked == 0) {
lock(c);
}
GPR_ASSERT(status != GRPC_STATUS_OK);
set_status_code(c, STATUS_FROM_API_OVERRIDE, status);
set_status_details(c, STATUS_FROM_API_OVERRIDE, details);
if (locked == 0) {
unlock(c);
}
execute_op(c, &op);
c->cancel_with_status = status;
return GRPC_CALL_OK;
}
static void finished_loose_op(void *call, int success_ignored) {
GRPC_CALL_INTERNAL_UNREF(call, "loose-op", 0);
}
static void execute_op(grpc_call *call, grpc_transport_op *op) {
grpc_call_element *elem;
GPR_ASSERT(op->on_consumed == NULL);
if (op->cancel_with_status != GRPC_STATUS_OK || op->bind_pollset) {
GRPC_CALL_INTERNAL_REF(call, "loose-op");
op->on_consumed = finished_loose_op;
op->on_consumed_user_data = call;
}
elem = CALL_ELEM_FROM_CALL(call, 0);
op->context = call->context;
elem->filter->start_transport_op(elem, op);
@ -1101,12 +1158,10 @@ grpc_call *grpc_call_from_top_element(grpc_call_element *elem) {
static void call_alarm(void *arg, int success) {
grpc_call *call = arg;
if (success) {
if (call->is_client) {
cancel_with_status(call, GRPC_STATUS_DEADLINE_EXCEEDED,
"Deadline Exceeded", 0);
} else {
grpc_call_cancel(call);
}
lock(call);
cancel_with_status(call, GRPC_STATUS_DEADLINE_EXCEEDED,
"Deadline Exceeded");
unlock(call);
}
GRPC_CALL_INTERNAL_UNREF(call, "alarm", 1);
}
@ -1156,8 +1211,16 @@ static gpr_uint32 decode_compression(grpc_mdelem *md) {
if (user_data) {
clevel = ((grpc_compression_level)(gpr_intptr)user_data) - COMPRESS_OFFSET;
} else {
GPR_ASSERT(sizeof(clevel) == GPR_SLICE_LENGTH(md->value->slice));
memcpy(&clevel, GPR_SLICE_START_PTR(md->value->slice), sizeof(clevel));
gpr_uint32 parsed_clevel_bytes;
if (gpr_parse_bytes_to_uint32(grpc_mdstr_as_c_string(md->value),
GPR_SLICE_LENGTH(md->value->slice),
&parsed_clevel_bytes)) {
/* the following cast is safe, as a gpr_uint32 should be able to hold all
* possible values of the grpc_compression_level enum */
clevel = (grpc_compression_level) parsed_clevel_bytes;
} else {
clevel = GRPC_COMPRESS_LEVEL_NONE; /* could not parse, no compression */
}
grpc_mdelem_set_user_data(md, destroy_compression,
(void *)(gpr_intptr)(clevel + COMPRESS_OFFSET));
}
@ -1286,7 +1349,7 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
req->flags = op->flags;
break;
case GRPC_OP_SEND_MESSAGE:
if (!are_write_flags_valid(op->flags)){
if (!are_write_flags_valid(op->flags)) {
return GRPC_CALL_ERROR_INVALID_FLAGS;
}
req = &reqs[out++];
@ -1321,7 +1384,11 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
req->op = GRPC_IOREQ_SEND_STATUS;
req->data.send_status.code = op->data.send_status_from_server.status;
req->data.send_status.details =
op->data.send_status_from_server.status_details;
op->data.send_status_from_server.status_details != NULL
? grpc_mdstr_from_string(
call->metadata_context,
op->data.send_status_from_server.status_details)
: NULL;
req = &reqs[out++];
req->op = GRPC_IOREQ_SEND_CLOSE;
break;

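The call.c changes move cancellation onto the deferred path: cancel_with_status now only records the status on the call under the lock, and unlock() converts any pending cancel into a transport op, lazily binding the completion queue's pollset the first time an op actually starts. The locked parameter disappears because callers take the lock themselves. A minimal sketch of that lock-then-flush shape (names and types are simplified stand-ins for grpc_call, not the real structures):

    #include <pthread.h>

    typedef struct {
      pthread_mutex_t mu;
      int cancel_with_status; /* 0 plays the role of GRPC_STATUS_OK */
      int bound_pollset;
    } call;

    /* stand-in for execute_op(): must be entered without c->mu held */
    static void start_transport_op(call *c, int cancel_status, int bind) {
      (void)c; (void)cancel_status; (void)bind;
    }

    static void lock(call *c) { pthread_mutex_lock(&c->mu); }

    static void unlock(call *c) {
      int cancel = c->cancel_with_status;
      int bind = 0;
      c->cancel_with_status = 0; /* reset: this flush now owns the op */
      if (cancel && !c->bound_pollset) {
        c->bound_pollset = 1; /* bind lazily, once an op really starts */
        bind = 1;
      }
      pthread_mutex_unlock(&c->mu);
      if (cancel || bind) start_transport_op(c, cancel, bind);
    }

    /* public cancel: record the status under the lock and let unlock()
       turn it into a single transport op */
    static void cancel_with_status(call *c, int status) {
      lock(c);
      c->cancel_with_status = status;
      unlock(c);
    }

    int main(void) {
      call c;
      pthread_mutex_init(&c.mu, NULL);
      c.cancel_with_status = 0;
      c.bound_pollset = 0;
      cancel_with_status(&c, 4); /* 4 == GRPC_STATUS_DEADLINE_EXCEEDED */
      return 0;
    }
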
@ -72,14 +72,14 @@ typedef union {
grpc_byte_buffer *send_message;
struct {
grpc_status_code code;
const char *details;
grpc_mdstr *details;
} send_status;
} grpc_ioreq_data;
typedef struct {
grpc_ioreq_op op;
grpc_ioreq_data data;
gpr_uint32 flags; /**< A copy of the write flags from grpc_op */
gpr_uint32 flags; /**< A copy of the write flags from grpc_op */
} grpc_ioreq;
typedef void (*grpc_ioreq_completion_func)(grpc_call *call, int success,
@ -96,8 +96,10 @@ grpc_completion_queue *grpc_call_get_completion_queue(grpc_call *call);
#ifdef GRPC_CALL_REF_COUNT_DEBUG
void grpc_call_internal_ref(grpc_call *call, const char *reason);
void grpc_call_internal_unref(grpc_call *call, const char *reason, int allow_immediate_deletion);
#define GRPC_CALL_INTERNAL_REF(call, reason) grpc_call_internal_ref(call, reason)
void grpc_call_internal_unref(grpc_call *call, const char *reason,
int allow_immediate_deletion);
#define GRPC_CALL_INTERNAL_REF(call, reason) \
grpc_call_internal_ref(call, reason)
#define GRPC_CALL_INTERNAL_UNREF(call, reason, allow_immediate_deletion) \
grpc_call_internal_unref(call, reason, allow_immediate_deletion)
#else
@ -125,8 +127,7 @@ void grpc_call_log_batch(char *file, int line, gpr_log_severity severity,
void grpc_server_log_request_call(char *file, int line,
gpr_log_severity severity,
grpc_server *server,
grpc_call **call,
grpc_server *server, grpc_call **call,
grpc_call_details *details,
grpc_metadata_array *initial_metadata,
grpc_completion_queue *cq_bound_to_call,
@ -135,16 +136,20 @@ void grpc_server_log_request_call(char *file, int line,
/* Set a context pointer.
No thread safety guarantees are made wrt this value. */
void grpc_call_context_set(grpc_call *call, grpc_context_index elem, void *value,
void (*destroy)(void *value));
void grpc_call_context_set(grpc_call *call, grpc_context_index elem,
void *value, void (*destroy)(void *value));
/* Get a context pointer. */
void *grpc_call_context_get(grpc_call *call, grpc_context_index elem);
#define GRPC_CALL_LOG_BATCH(sev, call, ops, nops, tag) \
if (grpc_trace_batch) grpc_call_log_batch(sev, call, ops, nops, tag)
#define GRPC_SERVER_LOG_REQUEST_CALL(sev, server, call, details, initial_metadata, cq_bound_to_call, cq_for_notifications, tag) \
if (grpc_trace_batch) grpc_server_log_request_call(sev, server, call, details, initial_metadata, cq_bound_to_call, cq_for_notifications, tag)
#define GRPC_SERVER_LOG_REQUEST_CALL(sev, server, call, details, \
initial_metadata, cq_bound_to_call, \
cq_for_notifications, tag) \
if (grpc_trace_batch) \
grpc_server_log_request_call(sev, server, call, details, initial_metadata, \
cq_bound_to_call, cq_for_notifications, tag)
gpr_uint8 grpc_call_is_client(grpc_call *call);

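This header shows the reason-tagged refcount pattern that the commit also applies to channels and completion queues below: under the debug symbol the macro expands to a function that logs who took the reference and why; in release builds the reason argument compiles away entirely. A generic, compilable illustration (obj and OBJ_REF are illustrative names, not gRPC symbols):

    #include <stdio.h>

    typedef struct { int refs; } obj;

    #ifdef REF_COUNT_DEBUG
    static void obj_ref(obj *o, const char *reason) {
      fprintf(stderr, "OBJ %p ref %d -> %d [%s]\n", (void *)o, o->refs,
              o->refs + 1, reason);
      o->refs++;
    }
    #define OBJ_REF(o, reason) obj_ref(o, reason)
    #else
    static void obj_ref(obj *o) { o->refs++; }
    /* the reason string evaporates entirely in release builds */
    #define OBJ_REF(o, reason) obj_ref(o)
    #endif

    int main(void) {
      obj o = {1};
      OBJ_REF(&o, "example"); /* logs only under -DREF_COUNT_DEBUG */
      return o.refs == 2 ? 0 : 1;
    }
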
@ -190,8 +190,14 @@ grpc_call *grpc_channel_create_registered_call(
grpc_mdelem_ref(rc->authority), deadline);
}
void grpc_channel_internal_ref(grpc_channel *channel) {
gpr_ref(&channel->refs);
#ifdef GRPC_CHANNEL_REF_COUNT_DEBUG
void grpc_channel_internal_ref(grpc_channel *c, const char *reason) {
gpr_log(GPR_DEBUG, "CHANNEL: ref %p %d -> %d [%s]", c, c->refs.count,
c->refs.count + 1, reason);
#else
void grpc_channel_internal_ref(grpc_channel *c) {
#endif
gpr_ref(&c->refs);
}
static void destroy_channel(void *p, int ok) {
@ -218,7 +224,13 @@ static void destroy_channel(void *p, int ok) {
gpr_free(channel);
}
#ifdef GRPC_CHANNEL_REF_COUNT_DEBUG
void grpc_channel_internal_unref(grpc_channel *channel, const char *reason) {
gpr_log(GPR_DEBUG, "CHANNEL: unref %p %d -> %d [%s]", channel,
channel->refs.count, channel->refs.count - 1, reason);
#else
void grpc_channel_internal_unref(grpc_channel *channel) {
#endif
if (gpr_unref(&channel->refs)) {
channel->destroy_closure.cb = destroy_channel;
channel->destroy_closure.cb_arg = channel;
@ -242,11 +254,11 @@ void grpc_channel_destroy(grpc_channel *channel) {
op.dir = GRPC_CALL_DOWN;
elem->filter->channel_op(elem, NULL, &op);
grpc_channel_internal_unref(channel);
GRPC_CHANNEL_INTERNAL_UNREF(channel, "channel");
}
void grpc_client_channel_closed(grpc_channel_element *elem) {
grpc_channel_internal_unref(CHANNEL_FROM_TOP_ELEM(elem));
GRPC_CHANNEL_INTERNAL_UNREF(CHANNEL_FROM_TOP_ELEM(elem), "closed");
}
grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel) {

@ -60,7 +60,20 @@ gpr_uint32 grpc_channel_get_max_message_length(grpc_channel *channel);
void grpc_client_channel_closed(grpc_channel_element *elem);
#ifdef GRPC_CHANNEL_REF_COUNT_DEBUG
void grpc_channel_internal_ref(grpc_channel *channel, const char *reason);
void grpc_channel_internal_unref(grpc_channel *channel, const char *reason);
#define GRPC_CHANNEL_INTERNAL_REF(channel, reason) \
grpc_channel_internal_ref(channel, reason)
#define GRPC_CHANNEL_INTERNAL_UNREF(channel, reason) \
grpc_channel_internal_unref(channel, reason)
#else
void grpc_channel_internal_ref(grpc_channel *channel);
void grpc_channel_internal_unref(grpc_channel *channel);
#define GRPC_CHANNEL_INTERNAL_REF(channel, reason) \
grpc_channel_internal_ref(channel)
#define GRPC_CHANNEL_INTERNAL_UNREF(channel, reason) \
grpc_channel_internal_unref(channel)
#endif
#endif /* GRPC_INTERNAL_CORE_SURFACE_CHANNEL_H */

@ -92,7 +92,7 @@ static void done(request *r, int was_successful) {
static void on_connect(void *rp, grpc_endpoint *tcp) {
request *r = rp;
if (!grpc_client_setup_request_should_continue(r->cs_request)) {
if (!grpc_client_setup_request_should_continue(r->cs_request, "on_connect")) {
if (tcp) {
grpc_endpoint_shutdown(tcp);
grpc_endpoint_destroy(tcp);
@ -108,12 +108,12 @@ static void on_connect(void *rp, grpc_endpoint *tcp) {
} else {
return;
}
} else if (grpc_client_setup_cb_begin(r->cs_request)) {
} else if (grpc_client_setup_cb_begin(r->cs_request, "on_connect")) {
grpc_create_chttp2_transport(
r->setup->setup_callback, r->setup->setup_user_data,
grpc_client_setup_get_channel_args(r->cs_request), tcp, NULL, 0,
grpc_client_setup_get_mdctx(r->cs_request), 1);
grpc_client_setup_cb_end(r->cs_request);
grpc_client_setup_cb_end(r->cs_request, "on_connect");
done(r, 1);
return;
} else {
@ -127,9 +127,10 @@ static int maybe_try_next_resolved(request *r) {
if (!r->resolved) return 0;
if (r->resolved_index == r->resolved->naddrs) return 0;
addr = &r->resolved->addrs[r->resolved_index++];
grpc_tcp_client_connect(on_connect, r, (struct sockaddr *)&addr->addr,
addr->len,
grpc_client_setup_request_deadline(r->cs_request));
grpc_tcp_client_connect(
on_connect, r, grpc_client_setup_get_interested_parties(r->cs_request),
(struct sockaddr *)&addr->addr, addr->len,
grpc_client_setup_request_deadline(r->cs_request));
return 1;
}
@ -138,7 +139,8 @@ static void on_resolved(void *rp, grpc_resolved_addresses *resolved) {
request *r = rp;
/* if we're not still the active request, abort */
if (!grpc_client_setup_request_should_continue(r->cs_request)) {
if (!grpc_client_setup_request_should_continue(r->cs_request,
"on_resolved")) {
if (resolved) {
grpc_resolved_addresses_destroy(resolved);
}

@ -59,9 +59,6 @@ typedef struct event {
/* Completion queue structure */
struct grpc_completion_queue {
/* TODO(ctiller): see if this can be removed */
int allow_polling;
/* When refs drops to zero, we are in shutdown mode, and will be destroyable
once all queued events are drained */
gpr_refcount refs;
@ -76,6 +73,7 @@ struct grpc_completion_queue {
event *queue;
/* Fixed size chained hash table of events for pluck() */
event *buckets[NUM_TAG_BUCKETS];
int is_server_cq;
};
grpc_completion_queue *grpc_completion_queue_create(void) {
@ -86,20 +84,31 @@ grpc_completion_queue *grpc_completion_queue_create(void) {
/* One for destroy(), one for pollset_shutdown */
gpr_ref_init(&cc->owning_refs, 2);
grpc_pollset_init(&cc->pollset);
cc->allow_polling = 1;
return cc;
}
#ifdef GRPC_CQ_REF_COUNT_DEBUG
void grpc_cq_internal_ref(grpc_completion_queue *cc, const char *reason) {
gpr_log(GPR_DEBUG, "CQ:%p ref %d -> %d %s", cc, (int)cc->owning_refs.count,
(int)cc->owning_refs.count + 1, reason);
#else
void grpc_cq_internal_ref(grpc_completion_queue *cc) {
#endif
gpr_ref(&cc->owning_refs);
}
static void on_pollset_destroy_done(void *arg) {
grpc_completion_queue *cc = arg;
grpc_cq_internal_unref(cc);
GRPC_CQ_INTERNAL_UNREF(cc, "pollset_destroy");
}
#ifdef GRPC_CQ_REF_COUNT_DEBUG
void grpc_cq_internal_unref(grpc_completion_queue *cc, const char *reason) {
gpr_log(GPR_DEBUG, "CQ:%p unref %d -> %d %s", cc, (int)cc->owning_refs.count,
(int)cc->owning_refs.count - 1, reason);
#else
void grpc_cq_internal_unref(grpc_completion_queue *cc) {
#endif
if (gpr_unref(&cc->owning_refs)) {
GPR_ASSERT(cc->queue == NULL);
grpc_pollset_destroy(&cc->pollset);
@ -107,10 +116,6 @@ void grpc_cq_internal_unref(grpc_completion_queue *cc) {
}
}
void grpc_completion_queue_dont_poll_test_only(grpc_completion_queue *cc) {
cc->allow_polling = 0;
}
/* Create and append an event to the queue. Returns the event so that its data
members can be filled in.
Requires GRPC_POLLSET_MU(&cc->pollset) locked. */
@ -134,7 +139,6 @@ static event *add_locked(grpc_completion_queue *cc, grpc_completion_type type,
ev->bucket_prev = cc->buckets[bucket]->bucket_prev;
ev->bucket_next->bucket_prev = ev->bucket_prev->bucket_next = ev;
}
gpr_cv_broadcast(GRPC_POLLSET_CV(&cc->pollset));
grpc_pollset_kick(&cc->pollset);
return ev;
}
@ -157,7 +161,6 @@ void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, grpc_call *call,
GPR_ASSERT(!cc->shutdown);
GPR_ASSERT(cc->shutdown_called);
cc->shutdown = 1;
gpr_cv_broadcast(GRPC_POLLSET_CV(&cc->pollset));
shutdown = 1;
}
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
@ -180,7 +183,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
event *ev = NULL;
grpc_event ret;
grpc_cq_internal_ref(cc);
GRPC_CQ_INTERNAL_REF(cc, "next");
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
for (;;) {
if (cc->queue != NULL) {
@ -207,16 +210,12 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
ev = create_shutdown_event();
break;
}
if (cc->allow_polling && grpc_pollset_work(&cc->pollset, deadline)) {
continue;
}
if (gpr_cv_wait(GRPC_POLLSET_CV(&cc->pollset),
GRPC_POLLSET_MU(&cc->pollset), deadline)) {
if (!grpc_pollset_work(&cc->pollset, deadline)) {
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
grpc_cq_internal_unref(cc);
GRPC_CQ_INTERNAL_UNREF(cc, "next");
return ret;
}
}
@ -224,7 +223,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
ret = ev->base;
gpr_free(ev);
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
grpc_cq_internal_unref(cc);
GRPC_CQ_INTERNAL_UNREF(cc, "next");
return ret;
}
@ -262,7 +261,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
event *ev = NULL;
grpc_event ret;
grpc_cq_internal_ref(cc);
GRPC_CQ_INTERNAL_REF(cc, "pluck");
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
for (;;) {
if ((ev = pluck_event(cc, tag))) {
@ -272,16 +271,12 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
ev = create_shutdown_event();
break;
}
if (cc->allow_polling && grpc_pollset_work(&cc->pollset, deadline)) {
continue;
}
if (gpr_cv_wait(GRPC_POLLSET_CV(&cc->pollset),
GRPC_POLLSET_MU(&cc->pollset), deadline)) {
if (!grpc_pollset_work(&cc->pollset, deadline)) {
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
grpc_cq_internal_unref(cc);
GRPC_CQ_INTERNAL_UNREF(cc, "pluck");
return ret;
}
}
@ -289,7 +284,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
ret = ev->base;
gpr_free(ev);
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
grpc_cq_internal_unref(cc);
GRPC_CQ_INTERNAL_UNREF(cc, "pluck");
return ret;
}
@ -297,6 +292,10 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
to zero here, then enter shutdown mode and wake up any waiters */
void grpc_completion_queue_shutdown(grpc_completion_queue *cc) {
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
if (cc->shutdown_called) {
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
return;
}
cc->shutdown_called = 1;
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
@ -304,14 +303,14 @@ void grpc_completion_queue_shutdown(grpc_completion_queue *cc) {
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
GPR_ASSERT(!cc->shutdown);
cc->shutdown = 1;
gpr_cv_broadcast(GRPC_POLLSET_CV(&cc->pollset));
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
grpc_pollset_shutdown(&cc->pollset, on_pollset_destroy_done, cc);
}
}
void grpc_completion_queue_destroy(grpc_completion_queue *cc) {
grpc_cq_internal_unref(cc);
grpc_completion_queue_shutdown(cc);
GRPC_CQ_INTERNAL_UNREF(cc, "destroy");
}
grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc) {
@ -325,3 +324,7 @@ void grpc_cq_hack_spin_pollset(grpc_completion_queue *cc) {
gpr_time_add(gpr_now(), gpr_time_from_millis(100)));
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}
void grpc_cq_mark_server_cq(grpc_completion_queue *cc) { cc->is_server_cq = 1; }
int grpc_cq_is_server_cq(grpc_completion_queue *cc) { return cc->is_server_cq; }

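The completion queue drops its condvar/polling hybrid: allow_polling and the gpr_cv_wait fallback are gone, and every waiter now drives grpc_pollset_work until an event is queued, shutdown is published, or the deadline expires. A simplified sketch of the resulting next() loop shape (the types and the pollset_work stub are stand-ins, not the real pollset API):

    #include <stdio.h>

    typedef struct { int has_event; int shutdown; } cq;

    /* stand-in for grpc_pollset_work(): returns 0 once the deadline passes */
    static int pollset_work(cq *cc, int deadline) {
      (void)cc; (void)deadline;
      return 0; /* stub: pretend the deadline expired immediately */
    }

    typedef enum { EV_TIMEOUT, EV_SHUTDOWN, EV_GOT_EVENT } ev_type;

    static ev_type next(cq *cc, int deadline) {
      for (;;) {
        if (cc->has_event) return EV_GOT_EVENT; /* dequeue under the lock */
        if (cc->shutdown) return EV_SHUTDOWN;
        if (!pollset_work(cc, deadline)) return EV_TIMEOUT;
        /* pollset_work ran callbacks that may have queued an event or
           finished shutdown, so loop and re-check */
      }
    }

    int main(void) {
      cq c = {0, 0};
      printf("%d\n", next(&c, 0)); /* 0 == EV_TIMEOUT with the stub */
      return 0;
    }
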
@ -39,8 +39,17 @@
#include "src/core/iomgr/pollset.h"
#include <grpc/grpc.h>
#ifdef GRPC_CQ_REF_COUNT_DEBUG
void grpc_cq_internal_ref(grpc_completion_queue *cc, const char *reason);
void grpc_cq_internal_unref(grpc_completion_queue *cc, const char *reason);
#define GRPC_CQ_INTERNAL_REF(cc, reason) grpc_cq_internal_ref(cc, reason)
#define GRPC_CQ_INTERNAL_UNREF(cc, reason) grpc_cq_internal_unref(cc, reason)
#else
void grpc_cq_internal_ref(grpc_completion_queue *cc);
void grpc_cq_internal_unref(grpc_completion_queue *cc);
#define GRPC_CQ_INTERNAL_REF(cc, reason) grpc_cq_internal_ref(cc)
#define GRPC_CQ_INTERNAL_UNREF(cc, reason) grpc_cq_internal_unref(cc)
#endif
/* Flag that an operation is beginning: the completion channel will not finish
   shutdown until a corresponding grpc_cq_end_* call is made */
@ -50,11 +59,11 @@ void grpc_cq_begin_op(grpc_completion_queue *cc, grpc_call *call);
void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, grpc_call *call,
int success);
/* disable polling for some tests */
void grpc_completion_queue_dont_poll_test_only(grpc_completion_queue *cc);
grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc);
void grpc_cq_hack_spin_pollset(grpc_completion_queue *cc);
void grpc_cq_mark_server_cq(grpc_completion_queue *cc);
int grpc_cq_is_server_cq(grpc_completion_queue *cc);
#endif /* GRPC_INTERNAL_CORE_SURFACE_COMPLETION_QUEUE_H */

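Completion queues used for server notifications must now be registered up front: grpc_server_register_completion_queue marks the queue via grpc_cq_mark_server_cq, and the request-call entry points in server.c below refuse unmarked queues with GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE. A fragment showing the required ordering (assuming this era's public API; server creation and teardown are elided):

    #include <grpc/grpc.h>

    /* required ordering: register every notification CQ before starting
       the server; registration is what sets the server-cq mark */
    static void wire_up(grpc_server *server) {
      grpc_completion_queue *cq = grpc_completion_queue_create();
      grpc_server_register_completion_queue(server, cq);
      grpc_server_start(server);
      /* grpc_server_request_call(..., cq, tag) now passes the
         grpc_cq_is_server_cq() check; an unregistered queue would be
         rejected with GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE */
    }
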
@ -77,6 +77,9 @@ static void lame_start_transport_op(grpc_call_element *elem,
*op->recv_state = GRPC_STREAM_CLOSED;
op->on_done_recv(op->recv_user_data, 1);
}
if (op->on_consumed) {
op->on_consumed(op->on_consumed_user_data, 0);
}
}
static void channel_op(grpc_channel_element *elem,
@ -115,9 +118,9 @@ static void init_channel_elem(grpc_channel_element *elem,
static void destroy_channel_elem(grpc_channel_element *elem) {}
static const grpc_channel_filter lame_filter = {
lame_start_transport_op, channel_op, sizeof(call_data), init_call_elem,
destroy_call_elem, sizeof(channel_data), init_channel_elem,
destroy_channel_elem, "lame-client",
lame_start_transport_op, channel_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, "lame-client",
};
grpc_channel *grpc_lame_client_channel_create(void) {

@ -98,12 +98,13 @@ static void on_secure_transport_setup_done(void *rp,
if (status != GRPC_SECURITY_OK) {
gpr_log(GPR_ERROR, "Secure transport setup failed with error %d.", status);
done(r, 0);
} else if (grpc_client_setup_cb_begin(r->cs_request)) {
} else if (grpc_client_setup_cb_begin(r->cs_request,
"on_secure_transport_setup_done")) {
grpc_create_chttp2_transport(
r->setup->setup_callback, r->setup->setup_user_data,
grpc_client_setup_get_channel_args(r->cs_request), secure_endpoint,
NULL, 0, grpc_client_setup_get_mdctx(r->cs_request), 1);
grpc_client_setup_cb_end(r->cs_request);
grpc_client_setup_cb_end(r->cs_request, "on_secure_transport_setup_done");
done(r, 1);
} else {
done(r, 0);
@ -114,7 +115,8 @@ static void on_secure_transport_setup_done(void *rp,
static void on_connect(void *rp, grpc_endpoint *tcp) {
request *r = rp;
if (!grpc_client_setup_request_should_continue(r->cs_request)) {
if (!grpc_client_setup_request_should_continue(r->cs_request,
"on_connect.secure")) {
if (tcp) {
grpc_endpoint_shutdown(tcp);
grpc_endpoint_destroy(tcp);
@ -142,9 +144,10 @@ static int maybe_try_next_resolved(request *r) {
if (!r->resolved) return 0;
if (r->resolved_index == r->resolved->naddrs) return 0;
addr = &r->resolved->addrs[r->resolved_index++];
grpc_tcp_client_connect(on_connect, r, (struct sockaddr *)&addr->addr,
addr->len,
grpc_client_setup_request_deadline(r->cs_request));
grpc_tcp_client_connect(
on_connect, r, grpc_client_setup_get_interested_parties(r->cs_request),
(struct sockaddr *)&addr->addr, addr->len,
grpc_client_setup_request_deadline(r->cs_request));
return 1;
}
@ -153,7 +156,8 @@ static void on_resolved(void *rp, grpc_resolved_addresses *resolved) {
request *r = rp;
/* if we're not still the active request, abort */
if (!grpc_client_setup_request_should_continue(r->cs_request)) {
if (!grpc_client_setup_request_should_continue(r->cs_request,
"on_resolved.secure")) {
if (resolved) {
grpc_resolved_addresses_destroy(resolved);
}

@ -114,6 +114,7 @@ typedef struct channel_registered_method {
struct channel_data {
grpc_server *server;
size_t num_calls;
grpc_channel *channel;
grpc_mdstr *path_key;
grpc_mdstr *authority_key;
@ -123,7 +124,6 @@ struct channel_data {
channel_registered_method *registered_methods;
gpr_uint32 registered_method_slots;
gpr_uint32 registered_method_max_probes;
grpc_iomgr_closure finish_shutdown_channel_closure;
grpc_iomgr_closure finish_destroy_channel_closure;
};
@ -141,7 +141,15 @@ struct grpc_server {
grpc_pollset **pollsets;
size_t cq_count;
gpr_mu mu;
/* The two following mutexes control access to server-state
mu_global controls access to non-call-related state (e.g., channel state)
mu_call controls access to call-related state (e.g., the call lists)
If they are ever required to be nested, you must lock mu_global
before mu_call. This is currently used in shutdown processing
(grpc_server_shutdown_and_notify and maybe_finish_shutdown) */
gpr_mu mu_global; /* mutex for server and channel state */
gpr_mu mu_call; /* mutex for call-specific state */
registered_method *registered_methods;
requested_call_array requested_calls;
@ -198,6 +206,11 @@ struct call_data {
static void begin_call(grpc_server *server, call_data *calld,
requested_call *rc);
static void fail_call(grpc_server *server, requested_call *rc);
static void shutdown_channel(channel_data *chand, int send_goaway,
int send_disconnect);
/* Before calling maybe_finish_shutdown, we must hold mu_global and not
hold mu_call */
static void maybe_finish_shutdown(grpc_server *server);
static int call_list_join(call_data **root, call_data *call, call_list list) {
GPR_ASSERT(!call->root[list]);
@ -270,7 +283,8 @@ static void server_delete(grpc_server *server) {
registered_method *rm;
size_t i;
grpc_channel_args_destroy(server->channel_args);
gpr_mu_destroy(&server->mu);
gpr_mu_destroy(&server->mu_global);
gpr_mu_destroy(&server->mu_call);
gpr_free(server->channel_filters);
requested_call_array_destroy(&server->requested_calls);
while ((rm = server->registered_methods) != NULL) {
@ -281,7 +295,7 @@ static void server_delete(grpc_server *server) {
gpr_free(rm);
}
for (i = 0; i < server->cq_count; i++) {
grpc_cq_internal_unref(server->cqs[i]);
GRPC_CQ_INTERNAL_UNREF(server->cqs[i], "server");
}
gpr_free(server->cqs);
gpr_free(server->pollsets);
@ -308,7 +322,7 @@ static void orphan_channel(channel_data *chand) {
static void finish_destroy_channel(void *cd, int success) {
channel_data *chand = cd;
grpc_server *server = chand->server;
grpc_channel_internal_unref(chand->channel);
GRPC_CHANNEL_INTERNAL_UNREF(chand->channel, "server");
server_unref(server);
}
@ -317,6 +331,7 @@ static void destroy_channel(channel_data *chand) {
GPR_ASSERT(chand->server != NULL);
orphan_channel(chand);
server_ref(chand->server);
maybe_finish_shutdown(chand->server);
chand->finish_destroy_channel_closure.cb = finish_destroy_channel;
chand->finish_destroy_channel_closure.cb_arg = chand;
grpc_iomgr_add_callback(&chand->finish_destroy_channel_closure);
@ -331,11 +346,11 @@ static void finish_start_new_rpc_and_unlock(grpc_server *server,
if (array->count == 0) {
calld->state = PENDING;
call_list_join(pending_root, calld, PENDING_START);
gpr_mu_unlock(&server->mu);
gpr_mu_unlock(&server->mu_call);
} else {
rc = array->calls[--array->count];
calld->state = ACTIVATED;
gpr_mu_unlock(&server->mu);
gpr_mu_unlock(&server->mu_call);
begin_call(server, calld, &rc);
}
}
@ -348,7 +363,7 @@ static void start_new_rpc(grpc_call_element *elem) {
gpr_uint32 hash;
channel_registered_method *rm;
gpr_mu_lock(&server->mu);
gpr_mu_lock(&server->mu_call);
if (chand->registered_methods && calld->path && calld->host) {
/* TODO(ctiller): unify these two searches */
/* check for an exact match with host */
@ -397,12 +412,33 @@ static int num_listeners(grpc_server *server) {
static void maybe_finish_shutdown(grpc_server *server) {
size_t i;
if (server->shutdown && !server->shutdown_published && server->lists[ALL_CALLS] == NULL && server->listeners_destroyed == num_listeners(server)) {
server->shutdown_published = 1;
for (i = 0; i < server->num_shutdown_tags; i++) {
grpc_cq_end_op(server->shutdown_tags[i].cq, server->shutdown_tags[i].tag,
NULL, 1);
}
if (!server->shutdown || server->shutdown_published) {
return;
}
gpr_mu_lock(&server->mu_call);
if (server->lists[ALL_CALLS] != NULL) {
gpr_log(GPR_DEBUG,
"Waiting for all calls to finish before destroying server");
gpr_mu_unlock(&server->mu_call);
return;
}
gpr_mu_unlock(&server->mu_call);
if (server->root_channel_data.next != &server->root_channel_data) {
gpr_log(GPR_DEBUG,
"Waiting for all channels to close before destroying server");
return;
}
if (server->listeners_destroyed < num_listeners(server)) {
gpr_log(GPR_DEBUG, "Waiting for all listeners to be destroyed (@ %d/%d)",
server->listeners_destroyed, num_listeners(server));
return;
}
server->shutdown_published = 1;
for (i = 0; i < server->num_shutdown_tags; i++) {
grpc_cq_end_op(server->shutdown_tags[i].cq, server->shutdown_tags[i].tag,
NULL, 1);
}
}
@ -420,10 +456,19 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
return md;
}
static void decrement_call_count(channel_data *chand) {
chand->num_calls--;
if (0 == chand->num_calls && chand->server->shutdown) {
shutdown_channel(chand, 0, 1);
}
maybe_finish_shutdown(chand->server);
}
static void server_on_recv(void *ptr, int success) {
grpc_call_element *elem = ptr;
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
int remove_res;
if (success && !calld->got_initial_metadata) {
size_t i;
@ -448,16 +493,16 @@ static void server_on_recv(void *ptr, int success) {
case GRPC_STREAM_SEND_CLOSED:
break;
case GRPC_STREAM_RECV_CLOSED:
gpr_mu_lock(&chand->server->mu);
gpr_mu_lock(&chand->server->mu_call);
if (calld->state == NOT_STARTED) {
calld->state = ZOMBIED;
grpc_iomgr_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
grpc_iomgr_add_callback(&calld->kill_zombie_closure);
}
gpr_mu_unlock(&chand->server->mu);
gpr_mu_unlock(&chand->server->mu_call);
break;
case GRPC_STREAM_CLOSED:
gpr_mu_lock(&chand->server->mu);
gpr_mu_lock(&chand->server->mu_call);
if (calld->state == NOT_STARTED) {
calld->state = ZOMBIED;
grpc_iomgr_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
@ -467,12 +512,14 @@ static void server_on_recv(void *ptr, int success) {
calld->state = ZOMBIED;
grpc_iomgr_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
grpc_iomgr_add_callback(&calld->kill_zombie_closure);
}
if (call_list_remove(calld, ALL_CALLS)) {
maybe_finish_shutdown(chand->server);
remove_res = call_list_remove(calld, ALL_CALLS);
gpr_mu_unlock(&chand->server->mu_call);
gpr_mu_lock(&chand->server->mu_global);
if (remove_res) {
decrement_call_count(chand);
}
gpr_mu_unlock(&chand->server->mu);
gpr_mu_unlock(&chand->server->mu_global);
break;
}
@ -515,10 +562,10 @@ static void channel_op(grpc_channel_element *elem,
case GRPC_TRANSPORT_CLOSED:
/* if the transport is closed for a server channel, we destroy the
channel */
gpr_mu_lock(&server->mu);
gpr_mu_lock(&server->mu_global);
server_ref(server);
destroy_channel(chand);
gpr_mu_unlock(&server->mu);
gpr_mu_unlock(&server->mu_global);
server_unref(server);
break;
case GRPC_TRANSPORT_GOAWAY:
@ -531,22 +578,49 @@ static void channel_op(grpc_channel_element *elem,
}
}
static void finish_shutdown_channel(void *cd, int success) {
channel_data *chand = cd;
typedef struct {
channel_data *chand;
int send_goaway;
int send_disconnect;
grpc_iomgr_closure finish_shutdown_channel_closure;
} shutdown_channel_args;
static void finish_shutdown_channel(void *p, int success) {
shutdown_channel_args *sca = p;
grpc_channel_op op;
op.type = GRPC_CHANNEL_DISCONNECT;
op.dir = GRPC_CALL_DOWN;
channel_op(grpc_channel_stack_element(
grpc_channel_get_channel_stack(chand->channel), 0),
NULL, &op);
grpc_channel_internal_unref(chand->channel);
if (sca->send_goaway) {
op.type = GRPC_CHANNEL_GOAWAY;
op.dir = GRPC_CALL_DOWN;
op.data.goaway.status = GRPC_STATUS_OK;
op.data.goaway.message = gpr_slice_from_copied_string("Server shutdown");
channel_op(grpc_channel_stack_element(
grpc_channel_get_channel_stack(sca->chand->channel), 0),
NULL, &op);
}
if (sca->send_disconnect) {
op.type = GRPC_CHANNEL_DISCONNECT;
op.dir = GRPC_CALL_DOWN;
channel_op(grpc_channel_stack_element(
grpc_channel_get_channel_stack(sca->chand->channel), 0),
NULL, &op);
}
GRPC_CHANNEL_INTERNAL_UNREF(sca->chand->channel, "shutdown");
gpr_free(sca);
}
static void shutdown_channel(channel_data *chand) {
grpc_channel_internal_ref(chand->channel);
chand->finish_shutdown_channel_closure.cb = finish_shutdown_channel;
chand->finish_shutdown_channel_closure.cb_arg = chand;
grpc_iomgr_add_callback(&chand->finish_shutdown_channel_closure);
static void shutdown_channel(channel_data *chand, int send_goaway,
int send_disconnect) {
shutdown_channel_args *sca;
GRPC_CHANNEL_INTERNAL_REF(chand->channel, "shutdown");
sca = gpr_malloc(sizeof(shutdown_channel_args));
sca->chand = chand;
sca->send_goaway = send_goaway;
sca->send_disconnect = send_disconnect;
sca->finish_shutdown_channel_closure.cb = finish_shutdown_channel;
sca->finish_shutdown_channel_closure.cb_arg = sca;
grpc_iomgr_add_callback(&sca->finish_shutdown_channel_closure);
}
static void init_call_elem(grpc_call_element *elem,
@ -558,9 +632,13 @@ static void init_call_elem(grpc_call_element *elem,
calld->deadline = gpr_inf_future;
calld->call = grpc_call_from_top_element(elem);
gpr_mu_lock(&chand->server->mu);
gpr_mu_lock(&chand->server->mu_call);
call_list_join(&chand->server->lists[ALL_CALLS], calld, ALL_CALLS);
gpr_mu_unlock(&chand->server->mu);
gpr_mu_unlock(&chand->server->mu_call);
gpr_mu_lock(&chand->server->mu_global);
chand->num_calls++;
gpr_mu_unlock(&chand->server->mu_global);
server_ref(chand->server);
@ -573,14 +651,16 @@ static void destroy_call_elem(grpc_call_element *elem) {
int removed[CALL_LIST_COUNT];
size_t i;
gpr_mu_lock(&chand->server->mu);
gpr_mu_lock(&chand->server->mu_call);
for (i = 0; i < CALL_LIST_COUNT; i++) {
removed[i] = call_list_remove(elem->call_data, i);
}
gpr_mu_unlock(&chand->server->mu_call);
if (removed[ALL_CALLS]) {
maybe_finish_shutdown(chand->server);
gpr_mu_lock(&chand->server->mu_global);
decrement_call_count(chand);
gpr_mu_unlock(&chand->server->mu_global);
}
gpr_mu_unlock(&chand->server->mu);
if (calld->host) {
grpc_mdstr_unref(calld->host);
@ -600,6 +680,7 @@ static void init_channel_elem(grpc_channel_element *elem,
GPR_ASSERT(is_first);
GPR_ASSERT(!is_last);
chand->server = NULL;
chand->num_calls = 0;
chand->channel = NULL;
chand->path_key = grpc_mdstr_from_string(metadata_context, ":path");
chand->authority_key = grpc_mdstr_from_string(metadata_context, ":authority");
@ -622,11 +703,12 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
gpr_free(chand->registered_methods);
}
if (chand->server) {
gpr_mu_lock(&chand->server->mu);
gpr_mu_lock(&chand->server->mu_global);
chand->next->prev = chand->prev;
chand->prev->next = chand->next;
chand->next = chand->prev = chand;
gpr_mu_unlock(&chand->server->mu);
maybe_finish_shutdown(chand->server);
gpr_mu_unlock(&chand->server->mu_global);
grpc_mdstr_unref(chand->path_key);
grpc_mdstr_unref(chand->authority_key);
server_unref(chand->server);
@ -651,7 +733,8 @@ void grpc_server_register_completion_queue(grpc_server *server,
for (i = 0; i < server->cq_count; i++) {
if (server->cqs[i] == cq) return;
}
grpc_cq_internal_ref(cq);
GRPC_CQ_INTERNAL_REF(cq, "server");
grpc_cq_mark_server_cq(cq);
n = server->cq_count++;
server->cqs = gpr_realloc(server->cqs,
server->cq_count * sizeof(grpc_completion_queue *));
@ -672,7 +755,8 @@ grpc_server *grpc_server_create_from_filters(
memset(server, 0, sizeof(grpc_server));
gpr_mu_init(&server->mu);
gpr_mu_init(&server->mu_global);
gpr_mu_init(&server->mu_call);
/* decremented by grpc_server_destroy */
gpr_ref_init(&server->internal_refcount, 1);
@ -713,7 +797,8 @@ void *grpc_server_register_method(grpc_server *server, const char *method,
const char *host) {
registered_method *m;
if (!method) {
gpr_log(GPR_ERROR, "grpc_server_register_method method string cannot be NULL");
gpr_log(GPR_ERROR,
"grpc_server_register_method method string cannot be NULL");
return NULL;
}
for (m = server->registered_methods; m; m = m->next) {
@ -821,11 +906,11 @@ grpc_transport_setup_result grpc_server_setup_transport(
result = grpc_connected_channel_bind_transport(
grpc_channel_get_channel_stack(channel), transport);
gpr_mu_lock(&s->mu);
gpr_mu_lock(&s->mu_global);
chand->next = &s->root_channel_data;
chand->prev = chand->next->prev;
chand->next->prev = chand->prev->next = chand;
gpr_mu_unlock(&s->mu);
gpr_mu_unlock(&s->mu_global);
gpr_free(filters);
@ -836,17 +921,13 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
grpc_completion_queue *cq, void *tag) {
listener *l;
requested_call_array requested_calls;
channel_data **channels;
channel_data *c;
size_t nchannels;
size_t i;
grpc_channel_op op;
grpc_channel_element *elem;
registered_method *rm;
shutdown_tag *sdt;
/* lock, and gather up some stuff to do */
gpr_mu_lock(&server->mu);
gpr_mu_lock(&server->mu_global);
grpc_cq_begin_op(cq, NULL);
server->shutdown_tags =
gpr_realloc(server->shutdown_tags,
@ -855,25 +936,17 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
sdt->tag = tag;
sdt->cq = cq;
if (server->shutdown) {
gpr_mu_unlock(&server->mu);
gpr_mu_unlock(&server->mu_global);
return;
}
nchannels = 0;
for (c = server->root_channel_data.next; c != &server->root_channel_data;
c = c->next) {
nchannels++;
}
channels = gpr_malloc(sizeof(channel_data *) * nchannels);
i = 0;
for (c = server->root_channel_data.next; c != &server->root_channel_data;
c = c->next) {
grpc_channel_internal_ref(c->channel);
channels[i] = c;
i++;
shutdown_channel(c, 1, c->num_calls == 0);
}
/* collect all unregistered then registered calls */
gpr_mu_lock(&server->mu_call);
requested_calls = server->requested_calls;
memset(&server->requested_calls, 0, sizeof(server->requested_calls));
for (rm = server->registered_methods; rm; rm = rm->next) {
@ -892,25 +965,11 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
gpr_free(rm->requested.calls);
memset(&rm->requested, 0, sizeof(rm->requested));
}
gpr_mu_unlock(&server->mu_call);
server->shutdown = 1;
maybe_finish_shutdown(server);
gpr_mu_unlock(&server->mu);
for (i = 0; i < nchannels; i++) {
c = channels[i];
elem = grpc_channel_stack_element(
grpc_channel_get_channel_stack(c->channel), 0);
op.type = GRPC_CHANNEL_GOAWAY;
op.dir = GRPC_CALL_DOWN;
op.data.goaway.status = GRPC_STATUS_OK;
op.data.goaway.message = gpr_slice_from_copied_string("Server shutdown");
elem->filter->channel_op(elem, NULL, &op);
grpc_channel_internal_unref(c->channel);
}
gpr_free(channels);
gpr_mu_unlock(&server->mu_global);
/* terminate all the requested calls */
for (i = 0; i < requested_calls.count; i++) {
@ -926,10 +985,10 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
void grpc_server_listener_destroy_done(void *s) {
grpc_server *server = s;
gpr_mu_lock(&server->mu);
gpr_mu_lock(&server->mu_global);
server->listeners_destroyed++;
maybe_finish_shutdown(server);
gpr_mu_unlock(&server->mu);
gpr_mu_unlock(&server->mu_global);
}
void grpc_server_cancel_all_calls(grpc_server *server) {
@ -940,12 +999,12 @@ void grpc_server_cancel_all_calls(grpc_server *server) {
int is_first = 1;
size_t i;
gpr_mu_lock(&server->mu);
gpr_mu_lock(&server->mu_call);
GPR_ASSERT(server->shutdown);
if (!server->lists[ALL_CALLS]) {
gpr_mu_unlock(&server->mu);
gpr_mu_unlock(&server->mu_call);
return;
}
@ -953,7 +1012,9 @@ void grpc_server_cancel_all_calls(grpc_server *server) {
call_count = 0;
calls = gpr_malloc(sizeof(grpc_call *) * call_capacity);
for (calld = server->lists[ALL_CALLS]; calld != server->lists[ALL_CALLS] || is_first; calld = calld->links[ALL_CALLS].next) {
for (calld = server->lists[ALL_CALLS];
calld != server->lists[ALL_CALLS] || is_first;
calld = calld->links[ALL_CALLS].next) {
if (call_count == call_capacity) {
call_capacity *= 2;
calls = gpr_realloc(calls, sizeof(grpc_call *) * call_capacity);
@ -963,10 +1024,11 @@ void grpc_server_cancel_all_calls(grpc_server *server) {
is_first = 0;
}
gpr_mu_unlock(&server->mu);
gpr_mu_unlock(&server->mu_call);
for (i = 0; i < call_count; i++) {
grpc_call_cancel_with_status(calls[i], GRPC_STATUS_UNAVAILABLE, "Unavailable");
grpc_call_cancel_with_status(calls[i], GRPC_STATUS_UNAVAILABLE,
"Unavailable");
GRPC_CALL_INTERNAL_UNREF(calls[i], "cancel_all", 1);
}
@ -974,11 +1036,9 @@ void grpc_server_cancel_all_calls(grpc_server *server) {
}
void grpc_server_destroy(grpc_server *server) {
channel_data *c;
listener *l;
call_data *calld;
gpr_mu_lock(&server->mu);
gpr_mu_lock(&server->mu_global);
GPR_ASSERT(server->shutdown || !server->listeners);
GPR_ASSERT(server->listeners_destroyed == num_listeners(server));
@ -988,20 +1048,7 @@ void grpc_server_destroy(grpc_server *server) {
gpr_free(l);
}
while ((calld = call_list_remove_head(&server->lists[PENDING_START],
PENDING_START)) != NULL) {
calld->state = ZOMBIED;
grpc_iomgr_closure_init(
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
grpc_iomgr_add_callback(&calld->kill_zombie_closure);
}
for (c = server->root_channel_data.next; c != &server->root_channel_data;
c = c->next) {
shutdown_channel(c);
}
gpr_mu_unlock(&server->mu);
gpr_mu_unlock(&server->mu_global);
server_unref(server);
}
@ -1023,9 +1070,9 @@ static grpc_call_error queue_call_request(grpc_server *server,
requested_call *rc) {
call_data *calld = NULL;
requested_call_array *requested_calls = NULL;
gpr_mu_lock(&server->mu);
gpr_mu_lock(&server->mu_call);
if (server->shutdown) {
gpr_mu_unlock(&server->mu);
gpr_mu_unlock(&server->mu_call);
fail_call(server, rc);
return GRPC_CALL_OK;
}
@ -1044,12 +1091,12 @@ static grpc_call_error queue_call_request(grpc_server *server,
if (calld) {
GPR_ASSERT(calld->state == PENDING);
calld->state = ACTIVATED;
gpr_mu_unlock(&server->mu);
gpr_mu_unlock(&server->mu_call);
begin_call(server, calld, rc);
return GRPC_CALL_OK;
} else {
*requested_call_array_add(requested_calls) = *rc;
gpr_mu_unlock(&server->mu);
gpr_mu_unlock(&server->mu_call);
return GRPC_CALL_OK;
}
}
@ -1063,6 +1110,9 @@ grpc_call_error grpc_server_request_call(
GRPC_SERVER_LOG_REQUEST_CALL(GPR_INFO, server, call, details,
initial_metadata, cq_bound_to_call,
cq_for_notification, tag);
if (!grpc_cq_is_server_cq(cq_for_notification)) {
return GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE;
}
grpc_cq_begin_op(cq_for_notification, NULL);
rc.type = BATCH_CALL;
rc.tag = tag;
@ -1081,6 +1131,9 @@ grpc_call_error grpc_server_request_registered_call(
grpc_completion_queue *cq_for_notification, void *tag) {
requested_call rc;
registered_method *registered_method = rm;
if (!grpc_cq_is_server_cq(cq_for_notification)) {
return GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE;
}
grpc_cq_begin_op(cq_for_notification, NULL);
rc.type = REGISTERED_CALL;
rc.tag = tag;
@ -1187,9 +1240,8 @@ const grpc_channel_args *grpc_server_get_channel_args(grpc_server *server) {
int grpc_server_has_open_connections(grpc_server *server) {
int r;
gpr_mu_lock(&server->mu);
gpr_mu_lock(&server->mu_global);
r = server->root_channel_data.next != &server->root_channel_data;
gpr_mu_unlock(&server->mu);
gpr_mu_unlock(&server->mu_global);
return r;
}

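The big structural change in server.c is the lock split: mu_global guards channel and shutdown state, mu_call guards the call lists, and the documented nesting order is mu_global before mu_call, which is exactly how the reworked maybe_finish_shutdown walks its wait conditions. A reduced sketch of that discipline (pthread stands in for gpr_mu; the struct is illustrative):

    #include <pthread.h>

    typedef struct {
      pthread_mutex_t mu_global; /* channel + shutdown state */
      pthread_mutex_t mu_call;   /* call lists and requested calls */
      int shutdown;
      void *all_calls; /* non-NULL while any call is still alive */
    } server;

    /* caller holds mu_global and must NOT hold mu_call */
    static void maybe_finish_shutdown(server *s) {
      if (!s->shutdown) return;
      pthread_mutex_lock(&s->mu_call); /* global -> call: the legal order */
      if (s->all_calls != NULL) {
        pthread_mutex_unlock(&s->mu_call);
        return; /* still waiting for calls to drain */
      }
      pthread_mutex_unlock(&s->mu_call);
      /* ... channels and listeners checked here, then shutdown is
         published to every registered shutdown tag ... */
    }

    int main(void) {
      server s;
      pthread_mutex_init(&s.mu_global, NULL);
      pthread_mutex_init(&s.mu_call, NULL);
      s.shutdown = 1;
      s.all_calls = NULL;
      pthread_mutex_lock(&s.mu_global);
      maybe_finish_shutdown(&s);
      pthread_mutex_unlock(&s.mu_global);
      return 0;
    }
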
@ -617,14 +617,19 @@ static void destroy_transport(grpc_transport *gt) {
unref_transport(t);
}
static void close_transport_locked(transport *t) {
if (!t->closed) {
t->closed = 1;
if (t->ep) {
grpc_endpoint_shutdown(t->ep);
}
}
}
static void close_transport(grpc_transport *gt) {
transport *t = (transport *)gt;
gpr_mu_lock(&t->mu);
GPR_ASSERT(!t->closed);
t->closed = 1;
if (t->ep) {
grpc_endpoint_shutdown(t->ep);
}
close_transport_locked(t);
gpr_mu_unlock(&t->mu);
}
@ -1001,10 +1006,12 @@ static void finalize_outbuf(transport *t) {
while ((s = stream_list_remove_head(t, WRITING))) {
grpc_chttp2_encode(s->writing_sopb.ops, s->writing_sopb.nops,
s->send_closed != DONT_SEND_CLOSED, s->id, &t->hpack_compressor, &t->outbuf);
s->send_closed != DONT_SEND_CLOSED, s->id,
&t->hpack_compressor, &t->outbuf);
s->writing_sopb.nops = 0;
if (s->send_closed == SEND_CLOSED_WITH_RST_STREAM) {
gpr_slice_buffer_add(&t->outbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_CHTTP2_NO_ERROR));
gpr_slice_buffer_add(&t->outbuf, grpc_chttp2_rst_stream_create(
s->id, GRPC_CHTTP2_NO_ERROR));
}
if (s->send_closed != DONT_SEND_CLOSED) {
stream_list_join(t, s, WRITTEN_CLOSED);
@ -1067,12 +1074,12 @@ static void perform_write(transport *t, grpc_endpoint *ep) {
}
}
static void add_goaway(transport *t, gpr_uint32 goaway_error, gpr_slice goaway_text) {
static void add_goaway(transport *t, gpr_uint32 goaway_error,
gpr_slice goaway_text) {
if (t->num_pending_goaways == t->cap_pending_goaways) {
t->cap_pending_goaways = GPR_MAX(1, t->cap_pending_goaways * 2);
t->pending_goaways =
gpr_realloc(t->pending_goaways,
sizeof(pending_goaway) * t->cap_pending_goaways);
t->pending_goaways = gpr_realloc(
t->pending_goaways, sizeof(pending_goaway) * t->cap_pending_goaways);
}
t->pending_goaways[t->num_pending_goaways].status =
grpc_chttp2_http2_error_to_grpc_status(goaway_error);
@ -1080,13 +1087,12 @@ static void add_goaway(transport *t, gpr_uint32 goaway_error, gpr_slice goaway_t
t->num_pending_goaways++;
}
static void maybe_start_some_streams(transport *t) {
/* start streams where we have free stream ids and free concurrency */
while (
t->next_stream_id <= MAX_CLIENT_STREAM_ID &&
grpc_chttp2_stream_map_size(&t->stream_map) <
t->settings[PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS]) {
while (t->next_stream_id <= MAX_CLIENT_STREAM_ID &&
grpc_chttp2_stream_map_size(&t->stream_map) <
t->settings[PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS]) {
stream *s = stream_list_remove_head(t, WAITING_FOR_CONCURRENCY);
if (!s) return;
@ -1094,7 +1100,9 @@ static void maybe_start_some_streams(transport *t) {
t->is_client ? "CLI" : "SVR", s, t->next_stream_id));
if (t->next_stream_id == MAX_CLIENT_STREAM_ID) {
add_goaway(t, GRPC_CHTTP2_NO_ERROR, gpr_slice_from_copied_string("Exceeded sequence number limit"));
add_goaway(
t, GRPC_CHTTP2_NO_ERROR,
gpr_slice_from_copied_string("Exceeded sequence number limit"));
}
GPR_ASSERT(s->id == 0);
@ -1112,7 +1120,10 @@ static void maybe_start_some_streams(transport *t) {
stream *s = stream_list_remove_head(t, WAITING_FOR_CONCURRENCY);
if (!s) return;
cancel_stream(t, s, GRPC_STATUS_UNAVAILABLE, grpc_chttp2_grpc_status_to_http2_error(GRPC_STATUS_UNAVAILABLE), NULL, 0);
cancel_stream(
t, s, GRPC_STATUS_UNAVAILABLE,
grpc_chttp2_grpc_status_to_http2_error(GRPC_STATUS_UNAVAILABLE), NULL,
0);
}
}
@ -1165,6 +1176,13 @@ static void perform_op_locked(transport *t, stream *s, grpc_transport_op *op) {
if (op->bind_pollset) {
add_to_pollset_locked(t, op->bind_pollset);
}
if (op->on_consumed) {
op_closure c;
c.cb = op->on_consumed;
c.user_data = op->on_consumed_user_data;
schedule_cb(t, c, 1);
}
}
static void perform_op(grpc_transport *gt, grpc_stream *gs,
@ -1258,8 +1276,8 @@ static void cancel_stream_inner(transport *t, stream *s, gpr_uint32 id,
/* synthesize a status if we don't believe we'll get one */
gpr_ltoa(local_status, buffer);
add_incoming_metadata(
t, s,
grpc_mdelem_from_strings(t->metadata_context, "grpc-status", buffer));
t, s, grpc_mdelem_from_strings(t->metadata_context, "grpc-status",
buffer));
if (!optional_message) {
switch (local_status) {
case GRPC_STATUS_CANCELLED:
@ -1321,6 +1339,7 @@ static void drop_connection(transport *t) {
if (t->error_state == ERROR_STATE_NONE) {
t->error_state = ERROR_STATE_SEEN;
}
close_transport_locked(t);
end_all_the_calls(t);
}
@ -1497,7 +1516,8 @@ static int init_header_frame_parser(transport *t, int is_continuation) {
t->last_incoming_stream_id, t->incoming_stream_id);
return init_skip_frame(t, 1);
} else if ((t->incoming_stream_id & 1) == 0) {
gpr_log(GPR_ERROR, "ignoring stream with non-client generated index %d", t->incoming_stream_id);
gpr_log(GPR_ERROR, "ignoring stream with non-client generated index %d",
t->incoming_stream_id);
return init_skip_frame(t, 1);
}
t->incoming_stream = NULL;
@ -1557,10 +1577,10 @@ static int init_ping_parser(transport *t) {
}
static int init_rst_stream_parser(transport *t) {
int ok = GRPC_CHTTP2_PARSE_OK ==
grpc_chttp2_rst_stream_parser_begin_frame(&t->simple_parsers.rst_stream,
t->incoming_frame_size,
t->incoming_frame_flags);
int ok = GRPC_CHTTP2_PARSE_OK == grpc_chttp2_rst_stream_parser_begin_frame(
&t->simple_parsers.rst_stream,
t->incoming_frame_size,
t->incoming_frame_flags);
if (!ok) {
drop_connection(t);
}
@ -1586,15 +1606,16 @@ static int init_settings_frame_parser(transport *t) {
int ok;
if (t->incoming_stream_id != 0) {
gpr_log(GPR_ERROR, "settings frame received for stream %d", t->incoming_stream_id);
gpr_log(GPR_ERROR, "settings frame received for stream %d",
t->incoming_stream_id);
drop_connection(t);
return 0;
}
ok = GRPC_CHTTP2_PARSE_OK ==
       grpc_chttp2_settings_parser_begin_frame(
           &t->simple_parsers.settings, t->incoming_frame_size,
           t->incoming_frame_flags, t->settings[PEER_SETTINGS]);
if (!ok) {
drop_connection(t);
return 0;
@@ -1659,7 +1680,7 @@ static void add_metadata_batch(transport *t, stream *s) {
we can reconstitute the list.
We can't do list building here as later incoming metadata may reallocate
the underlying array. */
  b.list.tail = (void *)(gpr_intptr)s->incoming_metadata_count;
b.garbage.head = b.garbage.tail = NULL;
b.deadline = s->incoming_deadline;
s->incoming_deadline = gpr_inf_future;
@@ -2017,7 +2038,7 @@ static void patch_metadata_ops(stream *s) {
int found_metadata = 0;
/* rework the array of metadata into a linked list, making use
     of the breadcrumbs we left in metadata batches during
add_metadata_batch */
for (i = 0; i < nops; i++) {
grpc_stream_op *op = &ops[i];
@@ -2033,11 +2054,11 @@ static void patch_metadata_ops(stream *s) {
op->data.metadata.list.head = &s->incoming_metadata[mdidx];
op->data.metadata.list.tail = &s->incoming_metadata[last_mdidx - 1];
for (j = mdidx + 1; j < last_mdidx; j++) {
s->incoming_metadata[j].prev = &s->incoming_metadata[j-1];
s->incoming_metadata[j-1].next = &s->incoming_metadata[j];
s->incoming_metadata[j].prev = &s->incoming_metadata[j - 1];
s->incoming_metadata[j - 1].next = &s->incoming_metadata[j];
}
s->incoming_metadata[mdidx].prev = NULL;
      s->incoming_metadata[last_mdidx - 1].next = NULL;
/* track where we're up to */
mdidx = last_mdidx;
}
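The loop above rethreads a contiguous slice of the incoming_metadata array, arr[mdidx..last_mdidx), into a doubly-linked list, using the element counts stashed in list.tail as breadcrumbs. A standalone sketch of just that rethreading step, with a simplified element type and hypothetical names:

#include <stddef.h>

typedef struct md_elem {
  struct md_elem *prev;
  struct md_elem *next;
  int payload; /* stands in for the real metadata element */
} md_elem;

/* Link arr[first..last) (last > first) into a doubly-linked list and
   return the head, mirroring the prev/next stitching above. */
static md_elem *rethread(md_elem *arr, size_t first, size_t last) {
  size_t j;
  for (j = first + 1; j < last; j++) {
    arr[j].prev = &arr[j - 1];
    arr[j - 1].next = &arr[j];
  }
  arr[first].prev = NULL;
  arr[last - 1].next = NULL;
  return &arr[first];
}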
@@ -2049,7 +2070,8 @@ static void patch_metadata_ops(stream *s) {
size_t copy_bytes = sizeof(*s->incoming_metadata) * new_count;
GPR_ASSERT(mdidx < s->incoming_metadata_count);
s->incoming_metadata = gpr_malloc(copy_bytes);
      memcpy(s->old_incoming_metadata + mdidx, s->incoming_metadata,
             copy_bytes);
s->incoming_metadata_count = s->incoming_metadata_capacity = new_count;
} else {
s->incoming_metadata = NULL;
@@ -2086,7 +2108,6 @@ static void finish_reads(transport *t) {
schedule_cb(t, s->recv_done_closure, 1);
}
}
static void schedule_cb(transport *t, op_closure closure, int success) {

@@ -184,34 +184,34 @@ static void assert_valid_list(grpc_mdelem_list *list) {
}
#ifndef NDEBUG
void grpc_metadata_batch_assert_ok(grpc_metadata_batch *batch) {
  assert_valid_list(&batch->list);
  assert_valid_list(&batch->garbage);
}
#endif /* NDEBUG */
void grpc_metadata_batch_init(grpc_metadata_batch *batch) {
  batch->list.head = batch->list.tail = batch->garbage.head = batch->garbage.tail =
      NULL;
  batch->deadline = gpr_inf_future;
}
void grpc_metadata_batch_destroy(grpc_metadata_batch *batch) {
  grpc_linked_mdelem *l;
  for (l = batch->list.head; l; l = l->next) {
    grpc_mdelem_unref(l->md);
  }
  for (l = batch->garbage.head; l; l = l->next) {
grpc_mdelem_unref(l->md);
}
}
void grpc_metadata_batch_add_head(grpc_metadata_batch *batch,
                                  grpc_linked_mdelem *storage,
                                  grpc_mdelem *elem_to_add) {
  GPR_ASSERT(elem_to_add);
  storage->md = elem_to_add;
  grpc_metadata_batch_link_head(batch, storage);
}
static void link_head(grpc_mdelem_list *list, grpc_linked_mdelem *storage) {
@@ -228,17 +228,17 @@ static void link_head(grpc_mdelem_list *list, grpc_linked_mdelem *storage) {
assert_valid_list(list);
}
void grpc_metadata_batch_link_head(grpc_metadata_batch *batch,
                                   grpc_linked_mdelem *storage) {
  link_head(&batch->list, storage);
}
void grpc_metadata_batch_add_tail(grpc_metadata_batch *batch,
                                  grpc_linked_mdelem *storage,
                                  grpc_mdelem *elem_to_add) {
  GPR_ASSERT(elem_to_add);
  storage->md = elem_to_add;
  grpc_metadata_batch_link_tail(batch, storage);
}
static void link_tail(grpc_mdelem_list *list, grpc_linked_mdelem *storage) {
@@ -255,9 +255,9 @@ static void link_tail(grpc_mdelem_list *list, grpc_linked_mdelem *storage) {
assert_valid_list(list);
}
void grpc_metadata_batch_link_tail(grpc_metadata_batch *batch,
                                   grpc_linked_mdelem *storage) {
  link_tail(&batch->list, storage);
}
void grpc_metadata_batch_merge(grpc_metadata_batch *target,
@@ -274,16 +274,16 @@ void grpc_metadata_batch_merge(grpc_metadata_batch *target,
}
}
void grpc_metadata_batch_filter(grpc_metadata_batch *batch,
                                grpc_mdelem *(*filter)(void *user_data,
                                                       grpc_mdelem *elem),
                                void *user_data) {
  grpc_linked_mdelem *l;
  grpc_linked_mdelem *next;
  assert_valid_list(&batch->list);
  assert_valid_list(&batch->garbage);
  for (l = batch->list.head; l; l = next) {
grpc_mdelem *orig = l->md;
grpc_mdelem *filt = filter(user_data, orig);
next = l->next;
@@ -294,19 +294,19 @@ void grpc_metadata_batch_filter(grpc_metadata_batch *comd,
if (l->next) {
l->next->prev = l->prev;
}
      if (batch->list.head == l) {
        batch->list.head = l->next;
      }
      if (batch->list.tail == l) {
        batch->list.tail = l->prev;
      }
      assert_valid_list(&batch->list);
      link_head(&batch->garbage, l);
} else if (filt != orig) {
grpc_mdelem_unref(orig);
l->md = filt;
}
}
assert_valid_list(&comd->list);
assert_valid_list(&comd->garbage);
assert_valid_list(&batch->list);
assert_valid_list(&batch->garbage);
}
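grpc_metadata_batch_filter unlinks dropped elements into the batch's garbage list rather than unreffing them inline, so all the unrefs can later be collected under a single metadata-context lock. A generic, self-contained sketch of that filter-into-garbage technique with its own node type (all names hypothetical):

#include <stddef.h>

typedef struct node {
  struct node *prev, *next;
  int value;
} node;

typedef struct { node *head, *tail; } list;

static void link_head(list *l, node *n) {
  n->prev = NULL;
  n->next = l->head;
  if (l->head) l->head->prev = n; else l->tail = n;
  l->head = n;
}

/* Keep elements where keep() returns nonzero; move the rest to garbage
   for deferred cleanup -- the same unlink-and-relink dance as above. */
static void filter(list *live, list *garbage, int (*keep)(int value)) {
  node *n = live->head;
  while (n) {
    node *next = n->next;
    if (!keep(n->value)) {
      if (n->prev) n->prev->next = n->next;
      if (n->next) n->next->prev = n->prev;
      if (live->head == n) live->head = n->next;
      if (live->tail == n) live->tail = n->prev;
      link_head(garbage, n);
    }
    n = next;
  }
}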

@@ -85,29 +85,62 @@ typedef struct grpc_mdelem_list {
} grpc_mdelem_list;
typedef struct grpc_metadata_batch {
/** Metadata elements in this batch */
grpc_mdelem_list list;
/** Elements that have been removed from the batch, but have
not yet been unreffed - used to allow collecting garbage
under a single metadata context lock */
grpc_mdelem_list garbage;
/** Used to calculate grpc-timeout at the point of sending,
or gpr_inf_future if this batch does not need to send a
grpc-timeout */
gpr_timespec deadline;
} grpc_metadata_batch;
void grpc_metadata_batch_init(grpc_metadata_batch *batch);
void grpc_metadata_batch_destroy(grpc_metadata_batch *batch);
void grpc_metadata_batch_merge(grpc_metadata_batch *target,
grpc_metadata_batch *add);
/** Add \a storage to the beginning of \a batch. storage->md is
assumed to be valid.
\a storage is owned by the caller and must survive for the
lifetime of batch. This usually means it should be around
for the lifetime of the call. */
void grpc_metadata_batch_link_head(grpc_metadata_batch *batch,
grpc_linked_mdelem *storage);
/** Add \a storage to the end of \a batch. storage->md is
assumed to be valid.
\a storage is owned by the caller and must survive for the
lifetime of batch. This usually means it should be around
for the lifetime of the call. */
void grpc_metadata_batch_link_tail(grpc_metadata_batch *batch,
grpc_linked_mdelem *storage);
/** Add \a elem_to_add as the first element in \a batch, using
\a storage as backing storage for the linked list element.
\a storage is owned by the caller and must survive for the
lifetime of batch. This usually means it should be around
for the lifetime of the call.
Takes ownership of \a elem_to_add */
void grpc_metadata_batch_add_head(grpc_metadata_batch *batch,
grpc_linked_mdelem *storage,
grpc_mdelem *elem_to_add);
/** Add \a elem_to_add as the last element in \a batch, using
\a storage as backing storage for the linked list element.
\a storage is owned by the caller and must survive for the
lifetime of batch. This usually means it should be around
for the lifetime of the call.
Takes ownership of \a elem_to_add */
void grpc_metadata_batch_add_tail(grpc_metadata_batch *batch,
grpc_linked_mdelem *storage,
grpc_mdelem *elem_to_add);
/** For each element in \a batch, execute \a filter.
The return value from \a filter will be substituted for the
grpc_mdelem passed to \a filter. If \a filter returns NULL,
the element will be moved to the garbage list. */
void grpc_metadata_batch_filter(grpc_metadata_batch *batch,
grpc_mdelem *(*filter)(void *user_data,
grpc_mdelem *elem),
void *user_data);
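The new doc comments stress that the grpc_linked_mdelem storage is caller-owned and must outlive the batch; in practice that means embedding the link nodes in a per-call structure instead of heap-allocating one per element. A schematic of that ownership pattern with self-contained stand-in types (the call struct, field names, and batch_add_tail are hypothetical):

typedef struct linked_elem {
  struct linked_elem *prev, *next;
  const char *md; /* stands in for grpc_mdelem* */
} linked_elem;

typedef struct { linked_elem *head, *tail; } batch;

typedef struct {
  batch trailing_md;
  linked_elem status_storage;  /* backing node for "grpc-status" */
  linked_elem message_storage; /* backing node for "grpc-message" */
} call_data;

static void batch_add_tail(batch *b, linked_elem *storage, const char *elem) {
  storage->md = elem; /* batch takes ownership of the element only */
  storage->next = NULL;
  storage->prev = b->tail;
  if (b->tail) b->tail->next = storage; else b->head = storage;
  b->tail = storage;
}

static void set_status(call_data *call) {
  /* No allocation: the node is embedded in the call and outlives the
     batch, which is exactly the contract the comments describe. */
  batch_add_tail(&call->trailing_md, &call->status_storage, "grpc-status: 0");
}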

@@ -86,6 +86,16 @@ void grpc_transport_setup_initiate(grpc_transport_setup *setup) {
setup->vtable->initiate(setup);
}
void grpc_transport_setup_add_interested_party(grpc_transport_setup *setup,
grpc_pollset *pollset) {
setup->vtable->add_interested_party(setup, pollset);
}
void grpc_transport_setup_del_interested_party(grpc_transport_setup *setup,
grpc_pollset *pollset) {
setup->vtable->del_interested_party(setup, pollset);
}
void grpc_transport_op_finish_with_failure(grpc_transport_op *op) {
if (op->send_ops) {
op->on_done_send(op->send_user_data, 0);
@@ -93,6 +103,9 @@ void grpc_transport_op_finish_with_failure(grpc_transport_op *op) {
if (op->recv_ops) {
op->on_done_recv(op->recv_user_data, 0);
}
if (op->on_consumed) {
op->on_consumed(op->on_consumed_user_data, 0);
}
}
void grpc_transport_op_add_cancellation(grpc_transport_op *op,

@@ -37,6 +37,7 @@
#include <stddef.h>
#include "src/core/iomgr/pollset.h"
#include "src/core/iomgr/pollset_set.h"
#include "src/core/transport/stream_op.h"
#include "src/core/channel/context.h"
@@ -63,6 +64,9 @@ typedef enum grpc_stream_state {
/* Transport op: a set of operations to perform on a transport */
typedef struct grpc_transport_op {
void (*on_consumed)(void *user_data, int success);
void *on_consumed_user_data;
grpc_stream_op_buffer *send_ops;
int is_last_send;
int dont_compress;
@@ -196,6 +200,10 @@ typedef struct grpc_transport_setup_vtable grpc_transport_setup_vtable;
struct grpc_transport_setup_vtable {
void (*initiate)(grpc_transport_setup *setup);
void (*add_interested_party)(grpc_transport_setup *setup,
grpc_pollset *pollset);
void (*del_interested_party)(grpc_transport_setup *setup,
grpc_pollset *pollset);
void (*cancel)(grpc_transport_setup *setup);
};
@@ -212,6 +220,12 @@ struct grpc_transport_setup {
This *may* be implemented as a no-op if the setup process monitors something
continuously. */
void grpc_transport_setup_initiate(grpc_transport_setup *setup);
void grpc_transport_setup_add_interested_party(grpc_transport_setup *setup,
grpc_pollset *pollset);
void grpc_transport_setup_del_interested_party(grpc_transport_setup *setup,
grpc_pollset *pollset);
/* Cancel transport setup. After this returns, no new transports should be
created, and all pending transport setup callbacks should be completed.
After this call completes, setup should be considered invalid (this can be

@@ -54,8 +54,16 @@
#define TSI_SSL_MAX_PROTECTED_FRAME_SIZE_UPPER_BOUND 16384
#define TSI_SSL_MAX_PROTECTED_FRAME_SIZE_LOWER_BOUND 1024
/* Putting a macro like this and littering the source file with #if is really
bad practice.
TODO(jboeuf): refactor all the #if / #endif in a separate module. */
#ifndef TSI_OPENSSL_ALPN_SUPPORT
#define TSI_OPENSSL_ALPN_SUPPORT 1
#endif
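TSI_OPENSSL_ALPN_SUPPORT defaults to 1 here and is meant to be overridden at build time. As an aside: if one wanted to derive it automatically instead of hardcoding it, a plausible (untested, hypothetical) guard would key off the OpenSSL version, since the ALPN entry points used below (SSL_CTX_set_alpn_protos, SSL_get0_alpn_selected, SSL_CTX_set_alpn_select_cb) first shipped in OpenSSL 1.0.2:

/* Hypothetical alternative: infer ALPN support from the OpenSSL version
   rather than hardcoding it. The ALPN APIs appeared in OpenSSL 1.0.2. */
#include <openssl/opensslv.h>

#ifndef TSI_OPENSSL_ALPN_SUPPORT
#if OPENSSL_VERSION_NUMBER >= 0x10002000L
#define TSI_OPENSSL_ALPN_SUPPORT 1
#else
#define TSI_OPENSSL_ALPN_SUPPORT 0
#endif
#endif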
/* TODO(jboeuf): I have not found a way to get this number dynamically from the
   SSL structure. This is what we would ultimately want though... */
#define TSI_SSL_MAX_PROTECTION_OVERHEAD 100
/* --- Structure definitions. ---*/
@@ -70,6 +78,8 @@ struct tsi_ssl_handshaker_factory {
typedef struct {
tsi_ssl_handshaker_factory base;
SSL_CTX* ssl_context;
unsigned char* alpn_protocol_list;
size_t alpn_protocol_list_length;
} tsi_ssl_client_handshaker_factory;
typedef struct {
@@ -841,7 +851,7 @@ static tsi_result ssl_handshaker_process_bytes_from_peer(
static tsi_result ssl_handshaker_extract_peer(tsi_handshaker* self,
tsi_peer* peer) {
tsi_result result = TSI_OK;
  const unsigned char* alpn_selected = NULL;
unsigned int alpn_selected_len;
tsi_ssl_handshaker* impl = (tsi_ssl_handshaker*)self;
X509* peer_cert = SSL_get_peer_certificate(impl->ssl);
@@ -850,7 +860,14 @@ static tsi_result ssl_handshaker_extract_peer(tsi_handshaker* self,
X509_free(peer_cert);
if (result != TSI_OK) return result;
}
#if TSI_OPENSSL_ALPN_SUPPORT
SSL_get0_alpn_selected(impl->ssl, &alpn_selected, &alpn_selected_len);
#endif /* TSI_OPENSSL_ALPN_SUPPORT */
if (alpn_selected == NULL) {
/* Try npn. */
SSL_get0_next_proto_negotiated(impl->ssl, &alpn_selected,
&alpn_selected_len);
}
if (alpn_selected != NULL) {
size_t i;
tsi_peer_property* new_properties =
@@ -1012,6 +1029,32 @@ static tsi_result create_tsi_ssl_handshaker(SSL_CTX* ctx, int is_client,
return TSI_OK;
}
static int select_protocol_list(const unsigned char** out,
unsigned char* outlen,
const unsigned char* client_list,
unsigned int client_list_len,
const unsigned char* server_list,
unsigned int server_list_len) {
const unsigned char* client_current = client_list;
while ((unsigned int)(client_current - client_list) < client_list_len) {
unsigned char client_current_len = *(client_current++);
const unsigned char* server_current = server_list;
while ((server_current >= server_list) &&
(gpr_uintptr)(server_current - server_list) < server_list_len) {
unsigned char server_current_len = *(server_current++);
if ((client_current_len == server_current_len) &&
!memcmp(client_current, server_current, server_current_len)) {
*out = server_current;
*outlen = server_current_len;
return SSL_TLSEXT_ERR_OK;
}
server_current += server_current_len;
}
client_current += client_current_len;
}
return SSL_TLSEXT_ERR_NOACK;
}
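Both ALPN and NPN exchange protocol lists in the same wire format: a sequence of length-prefixed, non-NUL-terminated strings, so "h2" followed by "http/1.1" encodes as the bytes 0x02 'h' '2' 0x08 'h' 't' 't' 'p' '/' '1' '.' '1'. The shared select_protocol_list above walks two such lists and returns the first client entry that the other side also offers, which is why the server ALPN callback and the client NPN callback (with the argument roles swapped) can both reuse it. A standalone worked example of the encoding and the match, using a local re-implementation purely for illustration:

#include <stdio.h>
#include <string.h>

/* Protocol lists in ALPN/NPN wire format: 1-byte length, then bytes. */
static const unsigned char client_list[] = "\x06gopher\x02h2";
static const unsigned char server_list[] = "\x02h2\x08http/1.1";

/* First client protocol the server also offers, or NULL if none. */
static const unsigned char *match(const unsigned char *cl, size_t cl_len,
                                  const unsigned char *sl, size_t sl_len,
                                  unsigned char *out_len) {
  const unsigned char *c = cl;
  while ((size_t)(c - cl) < cl_len) {
    unsigned char clen = *c++;
    const unsigned char *s = sl;
    while ((size_t)(s - sl) < sl_len) {
      unsigned char slen = *s++;
      if (clen == slen && memcmp(c, s, slen) == 0) {
        *out_len = slen;
        return s;
      }
      s += slen; /* skip to the next server entry */
    }
    c += clen; /* skip to the next client entry */
  }
  return NULL;
}

int main(void) {
  unsigned char len;
  const unsigned char *p = match(client_list, sizeof(client_list) - 1,
                                 server_list, sizeof(server_list) - 1, &len);
  if (p) printf("negotiated: %.*s\n", (int)len, p); /* prints "h2" */
  return 0;
}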
/* --- tsi_ssl__client_handshaker_factory methods implementation. --- */
static tsi_result ssl_client_handshaker_factory_create_handshaker(
@@ -1027,10 +1070,21 @@ static void ssl_client_handshaker_factory_destroy(
tsi_ssl_handshaker_factory* self) {
tsi_ssl_client_handshaker_factory* impl =
(tsi_ssl_client_handshaker_factory*)self;
  if (impl->ssl_context != NULL) SSL_CTX_free(impl->ssl_context);
if (impl->alpn_protocol_list != NULL) free(impl->alpn_protocol_list);
free(impl);
}
static int client_handshaker_factory_npn_callback(
SSL* ssl, unsigned char** out, unsigned char* outlen,
const unsigned char* in, unsigned int inlen, void* arg) {
tsi_ssl_client_handshaker_factory* factory =
(tsi_ssl_client_handshaker_factory*)arg;
return select_protocol_list((const unsigned char**)out, outlen,
factory->alpn_protocol_list,
factory->alpn_protocol_list_length, in, inlen);
}
/* --- tsi_ssl_server_handshaker_factory methods implementation. --- */
static tsi_result ssl_server_handshaker_factory_create_handshaker(
@@ -1134,30 +1188,25 @@ static int ssl_server_handshaker_factory_servername_callback(SSL* ssl, int* ap,
return SSL_TLSEXT_ERR_ALERT_WARNING;
}
#if TSI_OPENSSL_ALPN_SUPPORT
static int server_handshaker_factory_alpn_callback(
SSL* ssl, const unsigned char** out, unsigned char* outlen,
const unsigned char* in, unsigned int inlen, void* arg) {
tsi_ssl_server_handshaker_factory* factory =
(tsi_ssl_server_handshaker_factory*)arg;
  return select_protocol_list(out, outlen, in, inlen,
                              factory->alpn_protocol_list,
                              factory->alpn_protocol_list_length);
}
#endif /* TSI_OPENSSL_ALPN_SUPPORT */
static int server_handshaker_factory_npn_advertised_callback(
SSL* ssl, const unsigned char** out, unsigned int* outlen, void* arg) {
tsi_ssl_server_handshaker_factory* factory =
(tsi_ssl_server_handshaker_factory*)arg;
*out = factory->alpn_protocol_list;
*outlen = factory->alpn_protocol_list_length;
return SSL_TLSEXT_ERR_OK;
}
/* --- tsi_ssl_handshaker_factory constructors. --- */
@@ -1184,6 +1233,14 @@ tsi_result tsi_create_ssl_client_handshaker_factory(
gpr_log(GPR_ERROR, "Could not create ssl context.");
return TSI_INVALID_ARGUMENT;
}
impl = calloc(1, sizeof(tsi_ssl_client_handshaker_factory));
if (impl == NULL) {
SSL_CTX_free(ssl_context);
return TSI_OUT_OF_RESOURCES;
}
impl->ssl_context = ssl_context;
do {
result =
populate_ssl_context(ssl_context, pem_private_key, pem_private_key_size,
@@ -1197,41 +1254,33 @@ tsi_result tsi_create_ssl_client_handshaker_factory(
}
if (num_alpn_protocols != 0) {
      result = build_alpn_protocol_name_list(
          alpn_protocols, alpn_protocols_lengths, num_alpn_protocols,
          &impl->alpn_protocol_list, &impl->alpn_protocol_list_length);
      if (result != TSI_OK) {
        gpr_log(GPR_ERROR, "Building alpn list failed with error %s.",
                tsi_result_to_string(result));
        break;
      }
#if TSI_OPENSSL_ALPN_SUPPORT
      if (SSL_CTX_set_alpn_protos(ssl_context, impl->alpn_protocol_list,
                                  impl->alpn_protocol_list_length)) {
gpr_log(GPR_ERROR, "Could not set alpn protocol list to context.");
result = TSI_INVALID_ARGUMENT;
break;
}
#endif /* TSI_OPENSSL_ALPN_SUPPORT */
SSL_CTX_set_next_proto_select_cb(
ssl_context, client_handshaker_factory_npn_callback, impl);
}
} while (0);
if (result != TSI_OK) {
      ssl_client_handshaker_factory_destroy(&impl->base);
return result;
}
SSL_CTX_set_verify(ssl_context, SSL_VERIFY_PEER, NULL);
/* TODO(jboeuf): Add revocation verification. */
impl->base.create_handshaker =
ssl_client_handshaker_factory_create_handshaker;
impl->base.destroy = ssl_client_handshaker_factory_destroy;
@@ -1322,8 +1371,13 @@ tsi_result tsi_create_ssl_server_handshaker_factory(
impl->ssl_contexts[i],
ssl_server_handshaker_factory_servername_callback);
SSL_CTX_set_tlsext_servername_arg(impl->ssl_contexts[i], impl);
#if TSI_OPENSSL_ALPN_SUPPORT
SSL_CTX_set_alpn_select_cb(impl->ssl_contexts[i],
server_handshaker_factory_alpn_callback, impl);
#endif /* TSI_OPENSSL_ALPN_SUPPORT */
SSL_CTX_set_next_protos_advertised_cb(
impl->ssl_contexts[i],
server_handshaker_factory_npn_advertised_callback, impl);
} while (0);
if (result != TSI_OK) {
