Fold CompletionQueue and ServerCompletionQueue into grpc_impl

pull/18731/head
Author: Karthik Ravi Shankar (6 years ago)
Parent: 21e3fd490a
Commit: 62fb156122
Changed files:
1. BUILD (1)
2. BUILD.gn (1)
3. CMakeLists.txt (5)
4. Makefile (5)
5. build.yaml (1)
6. gRPC-C++.podspec (1)
7. include/grpcpp/channel.h (1)
8. include/grpcpp/create_channel.h (6)
9. include/grpcpp/create_channel_impl.h (7)
10. include/grpcpp/generic/generic_stub_impl.h (2)
11. include/grpcpp/impl/codegen/async_stream.h (2)
12. include/grpcpp/impl/codegen/async_unary_call.h (1)
13. include/grpcpp/impl/codegen/call.h (14)
14. include/grpcpp/impl/codegen/call_op_set.h (1)
15. include/grpcpp/impl/codegen/channel_interface.h (16)
16. include/grpcpp/impl/codegen/client_callback.h (1)
17. include/grpcpp/impl/codegen/client_context.h (1)
18. include/grpcpp/impl/codegen/client_unary_call.h (2)
19. include/grpcpp/impl/codegen/completion_queue.h (392)
20. include/grpcpp/impl/codegen/completion_queue_impl.h (422)
21. include/grpcpp/impl/codegen/intercepted_channel.h (13)
22. include/grpcpp/impl/codegen/server_context.h (8)
23. include/grpcpp/impl/codegen/server_interface.h (70)
24. include/grpcpp/impl/codegen/server_interface.h.rej (19)
25. include/grpcpp/impl/codegen/service_type.h (2)
26. include/grpcpp/server_builder.h (6)
27. include/grpcpp/server_builder_impl.h (8)
28. src/compiler/cpp_generator.cc (6)
29. src/cpp/common/completion_queue_cc.cc (12)
30. test/cpp/codegen/compiler_test_golden (6)
31. tools/doxygen/Doxyfile.c++ (1)
32. tools/doxygen/Doxyfile.c++.internal (1)
33. tools/run_tests/generated/sources_and_headers.json (2)

BUILD
@ -2128,6 +2128,7 @@ grpc_cc_library(
"include/grpcpp/impl/codegen/client_interceptor.h",
"include/grpcpp/impl/codegen/client_unary_call.h",
"include/grpcpp/impl/codegen/completion_queue.h",
"include/grpcpp/impl/codegen/completion_queue_impl.h",
"include/grpcpp/impl/codegen/completion_queue_tag.h",
"include/grpcpp/impl/codegen/config.h",
"include/grpcpp/impl/codegen/core_codegen_interface.h",

BUILD.gn
@ -1034,6 +1034,7 @@ config("grpc_config") {
"include/grpcpp/impl/codegen/client_interceptor.h",
"include/grpcpp/impl/codegen/client_unary_call.h",
"include/grpcpp/impl/codegen/completion_queue.h",
"include/grpcpp/impl/codegen/completion_queue_impl.h",
"include/grpcpp/impl/codegen/completion_queue_tag.h",
"include/grpcpp/impl/codegen/config.h",
"include/grpcpp/impl/codegen/config_protobuf.h",

CMakeLists.txt
@ -3155,6 +3155,7 @@ foreach(_hdr
include/grpcpp/impl/codegen/client_interceptor.h
include/grpcpp/impl/codegen/client_unary_call.h
include/grpcpp/impl/codegen/completion_queue.h
include/grpcpp/impl/codegen/completion_queue_impl.h
include/grpcpp/impl/codegen/completion_queue_tag.h
include/grpcpp/impl/codegen/config.h
include/grpcpp/impl/codegen/core_codegen_interface.h
@ -3758,6 +3759,7 @@ foreach(_hdr
include/grpcpp/impl/codegen/client_interceptor.h
include/grpcpp/impl/codegen/client_unary_call.h
include/grpcpp/impl/codegen/completion_queue.h
include/grpcpp/impl/codegen/completion_queue_impl.h
include/grpcpp/impl/codegen/completion_queue_tag.h
include/grpcpp/impl/codegen/config.h
include/grpcpp/impl/codegen/core_codegen_interface.h
@ -4191,6 +4193,7 @@ foreach(_hdr
include/grpcpp/impl/codegen/client_interceptor.h
include/grpcpp/impl/codegen/client_unary_call.h
include/grpcpp/impl/codegen/completion_queue.h
include/grpcpp/impl/codegen/completion_queue_impl.h
include/grpcpp/impl/codegen/completion_queue_tag.h
include/grpcpp/impl/codegen/config.h
include/grpcpp/impl/codegen/core_codegen_interface.h
@ -4388,6 +4391,7 @@ foreach(_hdr
include/grpcpp/impl/codegen/client_interceptor.h
include/grpcpp/impl/codegen/client_unary_call.h
include/grpcpp/impl/codegen/completion_queue.h
include/grpcpp/impl/codegen/completion_queue_impl.h
include/grpcpp/impl/codegen/completion_queue_tag.h
include/grpcpp/impl/codegen/config.h
include/grpcpp/impl/codegen/core_codegen_interface.h
@ -4737,6 +4741,7 @@ foreach(_hdr
include/grpcpp/impl/codegen/client_interceptor.h
include/grpcpp/impl/codegen/client_unary_call.h
include/grpcpp/impl/codegen/completion_queue.h
include/grpcpp/impl/codegen/completion_queue_impl.h
include/grpcpp/impl/codegen/completion_queue_tag.h
include/grpcpp/impl/codegen/config.h
include/grpcpp/impl/codegen/core_codegen_interface.h

Makefile
@ -5490,6 +5490,7 @@ PUBLIC_HEADERS_CXX += \
include/grpcpp/impl/codegen/client_interceptor.h \
include/grpcpp/impl/codegen/client_unary_call.h \
include/grpcpp/impl/codegen/completion_queue.h \
include/grpcpp/impl/codegen/completion_queue_impl.h \
include/grpcpp/impl/codegen/completion_queue_tag.h \
include/grpcpp/impl/codegen/config.h \
include/grpcpp/impl/codegen/core_codegen_interface.h \
@ -6101,6 +6102,7 @@ PUBLIC_HEADERS_CXX += \
include/grpcpp/impl/codegen/client_interceptor.h \
include/grpcpp/impl/codegen/client_unary_call.h \
include/grpcpp/impl/codegen/completion_queue.h \
include/grpcpp/impl/codegen/completion_queue_impl.h \
include/grpcpp/impl/codegen/completion_queue_tag.h \
include/grpcpp/impl/codegen/config.h \
include/grpcpp/impl/codegen/core_codegen_interface.h \
@ -6506,6 +6508,7 @@ PUBLIC_HEADERS_CXX += \
include/grpcpp/impl/codegen/client_interceptor.h \
include/grpcpp/impl/codegen/client_unary_call.h \
include/grpcpp/impl/codegen/completion_queue.h \
include/grpcpp/impl/codegen/completion_queue_impl.h \
include/grpcpp/impl/codegen/completion_queue_tag.h \
include/grpcpp/impl/codegen/config.h \
include/grpcpp/impl/codegen/core_codegen_interface.h \
@ -6674,6 +6677,7 @@ PUBLIC_HEADERS_CXX += \
include/grpcpp/impl/codegen/client_interceptor.h \
include/grpcpp/impl/codegen/client_unary_call.h \
include/grpcpp/impl/codegen/completion_queue.h \
include/grpcpp/impl/codegen/completion_queue_impl.h \
include/grpcpp/impl/codegen/completion_queue_tag.h \
include/grpcpp/impl/codegen/config.h \
include/grpcpp/impl/codegen/core_codegen_interface.h \
@ -7029,6 +7033,7 @@ PUBLIC_HEADERS_CXX += \
include/grpcpp/impl/codegen/client_interceptor.h \
include/grpcpp/impl/codegen/client_unary_call.h \
include/grpcpp/impl/codegen/completion_queue.h \
include/grpcpp/impl/codegen/completion_queue_impl.h \
include/grpcpp/impl/codegen/completion_queue_tag.h \
include/grpcpp/impl/codegen/config.h \
include/grpcpp/impl/codegen/core_codegen_interface.h \

build.yaml
@ -1248,6 +1248,7 @@ filegroups:
- include/grpcpp/impl/codegen/client_interceptor.h
- include/grpcpp/impl/codegen/client_unary_call.h
- include/grpcpp/impl/codegen/completion_queue.h
- include/grpcpp/impl/codegen/completion_queue_impl.h
- include/grpcpp/impl/codegen/completion_queue_tag.h
- include/grpcpp/impl/codegen/config.h
- include/grpcpp/impl/codegen/core_codegen_interface.h

gRPC-C++.podspec
@ -158,6 +158,7 @@ Pod::Spec.new do |s|
'include/grpcpp/impl/codegen/client_interceptor.h',
'include/grpcpp/impl/codegen/client_unary_call.h',
'include/grpcpp/impl/codegen/completion_queue.h',
'include/grpcpp/impl/codegen/completion_queue_impl.h',
'include/grpcpp/impl/codegen/completion_queue_tag.h',
'include/grpcpp/impl/codegen/config.h',
'include/grpcpp/impl/codegen/core_codegen_interface.h',

include/grpcpp/channel.h
@ -26,6 +26,7 @@
#include <grpcpp/impl/call.h>
#include <grpcpp/impl/codegen/channel_interface.h>
#include <grpcpp/impl/codegen/client_interceptor.h>
#include <grpcpp/impl/codegen/completion_queue.h>
#include <grpcpp/impl/codegen/config.h>
#include <grpcpp/impl/codegen/grpc_library.h>
#include <grpcpp/impl/codegen/sync.h>

include/grpcpp/create_channel.h
@ -23,13 +23,13 @@
namespace grpc {
static inline std::shared_ptr<Channel> CreateChannel(
static inline std::shared_ptr<::grpc::Channel> CreateChannel(
const grpc::string& target,
const std::shared_ptr<ChannelCredentials>& creds) {
return ::grpc_impl::CreateChannel(target, creds);
}
static inline std::shared_ptr<Channel> CreateCustomChannel(
static inline std::shared_ptr<::grpc::Channel> CreateCustomChannel(
const grpc::string& target,
const std::shared_ptr<ChannelCredentials>& creds,
const ChannelArguments& args) {
@ -38,7 +38,7 @@ static inline std::shared_ptr<Channel> CreateCustomChannel(
namespace experimental {
static inline std::shared_ptr<Channel> CreateCustomChannelWithInterceptors(
static inline std::shared_ptr<::grpc::Channel> CreateCustomChannelWithInterceptors(
const grpc::string& target,
const std::shared_ptr<ChannelCredentials>& creds,
const ChannelArguments& args,

include/grpcpp/create_channel_impl.h
@ -28,14 +28,13 @@
#include <grpcpp/support/config.h>
namespace grpc_impl {
/// Create a new \a Channel pointing to \a target.
///
/// \param target The URI of the endpoint to connect to.
/// \param creds Credentials to use for the created channel. If it does not
/// hold an object or is invalid, a lame channel (one on which all operations
/// fail) is returned.
std::shared_ptr<grpc::Channel> CreateChannel(
std::shared_ptr<::grpc::Channel> CreateChannel(
const grpc::string& target,
const std::shared_ptr<grpc::ChannelCredentials>& creds);
@ -49,7 +48,7 @@ std::shared_ptr<grpc::Channel> CreateChannel(
/// hold an object or is invalid, a lame channel (one on which all operations
/// fail) is returned.
/// \param args Options for channel creation.
std::shared_ptr<grpc::Channel> CreateCustomChannel(
std::shared_ptr<::grpc::Channel> CreateCustomChannel(
const grpc::string& target,
const std::shared_ptr<grpc::ChannelCredentials>& creds,
const grpc::ChannelArguments& args);
@ -66,7 +65,7 @@ namespace experimental {
/// hold an object or is invalid, a lame channel (one on which all operations
/// fail) is returned.
/// \param args Options for channel creation.
std::shared_ptr<grpc::Channel> CreateCustomChannelWithInterceptors(
std::shared_ptr<::grpc::Channel> CreateCustomChannelWithInterceptors(
const grpc::string& target,
const std::shared_ptr<grpc::ChannelCredentials>& creds,
const grpc::ChannelArguments& args,
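
The hunk above only moves the factory declarations into grpc_impl; call sites keep using the grpc:: wrappers. A minimal usage sketch, assuming an insecure local endpoint ("localhost:50051" is illustrative, not part of this change):

#include <memory>
#include <grpcpp/grpcpp.h>

// Illustrative only: channel creation through the grpc:: wrappers, which
// now forward to ::grpc_impl::CreateChannel under the hood.
std::shared_ptr<grpc::Channel> MakeLocalChannel() {
  return grpc::CreateChannel("localhost:50051",
                             grpc::InsecureChannelCredentials());
}

int main() {
  auto channel = MakeLocalChannel();
  // GetState(false) does not force a connection attempt; this only shows the
  // channel is usable through the unchanged public API.
  grpc_connectivity_state state = channel->GetState(/*try_to_connect=*/false);
  (void)state;
  return 0;
}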

include/grpcpp/generic/generic_stub_impl.h
@ -29,12 +29,12 @@
namespace grpc {
class CompletionQueue;
typedef ClientAsyncReaderWriter<ByteBuffer, ByteBuffer>
GenericClientAsyncReaderWriter;
typedef ClientAsyncResponseReader<ByteBuffer> GenericClientAsyncResponseReader;
} // namespace grpc
namespace grpc_impl {
class CompletionQueue;
/// Generic stubs provide a type-unsafe interface to call gRPC methods
/// by name.

include/grpcpp/impl/codegen/async_stream.h
@ -28,8 +28,6 @@
namespace grpc {
class CompletionQueue;
namespace internal {
/// Common interface for all client side asynchronous streaming.
class ClientAsyncStreamingInterface {

include/grpcpp/impl/codegen/async_unary_call.h
@ -29,7 +29,6 @@
namespace grpc {
class CompletionQueue;
extern CoreCodegenInterface* g_core_codegen_interface;
/// An interface relevant for async client side unary RPCs (which send

include/grpcpp/impl/codegen/call.h
@ -21,9 +21,11 @@
#include <grpc/impl/codegen/grpc_types.h>
#include <grpcpp/impl/codegen/call_hook.h>
namespace grpc {
namespace grpc_impl {
class CompletionQueue;
}
namespace grpc {
namespace experimental {
class ClientRpcInfo;
class ServerRpcInfo;
@ -41,13 +43,13 @@ class Call final {
call_(nullptr),
max_receive_message_size_(-1) {}
/** call is owned by the caller */
Call(grpc_call* call, CallHook* call_hook, CompletionQueue* cq)
Call(grpc_call* call, CallHook* call_hook, ::grpc_impl::CompletionQueue* cq)
: call_hook_(call_hook),
cq_(cq),
call_(call),
max_receive_message_size_(-1) {}
Call(grpc_call* call, CallHook* call_hook, CompletionQueue* cq,
Call(grpc_call* call, CallHook* call_hook, ::grpc_impl::CompletionQueue* cq,
experimental::ClientRpcInfo* rpc_info)
: call_hook_(call_hook),
cq_(cq),
@ -55,7 +57,7 @@ class Call final {
max_receive_message_size_(-1),
client_rpc_info_(rpc_info) {}
Call(grpc_call* call, CallHook* call_hook, CompletionQueue* cq,
Call(grpc_call* call, CallHook* call_hook, ::grpc_impl::CompletionQueue* cq,
int max_receive_message_size, experimental::ServerRpcInfo* rpc_info)
: call_hook_(call_hook),
cq_(cq),
@ -68,7 +70,7 @@ class Call final {
}
grpc_call* call() const { return call_; }
CompletionQueue* cq() const { return cq_; }
::grpc_impl::CompletionQueue* cq() const { return cq_; }
int max_receive_message_size() const { return max_receive_message_size_; }
@ -82,7 +84,7 @@ class Call final {
private:
CallHook* call_hook_;
CompletionQueue* cq_;
::grpc_impl::CompletionQueue* cq_;
grpc_call* call_;
int max_receive_message_size_;
experimental::ClientRpcInfo* client_rpc_info_ = nullptr;

include/grpcpp/impl/codegen/call_op_set.h
@ -48,7 +48,6 @@
namespace grpc {
class CompletionQueue;
extern CoreCodegenInterface* g_core_codegen_interface;
namespace internal {

include/grpcpp/impl/codegen/channel_interface.h
@ -24,10 +24,13 @@
#include <grpcpp/impl/codegen/status.h>
#include <grpcpp/impl/codegen/time.h>
namespace grpc_impl {
class CompletionQueue;
}
namespace grpc {
class ChannelInterface;
class ClientContext;
class CompletionQueue;
template <class R>
class ClientReader;
@ -73,7 +76,7 @@ class ChannelInterface {
/// deadline expires. \a GetState needs to called to get the current state.
template <typename T>
void NotifyOnStateChange(grpc_connectivity_state last_observed, T deadline,
CompletionQueue* cq, void* tag) {
::grpc_impl::CompletionQueue* cq, void* tag) {
TimePoint<T> deadline_tp(deadline);
NotifyOnStateChangeImpl(last_observed, deadline_tp.raw_time(), cq, tag);
}
@ -125,13 +128,14 @@ class ChannelInterface {
friend class ::grpc::internal::InterceptedChannel;
virtual internal::Call CreateCall(const internal::RpcMethod& method,
ClientContext* context,
CompletionQueue* cq) = 0;
::grpc_impl::CompletionQueue* cq) = 0;
virtual void PerformOpsOnCall(internal::CallOpSetInterface* ops,
internal::Call* call) = 0;
virtual void* RegisterMethod(const char* method) = 0;
virtual void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline,
CompletionQueue* cq, void* tag) = 0;
::grpc_impl::CompletionQueue* cq,
void* tag) = 0;
virtual bool WaitForStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline) = 0;
@ -144,7 +148,7 @@ class ChannelInterface {
// change (even though this is private and non-API)
virtual internal::Call CreateCallInternal(const internal::RpcMethod& method,
ClientContext* context,
CompletionQueue* cq,
::grpc_impl::CompletionQueue* cq,
size_t interceptor_pos) {
return internal::Call();
}
@ -157,7 +161,7 @@ class ChannelInterface {
// Returns nullptr (rather than being pure) since this is a post-1.0 method
// and adding a new pure method to an interface would be a breaking change
// (even though this is private and non-API)
virtual CompletionQueue* CallbackCQ() { return nullptr; }
virtual ::grpc_impl::CompletionQueue* CallbackCQ() { return nullptr; }
};
} // namespace grpc
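
The NotifyOnStateChange overloads above are the completion-queue-based way to watch connectivity: the supplied tag is posted once the channel leaves last_observed or the deadline passes. A hedged sketch of a caller, assuming a valid channel and an arbitrary 5-second deadline:

#include <chrono>
#include <memory>
#include <grpcpp/grpcpp.h>

// Sketch: block (via a CompletionQueue) until the channel's connectivity
// state changes or the deadline expires. Returns true on a state change.
bool WaitForStateChange(const std::shared_ptr<grpc::Channel>& channel) {
  grpc::CompletionQueue cq;
  grpc_connectivity_state last = channel->GetState(/*try_to_connect=*/true);
  void* kTag = reinterpret_cast<void*>(1);  // arbitrary tag value
  channel->NotifyOnStateChange(
      last, std::chrono::system_clock::now() + std::chrono::seconds(5), &cq,
      kTag);

  void* got_tag = nullptr;
  bool ok = false;
  bool changed = false;
  if (cq.Next(&got_tag, &ok) && got_tag == kTag) {
    changed = ok;  // ok == false means the deadline expired first
  }
  cq.Shutdown();
  while (cq.Next(&got_tag, &ok)) {
    // Drain remaining events before the queue goes out of scope.
  }
  return changed;
}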

include/grpcpp/impl/codegen/client_callback.h
@ -33,7 +33,6 @@ namespace grpc {
class Channel;
class ClientContext;
class CompletionQueue;
namespace internal {
class RpcMethod;

include/grpcpp/impl/codegen/client_context.h
@ -61,7 +61,6 @@ namespace grpc {
class Channel;
class ChannelInterface;
class CompletionQueue;
class CallCredentials;
class ClientContext;

include/grpcpp/impl/codegen/client_unary_call.h
@ -27,9 +27,7 @@
namespace grpc {
class Channel;
class ClientContext;
class CompletionQueue;
namespace internal {
class RpcMethod;

include/grpcpp/impl/codegen/completion_queue.h
@ -16,401 +16,15 @@
*
*/
/// A completion queue implements a concurrent producer-consumer queue, with
/// two main API-exposed methods: \a Next and \a AsyncNext. These
/// methods are the essential component of the gRPC C++ asynchronous API.
/// There is also a \a Shutdown method to indicate that a given completion queue
/// will no longer have regular events. This must be called before the
/// completion queue is destroyed.
/// All completion queue APIs are thread-safe and may be used concurrently with
/// any other completion queue API invocation; it is acceptable to have
/// multiple threads calling \a Next or \a AsyncNext on the same or different
/// completion queues, or to call these methods concurrently with a \a Shutdown
/// elsewhere.
/// \remark{All other API calls on completion queue should be completed before
/// a completion queue destructor is called.}
#ifndef GRPCPP_IMPL_CODEGEN_COMPLETION_QUEUE_H
#define GRPCPP_IMPL_CODEGEN_COMPLETION_QUEUE_H
#include <grpc/impl/codegen/atm.h>
#include <grpcpp/impl/codegen/completion_queue_tag.h>
#include <grpcpp/impl/codegen/core_codegen_interface.h>
#include <grpcpp/impl/codegen/grpc_library.h>
#include <grpcpp/impl/codegen/status.h>
#include <grpcpp/impl/codegen/time.h>
#include <grpcpp/impl/codegen/completion_queue_impl.h>
struct grpc_completion_queue;
namespace grpc_impl {
class ServerBuilder;
}
namespace grpc {
template <class R>
class ClientReader;
template <class W>
class ClientWriter;
template <class W, class R>
class ClientReaderWriter;
template <class R>
class ServerReader;
template <class W>
class ServerWriter;
namespace internal {
template <class W, class R>
class ServerReaderWriterBody;
} // namespace internal
class Channel;
class ChannelInterface;
class ClientContext;
class CompletionQueue;
class Server;
class ServerContext;
class ServerInterface;
namespace internal {
class CompletionQueueTag;
class RpcMethod;
template <class ServiceType, class RequestType, class ResponseType>
class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
class ClientStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
class ServerStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
class BidiStreamingHandler;
template <class Streamer, bool WriteNeeded>
class TemplatedBidiStreamingHandler;
template <StatusCode code>
class ErrorMethodHandler;
template <class InputMessage, class OutputMessage>
class BlockingUnaryCallImpl;
template <class Op1, class Op2, class Op3, class Op4, class Op5, class Op6>
class CallOpSet;
} // namespace internal
extern CoreCodegenInterface* g_core_codegen_interface;
/// A thin wrapper around \ref grpc_completion_queue (see \ref
/// src/core/lib/surface/completion_queue.h).
/// See \ref doc/cpp/perf_notes.md for notes on best practices for high
/// performance servers.
class CompletionQueue : private GrpcLibraryCodegen {
public:
/// Default constructor. Implicitly creates a \a grpc_completion_queue
/// instance.
CompletionQueue()
: CompletionQueue(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_NEXT, GRPC_CQ_DEFAULT_POLLING,
nullptr}) {}
/// Wrap \a take, taking ownership of the instance.
///
/// \param take The completion queue instance to wrap. Ownership is taken.
explicit CompletionQueue(grpc_completion_queue* take);
/// Destructor. Destroys the owned wrapped completion queue / instance.
~CompletionQueue() {
g_core_codegen_interface->grpc_completion_queue_destroy(cq_);
}
/// Tri-state return for AsyncNext: SHUTDOWN, GOT_EVENT, TIMEOUT.
enum NextStatus {
SHUTDOWN, ///< The completion queue has been shutdown and fully-drained
GOT_EVENT, ///< Got a new event; \a tag will be filled in with its
///< associated value; \a ok indicating its success.
TIMEOUT ///< deadline was reached.
};
/// Read from the queue, blocking until an event is available or the queue is
/// shutting down.
///
/// \param tag [out] Updated to point to the read event's tag.
/// \param ok [out] true if read a successful event, false otherwise.
///
/// Note that each tag sent to the completion queue (through RPC operations
/// or alarms) will be delivered out of the completion queue by a call to
/// Next (or a related method), regardless of whether the operation succeeded
/// or not. Success here means that this operation completed in the normal
/// valid manner.
///
/// Server-side RPC request: \a ok indicates that the RPC has indeed
/// been started. If it is false, the server has been Shutdown
/// before this particular call got matched to an incoming RPC.
///
/// Client-side StartCall/RPC invocation: \a ok indicates that the RPC is
/// going to go to the wire. If it is false, it not going to the wire. This
/// would happen if the channel is either permanently broken or
/// transiently broken but with the fail-fast option. (Note that async unary
/// RPCs don't post a CQ tag at this point, nor do client-streaming
/// or bidi-streaming RPCs that have the initial metadata corked option set.)
///
/// Client-side Write, Client-side WritesDone, Server-side Write,
/// Server-side Finish, Server-side SendInitialMetadata (which is
/// typically included in Write or Finish when not done explicitly):
/// \a ok means that the data/metadata/status/etc is going to go to the
/// wire. If it is false, it not going to the wire because the call
/// is already dead (i.e., canceled, deadline expired, other side
/// dropped the channel, etc).
///
/// Client-side Read, Server-side Read, Client-side
/// RecvInitialMetadata (which is typically included in Read if not
/// done explicitly): \a ok indicates whether there is a valid message
/// that got read. If not, you know that there are certainly no more
/// messages that can ever be read from this stream. For the client-side
/// operations, this only happens because the call is dead. For the
/// server-sider operation, though, this could happen because the client
/// has done a WritesDone already.
///
/// Client-side Finish: \a ok should always be true
///
/// Server-side AsyncNotifyWhenDone: \a ok should always be true
///
/// Alarm: \a ok is true if it expired, false if it was canceled
///
/// \return true if got an event, false if the queue is fully drained and
/// shut down.
bool Next(void** tag, bool* ok) {
return (AsyncNextInternal(tag, ok,
g_core_codegen_interface->gpr_inf_future(
GPR_CLOCK_REALTIME)) != SHUTDOWN);
}
/// Read from the queue, blocking up to \a deadline (or the queue's shutdown).
/// Both \a tag and \a ok are updated upon success (if an event is available
/// within the \a deadline). A \a tag points to an arbitrary location usually
/// employed to uniquely identify an event.
///
/// \param tag [out] Upon sucess, updated to point to the event's tag.
/// \param ok [out] Upon sucess, true if a successful event, false otherwise
/// See documentation for CompletionQueue::Next for explanation of ok
/// \param deadline [in] How long to block in wait for an event.
///
/// \return The type of event read.
template <typename T>
NextStatus AsyncNext(void** tag, bool* ok, const T& deadline) {
TimePoint<T> deadline_tp(deadline);
return AsyncNextInternal(tag, ok, deadline_tp.raw_time());
}
/// EXPERIMENTAL
/// First executes \a F, then reads from the queue, blocking up to
/// \a deadline (or the queue's shutdown).
/// Both \a tag and \a ok are updated upon success (if an event is available
/// within the \a deadline). A \a tag points to an arbitrary location usually
/// employed to uniquely identify an event.
///
/// \param f [in] Function to execute before calling AsyncNext on this queue.
/// \param tag [out] Upon sucess, updated to point to the event's tag.
/// \param ok [out] Upon sucess, true if read a regular event, false
/// otherwise.
/// \param deadline [in] How long to block in wait for an event.
///
/// \return The type of event read.
template <typename T, typename F>
NextStatus DoThenAsyncNext(F&& f, void** tag, bool* ok, const T& deadline) {
CompletionQueueTLSCache cache = CompletionQueueTLSCache(this);
f();
if (cache.Flush(tag, ok)) {
return GOT_EVENT;
} else {
return AsyncNext(tag, ok, deadline);
}
}
/// Request the shutdown of the queue.
///
/// \warning This method must be called at some point if this completion queue
/// is accessed with Next or AsyncNext. \a Next will not return false
/// until this method has been called and all pending tags have been drained.
/// (Likewise for \a AsyncNext returning \a NextStatus::SHUTDOWN .)
/// Only once either one of these methods does that (that is, once the queue
/// has been \em drained) can an instance of this class be destroyed.
/// Also note that applications must ensure that no work is enqueued on this
/// completion queue after this method is called.
void Shutdown();
/// Returns a \em raw pointer to the underlying \a grpc_completion_queue
/// instance.
///
/// \warning Remember that the returned instance is owned. No transfer of
/// owership is performed.
grpc_completion_queue* cq() { return cq_; }
protected:
/// Private constructor of CompletionQueue only visible to friend classes
CompletionQueue(const grpc_completion_queue_attributes& attributes) {
cq_ = g_core_codegen_interface->grpc_completion_queue_create(
g_core_codegen_interface->grpc_completion_queue_factory_lookup(
&attributes),
&attributes, NULL);
InitialAvalanching(); // reserve this for the future shutdown
}
private:
// Friend synchronous wrappers so that they can access Pluck(), which is
// a semi-private API geared towards the synchronous implementation.
template <class R>
friend class ::grpc::ClientReader;
template <class W>
friend class ::grpc::ClientWriter;
template <class W, class R>
friend class ::grpc::ClientReaderWriter;
template <class R>
friend class ::grpc::ServerReader;
template <class W>
friend class ::grpc::ServerWriter;
template <class W, class R>
friend class ::grpc::internal::ServerReaderWriterBody;
template <class ServiceType, class RequestType, class ResponseType>
friend class ::grpc::internal::RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ::grpc::internal::ClientStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ::grpc::internal::ServerStreamingHandler;
template <class Streamer, bool WriteNeeded>
friend class ::grpc::internal::TemplatedBidiStreamingHandler;
template <StatusCode code>
friend class ::grpc::internal::ErrorMethodHandler;
friend class ::grpc::Server;
friend class ::grpc::ServerContext;
friend class ::grpc::ServerInterface;
template <class InputMessage, class OutputMessage>
friend class ::grpc::internal::BlockingUnaryCallImpl;
// Friends that need access to constructor for callback CQ
friend class ::grpc::Channel;
// For access to Register/CompleteAvalanching
template <class Op1, class Op2, class Op3, class Op4, class Op5, class Op6>
friend class ::grpc::internal::CallOpSet;
/// EXPERIMENTAL
/// Creates a Thread Local cache to store the first event
/// On this completion queue queued from this thread. Once
/// initialized, it must be flushed on the same thread.
class CompletionQueueTLSCache {
public:
CompletionQueueTLSCache(CompletionQueue* cq);
~CompletionQueueTLSCache();
bool Flush(void** tag, bool* ok);
private:
CompletionQueue* cq_;
bool flushed_;
};
NextStatus AsyncNextInternal(void** tag, bool* ok, gpr_timespec deadline);
/// Wraps \a grpc_completion_queue_pluck.
/// \warning Must not be mixed with calls to \a Next.
bool Pluck(internal::CompletionQueueTag* tag) {
auto deadline =
g_core_codegen_interface->gpr_inf_future(GPR_CLOCK_REALTIME);
while (true) {
auto ev = g_core_codegen_interface->grpc_completion_queue_pluck(
cq_, tag, deadline, nullptr);
bool ok = ev.success != 0;
void* ignored = tag;
if (tag->FinalizeResult(&ignored, &ok)) {
GPR_CODEGEN_ASSERT(ignored == tag);
return ok;
}
}
}
/// Performs a single polling pluck on \a tag.
/// \warning Must not be mixed with calls to \a Next.
///
/// TODO: sreek - This calls tag->FinalizeResult() even if the cq_ is already
/// shutdown. This is most likely a bug and if it is a bug, then change this
/// implementation to simple call the other TryPluck function with a zero
/// timeout. i.e:
/// TryPluck(tag, gpr_time_0(GPR_CLOCK_REALTIME))
void TryPluck(internal::CompletionQueueTag* tag) {
auto deadline = g_core_codegen_interface->gpr_time_0(GPR_CLOCK_REALTIME);
auto ev = g_core_codegen_interface->grpc_completion_queue_pluck(
cq_, tag, deadline, nullptr);
if (ev.type == GRPC_QUEUE_TIMEOUT) return;
bool ok = ev.success != 0;
void* ignored = tag;
// the tag must be swallowed if using TryPluck
GPR_CODEGEN_ASSERT(!tag->FinalizeResult(&ignored, &ok));
}
/// Performs a single polling pluck on \a tag. Calls tag->FinalizeResult if
/// the pluck() was successful and returned the tag.
///
/// This exects tag->FinalizeResult (if called) to return 'false' i.e expects
/// that the tag is internal not something that is returned to the user.
void TryPluck(internal::CompletionQueueTag* tag, gpr_timespec deadline) {
auto ev = g_core_codegen_interface->grpc_completion_queue_pluck(
cq_, tag, deadline, nullptr);
if (ev.type == GRPC_QUEUE_TIMEOUT || ev.type == GRPC_QUEUE_SHUTDOWN) {
return;
}
bool ok = ev.success != 0;
void* ignored = tag;
GPR_CODEGEN_ASSERT(!tag->FinalizeResult(&ignored, &ok));
}
/// Manage state of avalanching operations : completion queue tags that
/// trigger other completion queue operations. The underlying core completion
/// queue should not really shutdown until all avalanching operations have
/// been finalized. Note that we maintain the requirement that an avalanche
/// registration must take place before CQ shutdown (which must be maintained
/// elsehwere)
void InitialAvalanching() {
gpr_atm_rel_store(&avalanches_in_flight_, static_cast<gpr_atm>(1));
}
void RegisterAvalanching() {
gpr_atm_no_barrier_fetch_add(&avalanches_in_flight_,
static_cast<gpr_atm>(1));
}
void CompleteAvalanching() {
if (gpr_atm_no_barrier_fetch_add(&avalanches_in_flight_,
static_cast<gpr_atm>(-1)) == 1) {
g_core_codegen_interface->grpc_completion_queue_shutdown(cq_);
}
}
grpc_completion_queue* cq_; // owned
gpr_atm avalanches_in_flight_;
};
/// A specific type of completion queue used by the processing of notifications
/// by servers. Instantiated by \a ServerBuilder.
class ServerCompletionQueue : public CompletionQueue {
public:
bool IsFrequentlyPolled() { return polling_type_ != GRPC_CQ_NON_LISTENING; }
protected:
/// Default constructor
ServerCompletionQueue() : polling_type_(GRPC_CQ_DEFAULT_POLLING) {}
private:
/// \param completion_type indicates whether this is a NEXT or CALLBACK
/// completion queue.
/// \param polling_type Informs the GRPC library about the type of polling
/// allowed on this completion queue. See grpc_cq_polling_type's description
/// in grpc_types.h for more details.
/// \param shutdown_cb is the shutdown callback used for CALLBACK api queues
ServerCompletionQueue(grpc_cq_completion_type completion_type,
grpc_cq_polling_type polling_type,
grpc_experimental_completion_queue_functor* shutdown_cb)
: CompletionQueue(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, completion_type, polling_type,
shutdown_cb}),
polling_type_(polling_type) {}
grpc_cq_polling_type polling_type_;
friend class ::grpc_impl::ServerBuilder;
friend class Server;
};
typedef ::grpc_impl::CompletionQueue CompletionQueue;
typedef ::grpc_impl::ServerCompletionQueue ServerCompletionQueue;
} // namespace grpc
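
The two typedefs left at the end of the stripped header are what keep existing references to grpc::CompletionQueue and grpc::ServerCompletionQueue compiling. A toy sketch of the folding pattern in isolation, with lib and lib_impl standing in for grpc and grpc_impl:

// The real class lives in an *_impl namespace; the original namespace keeps
// a typedef, so pre-existing call sites compile unchanged.
namespace lib_impl {
class CompletionQueue {
 public:
  bool Next(void** tag, bool* ok) { return false; }  // stub body for the sketch
};
class ServerCompletionQueue : public CompletionQueue {};
}  // namespace lib_impl

namespace lib {
typedef ::lib_impl::CompletionQueue CompletionQueue;
typedef ::lib_impl::ServerCompletionQueue ServerCompletionQueue;
}  // namespace lib

// Code written against the old spelling needs no edits:
void Drain(lib::CompletionQueue* cq) {
  void* tag = nullptr;
  bool ok = false;
  while (cq->Next(&tag, &ok)) {
  }
}

int main() {
  lib::ServerCompletionQueue cq;
  Drain(&cq);
  return 0;
}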

include/grpcpp/impl/codegen/completion_queue_impl.h
@ -0,0 +1,422 @@
/*
*
* Copyright 2015-2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/// A completion queue implements a concurrent producer-consumer queue, with
/// two main API-exposed methods: \a Next and \a AsyncNext. These
/// methods are the essential component of the gRPC C++ asynchronous API.
/// There is also a \a Shutdown method to indicate that a given completion queue
/// will no longer have regular events. This must be called before the
/// completion queue is destroyed.
/// All completion queue APIs are thread-safe and may be used concurrently with
/// any other completion queue API invocation; it is acceptable to have
/// multiple threads calling \a Next or \a AsyncNext on the same or different
/// completion queues, or to call these methods concurrently with a \a Shutdown
/// elsewhere.
/// \remark{All other API calls on completion queue should be completed before
/// a completion queue destructor is called.}
#ifndef GRPCPP_IMPL_CODEGEN_COMPLETION_QUEUE_IMPL_H
#define GRPCPP_IMPL_CODEGEN_COMPLETION_QUEUE_IMPL_H
#include <grpc/impl/codegen/atm.h>
#include <grpcpp/impl/codegen/completion_queue_tag.h>
#include <grpcpp/impl/codegen/core_codegen_interface.h>
#include <grpcpp/impl/codegen/grpc_library.h>
#include <grpcpp/impl/codegen/status.h>
#include <grpcpp/impl/codegen/time.h>
struct grpc_completion_queue;
namespace grpc_impl {
class ServerBuilder;
} // namespace grpc_impl
namespace grpc {
class Channel;
class Server;
template <class R>
class ClientReader;
template <class W>
class ClientWriter;
template <class W, class R>
class ClientReaderWriter;
template <class R>
class ServerReader;
template <class W>
class ServerWriter;
namespace internal {
template <class W, class R>
class ServerReaderWriterBody;
} // namespace internal
class ChannelInterface;
class ClientContext;
class ServerContext;
class ServerInterface;
namespace internal {
class CompletionQueueTag;
class RpcMethod;
template <class ServiceType, class RequestType, class ResponseType>
class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
class ClientStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
class ServerStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
class BidiStreamingHandler;
template <class Streamer, bool WriteNeeded>
class TemplatedBidiStreamingHandler;
template <StatusCode code>
class ErrorMethodHandler;
template <class InputMessage, class OutputMessage>
class BlockingUnaryCallImpl;
template <class Op1, class Op2, class Op3, class Op4, class Op5, class Op6>
class CallOpSet;
} // namespace internal
extern CoreCodegenInterface* g_core_codegen_interface;
} // namespace grpc
namespace grpc_impl {
/// A thin wrapper around \ref grpc_completion_queue (see \ref
/// src/core/lib/surface/completion_queue.h).
/// See \ref doc/cpp/perf_notes.md for notes on best practices for high
/// performance servers.
class CompletionQueue : private ::grpc::GrpcLibraryCodegen {
public:
/// Default constructor. Implicitly creates a \a grpc_completion_queue
/// instance.
CompletionQueue()
: CompletionQueue(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_NEXT, GRPC_CQ_DEFAULT_POLLING,
nullptr}) {}
/// Wrap \a take, taking ownership of the instance.
///
/// \param take The completion queue instance to wrap. Ownership is taken.
explicit CompletionQueue(grpc_completion_queue* take);
/// Destructor. Destroys the owned wrapped completion queue / instance.
~CompletionQueue() {
::grpc::g_core_codegen_interface->grpc_completion_queue_destroy(cq_);
}
/// Tri-state return for AsyncNext: SHUTDOWN, GOT_EVENT, TIMEOUT.
enum NextStatus {
SHUTDOWN, ///< The completion queue has been shutdown and fully-drained
GOT_EVENT, ///< Got a new event; \a tag will be filled in with its
///< associated value; \a ok indicating its success.
TIMEOUT ///< deadline was reached.
};
/// Read from the queue, blocking until an event is available or the queue is
/// shutting down.
///
/// \param tag [out] Updated to point to the read event's tag.
/// \param ok [out] true if read a successful event, false otherwise.
///
/// Note that each tag sent to the completion queue (through RPC operations
/// or alarms) will be delivered out of the completion queue by a call to
/// Next (or a related method), regardless of whether the operation succeeded
/// or not. Success here means that this operation completed in the normal
/// valid manner.
///
/// Server-side RPC request: \a ok indicates that the RPC has indeed
/// been started. If it is false, the server has been Shutdown
/// before this particular call got matched to an incoming RPC.
///
/// Client-side StartCall/RPC invocation: \a ok indicates that the RPC is
/// going to go to the wire. If it is false, it not going to the wire. This
/// would happen if the channel is either permanently broken or
/// transiently broken but with the fail-fast option. (Note that async unary
/// RPCs don't post a CQ tag at this point, nor do client-streaming
/// or bidi-streaming RPCs that have the initial metadata corked option set.)
///
/// Client-side Write, Client-side WritesDone, Server-side Write,
/// Server-side Finish, Server-side SendInitialMetadata (which is
/// typically included in Write or Finish when not done explicitly):
/// \a ok means that the data/metadata/status/etc is going to go to the
/// wire. If it is false, it not going to the wire because the call
/// is already dead (i.e., canceled, deadline expired, other side
/// dropped the channel, etc).
///
/// Client-side Read, Server-side Read, Client-side
/// RecvInitialMetadata (which is typically included in Read if not
/// done explicitly): \a ok indicates whether there is a valid message
/// that got read. If not, you know that there are certainly no more
/// messages that can ever be read from this stream. For the client-side
/// operations, this only happens because the call is dead. For the
/// server-sider operation, though, this could happen because the client
/// has done a WritesDone already.
///
/// Client-side Finish: \a ok should always be true
///
/// Server-side AsyncNotifyWhenDone: \a ok should always be true
///
/// Alarm: \a ok is true if it expired, false if it was canceled
///
/// \return true if got an event, false if the queue is fully drained and
/// shut down.
bool Next(void** tag, bool* ok) {
return (AsyncNextInternal(tag, ok,
::grpc::g_core_codegen_interface->gpr_inf_future(
GPR_CLOCK_REALTIME)) != SHUTDOWN);
}
/// Read from the queue, blocking up to \a deadline (or the queue's shutdown).
/// Both \a tag and \a ok are updated upon success (if an event is available
/// within the \a deadline). A \a tag points to an arbitrary location usually
/// employed to uniquely identify an event.
///
/// \param tag [out] Upon sucess, updated to point to the event's tag.
/// \param ok [out] Upon sucess, true if a successful event, false otherwise
/// See documentation for CompletionQueue::Next for explanation of ok
/// \param deadline [in] How long to block in wait for an event.
///
/// \return The type of event read.
template <typename T>
NextStatus AsyncNext(void** tag, bool* ok, const T& deadline) {
::grpc::TimePoint<T> deadline_tp(deadline);
return AsyncNextInternal(tag, ok, deadline_tp.raw_time());
}
/// EXPERIMENTAL
/// First executes \a F, then reads from the queue, blocking up to
/// \a deadline (or the queue's shutdown).
/// Both \a tag and \a ok are updated upon success (if an event is available
/// within the \a deadline). A \a tag points to an arbitrary location usually
/// employed to uniquely identify an event.
///
/// \param f [in] Function to execute before calling AsyncNext on this queue.
/// \param tag [out] Upon sucess, updated to point to the event's tag.
/// \param ok [out] Upon sucess, true if read a regular event, false
/// otherwise.
/// \param deadline [in] How long to block in wait for an event.
///
/// \return The type of event read.
template <typename T, typename F>
NextStatus DoThenAsyncNext(F&& f, void** tag, bool* ok, const T& deadline) {
CompletionQueueTLSCache cache = CompletionQueueTLSCache(this);
f();
if (cache.Flush(tag, ok)) {
return GOT_EVENT;
} else {
return AsyncNext(tag, ok, deadline);
}
}
/// Request the shutdown of the queue.
///
/// \warning This method must be called at some point if this completion queue
/// is accessed with Next or AsyncNext. \a Next will not return false
/// until this method has been called and all pending tags have been drained.
/// (Likewise for \a AsyncNext returning \a NextStatus::SHUTDOWN .)
/// Only once either one of these methods does that (that is, once the queue
/// has been \em drained) can an instance of this class be destroyed.
/// Also note that applications must ensure that no work is enqueued on this
/// completion queue after this method is called.
void Shutdown();
/// Returns a \em raw pointer to the underlying \a grpc_completion_queue
/// instance.
///
/// \warning Remember that the returned instance is owned. No transfer of
/// owership is performed.
grpc_completion_queue* cq() { return cq_; }
protected:
/// Private constructor of CompletionQueue only visible to friend classes
CompletionQueue(const grpc_completion_queue_attributes& attributes) {
cq_ = ::grpc::g_core_codegen_interface->grpc_completion_queue_create(
::grpc::g_core_codegen_interface->grpc_completion_queue_factory_lookup(
&attributes),
&attributes, NULL);
InitialAvalanching(); // reserve this for the future shutdown
}
private:
// Friend synchronous wrappers so that they can access Pluck(), which is
// a semi-private API geared towards the synchronous implementation.
template <class R>
friend class ::grpc::ClientReader;
template <class W>
friend class ::grpc::ClientWriter;
template <class W, class R>
friend class ::grpc::ClientReaderWriter;
template <class R>
friend class ::grpc::ServerReader;
template <class W>
friend class ::grpc::ServerWriter;
template <class W, class R>
friend class ::grpc::internal::ServerReaderWriterBody;
template <class ServiceType, class RequestType, class ResponseType>
friend class ::grpc::internal::RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ::grpc::internal::ClientStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ::grpc::internal::ServerStreamingHandler;
template <class Streamer, bool WriteNeeded>
friend class ::grpc::internal::TemplatedBidiStreamingHandler;
template <::grpc::StatusCode code>
friend class ::grpc::internal::ErrorMethodHandler;
friend class ::grpc::Server;
friend class ::grpc::ServerContext;
friend class ::grpc::ServerInterface;
template <class InputMessage, class OutputMessage>
friend class ::grpc::internal::BlockingUnaryCallImpl;
// Friends that need access to constructor for callback CQ
friend class ::grpc::Channel;
// For access to Register/CompleteAvalanching
template <class Op1, class Op2, class Op3, class Op4, class Op5, class Op6>
friend class ::grpc::internal::CallOpSet;
/// EXPERIMENTAL
/// Creates a Thread Local cache to store the first event
/// On this completion queue queued from this thread. Once
/// initialized, it must be flushed on the same thread.
class CompletionQueueTLSCache {
public:
CompletionQueueTLSCache(CompletionQueue* cq);
~CompletionQueueTLSCache();
bool Flush(void** tag, bool* ok);
private:
CompletionQueue* cq_;
bool flushed_;
};
NextStatus AsyncNextInternal(void** tag, bool* ok, gpr_timespec deadline);
/// Wraps \a grpc_completion_queue_pluck.
/// \warning Must not be mixed with calls to \a Next.
bool Pluck(::grpc::internal::CompletionQueueTag* tag) {
auto deadline =
::grpc::g_core_codegen_interface->gpr_inf_future(GPR_CLOCK_REALTIME);
while (true) {
auto ev = ::grpc::g_core_codegen_interface->grpc_completion_queue_pluck(
cq_, tag, deadline, nullptr);
bool ok = ev.success != 0;
void* ignored = tag;
if (tag->FinalizeResult(&ignored, &ok)) {
GPR_CODEGEN_ASSERT(ignored == tag);
return ok;
}
}
}
/// Performs a single polling pluck on \a tag.
/// \warning Must not be mixed with calls to \a Next.
///
/// TODO: sreek - This calls tag->FinalizeResult() even if the cq_ is already
/// shutdown. This is most likely a bug and if it is a bug, then change this
/// implementation to simple call the other TryPluck function with a zero
/// timeout. i.e:
/// TryPluck(tag, gpr_time_0(GPR_CLOCK_REALTIME))
void TryPluck(::grpc::internal::CompletionQueueTag* tag) {
auto deadline =
::grpc::g_core_codegen_interface->gpr_time_0(GPR_CLOCK_REALTIME);
auto ev = ::grpc::g_core_codegen_interface->grpc_completion_queue_pluck(
cq_, tag, deadline, nullptr);
if (ev.type == GRPC_QUEUE_TIMEOUT) return;
bool ok = ev.success != 0;
void* ignored = tag;
// the tag must be swallowed if using TryPluck
GPR_CODEGEN_ASSERT(!tag->FinalizeResult(&ignored, &ok));
}
/// Performs a single polling pluck on \a tag. Calls tag->FinalizeResult if
/// the pluck() was successful and returned the tag.
///
/// This exects tag->FinalizeResult (if called) to return 'false' i.e expects
/// that the tag is internal not something that is returned to the user.
void TryPluck(::grpc::internal::CompletionQueueTag* tag,
gpr_timespec deadline) {
auto ev = ::grpc::g_core_codegen_interface->grpc_completion_queue_pluck(
cq_, tag, deadline, nullptr);
if (ev.type == GRPC_QUEUE_TIMEOUT || ev.type == GRPC_QUEUE_SHUTDOWN) {
return;
}
bool ok = ev.success != 0;
void* ignored = tag;
GPR_CODEGEN_ASSERT(!tag->FinalizeResult(&ignored, &ok));
}
/// Manage state of avalanching operations : completion queue tags that
/// trigger other completion queue operations. The underlying core completion
/// queue should not really shutdown until all avalanching operations have
/// been finalized. Note that we maintain the requirement that an avalanche
/// registration must take place before CQ shutdown (which must be maintained
/// elsehwere)
void InitialAvalanching() {
gpr_atm_rel_store(&avalanches_in_flight_, static_cast<gpr_atm>(1));
}
void RegisterAvalanching() {
gpr_atm_no_barrier_fetch_add(&avalanches_in_flight_,
static_cast<gpr_atm>(1));
}
void CompleteAvalanching() {
if (gpr_atm_no_barrier_fetch_add(&avalanches_in_flight_,
static_cast<gpr_atm>(-1)) == 1) {
::grpc::g_core_codegen_interface->grpc_completion_queue_shutdown(cq_);
}
}
grpc_completion_queue* cq_; // owned
gpr_atm avalanches_in_flight_;
};
/// A specific type of completion queue used by the processing of notifications
/// by servers. Instantiated by \a ServerBuilder.
class ServerCompletionQueue : public CompletionQueue {
public:
bool IsFrequentlyPolled() { return polling_type_ != GRPC_CQ_NON_LISTENING; }
protected:
/// Default constructor
ServerCompletionQueue() : polling_type_(GRPC_CQ_DEFAULT_POLLING) {}
private:
/// \param completion_type indicates whether this is a NEXT or CALLBACK
/// completion queue.
/// \param polling_type Informs the GRPC library about the type of polling
/// allowed on this completion queue. See grpc_cq_polling_type's description
/// in grpc_types.h for more details.
/// \param shutdown_cb is the shutdown callback used for CALLBACK api queues
ServerCompletionQueue(grpc_cq_completion_type completion_type,
grpc_cq_polling_type polling_type,
grpc_experimental_completion_queue_functor* shutdown_cb)
: CompletionQueue(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, completion_type, polling_type,
shutdown_cb}),
polling_type_(polling_type) {}
grpc_cq_polling_type polling_type_;
friend class ::grpc_impl::ServerBuilder;
friend class ::grpc::Server;
};
} // namespace grpc_impl
#endif // GRPCPP_IMPL_CODEGEN_COMPLETION_QUEUE_IMPL_H
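
The class body above is byte-for-byte the old one; only its namespace changed. For context, the Next/AsyncNext contract described in its comments is normally consumed in a drain loop like this sketch (tag dispatch and the 100 ms deadline are illustrative):

#include <chrono>
#include <grpcpp/grpcpp.h>

// Blocking drain loop: Next() returns false only after Shutdown() has been
// called and every pending tag has been delivered. Works identically whether
// the type is spelled grpc::CompletionQueue or grpc_impl::CompletionQueue.
void PollQueue(grpc::CompletionQueue* cq) {
  void* tag = nullptr;
  bool ok = false;
  while (cq->Next(&tag, &ok)) {
    if (!ok) {
      // The operation behind `tag` did not complete normally; see the
      // per-operation meanings of `ok` in the header comments above.
      continue;
    }
    // Dispatch on `tag` here; real code usually casts it to a state object.
  }
}

// Non-blocking variant with a deadline, using AsyncNext.
bool PollOnce(grpc::CompletionQueue* cq) {
  void* tag = nullptr;
  bool ok = false;
  auto deadline =
      std::chrono::system_clock::now() + std::chrono::milliseconds(100);
  switch (cq->AsyncNext(&tag, &ok, deadline)) {
    case grpc::CompletionQueue::GOT_EVENT:
      return true;  // handle `tag` / `ok` as in PollQueue
    case grpc::CompletionQueue::TIMEOUT:
      return true;  // nothing ready yet; the caller may poll again
    case grpc::CompletionQueue::SHUTDOWN:
    default:
      return false;  // queue fully drained after Shutdown()
  }
}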

include/grpcpp/impl/codegen/intercepted_channel.h
@ -21,6 +21,10 @@
#include <grpcpp/impl/codegen/channel_interface.h>
namespace grpc_impl {
class CompletionQueue;
}
namespace grpc {
namespace internal {
@ -46,7 +50,7 @@ class InterceptedChannel : public ChannelInterface {
: channel_(channel), interceptor_pos_(pos) {}
Call CreateCall(const RpcMethod& method, ClientContext* context,
CompletionQueue* cq) override {
::grpc_impl::CompletionQueue* cq) override {
return channel_->CreateCallInternal(method, context, cq, interceptor_pos_);
}
@ -58,7 +62,8 @@ class InterceptedChannel : public ChannelInterface {
}
void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline, CompletionQueue* cq,
gpr_timespec deadline,
::grpc_impl::CompletionQueue* cq,
void* tag) override {
return channel_->NotifyOnStateChangeImpl(last_observed, deadline, cq, tag);
}
@ -67,7 +72,9 @@ class InterceptedChannel : public ChannelInterface {
return channel_->WaitForStateChangeImpl(last_observed, deadline);
}
CompletionQueue* CallbackCQ() override { return channel_->CallbackCQ(); }
::grpc_impl::CompletionQueue* CallbackCQ() override {
return channel_->CallbackCQ();
}
ChannelInterface* channel_;
size_t interceptor_pos_;

include/grpcpp/impl/codegen/server_context.h
@ -41,10 +41,13 @@ struct grpc_metadata;
struct grpc_call;
struct census_context;
namespace grpc_impl {
class CompletionQueue;
} // namespace grpc_impl
namespace grpc {
class ClientContext;
class GenericServerContext;
class CompletionQueue;
class Server;
class ServerInterface;
template <class W, class R>
@ -87,6 +90,7 @@ class Call;
class ServerReactor;
} // namespace internal
class ServerInterface;
namespace testing {
class InteropServerContextInspector;
class ServerContextTestSpouse;
@ -351,7 +355,7 @@ class ServerContext {
gpr_timespec deadline_;
grpc_call* call_;
CompletionQueue* cq_;
::grpc_impl::CompletionQueue* cq_;
bool sent_initial_metadata_;
mutable std::shared_ptr<const AuthContext> auth_context_;
mutable internal::MetadataMap client_metadata_;

include/grpcpp/impl/codegen/server_interface.h
@ -30,14 +30,14 @@
namespace grpc_impl {
class CompletionQueue;
class ServerCompletionQueue;
class ServerCredentials;
}
namespace grpc {
class AsyncGenericService;
class Channel;
class GenericServerContext;
class ServerCompletionQueue;
class ServerContext;
class Service;
@ -161,7 +161,8 @@ class ServerInterface : public internal::CallHook {
/// caller is required to keep all completion queues live until the server is
/// destroyed.
/// \param num_cqs How many completion queues does \a cqs hold.
virtual void Start(ServerCompletionQueue** cqs, size_t num_cqs) = 0;
virtual void Start(::grpc_impl::ServerCompletionQueue** cqs,
size_t num_cqs) = 0;
virtual void ShutdownInternal(gpr_timespec deadline) = 0;
@ -176,9 +177,9 @@ class ServerInterface : public internal::CallHook {
public:
BaseAsyncRequest(ServerInterface* server, ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag,
bool delete_on_finalize);
::grpc_impl::CompletionQueue* call_cq,
::grpc_impl::ServerCompletionQueue* notification_cq,
void* tag, bool delete_on_finalize);
virtual ~BaseAsyncRequest();
bool FinalizeResult(void** tag, bool* status) override;
@ -190,8 +191,8 @@ class ServerInterface : public internal::CallHook {
ServerInterface* const server_;
ServerContext* const context_;
internal::ServerAsyncStreamingInterface* const stream_;
CompletionQueue* const call_cq_;
ServerCompletionQueue* const notification_cq_;
::grpc_impl::CompletionQueue* const call_cq_;
::grpc_impl::ServerCompletionQueue* const notification_cq_;
void* const tag_;
const bool delete_on_finalize_;
grpc_call* call_;
@ -205,16 +206,17 @@ class ServerInterface : public internal::CallHook {
public:
RegisteredAsyncRequest(ServerInterface* server, ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag,
const char* name, internal::RpcMethod::RpcType type);
::grpc_impl::CompletionQueue* call_cq,
::grpc_impl::ServerCompletionQueue* notification_cq,
void* tag, const char* name,
internal::RpcMethod::RpcType type);
virtual bool FinalizeResult(void** tag, bool* status) override {
/* If we are done intercepting, then there is nothing more for us to do */
if (done_intercepting_) {
return BaseAsyncRequest::FinalizeResult(tag, status);
}
call_wrapper_ = internal::Call(
call_wrapper_ = ::grpc::internal::Call(
call_, server_, call_cq_, server_->max_receive_message_size(),
context_->set_server_rpc_info(name_, type_,
*server_->interceptor_creators()));
@ -223,7 +225,7 @@ class ServerInterface : public internal::CallHook {
protected:
void IssueRequest(void* registered_method, grpc_byte_buffer** payload,
ServerCompletionQueue* notification_cq);
::grpc_impl::ServerCompletionQueue* notification_cq);
const char* name_;
const internal::RpcMethod::RpcType type_;
};
@ -233,8 +235,9 @@ class ServerInterface : public internal::CallHook {
NoPayloadAsyncRequest(internal::RpcServiceMethod* registered_method,
ServerInterface* server, ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag)
::grpc_impl::CompletionQueue* call_cq,
::grpc_impl::ServerCompletionQueue* notification_cq,
void* tag)
: RegisteredAsyncRequest(
server, context, stream, call_cq, notification_cq, tag,
registered_method->name(), registered_method->method_type()) {
@ -250,9 +253,9 @@ class ServerInterface : public internal::CallHook {
PayloadAsyncRequest(internal::RpcServiceMethod* registered_method,
ServerInterface* server, ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag,
Message* request)
::grpc_impl::CompletionQueue* call_cq,
::grpc_impl::ServerCompletionQueue* notification_cq,
void* tag, Message* request)
: RegisteredAsyncRequest(
server, context, stream, call_cq, notification_cq, tag,
registered_method->name(), registered_method->method_type()),
@ -307,9 +310,9 @@ class ServerInterface : public internal::CallHook {
ServerInterface* const server_;
ServerContext* const context_;
internal::ServerAsyncStreamingInterface* const stream_;
CompletionQueue* const call_cq_;
::grpc_impl::CompletionQueue* const call_cq_;
ServerCompletionQueue* const notification_cq_;
::grpc_impl::ServerCompletionQueue* const notification_cq_;
void* const tag_;
Message* const request_;
ByteBuffer payload_;
@ -319,9 +322,9 @@ class ServerInterface : public internal::CallHook {
public:
GenericAsyncRequest(ServerInterface* server, GenericServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag,
bool delete_on_finalize);
::grpc_impl::CompletionQueue* call_cq,
::grpc_impl::ServerCompletionQueue* notification_cq,
void* tag, bool delete_on_finalize);
bool FinalizeResult(void** tag, bool* status) override;
@ -333,9 +336,9 @@ class ServerInterface : public internal::CallHook {
void RequestAsyncCall(internal::RpcServiceMethod* method,
ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag,
Message* message) {
::grpc_impl::CompletionQueue* call_cq,
::grpc_impl::ServerCompletionQueue* notification_cq,
void* tag, Message* message) {
GPR_CODEGEN_ASSERT(method);
new PayloadAsyncRequest<Message>(method, this, context, stream, call_cq,
notification_cq, tag, message);
@ -344,18 +347,19 @@ class ServerInterface : public internal::CallHook {
void RequestAsyncCall(internal::RpcServiceMethod* method,
ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag) {
::grpc_impl::CompletionQueue* call_cq,
::grpc_impl::ServerCompletionQueue* notification_cq,
void* tag) {
GPR_CODEGEN_ASSERT(method);
new NoPayloadAsyncRequest(method, this, context, stream, call_cq,
notification_cq, tag);
}
void RequestAsyncGenericCall(GenericServerContext* context,
void RequestAsyncGenericCall(
GenericServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq,
void* tag) {
::grpc_impl::CompletionQueue* call_cq,
::grpc_impl::ServerCompletionQueue* notification_cq, void* tag) {
new GenericAsyncRequest(this, context, stream, call_cq, notification_cq,
tag, true);
}
@ -380,7 +384,7 @@ class ServerInterface : public internal::CallHook {
// Returns nullptr (rather than being pure) since this is a post-1.0 method
// and adding a new pure method to an interface would be a breaking change
// (even though this is private and non-API)
virtual CompletionQueue* CallbackCQ() { return nullptr; }
virtual ::grpc_impl::CompletionQueue* CallbackCQ() { return nullptr; }
};
} // namespace grpc
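
The Request* helpers above are what the generated Request<Method> stubs and the generic service call into; call_cq and notification_cq are usually the same ServerCompletionQueue. A hedged end-to-end sketch using AsyncGenericService, so no generated code is needed; the address and tag value are illustrative, and the RequestCall signature is the regular gRPC C++ one rather than anything introduced by this diff:

#include <memory>
#include <grpcpp/grpcpp.h>
#include <grpcpp/generic/async_generic_service.h>

int main() {
  grpc::AsyncGenericService generic_service;
  grpc::ServerBuilder builder;
  builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
  builder.RegisterAsyncGenericService(&generic_service);
  std::unique_ptr<grpc::ServerCompletionQueue> cq = builder.AddCompletionQueue();
  std::unique_ptr<grpc::Server> server = builder.BuildAndStart();

  grpc::GenericServerContext ctx;
  grpc::GenericServerAsyncReaderWriter stream(&ctx);
  void* request_tag = reinterpret_cast<void*>(1);  // arbitrary tag value
  // The same queue serves as both call_cq and notification_cq, which is the
  // common arrangement the ServerInterface API above is built around.
  generic_service.RequestCall(&ctx, &stream, cq.get(), cq.get(), request_tag);

  void* got_tag = nullptr;
  bool ok = false;
  if (cq->Next(&got_tag, &ok) && ok && got_tag == request_tag) {
    // A call arrived; ctx.method() names it. Real code would now read the
    // request from `stream` and write a response.
  }

  server->Shutdown();
  cq->Shutdown();
  while (cq->Next(&got_tag, &ok)) {
    // Drain the queue before it is destroyed.
  }
  return 0;
}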

include/grpcpp/impl/codegen/server_interface.h.rej
@ -0,0 +1,19 @@
--- include/grpcpp/impl/codegen/server_interface.h
+++ include/grpcpp/impl/codegen/server_interface.h
@@ -29,14 +29,14 @@
#include <grpcpp/impl/codegen/server_context.h>
namespace grpc_impl {
-class Channel;
+class CompletionQueue;
+class ServerCompletionQueue;
class ServerCredentials;
} // namespace grpc_impl
namespace grpc {
class AsyncGenericService;
class GenericServerContext;
-class ServerCompletionQueue;
class ServerContext;
class Service;

include/grpcpp/impl/codegen/service_type.h
@ -28,10 +28,8 @@
namespace grpc {
class CompletionQueue;
class Server;
class ServerInterface;
class ServerCompletionQueue;
class ServerContext;
namespace internal {

include/grpcpp/server_builder.h
@ -21,12 +21,6 @@
#include <grpcpp/server_builder_impl.h>
namespace grpc_impl {
class ServerCredentials;
class ResourceQuota;
} // namespace grpc_impl
namespace grpc {
typedef ::grpc_impl::ServerBuilder ServerBuilder;

include/grpcpp/server_builder_impl.h
@ -37,15 +37,15 @@ struct grpc_resource_quota;
namespace grpc_impl {
class CompletionQueue;
class ResourceQuota;
class ServerCompletionQueue;
class ServerCredentials;
} // namespace grpc_impl
namespace grpc {
class AsyncGenericService;
class CompletionQueue;
class Server;
class ServerCompletionQueue;
class Service;
namespace testing {
@ -133,7 +133,7 @@ class ServerBuilder {
/// not polling the completion queue frequently) will have a significantly
/// negative performance impact and hence should not be used in production
/// use cases.
std::unique_ptr<grpc::ServerCompletionQueue> AddCompletionQueue(
std::unique_ptr<grpc_impl::ServerCompletionQueue> AddCompletionQueue(
bool is_frequently_polled = true);
//////////////////////////////////////////////////////////////////////////////
@ -327,7 +327,7 @@ class ServerBuilder {
SyncServerSettings sync_server_settings_;
/// List of completion queues added via \a AddCompletionQueue method.
std::vector<grpc::ServerCompletionQueue*> cqs_;
std::vector<grpc_impl::ServerCompletionQueue*> cqs_;
std::shared_ptr<grpc_impl::ServerCredentials> creds_;
std::vector<std::unique_ptr<grpc::ServerBuilderPlugin>> plugins_;
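
Since AddCompletionQueue now returns std::unique_ptr<grpc_impl::ServerCompletionQueue> while grpc::ServerCompletionQueue remains a typedef for the same class, existing declarations compile either way. A small sketch, assuming the post-commit headers:

#include <memory>
#include <type_traits>
#include <grpcpp/grpcpp.h>

// Both spellings name one type after this change.
static_assert(std::is_same<grpc::ServerCompletionQueue,
                           ::grpc_impl::ServerCompletionQueue>::value,
              "grpc::ServerCompletionQueue aliases the grpc_impl class");

std::unique_ptr<grpc::ServerCompletionQueue> MakeQueue(
    grpc::ServerBuilder* builder) {
  // is_frequently_polled defaults to true; per the warning in the header
  // comment above, rarely-polled queues should be avoided in production.
  return builder->AddCompletionQueue(/*is_frequently_polled=*/true);
}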

src/compiler/cpp_generator.cc
@ -145,10 +145,12 @@ grpc::string GetHeaderIncludes(grpc_generator::File* file,
PrintIncludes(printer.get(), headers, params.use_system_headers,
params.grpc_search_path);
printer->Print(vars, "\n");
printer->Print(vars, "namespace grpc {\n");
printer->Print(vars, "namespace grpc_impl {\n");
printer->Print(vars, "class CompletionQueue;\n");
printer->Print(vars, "class Channel;\n");
printer->Print(vars, "class ServerCompletionQueue;\n");
printer->Print(vars, "} // namespace grpc_impl\n\n");
printer->Print(vars, "namespace grpc {\n");
printer->Print(vars, "class Channel;\n");
printer->Print(vars, "class ServerContext;\n");
printer->Print(vars, "} // namespace grpc\n\n");

src/cpp/common/completion_queue_cc.cc
@ -24,9 +24,9 @@
#include <grpcpp/impl/grpc_library.h>
#include <grpcpp/support/time.h>
namespace grpc {
namespace grpc_impl {
static internal::GrpcLibraryInitializer g_gli_initializer;
static ::grpc::internal::GrpcLibraryInitializer g_gli_initializer;
// 'CompletionQueue' constructor can safely call GrpcLibraryCodegen(false) here
// i.e not have GrpcLibraryCodegen call grpc_init(). This is because, to create
@ -52,7 +52,8 @@ CompletionQueue::NextStatus CompletionQueue::AsyncNextInternal(
case GRPC_QUEUE_SHUTDOWN:
return SHUTDOWN;
case GRPC_OP_COMPLETE:
auto core_cq_tag = static_cast<internal::CompletionQueueTag*>(ev.tag);
auto core_cq_tag =
static_cast<::grpc::internal::CompletionQueueTag*>(ev.tag);
*ok = ev.success != 0;
*tag = core_cq_tag;
if (core_cq_tag->FinalizeResult(tag, ok)) {
@ -79,7 +80,8 @@ bool CompletionQueue::CompletionQueueTLSCache::Flush(void** tag, bool* ok) {
flushed_ = true;
if (grpc_completion_queue_thread_local_cache_flush(cq_->cq_, &res_tag,
&res)) {
auto core_cq_tag = static_cast<internal::CompletionQueueTag*>(res_tag);
auto core_cq_tag =
static_cast<::grpc::internal::CompletionQueueTag*>(res_tag);
*ok = res == 1;
if (core_cq_tag->FinalizeResult(tag, ok)) {
return true;
@ -88,4 +90,4 @@ bool CompletionQueue::CompletionQueueTLSCache::Flush(void** tag, bool* ok) {
return false;
}
} // namespace grpc
} // namespace grpc_impl

test/cpp/codegen/compiler_test_golden
@ -40,10 +40,12 @@
#include <grpcpp/impl/codegen/stub_options.h>
#include <grpcpp/impl/codegen/sync_stream.h>
namespace grpc {
namespace grpc_impl {
class CompletionQueue;
class Channel;
class ServerCompletionQueue;
} // namespace grpc_impl
namespace grpc {
class Channel;
class ServerContext;
} // namespace grpc

tools/doxygen/Doxyfile.c++
@ -958,6 +958,7 @@ include/grpcpp/impl/codegen/client_context.h \
include/grpcpp/impl/codegen/client_interceptor.h \
include/grpcpp/impl/codegen/client_unary_call.h \
include/grpcpp/impl/codegen/completion_queue.h \
include/grpcpp/impl/codegen/completion_queue_impl.h \
include/grpcpp/impl/codegen/completion_queue_tag.h \
include/grpcpp/impl/codegen/config.h \
include/grpcpp/impl/codegen/config_protobuf.h \

tools/doxygen/Doxyfile.c++.internal
@ -959,6 +959,7 @@ include/grpcpp/impl/codegen/client_context.h \
include/grpcpp/impl/codegen/client_interceptor.h \
include/grpcpp/impl/codegen/client_unary_call.h \
include/grpcpp/impl/codegen/completion_queue.h \
include/grpcpp/impl/codegen/completion_queue_impl.h \
include/grpcpp/impl/codegen/completion_queue_tag.h \
include/grpcpp/impl/codegen/config.h \
include/grpcpp/impl/codegen/config_protobuf.h \

tools/run_tests/generated/sources_and_headers.json
@ -9909,6 +9909,7 @@
"include/grpcpp/impl/codegen/client_interceptor.h",
"include/grpcpp/impl/codegen/client_unary_call.h",
"include/grpcpp/impl/codegen/completion_queue.h",
"include/grpcpp/impl/codegen/completion_queue_impl.h",
"include/grpcpp/impl/codegen/completion_queue_tag.h",
"include/grpcpp/impl/codegen/config.h",
"include/grpcpp/impl/codegen/core_codegen_interface.h",
@ -9985,6 +9986,7 @@
"include/grpcpp/impl/codegen/client_interceptor.h",
"include/grpcpp/impl/codegen/client_unary_call.h",
"include/grpcpp/impl/codegen/completion_queue.h",
"include/grpcpp/impl/codegen/completion_queue_impl.h",
"include/grpcpp/impl/codegen/completion_queue_tag.h",
"include/grpcpp/impl/codegen/config.h",
"include/grpcpp/impl/codegen/core_codegen_interface.h",
