Merge pull request #1066 from yang-g/clangformat

clang-format c++ code
pull/1099/head
Vijay Pai 10 years ago
commit f0b6e26f16
40 changed files:

 1. include/grpc++/async_generic_service.h (5 changes)
 2. include/grpc++/async_unary_call.h (7 changes)
 3. include/grpc++/byte_buffer.h (4 changes)
 4. include/grpc++/channel_interface.h (4 changes)
 5. include/grpc++/client_context.h (30 changes)
 6. include/grpc++/completion_queue.h (37 changes)
 7. include/grpc++/config.h (7 changes)
 8. include/grpc++/generic_stub.h (2 changes)
 9. include/grpc++/impl/call.h (72 changes)
10. include/grpc++/impl/client_unary_call.h (8 changes)
11. src/cpp/client/channel.h (10 changes)
12. src/cpp/client/channel_arguments.cc (18 changes)
13. src/cpp/client/client_context.cc (8 changes)
14. src/cpp/client/client_unary_call.cc (8 changes)
15. src/cpp/client/create_channel.cc (9 changes)
16. src/cpp/proto/proto_utils.cc (22 changes)
17. src/cpp/proto/proto_utils.h (6 changes)
18. src/cpp/server/async_generic_service.cc (3 changes)
19. src/cpp/server/async_server_context.cc (16 changes)
20. src/cpp/server/insecure_server_credentials.cc (3 changes)
21. src/cpp/server/secure_server_credentials.cc (12 changes)
22. src/cpp/server/server.cc (5 changes)
23. src/cpp/server/server_builder.cc (3 changes)
24. src/cpp/server/thread_pool.cc (4 changes)
25. src/cpp/server/thread_pool.h (2 changes)
26. src/cpp/util/slice.cc (4 changes)
27. src/cpp/util/status.cc (4 changes)
28. src/cpp/util/time.cc (4 changes)
29. src/cpp/util/time.h (4 changes)
30. test/cpp/client/credentials_test.cc (2 changes)
31. test/cpp/end2end/async_end2end_test.cc (28 changes)
32. test/cpp/end2end/generic_end2end_test.cc (9 changes)
33. test/cpp/interop/client.cc (7 changes)
34. test/cpp/interop/interop_test.cc (16 changes)
35. test/cpp/qps/client_async.cc (52 changes)
36. test/cpp/qps/qps_driver.cc (2 changes)
37. test/cpp/qps/server.cc (4 changes)
38. test/cpp/qps/server_async.cc (44 changes)
39. test/cpp/util/create_test_channel.cc (3 changes)
40. test/cpp/util/status_test.cc (2 changes)
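
Every hunk below is formatting-only: clang-format (Google C++ style) rebinds '*' and '&' to the type, collapses trivial single-statement bodies onto one line, rewraps argument lists at the 80-column limit, and normalizes spacing before trailing comments and inside enum braces. A minimal sketch of the before/after effect, using a hypothetical Example class rather than code from this commit:

// Hypothetical illustration of the rules applied throughout this diff;
// none of these names come from the commit itself.
#include <string>

struct grpc_call;  // opaque stand-in type

class Example {
 public:
  // '*' binds to the type ("grpc_call* call()", not "grpc_call *call()"),
  // and a trivial body collapses onto one line.
  grpc_call* call() { return call_; }

  // Long parameter lists wrap at 80 columns, with continuation lines
  // aligned under the opening parenthesis.
  void AddMetadata(const std::string& meta_key,
                   const std::string& meta_value);

 private:
  grpc_call* call_ = nullptr;  // two spaces before a trailing comment
};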

@@ -41,7 +41,8 @@ struct grpc_server;
 namespace grpc {
-typedef ServerAsyncReaderWriter<ByteBuffer, ByteBuffer> GenericServerAsyncReaderWriter;
+typedef ServerAsyncReaderWriter<ByteBuffer, ByteBuffer>
+GenericServerAsyncReaderWriter;
 class GenericServerContext GRPC_FINAL : public ServerContext {
 public:
@@ -74,6 +75,6 @@ class AsyncGenericService GRPC_FINAL {
 Server* server_;
 };
-} // namespace grpc
+}  // namespace grpc
 #endif  // GRPCXX_ASYNC_GENERIC_SERVICE_H

@@ -48,10 +48,9 @@ template <class R>
 class ClientAsyncResponseReader GRPC_FINAL {
 public:
 ClientAsyncResponseReader(ChannelInterface* channel, CompletionQueue* cq,
-const RpcMethod& method, ClientContext* context,
-const grpc::protobuf::Message& request, void* tag)
-: context_(context),
-call_(channel->CreateCall(method, context, cq)) {
+const RpcMethod& method, ClientContext* context,
+const grpc::protobuf::Message& request, void* tag)
+: context_(context), call_(channel->CreateCall(method, context, cq)) {
 init_buf_.Reset(tag);
 init_buf_.AddSendInitialMetadata(&context->send_initial_metadata_);
 init_buf_.AddSendMessage(request);

@@ -72,9 +72,7 @@ class ByteBuffer GRPC_FINAL {
 buffer_ = buf;
 }
-grpc_byte_buffer* buffer() const {
-return buffer_;
-}
+grpc_byte_buffer* buffer() const { return buffer_; }
 grpc_byte_buffer* buffer_;
 };

@@ -51,8 +51,8 @@ class ChannelInterface : public CallHook {
 public:
 virtual ~ChannelInterface() {}
-virtual Call CreateCall(const RpcMethod &method, ClientContext *context,
-CompletionQueue *cq) = 0;
+virtual Call CreateCall(const RpcMethod& method, ClientContext* context,
+CompletionQueue* cq) = 0;
 };
 } // namespace grpc

@@ -74,8 +74,8 @@ class ClientContext {
 ClientContext();
 ~ClientContext();
-void AddMetadata(const grpc::string &meta_key,
-const grpc::string &meta_value);
+void AddMetadata(const grpc::string& meta_key,
+const grpc::string& meta_value);
 const std::multimap<grpc::string, grpc::string>& GetServerInitialMetadata() {
 GPR_ASSERT(initial_metadata_received_);
@@ -87,19 +87,17 @@ class ClientContext {
 return trailing_metadata_;
 }
-void set_absolute_deadline(const system_clock::time_point &deadline);
+void set_absolute_deadline(const system_clock::time_point& deadline);
 system_clock::time_point absolute_deadline();
-void set_authority(const grpc::string& authority) {
-authority_ = authority;
-}
+void set_authority(const grpc::string& authority) { authority_ = authority; }
 void TryCancel();
 private:
 // Disallow copy and assign.
-ClientContext(const ClientContext &);
-ClientContext &operator=(const ClientContext &);
+ClientContext(const ClientContext&);
+ClientContext& operator=(const ClientContext&);
 friend class CallOpBuffer;
 friend class Channel;
@@ -118,24 +116,22 @@ class ClientContext {
 template <class R>
 friend class ::grpc::ClientAsyncResponseReader;
-grpc_call *call() { return call_; }
-void set_call(grpc_call *call) {
+grpc_call* call() { return call_; }
+void set_call(grpc_call* call) {
 GPR_ASSERT(call_ == nullptr);
 call_ = call;
 }
-grpc_completion_queue *cq() { return cq_; }
-void set_cq(grpc_completion_queue *cq) { cq_ = cq; }
+grpc_completion_queue* cq() { return cq_; }
+void set_cq(grpc_completion_queue* cq) { cq_ = cq; }
 gpr_timespec RawDeadline() { return absolute_deadline_; }
-grpc::string authority() {
-return authority_;
-}
+grpc::string authority() { return authority_; }
 bool initial_metadata_received_;
-grpc_call *call_;
-grpc_completion_queue *cq_;
+grpc_call* call_;
+grpc_completion_queue* cq_;
 gpr_timespec absolute_deadline_;
 grpc::string authority_;
 std::multimap<grpc::string, grpc::string> send_initial_metadata_;

@@ -66,37 +66,38 @@ class CompletionQueueTag {
 // to do)
 // If this function returns false, the tag is dropped and not returned
 // from the completion queue
-virtual bool FinalizeResult(void **tag, bool *status) = 0;
+virtual bool FinalizeResult(void** tag, bool* status) = 0;
 };
 // grpc_completion_queue wrapper class
 class CompletionQueue {
 public:
 CompletionQueue();
-explicit CompletionQueue(grpc_completion_queue *take);
+explicit CompletionQueue(grpc_completion_queue* take);
 ~CompletionQueue();
 // Tri-state return for AsyncNext: SHUTDOWN, GOT_EVENT, TIMEOUT
-enum NextStatus {SHUTDOWN, GOT_EVENT, TIMEOUT};
+enum NextStatus { SHUTDOWN, GOT_EVENT, TIMEOUT };
 // Nonblocking (until deadline) read from queue.
 // Cannot rely on result of tag or ok if return is TIMEOUT
-NextStatus AsyncNext(void **tag, bool *ok,
-std::chrono::system_clock::time_point deadline);
+NextStatus AsyncNext(void** tag, bool* ok,
+std::chrono::system_clock::time_point deadline);
 // Blocking (until deadline) read from queue.
 // Returns false if the queue is ready for destruction, true if event
-bool Next(void **tag, bool *ok) {
-return (AsyncNext(tag,ok,
-(std::chrono::system_clock::time_point::max)()) !=
-SHUTDOWN);
+bool Next(void** tag, bool* ok) {
+return (
+AsyncNext(tag, ok, (std::chrono::system_clock::time_point::max)()) !=
+SHUTDOWN);
 }
 // Shutdown has to be called, and the CompletionQueue can only be
 // destructed when false is returned from Next().
 void Shutdown();
-grpc_completion_queue *cq() { return cq_; }
+grpc_completion_queue* cq() { return cq_; }
 private:
 // Friend synchronous wrappers so that they can access Pluck(), which is
@@ -115,20 +116,20 @@ class CompletionQueue {
 friend class ::grpc::ServerReaderWriter;
 friend class ::grpc::Server;
 friend class ::grpc::ServerContext;
-friend Status BlockingUnaryCall(ChannelInterface *channel,
-const RpcMethod &method,
-ClientContext *context,
-const grpc::protobuf::Message &request,
-grpc::protobuf::Message *result);
+friend Status BlockingUnaryCall(ChannelInterface* channel,
+const RpcMethod& method,
+ClientContext* context,
+const grpc::protobuf::Message& request,
+grpc::protobuf::Message* result);
 // Wraps grpc_completion_queue_pluck.
 // Cannot be mixed with calls to Next().
-bool Pluck(CompletionQueueTag *tag);
+bool Pluck(CompletionQueueTag* tag);
 // Does a single polling pluck on tag
-void TryPluck(CompletionQueueTag *tag);
+void TryPluck(CompletionQueueTag* tag);
-grpc_completion_queue *cq_; // owned
+grpc_completion_queue* cq_; // owned
 };
 } // namespace grpc

@@ -59,11 +59,12 @@
 #ifndef GRPC_CUSTOM_ZEROCOPYOUTPUTSTREAM
 #include <google/protobuf/io/zero_copy_stream.h>
-#define GRPC_CUSTOM_ZEROCOPYOUTPUTSTREAM ::google::protobuf::io::ZeroCopyOutputStream
-#define GRPC_CUSTOM_ZEROCOPYINPUTSTREAM ::google::protobuf::io::ZeroCopyInputStream
+#define GRPC_CUSTOM_ZEROCOPYOUTPUTSTREAM \
+::google::protobuf::io::ZeroCopyOutputStream
+#define GRPC_CUSTOM_ZEROCOPYINPUTSTREAM \
+::google::protobuf::io::ZeroCopyInputStream
 #endif
 namespace grpc {
 typedef GRPC_CUSTOM_STRING string;

@@ -57,6 +57,6 @@ class GenericStub GRPC_FINAL {
 std::shared_ptr<ChannelInterface> channel_;
 };
-} // namespace grpc
+}  // namespace grpc
 #endif  // GRPCXX_GENERIC_STUB_H

@@ -55,89 +55,89 @@ class CallOpBuffer : public CompletionQueueTag {
 CallOpBuffer();
 ~CallOpBuffer();
-void Reset(void *next_return_tag);
+void Reset(void* next_return_tag);
 // Does not take ownership.
 void AddSendInitialMetadata(
-std::multimap<grpc::string, grpc::string> *metadata);
-void AddSendInitialMetadata(ClientContext *ctx);
-void AddRecvInitialMetadata(ClientContext *ctx);
-void AddSendMessage(const grpc::protobuf::Message &message);
+std::multimap<grpc::string, grpc::string>* metadata);
+void AddSendInitialMetadata(ClientContext* ctx);
+void AddRecvInitialMetadata(ClientContext* ctx);
+void AddSendMessage(const grpc::protobuf::Message& message);
 void AddSendMessage(const ByteBuffer& message);
-void AddRecvMessage(grpc::protobuf::Message *message);
-void AddRecvMessage(ByteBuffer *message);
+void AddRecvMessage(grpc::protobuf::Message* message);
+void AddRecvMessage(ByteBuffer* message);
 void AddClientSendClose();
-void AddClientRecvStatus(ClientContext *ctx, Status *status);
-void AddServerSendStatus(std::multimap<grpc::string, grpc::string> *metadata,
-const Status &status);
-void AddServerRecvClose(bool *cancelled);
+void AddClientRecvStatus(ClientContext* ctx, Status* status);
+void AddServerSendStatus(std::multimap<grpc::string, grpc::string>* metadata,
+const Status& status);
+void AddServerRecvClose(bool* cancelled);
 // INTERNAL API:
 // Convert to an array of grpc_op elements
-void FillOps(grpc_op *ops, size_t *nops);
+void FillOps(grpc_op* ops, size_t* nops);
 // Called by completion queue just prior to returning from Next() or Pluck()
-bool FinalizeResult(void **tag, bool *status) GRPC_OVERRIDE;
+bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE;
 bool got_message;
 private:
-void *return_tag_;
+void* return_tag_;
 // Send initial metadata
 bool send_initial_metadata_;
 size_t initial_metadata_count_;
-grpc_metadata *initial_metadata_;
+grpc_metadata* initial_metadata_;
 // Recv initial metadta
-std::multimap<grpc::string, grpc::string> *recv_initial_metadata_;
+std::multimap<grpc::string, grpc::string>* recv_initial_metadata_;
 grpc_metadata_array recv_initial_metadata_arr_;
 // Send message
-const grpc::protobuf::Message *send_message_;
-const ByteBuffer *send_message_buffer_;
-grpc_byte_buffer *send_buf_;
+const grpc::protobuf::Message* send_message_;
+const ByteBuffer* send_message_buffer_;
+grpc_byte_buffer* send_buf_;
 // Recv message
-grpc::protobuf::Message *recv_message_;
-ByteBuffer *recv_message_buffer_;
-grpc_byte_buffer *recv_buf_;
+grpc::protobuf::Message* recv_message_;
+ByteBuffer* recv_message_buffer_;
+grpc_byte_buffer* recv_buf_;
 // Client send close
 bool client_send_close_;
 // Client recv status
-std::multimap<grpc::string, grpc::string> *recv_trailing_metadata_;
-Status *recv_status_;
+std::multimap<grpc::string, grpc::string>* recv_trailing_metadata_;
+Status* recv_status_;
 grpc_metadata_array recv_trailing_metadata_arr_;
 grpc_status_code status_code_;
-char *status_details_;
+char* status_details_;
 size_t status_details_capacity_;
 // Server send status
-const Status *send_status_;
+const Status* send_status_;
 size_t trailing_metadata_count_;
-grpc_metadata *trailing_metadata_;
+grpc_metadata* trailing_metadata_;
 int cancelled_buf_;
-bool *recv_closed_;
+bool* recv_closed_;
 };
 // Channel and Server implement this to allow them to hook performing ops
 class CallHook {
 public:
 virtual ~CallHook() {}
-virtual void PerformOpsOnCall(CallOpBuffer *ops, Call *call) = 0;
+virtual void PerformOpsOnCall(CallOpBuffer* ops, Call* call) = 0;
 };
 // Straightforward wrapping of the C call object
 class Call GRPC_FINAL {
 public:
 /* call is owned by the caller */
-Call(grpc_call *call, CallHook *call_hook_, CompletionQueue *cq);
+Call(grpc_call* call, CallHook* call_hook_, CompletionQueue* cq);
-void PerformOps(CallOpBuffer *buffer);
+void PerformOps(CallOpBuffer* buffer);
-grpc_call *call() { return call_; }
-CompletionQueue *cq() { return cq_; }
+grpc_call* call() { return call_; }
+CompletionQueue* cq() { return cq_; }
 private:
-CallHook *call_hook_;
-CompletionQueue *cq_;
-grpc_call *call_;
+CallHook* call_hook_;
+CompletionQueue* cq_;
+grpc_call* call_;
 };
 } // namespace grpc

@@ -45,10 +45,10 @@ class RpcMethod;
 class Status;
 // Wrapper that performs a blocking unary call
-Status BlockingUnaryCall(ChannelInterface *channel, const RpcMethod &method,
-ClientContext *context,
-const grpc::protobuf::Message &request,
-grpc::protobuf::Message *result);
+Status BlockingUnaryCall(ChannelInterface* channel, const RpcMethod& method,
+ClientContext* context,
+const grpc::protobuf::Message& request,
+grpc::protobuf::Message* result);
 } // namespace grpc

@@ -51,16 +51,16 @@ class StreamContextInterface;
 class Channel GRPC_FINAL : public ChannelInterface {
 public:
-Channel(const grpc::string &target, grpc_channel *c_channel);
+Channel(const grpc::string& target, grpc_channel* c_channel);
 ~Channel() GRPC_OVERRIDE;
-virtual Call CreateCall(const RpcMethod &method, ClientContext *context,
-CompletionQueue *cq) GRPC_OVERRIDE;
-virtual void PerformOpsOnCall(CallOpBuffer *ops, Call *call) GRPC_OVERRIDE;
+virtual Call CreateCall(const RpcMethod& method, ClientContext* context,
+CompletionQueue* cq) GRPC_OVERRIDE;
+virtual void PerformOpsOnCall(CallOpBuffer* ops, Call* call) GRPC_OVERRIDE;
 private:
 const grpc::string target_;
-grpc_channel *const c_channel_; // owned
+grpc_channel* const c_channel_; // owned
 };
 } // namespace grpc

@@ -37,7 +37,7 @@
 namespace grpc {
-void ChannelArguments::SetSslTargetNameOverride(const grpc::string &name) {
+void ChannelArguments::SetSslTargetNameOverride(const grpc::string& name) {
 SetString(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG, name);
 }
@@ -50,32 +50,32 @@ grpc::string ChannelArguments::GetSslTargetNameOverride() const {
 return "";
 }
-void ChannelArguments::SetInt(const grpc::string &key, int value) {
+void ChannelArguments::SetInt(const grpc::string& key, int value) {
 grpc_arg arg;
 arg.type = GRPC_ARG_INTEGER;
 strings_.push_back(key);
-arg.key = const_cast<char *>(strings_.back().c_str());
+arg.key = const_cast<char*>(strings_.back().c_str());
 arg.value.integer = value;
 args_.push_back(arg);
 }
-void ChannelArguments::SetString(const grpc::string &key,
-const grpc::string &value) {
+void ChannelArguments::SetString(const grpc::string& key,
+const grpc::string& value) {
 grpc_arg arg;
 arg.type = GRPC_ARG_STRING;
 strings_.push_back(key);
-arg.key = const_cast<char *>(strings_.back().c_str());
+arg.key = const_cast<char*>(strings_.back().c_str());
 strings_.push_back(value);
-arg.value.string = const_cast<char *>(strings_.back().c_str());
+arg.value.string = const_cast<char*>(strings_.back().c_str());
 args_.push_back(arg);
 }
-void ChannelArguments::SetChannelArgs(grpc_channel_args *channel_args) const {
+void ChannelArguments::SetChannelArgs(grpc_channel_args* channel_args) const {
 channel_args->num_args = args_.size();
 if (channel_args->num_args > 0) {
-channel_args->args = const_cast<grpc_arg *>(&args_[0]);
+channel_args->args = const_cast<grpc_arg*>(&args_[0]);
 }
 }

@@ -53,7 +53,7 @@ ClientContext::~ClientContext() {
 if (cq_) {
 grpc_completion_queue_shutdown(cq_);
 // Drain cq_.
-grpc_event *ev;
+grpc_event* ev;
 grpc_completion_type t;
 do {
 ev = grpc_completion_queue_next(cq_, gpr_inf_future);
@@ -65,7 +65,7 @@ ClientContext::~ClientContext() {
 }
 void ClientContext::set_absolute_deadline(
-const system_clock::time_point &deadline) {
+const system_clock::time_point& deadline) {
 Timepoint2Timespec(deadline, &absolute_deadline_);
 }
@@ -73,8 +73,8 @@ system_clock::time_point ClientContext::absolute_deadline() {
 return Timespec2Timepoint(absolute_deadline_);
 }
-void ClientContext::AddMetadata(const grpc::string &meta_key,
-const grpc::string &meta_value) {
+void ClientContext::AddMetadata(const grpc::string& meta_key,
+const grpc::string& meta_value) {
 send_initial_metadata_.insert(std::make_pair(meta_key, meta_value));
 }

@@ -42,10 +42,10 @@
 namespace grpc {
 // Wrapper that performs a blocking unary call
-Status BlockingUnaryCall(ChannelInterface *channel, const RpcMethod &method,
-ClientContext *context,
-const grpc::protobuf::Message &request,
-grpc::protobuf::Message *result) {
+Status BlockingUnaryCall(ChannelInterface* channel, const RpcMethod& method,
+ClientContext* context,
+const grpc::protobuf::Message& request,
+grpc::protobuf::Message* result) {
 CompletionQueue cq;
 Call call(channel->CreateCall(method, context, &cq));
 CallOpBuffer buf;

@@ -41,9 +41,10 @@ namespace grpc {
 class ChannelArguments;
 std::shared_ptr<ChannelInterface> CreateChannel(
-const grpc::string &target, const std::unique_ptr<Credentials> &creds,
-const ChannelArguments &args) {
-return creds ? creds->CreateChannel(target, args) :
-std::shared_ptr<ChannelInterface>(new Channel(target, grpc_lame_client_channel_create()));
+const grpc::string& target, const std::unique_ptr<Credentials>& creds,
+const ChannelArguments& args) {
+return creds ? creds->CreateChannel(target, args)
+: std::shared_ptr<ChannelInterface>(
+new Channel(target, grpc_lame_client_channel_create()));
 }
 } // namespace grpc

@@ -45,7 +45,7 @@ const int kMaxBufferLength = 8192;
 class GrpcBufferWriter GRPC_FINAL
 : public ::grpc::protobuf::io::ZeroCopyOutputStream {
 public:
-explicit GrpcBufferWriter(grpc_byte_buffer **bp,
+explicit GrpcBufferWriter(grpc_byte_buffer** bp,
 int block_size = kMaxBufferLength)
 : block_size_(block_size), byte_count_(0), have_backup_(false) {
 *bp = grpc_byte_buffer_create(NULL, 0);
@@ -58,7 +58,7 @@ class GrpcBufferWriter GRPC_FINAL
 }
 }
-bool Next(void **data, int *size) GRPC_OVERRIDE {
+bool Next(void** data, int* size) GRPC_OVERRIDE {
 if (have_backup_) {
 slice_ = backup_slice_;
 have_backup_ = false;
@@ -89,7 +89,7 @@ class GrpcBufferWriter GRPC_FINAL
 private:
 const int block_size_;
 gpr_int64 byte_count_;
-gpr_slice_buffer *slice_buffer_;
+gpr_slice_buffer* slice_buffer_;
 bool have_backup_;
 gpr_slice backup_slice_;
 gpr_slice slice_;
@@ -98,7 +98,7 @@ class GrpcBufferWriter GRPC_FINAL
 class GrpcBufferReader GRPC_FINAL
 : public ::grpc::protobuf::io::ZeroCopyInputStream {
 public:
-explicit GrpcBufferReader(grpc_byte_buffer *buffer)
+explicit GrpcBufferReader(grpc_byte_buffer* buffer)
 : byte_count_(0), backup_count_(0) {
 reader_ = grpc_byte_buffer_reader_create(buffer);
 }
@@ -106,7 +106,7 @@ class GrpcBufferReader GRPC_FINAL
 grpc_byte_buffer_reader_destroy(reader_);
 }
-bool Next(const void **data, int *size) GRPC_OVERRIDE {
+bool Next(const void** data, int* size) GRPC_OVERRIDE {
 if (backup_count_ > 0) {
 *data = GPR_SLICE_START_PTR(slice_) + GPR_SLICE_LENGTH(slice_) -
 backup_count_;
@@ -123,12 +123,10 @@ class GrpcBufferReader GRPC_FINAL
 return true;
 }
-void BackUp(int count) GRPC_OVERRIDE {
-backup_count_ = count;
-}
+void BackUp(int count) GRPC_OVERRIDE { backup_count_ = count; }
 bool Skip(int count) GRPC_OVERRIDE {
-const void *data;
+const void* data;
 int size;
 while (Next(&data, &size)) {
 if (size >= count) {
@@ -149,18 +147,18 @@ class GrpcBufferReader GRPC_FINAL
 private:
 gpr_int64 byte_count_;
 gpr_int64 backup_count_;
-grpc_byte_buffer_reader *reader_;
+grpc_byte_buffer_reader* reader_;
 gpr_slice slice_;
 };
 namespace grpc {
-bool SerializeProto(const grpc::protobuf::Message &msg, grpc_byte_buffer **bp) {
+bool SerializeProto(const grpc::protobuf::Message& msg, grpc_byte_buffer** bp) {
 GrpcBufferWriter writer(bp);
 return msg.SerializeToZeroCopyStream(&writer);
 }
-bool DeserializeProto(grpc_byte_buffer *buffer, grpc::protobuf::Message *msg) {
+bool DeserializeProto(grpc_byte_buffer* buffer, grpc::protobuf::Message* msg) {
 GrpcBufferReader reader(buffer);
 return msg->ParseFromZeroCopyStream(&reader);
 }

@@ -43,11 +43,11 @@ namespace grpc {
 // Serialize the msg into a buffer created inside the function. The caller
 // should destroy the returned buffer when done with it. If serialization fails,
 // false is returned and buffer is left unchanged.
-bool SerializeProto(const grpc::protobuf::Message &msg,
-grpc_byte_buffer **buffer);
+bool SerializeProto(const grpc::protobuf::Message& msg,
+grpc_byte_buffer** buffer);
 // The caller keeps ownership of buffer and msg.
-bool DeserializeProto(grpc_byte_buffer *buffer, grpc::protobuf::Message *msg);
+bool DeserializeProto(grpc_byte_buffer* buffer, grpc::protobuf::Message* msg);
 } // namespace grpc

@@ -47,5 +47,4 @@ CompletionQueue* AsyncGenericService::completion_queue() {
 return &server_->cq_;
 }
-} // namespace grpc
+}  // namespace grpc

@@ -42,7 +42,7 @@
 namespace grpc {
 AsyncServerContext::AsyncServerContext(
-grpc_call *call, const grpc::string &method, const grpc::string &host,
+grpc_call* call, const grpc::string& method, const grpc::string& host,
 system_clock::time_point absolute_deadline)
 : method_(method),
 host_(host),
@@ -52,22 +52,22 @@ AsyncServerContext::AsyncServerContext(
 AsyncServerContext::~AsyncServerContext() { grpc_call_destroy(call_); }
-void AsyncServerContext::Accept(grpc_completion_queue *cq) {
+void AsyncServerContext::Accept(grpc_completion_queue* cq) {
 GPR_ASSERT(grpc_call_server_accept_old(call_, cq, this) == GRPC_CALL_OK);
 GPR_ASSERT(grpc_call_server_end_initial_metadata_old(
 call_, GRPC_WRITE_BUFFER_HINT) == GRPC_CALL_OK);
 }
-bool AsyncServerContext::StartRead(grpc::protobuf::Message *request) {
+bool AsyncServerContext::StartRead(grpc::protobuf::Message* request) {
 GPR_ASSERT(request);
 request_ = request;
 grpc_call_error err = grpc_call_start_read_old(call_, this);
 return err == GRPC_CALL_OK;
 }
-bool AsyncServerContext::StartWrite(const grpc::protobuf::Message &response,
+bool AsyncServerContext::StartWrite(const grpc::protobuf::Message& response,
 int flags) {
-grpc_byte_buffer *buffer = nullptr;
+grpc_byte_buffer* buffer = nullptr;
 if (!SerializeProto(response, &buffer)) {
 return false;
 }
@@ -76,16 +76,16 @@ bool AsyncServerContext::StartWrite(const grpc::protobuf::Message &response,
 return err == GRPC_CALL_OK;
 }
-bool AsyncServerContext::StartWriteStatus(const Status &status) {
+bool AsyncServerContext::StartWriteStatus(const Status& status) {
 grpc_call_error err = grpc_call_start_write_status_old(
 call_, static_cast<grpc_status_code>(status.code()),
 status.details().empty() ? nullptr
-: const_cast<char *>(status.details().c_str()),
+: const_cast<char*>(status.details().c_str()),
 this);
 return err == GRPC_CALL_OK;
 }
-bool AsyncServerContext::ParseRead(grpc_byte_buffer *read_buffer) {
+bool AsyncServerContext::ParseRead(grpc_byte_buffer* read_buffer) {
 GPR_ASSERT(request_);
 bool success = DeserializeProto(read_buffer, request_);
 request_ = nullptr;

@@ -46,7 +46,8 @@ class InsecureServerCredentialsImpl GRPC_FINAL : public ServerCredentials {
 } // namespace
 std::shared_ptr<ServerCredentials> InsecureServerCredentials() {
-return std::shared_ptr<ServerCredentials>(new InsecureServerCredentialsImpl());
+return std::shared_ptr<ServerCredentials>(
+new InsecureServerCredentialsImpl());
 }
 } // namespace grpc

@@ -40,7 +40,8 @@ namespace grpc {
 namespace {
 class SecureServerCredentials GRPC_FINAL : public ServerCredentials {
 public:
-explicit SecureServerCredentials(grpc_server_credentials* creds) : creds_(creds) {}
+explicit SecureServerCredentials(grpc_server_credentials* creds)
+: creds_(creds) {}
 ~SecureServerCredentials() GRPC_OVERRIDE {
 grpc_server_credentials_release(creds_);
 }
@@ -56,16 +57,17 @@ class SecureServerCredentials GRPC_FINAL : public ServerCredentials {
 } // namespace
 std::shared_ptr<ServerCredentials> SslServerCredentials(
-const SslServerCredentialsOptions &options) {
+const SslServerCredentialsOptions& options) {
 std::vector<grpc_ssl_pem_key_cert_pair> pem_key_cert_pairs;
-for (const auto &key_cert_pair : options.pem_key_cert_pairs) {
+for (const auto& key_cert_pair : options.pem_key_cert_pairs) {
 pem_key_cert_pairs.push_back(
 {key_cert_pair.private_key.c_str(), key_cert_pair.cert_chain.c_str()});
 }
-grpc_server_credentials *c_creds = grpc_ssl_server_credentials_create(
+grpc_server_credentials* c_creds = grpc_ssl_server_credentials_create(
 options.pem_root_certs.empty() ? nullptr : options.pem_root_certs.c_str(),
 &pem_key_cert_pairs[0], pem_key_cert_pairs.size());
-return std::shared_ptr<ServerCredentials>(new SecureServerCredentials(c_creds));
+return std::shared_ptr<ServerCredentials>(
+new SecureServerCredentials(c_creds));
 }
 } // namespace grpc

@@ -322,11 +322,10 @@ class Server::AsyncRequest GRPC_FINAL : public CompletionQueueTag {
 payload_(nullptr) {
 memset(&array_, 0, sizeof(array_));
 grpc_call_details_init(&call_details_);
-grpc_server_request_call(
-server->server_, &call_, &call_details_, &array_, cq->cq(), this);
+grpc_server_request_call(server->server_, &call_, &call_details_, &array_,
+cq->cq(), this);
 }
 ~AsyncRequest() {
 if (payload_) {
 grpc_byte_buffer_destroy(payload_);

@@ -56,7 +56,8 @@ void ServerBuilder::RegisterAsyncGenericService(AsyncGenericService* service) {
 if (generic_service_) {
 gpr_log(GPR_ERROR,
 "Adding multiple AsyncGenericService is unsupported for now. "
-"Dropping the service %p", service);
+"Dropping the service %p",
+service);
 return;
 }
 generic_service_ = service;

@@ -66,12 +66,12 @@ ThreadPool::~ThreadPool() {
 shutdown_ = true;
 cv_.notify_all();
 }
-for (auto &t : threads_) {
+for (auto& t : threads_) {
 t.join();
 }
 }
-void ThreadPool::ScheduleCallback(const std::function<void()> &callback) {
+void ThreadPool::ScheduleCallback(const std::function<void()>& callback) {
 std::lock_guard<std::mutex> lock(mu_);
 callbacks_.push(callback);
 cv_.notify_one();

@@ -50,7 +50,7 @@ class ThreadPool GRPC_FINAL : public ThreadPoolInterface {
 explicit ThreadPool(int num_threads);
 ~ThreadPool();
-void ScheduleCallback(const std::function<void()> &callback) GRPC_OVERRIDE;
+void ScheduleCallback(const std::function<void()>& callback) GRPC_OVERRIDE;
 private:
 std::mutex mu_;

@@ -37,9 +37,7 @@ namespace grpc {
 Slice::Slice() : slice_(gpr_empty_slice()) {}
-Slice::~Slice() {
-gpr_slice_unref(slice_);
-}
+Slice::~Slice() { gpr_slice_unref(slice_); }
 Slice::Slice(gpr_slice slice, AddRef) : slice_(gpr_slice_ref(slice)) {}

@@ -35,7 +35,7 @@
 namespace grpc {
-const Status &Status::OK = Status();
-const Status &Status::Cancelled = Status(StatusCode::CANCELLED);
+const Status& Status::OK = Status();
+const Status& Status::Cancelled = Status(StatusCode::CANCELLED);
 } // namespace grpc

@@ -43,8 +43,8 @@ using std::chrono::system_clock;
 namespace grpc {
 // TODO(yangg) prevent potential overflow.
-void Timepoint2Timespec(const system_clock::time_point &from,
-gpr_timespec *to) {
+void Timepoint2Timespec(const system_clock::time_point& from,
+gpr_timespec* to) {
 system_clock::duration deadline = from.time_since_epoch();
 seconds secs = duration_cast<seconds>(deadline);
 nanoseconds nsecs = duration_cast<nanoseconds>(deadline - secs);

@@ -41,8 +41,8 @@
 namespace grpc {
 // from and to should be absolute time.
-void Timepoint2Timespec(const std::chrono::system_clock::time_point &from,
-gpr_timespec *to);
+void Timepoint2Timespec(const std::chrono::system_clock::time_point& from,
+gpr_timespec* to);
 std::chrono::system_clock::time_point Timespec2Timepoint(gpr_timespec t);

@@ -54,7 +54,7 @@ TEST_F(CredentialsTest, InvalidServiceAccountCreds) {
 } // namespace testing
 } // namespace grpc
-int main(int argc, char **argv) {
+int main(int argc, char** argv) {
 ::testing::InitGoogleTest(&argc, argv);
 grpc_init();
 int ret = RUN_ALL_TESTS();

@@ -66,7 +66,7 @@ namespace testing {
 namespace {
-void* tag(int i) { return (void*)(gpr_intptr)i; }
+void* tag(int i) { return (void*)(gpr_intptr) i; }
 void verify_ok(CompletionQueue* cq, int i, bool expect_ok) {
 bool ok;
@@ -76,11 +76,11 @@ void verify_ok(CompletionQueue* cq, int i, bool expect_ok) {
 EXPECT_EQ(tag(i), got_tag);
 }
-void verify_timed_ok(CompletionQueue* cq, int i, bool expect_ok,
-std::chrono::system_clock::time_point deadline =
-std::chrono::system_clock::time_point::max(),
-CompletionQueue::NextStatus expected_outcome =
-CompletionQueue::GOT_EVENT) {
+void verify_timed_ok(
+CompletionQueue* cq, int i, bool expect_ok,
+std::chrono::system_clock::time_point deadline =
+std::chrono::system_clock::time_point::max(),
+CompletionQueue::NextStatus expected_outcome = CompletionQueue::GOT_EVENT) {
 bool ok;
 void* got_tag;
 EXPECT_EQ(cq->AsyncNext(&got_tag, &ok, deadline), expected_outcome);
@@ -195,18 +195,17 @@ TEST_F(AsyncEnd2endTest, AsyncNextRpc) {
 grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
 send_request.set_message("Hello");
-std::unique_ptr<ClientAsyncResponseReader<EchoResponse> >
-response_reader(stub_->AsyncEcho(&cli_ctx, send_request,
-&cli_cq_, tag(1)));
+std::unique_ptr<ClientAsyncResponseReader<EchoResponse> > response_reader(
+stub_->AsyncEcho(&cli_ctx, send_request, &cli_cq_, tag(1)));
-std::chrono::system_clock::time_point
-time_now(std::chrono::system_clock::now()),
-time_limit(std::chrono::system_clock::now()+std::chrono::seconds(5));
+std::chrono::system_clock::time_point time_now(
+std::chrono::system_clock::now()),
+time_limit(std::chrono::system_clock::now() + std::chrono::seconds(5));
 verify_timed_ok(&srv_cq_, -1, true, time_now, CompletionQueue::TIMEOUT);
 verify_timed_ok(&cli_cq_, -1, true, time_now, CompletionQueue::TIMEOUT);
 service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, &srv_cq_,
-tag(2));
+tag(2));
 verify_timed_ok(&srv_cq_, 2, true, time_limit);
 EXPECT_EQ(send_request.message(), recv_request.message());
@@ -221,9 +220,8 @@ TEST_F(AsyncEnd2endTest, AsyncNextRpc) {
 EXPECT_EQ(send_response.message(), recv_response.message());
 EXPECT_TRUE(recv_status.IsOk());
 }
 // Two pings and a final pong.
 TEST_F(AsyncEnd2endTest, SimpleClientStreaming) {
 ResetStub();

@@ -68,7 +68,7 @@ namespace grpc {
 namespace testing {
 namespace {
-void* tag(int i) { return (void*)(gpr_intptr)i; }
+void* tag(int i) { return (void*)(gpr_intptr) i; }
 void verify_ok(CompletionQueue* cq, int i, bool expect_ok) {
 bool ok;
@@ -91,7 +91,7 @@ bool ParseFromByteBuffer(ByteBuffer* buffer, grpc::protobuf::Message* message) {
 class GenericEnd2endTest : public ::testing::Test {
 protected:
-GenericEnd2endTest() : generic_service_("*") {}
+GenericEnd2endTest() : generic_service_("*") {}
 void SetUp() GRPC_OVERRIDE {
 int port = grpc_pick_unused_port_or_die();
@@ -116,8 +116,8 @@ class GenericEnd2endTest : public ::testing::Test {
 }
 void ResetStub() {
-std::shared_ptr<ChannelInterface> channel =
-CreateChannel(server_address_.str(), InsecureCredentials(), ChannelArguments());
+std::shared_ptr<ChannelInterface> channel = CreateChannel(
+server_address_.str(), InsecureCredentials(), ChannelArguments());
 stub_ = std::move(grpc::cpp::test::util::TestService::NewStub(channel));
 }
@@ -238,7 +238,6 @@ TEST_F(GenericEnd2endTest, SimpleBidiStreaming) {
 client_ok(6);
 EXPECT_EQ(send_response.message(), recv_response.message());
 cli_stream->WritesDone(tag(7));
 client_ok(7);

@@ -165,8 +165,8 @@ void AssertOkOrPrintErrorStatus(const grpc::Status& s) {
 if (s.IsOk()) {
 return;
 }
-gpr_log(GPR_INFO, "Error status code: %d, message: %s",
-s.code(), s.details().c_str());
+gpr_log(GPR_INFO, "Error status code: %d, message: %s", s.code(),
+s.details().c_str());
 GPR_ASSERT(0);
 }
@@ -246,8 +246,7 @@ void DoServiceAccountCreds() {
 }
 void DoJwtTokenCreds() {
-gpr_log(GPR_INFO,
-"Sending a large unary rpc with JWT token credentials ...");
+gpr_log(GPR_INFO, "Sending a large unary rpc with JWT token credentials ...");
 std::shared_ptr<ChannelInterface> channel =
 CreateChannelForTestCase("jwt_token_creds");
 SimpleRequest request;

@@ -54,13 +54,13 @@ extern "C" {
 #include <grpc/support/log.h>
 #include "test/core/util/port.h"
-int test_client(const char *root, const char *host, int port) {
+int test_client(const char* root, const char* host, int port) {
 int status;
 pid_t cli;
 cli = fork();
 if (cli == 0) {
-char *binary_path;
-char *port_arg;
+char* binary_path;
+char* port_arg;
 gpr_asprintf(&binary_path, "%s/interop_client", root);
 gpr_asprintf(&port_arg, "--server_port=%d", port);
@@ -78,9 +78,9 @@ int test_client(const char *root, const char *host, int port) {
 return 0;
 }
-int main(int argc, char **argv) {
-char *me = argv[0];
-char *lslash = strrchr(me, '/');
+int main(int argc, char** argv) {
+char* me = argv[0];
+char* lslash = strrchr(me, '/');
 char root[1024];
 int port = grpc_pick_unused_port_or_die();
 int status;
@@ -104,8 +104,8 @@ int main(int argc, char **argv) {
 /* start the server */
 svr = fork();
 if (svr == 0) {
-char *binary_path;
-char *port_arg;
+char* binary_path;
+char* port_arg;
 gpr_asprintf(&binary_path, "%s/interop_server", root);
 gpr_asprintf(&port_arg, "--port=%d", port);

@@ -61,23 +61,23 @@ class ClientRpcContext {
 virtual ~ClientRpcContext() {}
 virtual bool RunNextState() = 0; // do next state, return false if steps done
 virtual void StartNewClone() = 0;
-static void *tag(ClientRpcContext *c) { return reinterpret_cast<void *>(c); }
-static ClientRpcContext *detag(void *t) {
-return reinterpret_cast<ClientRpcContext *>(t);
+static void* tag(ClientRpcContext* c) { return reinterpret_cast<void*>(c); }
+static ClientRpcContext* detag(void* t) {
+return reinterpret_cast<ClientRpcContext*>(t);
 }
-virtual void report_stats(Histogram *hist) = 0;
+virtual void report_stats(Histogram* hist) = 0;
 };
 template <class RequestType, class ResponseType>
 class ClientRpcContextUnaryImpl : public ClientRpcContext {
 public:
 ClientRpcContextUnaryImpl(
-TestService::Stub *stub, const RequestType &req,
+TestService::Stub* stub, const RequestType& req,
 std::function<
 std::unique_ptr<grpc::ClientAsyncResponseReader<ResponseType>>(
-TestService::Stub *, grpc::ClientContext *, const RequestType &,
-void *)> start_req,
-std::function<void(grpc::Status, ResponseType *)> on_done)
+TestService::Stub*, grpc::ClientContext*, const RequestType&,
+void*)> start_req,
+std::function<void(grpc::Status, ResponseType*)> on_done)
 : context_(),
 stub_(stub),
 req_(req),
@@ -90,7 +90,7 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext {
 start_req(stub_, &context_, req_, ClientRpcContext::tag(this))) {}
 ~ClientRpcContextUnaryImpl() GRPC_OVERRIDE {}
 bool RunNextState() GRPC_OVERRIDE { return (this->*next_state_)(); }
-void report_stats(Histogram *hist) GRPC_OVERRIDE {
+void report_stats(Histogram* hist) GRPC_OVERRIDE {
 hist->Add((Timer::Now() - start_) * 1e9);
 }
@@ -113,13 +113,13 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext {
 return false;
 }
 grpc::ClientContext context_;
-TestService::Stub *stub_;
+TestService::Stub* stub_;
 RequestType req_;
 ResponseType response_;
 bool (ClientRpcContextUnaryImpl::*next_state_)();
-std::function<void(grpc::Status, ResponseType *)> callback_;
+std::function<void(grpc::Status, ResponseType*)> callback_;
 std::function<std::unique_ptr<grpc::ClientAsyncResponseReader<ResponseType>>(
-TestService::Stub *, grpc::ClientContext *, const RequestType &, void *)>
+TestService::Stub*, grpc::ClientContext*, const RequestType&, void*)>
 start_req_;
 grpc::Status status_;
 double start_;
@@ -129,13 +129,13 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext {
 class AsyncClient GRPC_FINAL : public Client {
 public:
-explicit AsyncClient(const ClientConfig &config) : Client(config) {
+explicit AsyncClient(const ClientConfig& config) : Client(config) {
 for (int i = 0; i < config.async_client_threads(); i++) {
 cli_cqs_.emplace_back(new CompletionQueue);
 }
 auto payload_size = config.payload_size();
-auto check_done = [payload_size](grpc::Status s, SimpleResponse *response) {
+auto check_done = [payload_size](grpc::Status s, SimpleResponse* response) {
 GPR_ASSERT(s.IsOk() && (response->payload().type() ==
 grpc::testing::PayloadType::COMPRESSABLE) &&
 (response->payload().body().length() ==
@@ -144,16 +144,16 @@ class AsyncClient GRPC_FINAL : public Client {
 int t = 0;
 for (int i = 0; i < config.outstanding_rpcs_per_channel(); i++) {
-for (auto &channel : channels_) {
-auto *cq = cli_cqs_[t].get();
+for (auto& channel : channels_) {
+auto* cq = cli_cqs_[t].get();
 t = (t + 1) % cli_cqs_.size();
-auto start_req = [cq](TestService::Stub *stub, grpc::ClientContext *ctx,
-const SimpleRequest &request, void *tag) {
+auto start_req = [cq](TestService::Stub* stub, grpc::ClientContext* ctx,
+const SimpleRequest& request, void* tag) {
 return stub->AsyncUnaryCall(ctx, request, cq, tag);
 };
-TestService::Stub *stub = channel.get_stub();
-const SimpleRequest &request = request_;
+TestService::Stub* stub = channel.get_stub();
+const SimpleRequest& request = request_;
 new ClientRpcContextUnaryImpl<SimpleRequest, SimpleResponse>(
 stub, request, start_req, check_done);
 }
@@ -165,9 +165,9 @@ class AsyncClient GRPC_FINAL : public Client {
 ~AsyncClient() GRPC_OVERRIDE {
 EndThreads();
-for (auto &cq : cli_cqs_) {
+for (auto& cq : cli_cqs_) {
 cq->Shutdown();
-void *got_tag;
+void* got_tag;
 bool ok;
 while (cq->Next(&got_tag, &ok)) {
 delete ClientRpcContext::detag(got_tag);
@@ -175,12 +175,12 @@ class AsyncClient GRPC_FINAL : public Client {
 }
 }
-void ThreadFunc(Histogram *histogram, size_t thread_idx) GRPC_OVERRIDE {
-void *got_tag;
+void ThreadFunc(Histogram* histogram, size_t thread_idx) GRPC_OVERRIDE {
+void* got_tag;
 bool ok;
 cli_cqs_[thread_idx]->Next(&got_tag, &ok);
-ClientRpcContext *ctx = ClientRpcContext::detag(got_tag);
+ClientRpcContext* ctx = ClientRpcContext::detag(got_tag);
 if (ctx->RunNextState() == false) {
 // call the callback and then delete it
 ctx->report_stats(histogram);
@@ -193,7 +193,7 @@ class AsyncClient GRPC_FINAL : public Client {
 std::vector<std::unique_ptr<CompletionQueue>> cli_cqs_;
 };
-std::unique_ptr<Client> CreateAsyncClient(const ClientConfig &args) {
+std::unique_ptr<Client> CreateAsyncClient(const ClientConfig& args) {
 return std::unique_ptr<Client>(new AsyncClient(args));
 }

@@ -69,7 +69,7 @@ namespace gflags {}
 using namespace google;
 using namespace gflags;
-int main(int argc, char **argv) {
+int main(int argc, char** argv) {
 grpc_init();
 ParseCommandLineFlags(&argc, &argv, true);

@@ -73,8 +73,8 @@ using grpc::Status;
 // In some distros, gflags is in the namespace google, and in some others,
 // in gflags. This hack is enabling us to find both.
-namespace google { }
-namespace gflags { }
+namespace google {}
+namespace gflags {}
 using namespace google;
 using namespace gflags;

@@ -62,9 +62,9 @@ namespace testing {
 class AsyncQpsServerTest : public Server {
 public:
-AsyncQpsServerTest(const ServerConfig &config, int port)
+AsyncQpsServerTest(const ServerConfig& config, int port)
 : srv_cq_(), async_service_(&srv_cq_), server_(nullptr) {
-char *server_address = NULL;
+char* server_address = NULL;
 gpr_join_host_port(&server_address, "::", port);
 ServerBuilder builder;
@@ -87,10 +87,10 @@ class AsyncQpsServerTest : public Server {
 threads_.push_back(std::thread([=]() {
 // Wait until work is available or we are shutting down
 bool ok;
-void *got_tag;
+void* got_tag;
 while (srv_cq_.Next(&got_tag, &ok)) {
 if (ok) {
-ServerRpcContext *ctx = detag(got_tag);
+ServerRpcContext* ctx = detag(got_tag);
 // The tag is a pointer to an RPC context to invoke
 if (ctx->RunNextState() == false) {
 // this RPC context is done, so refresh it
@@ -105,7 +105,7 @@ class AsyncQpsServerTest : public Server {
 ~AsyncQpsServerTest() {
 server_->Shutdown();
 srv_cq_.Shutdown();
-for (auto &thr : threads_) {
+for (auto& thr : threads_) {
 thr.join();
 }
 while (!contexts_.empty()) {
@@ -122,21 +122,21 @@ class AsyncQpsServerTest : public Server {
 virtual bool RunNextState() = 0; // do next state, return false if all done
 virtual void Reset() = 0; // start this back at a clean state
 };
-static void *tag(ServerRpcContext *func) {
-return reinterpret_cast<void *>(func);
+static void* tag(ServerRpcContext* func) {
+return reinterpret_cast<void*>(func);
 }
-static ServerRpcContext *detag(void *tag) {
-return reinterpret_cast<ServerRpcContext *>(tag);
+static ServerRpcContext* detag(void* tag) {
+return reinterpret_cast<ServerRpcContext*>(tag);
 }
 template <class RequestType, class ResponseType>
 class ServerRpcContextUnaryImpl : public ServerRpcContext {
 public:
 ServerRpcContextUnaryImpl(
-std::function<void(ServerContext *, RequestType *,
-grpc::ServerAsyncResponseWriter<ResponseType> *,
-void *)> request_method,
-std::function<grpc::Status(const RequestType *, ResponseType *)>
+std::function<void(ServerContext*, RequestType*,
+grpc::ServerAsyncResponseWriter<ResponseType>*,
+void*)> request_method,
+std::function<grpc::Status(const RequestType*, ResponseType*)>
 invoke_method)
 : next_state_(&ServerRpcContextUnaryImpl::invoker),
 request_method_(request_method),
@@ -175,16 +175,16 @@ class AsyncQpsServerTest : public Server {
 ServerContext srv_ctx_;
 RequestType req_;
 bool (ServerRpcContextUnaryImpl::*next_state_)();
-std::function<void(ServerContext *, RequestType *,
-grpc::ServerAsyncResponseWriter<ResponseType> *, void *)>
+std::function<void(ServerContext*, RequestType*,
+grpc::ServerAsyncResponseWriter<ResponseType>*, void*)>
 request_method_;
-std::function<grpc::Status(const RequestType *, ResponseType *)>
+std::function<grpc::Status(const RequestType*, ResponseType*)>
 invoke_method_;
 grpc::ServerAsyncResponseWriter<ResponseType> response_writer_;
 };
-static Status UnaryCall(const SimpleRequest *request,
-SimpleResponse *response) {
+static Status UnaryCall(const SimpleRequest* request,
+SimpleResponse* response) {
 if (request->has_response_size() && request->response_size() > 0) {
 if (!SetPayload(request->response_type(), request->response_size(),
 response->mutable_payload())) {
@@ -197,13 +197,13 @@ class AsyncQpsServerTest : public Server {
 TestService::AsyncService async_service_;
 std::vector<std::thread> threads_;
 std::unique_ptr<grpc::Server> server_;
-std::function<void(ServerContext *, SimpleRequest *,
-grpc::ServerAsyncResponseWriter<SimpleResponse> *, void *)>
+std::function<void(ServerContext*, SimpleRequest*,
+grpc::ServerAsyncResponseWriter<SimpleResponse>*, void*)>
 request_unary_;
-std::forward_list<ServerRpcContext *> contexts_;
+std::forward_list<ServerRpcContext*> contexts_;
 };
-std::unique_ptr<Server> CreateAsyncServer(const ServerConfig &config,
+std::unique_ptr<Server> CreateAsyncServer(const ServerConfig& config,
 int port) {
 return std::unique_ptr<Server>(new AsyncQpsServerTest(config, port));
 }

@@ -72,8 +72,7 @@ std::shared_ptr<ChannelInterface> CreateTestChannel(
 const grpc::string& connect_to =
 server.empty() ? override_hostname : server;
 if (creds.get()) {
-channel_creds =
-CompositeCredentials(creds, channel_creds);
+channel_creds = CompositeCredentials(creds, channel_creds);
 }
 return CreateChannel(connect_to, channel_creds, channel_args);
 } else {

@@ -36,7 +36,7 @@
 #include <grpc/support/log.h>
 // Make sure the existing grpc_status_code match with grpc::Code.
-int main(int argc, char **argv) {
+int main(int argc, char** argv) {
 GPR_ASSERT(grpc::StatusCode::OK ==
 static_cast<grpc::StatusCode>(GRPC_STATUS_OK));
 GPR_ASSERT(grpc::StatusCode::CANCELLED ==
