Merge pull request #24448 from veblush/clang-format-8

Upgrade clang-format 8
commit a62a70207c by Esun Kim, 4 years ago (committed via GitHub)
16 files changed (changed lines per file):

  136  include/grpcpp/impl/codegen/client_callback.h
  171  include/grpcpp/impl/codegen/server_callback_handlers.h
   22  src/core/ext/filters/client_channel/subchannel.cc
   72  src/core/lib/gprpp/thd_posix.cc
    6  src/core/lib/iomgr/executor/threadpool.h
    7  src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc
   16  src/cpp/client/client_callback.cc
   28  src/cpp/common/alarm.cc
   11  src/cpp/server/dynamic_thread_pool.cc
   34  src/cpp/server/server_callback.cc
    8  templates/tools/dockerfile/grpc_clang_format/Dockerfile.template
    4  templates/tools/dockerfile/test/sanity/Dockerfile.template
   12  test/cpp/microbenchmarks/bm_timer.cc
   15  test/cpp/util/slice_test.cc
    8  tools/dockerfile/grpc_clang_format/Dockerfile
    4  tools/dockerfile/test/sanity/Dockerfile
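
Every source hunk in this diff is a mechanical re-run of the formatter: clang-format 8 changed how call arguments are wrapped when one argument is a multi-line lambda. Version 7 kept the first argument on the call line and aligned the rest under the opening parenthesis; version 8 breaks immediately after the parenthesis and indents all arguments by one continuation level. A distilled before/after (hypothetical tag_/ops_ names, modeled on the hunks below):

    // clang-format 7: arguments aligned under the open parenthesis
    tag_.Set(call_.call(),
             [this](bool ok) {
               OnDone(ok);
             },
             &ops_, /*can_inline=*/false);

    // clang-format 8: break after the open parenthesis, continuation indent
    tag_.Set(
        call_.call(),
        [this](bool ok) {
          OnDone(ok);
        },
        &ops_, /*can_inline=*/false);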

include/grpcpp/impl/codegen/client_callback.h

@@ -537,12 +537,13 @@ class ClientCallbackReaderWriterImpl
   }
   void WritesDone() override {
     writes_done_ops_.ClientSendClose();
-    writes_done_tag_.Set(call_.call(),
-                         [this](bool ok) {
-                           reactor_->OnWritesDoneDone(ok);
-                           MaybeFinish(/*from_reaction=*/true);
-                         },
-                         &writes_done_ops_, /*can_inline=*/false);
+    writes_done_tag_.Set(
+        call_.call(),
+        [this](bool ok) {
+          reactor_->OnWritesDoneDone(ok);
+          MaybeFinish(/*from_reaction=*/true);
+        },
+        &writes_done_ops_, /*can_inline=*/false);
     writes_done_ops_.set_core_cq_tag(&writes_done_tag_);
     callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
     if (GPR_UNLIKELY(corked_write_needed_)) {
@@ -579,29 +580,32 @@ class ClientCallbackReaderWriterImpl
     this->BindReactor(reactor);
     // Set up the unchanging parts of the start, read, and write tags and ops.
-    start_tag_.Set(call_.call(),
-                   [this](bool ok) {
-                     reactor_->OnReadInitialMetadataDone(ok);
-                     MaybeFinish(/*from_reaction=*/true);
-                   },
-                   &start_ops_, /*can_inline=*/false);
+    start_tag_.Set(
+        call_.call(),
+        [this](bool ok) {
+          reactor_->OnReadInitialMetadataDone(ok);
+          MaybeFinish(/*from_reaction=*/true);
+        },
+        &start_ops_, /*can_inline=*/false);
     start_ops_.RecvInitialMetadata(context_);
     start_ops_.set_core_cq_tag(&start_tag_);
-    write_tag_.Set(call_.call(),
-                   [this](bool ok) {
-                     reactor_->OnWriteDone(ok);
-                     MaybeFinish(/*from_reaction=*/true);
-                   },
-                   &write_ops_, /*can_inline=*/false);
+    write_tag_.Set(
+        call_.call(),
+        [this](bool ok) {
+          reactor_->OnWriteDone(ok);
+          MaybeFinish(/*from_reaction=*/true);
+        },
+        &write_ops_, /*can_inline=*/false);
     write_ops_.set_core_cq_tag(&write_tag_);
-    read_tag_.Set(call_.call(),
-                  [this](bool ok) {
-                    reactor_->OnReadDone(ok);
-                    MaybeFinish(/*from_reaction=*/true);
-                  },
-                  &read_ops_, /*can_inline=*/false);
+    read_tag_.Set(
+        call_.call(),
+        [this](bool ok) {
+          reactor_->OnReadDone(ok);
+          MaybeFinish(/*from_reaction=*/true);
+        },
+        &read_ops_, /*can_inline=*/false);
     read_ops_.set_core_cq_tag(&read_tag_);
     // Also set up the Finish tag and op set.
@@ -719,12 +723,13 @@ class ClientCallbackReaderImpl : public ClientCallbackReader<Response> {
     // 2. Any backlog
     // 3. Recv trailing metadata
-    start_tag_.Set(call_.call(),
-                   [this](bool ok) {
-                     reactor_->OnReadInitialMetadataDone(ok);
-                     MaybeFinish(/*from_reaction=*/true);
-                   },
-                   &start_ops_, /*can_inline=*/false);
+    start_tag_.Set(
+        call_.call(),
+        [this](bool ok) {
+          reactor_->OnReadInitialMetadataDone(ok);
+          MaybeFinish(/*from_reaction=*/true);
+        },
+        &start_ops_, /*can_inline=*/false);
     start_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
                                    context_->initial_metadata_flags());
     start_ops_.RecvInitialMetadata(context_);
@@ -732,12 +737,13 @@ class ClientCallbackReaderImpl : public ClientCallbackReader<Response> {
     call_.PerformOps(&start_ops_);
     // Also set up the read tag so it doesn't have to be set up each time
-    read_tag_.Set(call_.call(),
-                  [this](bool ok) {
-                    reactor_->OnReadDone(ok);
-                    MaybeFinish(/*from_reaction=*/true);
-                  },
-                  &read_ops_, /*can_inline=*/false);
+    read_tag_.Set(
+        call_.call(),
+        [this](bool ok) {
+          reactor_->OnReadDone(ok);
+          MaybeFinish(/*from_reaction=*/true);
+        },
+        &read_ops_, /*can_inline=*/false);
     read_ops_.set_core_cq_tag(&read_tag_);
     {
@@ -928,12 +934,13 @@ class ClientCallbackWriterImpl : public ClientCallbackWriter<Request> {
   void WritesDone() override {
     writes_done_ops_.ClientSendClose();
-    writes_done_tag_.Set(call_.call(),
-                         [this](bool ok) {
-                           reactor_->OnWritesDoneDone(ok);
-                           MaybeFinish(/*from_reaction=*/true);
-                         },
-                         &writes_done_ops_, /*can_inline=*/false);
+    writes_done_tag_.Set(
+        call_.call(),
+        [this](bool ok) {
+          reactor_->OnWritesDoneDone(ok);
+          MaybeFinish(/*from_reaction=*/true);
+        },
+        &writes_done_ops_, /*can_inline=*/false);
     writes_done_ops_.set_core_cq_tag(&writes_done_tag_);
     callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
@@ -973,21 +980,23 @@ class ClientCallbackWriterImpl : public ClientCallbackWriter<Request> {
     this->BindReactor(reactor);
     // Set up the unchanging parts of the start and write tags and ops.
-    start_tag_.Set(call_.call(),
-                   [this](bool ok) {
-                     reactor_->OnReadInitialMetadataDone(ok);
-                     MaybeFinish(/*from_reaction=*/true);
-                   },
-                   &start_ops_, /*can_inline=*/false);
+    start_tag_.Set(
+        call_.call(),
+        [this](bool ok) {
+          reactor_->OnReadInitialMetadataDone(ok);
+          MaybeFinish(/*from_reaction=*/true);
+        },
+        &start_ops_, /*can_inline=*/false);
     start_ops_.RecvInitialMetadata(context_);
     start_ops_.set_core_cq_tag(&start_tag_);
-    write_tag_.Set(call_.call(),
-                   [this](bool ok) {
-                     reactor_->OnWriteDone(ok);
-                     MaybeFinish(/*from_reaction=*/true);
-                   },
-                   &write_ops_, /*can_inline=*/false);
+    write_tag_.Set(
+        call_.call(),
+        [this](bool ok) {
+          reactor_->OnWriteDone(ok);
+          MaybeFinish(/*from_reaction=*/true);
+        },
+        &write_ops_, /*can_inline=*/false);
     write_ops_.set_core_cq_tag(&write_tag_);
     // Also set up the Finish tag and op set.
@@ -1097,21 +1106,22 @@ class ClientCallbackUnaryImpl final : public ClientCallbackUnary {
     // 1. Send initial metadata + write + writes done + recv initial metadata
     // 2. Read message, recv trailing metadata
-    start_tag_.Set(call_.call(),
-                   [this](bool ok) {
-                     reactor_->OnReadInitialMetadataDone(ok);
-                     MaybeFinish();
-                   },
-                   &start_ops_, /*can_inline=*/false);
+    start_tag_.Set(
+        call_.call(),
+        [this](bool ok) {
+          reactor_->OnReadInitialMetadataDone(ok);
+          MaybeFinish();
+        },
+        &start_ops_, /*can_inline=*/false);
     start_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
                                    context_->initial_metadata_flags());
     start_ops_.RecvInitialMetadata(context_);
     start_ops_.set_core_cq_tag(&start_tag_);
     call_.PerformOps(&start_ops_);
-    finish_tag_.Set(call_.call(), [this](bool /*ok*/) { MaybeFinish(); },
-                    &finish_ops_,
-                    /*can_inline=*/false);
+    finish_tag_.Set(
+        call_.call(), [this](bool /*ok*/) { MaybeFinish(); }, &finish_ops_,
+        /*can_inline=*/false);
     finish_ops_.ClientRecvStatus(context_, &finish_status_);
     finish_ops_.set_core_cq_tag(&finish_tag_);
     call_.PerformOps(&finish_ops_);

include/grpcpp/impl/codegen/server_callback_handlers.h

@@ -157,14 +157,15 @@ class CallbackUnaryHandler : public ::grpc::internal::MethodHandler {
      // (OnSendInitialMetadataDone). Thus it must be dispatched to an executor
      // thread. However, any OnDone needed after that can be inlined because it
      // is already running on an executor thread.
-      meta_tag_.Set(call_.call(),
-                    [this](bool ok) {
-                      ServerUnaryReactor* reactor =
-                          reactor_.load(std::memory_order_relaxed);
-                      reactor->OnSendInitialMetadataDone(ok);
-                      this->MaybeDone(/*inlineable_ondone=*/true);
-                    },
-                    &meta_ops_, /*can_inline=*/false);
+      meta_tag_.Set(
+          call_.call(),
+          [this](bool ok) {
+            ServerUnaryReactor* reactor =
+                reactor_.load(std::memory_order_relaxed);
+            reactor->OnSendInitialMetadataDone(ok);
+            this->MaybeDone(/*inlineable_ondone=*/true);
+          },
+          &meta_ops_, /*can_inline=*/false);
      meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
                                    ctx_->initial_metadata_flags());
      if (ctx_->compression_level_set()) {
@@ -305,14 +306,15 @@ class CallbackClientStreamingHandler : public ::grpc::internal::MethodHandler {
      // A finish tag with only MaybeDone can have its callback inlined
      // regardless even if OnDone is not inlineable because this callback just
      // checks a ref and then decides whether or not to dispatch OnDone.
-      finish_tag_.Set(call_.call(),
-                      [this](bool) {
-                        // Inlineable OnDone can be false here because there is
-                        // no read reactor that has an inlineable OnDone; this
-                        // only applies to the DefaultReactor (which is unary).
-                        this->MaybeDone(/*inlineable_ondone=*/false);
-                      },
-                      &finish_ops_, /*can_inline=*/true);
+      finish_tag_.Set(
+          call_.call(),
+          [this](bool) {
+            // Inlineable OnDone can be false here because there is
+            // no read reactor that has an inlineable OnDone; this
+            // only applies to the DefaultReactor (which is unary).
+            this->MaybeDone(/*inlineable_ondone=*/false);
+          },
+          &finish_ops_, /*can_inline=*/true);
      if (!ctx_->sent_initial_metadata_) {
        finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
                                        ctx_->initial_metadata_flags());
@@ -338,14 +340,15 @@ class CallbackClientStreamingHandler : public ::grpc::internal::MethodHandler {
      // The callback for this function should not be inlined because it invokes
      // a user-controlled reaction, but any resulting OnDone can be inlined in
      // the executor to which this callback is dispatched.
-      meta_tag_.Set(call_.call(),
-                    [this](bool ok) {
-                      ServerReadReactor<RequestType>* reactor =
-                          reactor_.load(std::memory_order_relaxed);
-                      reactor->OnSendInitialMetadataDone(ok);
-                      this->MaybeDone(/*inlineable_ondone=*/true);
-                    },
-                    &meta_ops_, /*can_inline=*/false);
+      meta_tag_.Set(
+          call_.call(),
+          [this](bool ok) {
+            ServerReadReactor<RequestType>* reactor =
+                reactor_.load(std::memory_order_relaxed);
+            reactor->OnSendInitialMetadataDone(ok);
+            this->MaybeDone(/*inlineable_ondone=*/true);
+          },
+          &meta_ops_, /*can_inline=*/false);
      meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
                                    ctx_->initial_metadata_flags());
      if (ctx_->compression_level_set()) {
@@ -375,12 +378,13 @@ class CallbackClientStreamingHandler : public ::grpc::internal::MethodHandler {
      // The callback for this function should not be inlined because it invokes
      // a user-controlled reaction, but any resulting OnDone can be inlined in
      // the executor to which this callback is dispatched.
-      read_tag_.Set(call_.call(),
-                    [this, reactor](bool ok) {
-                      reactor->OnReadDone(ok);
-                      this->MaybeDone(/*inlineable_ondone=*/true);
-                    },
-                    &read_ops_, /*can_inline=*/false);
+      read_tag_.Set(
+          call_.call(),
+          [this, reactor](bool ok) {
+            reactor->OnReadDone(ok);
+            this->MaybeDone(/*inlineable_ondone=*/true);
+          },
+          &read_ops_, /*can_inline=*/false);
      read_ops_.set_core_cq_tag(&read_tag_);
      this->BindReactor(reactor);
      this->MaybeCallOnCancel(reactor);
@@ -505,14 +509,15 @@ class CallbackServerStreamingHandler : public ::grpc::internal::MethodHandler {
      // A finish tag with only MaybeDone can have its callback inlined
      // regardless even if OnDone is not inlineable because this callback just
      // checks a ref and then decides whether or not to dispatch OnDone.
-      finish_tag_.Set(call_.call(),
-                      [this](bool) {
-                        // Inlineable OnDone can be false here because there is
-                        // no write reactor that has an inlineable OnDone; this
-                        // only applies to the DefaultReactor (which is unary).
-                        this->MaybeDone(/*inlineable_ondone=*/false);
-                      },
-                      &finish_ops_, /*can_inline=*/true);
+      finish_tag_.Set(
+          call_.call(),
+          [this](bool) {
+            // Inlineable OnDone can be false here because there is
+            // no write reactor that has an inlineable OnDone; this
+            // only applies to the DefaultReactor (which is unary).
+            this->MaybeDone(/*inlineable_ondone=*/false);
+          },
+          &finish_ops_, /*can_inline=*/true);
      finish_ops_.set_core_cq_tag(&finish_tag_);

      if (!ctx_->sent_initial_metadata_) {
@@ -533,14 +538,15 @@ class CallbackServerStreamingHandler : public ::grpc::internal::MethodHandler {
      // The callback for this function should not be inlined because it invokes
      // a user-controlled reaction, but any resulting OnDone can be inlined in
      // the executor to which this callback is dispatched.
-      meta_tag_.Set(call_.call(),
-                    [this](bool ok) {
-                      ServerWriteReactor<ResponseType>* reactor =
-                          reactor_.load(std::memory_order_relaxed);
-                      reactor->OnSendInitialMetadataDone(ok);
-                      this->MaybeDone(/*inlineable_ondone=*/true);
-                    },
-                    &meta_ops_, /*can_inline=*/false);
+      meta_tag_.Set(
+          call_.call(),
+          [this](bool ok) {
+            ServerWriteReactor<ResponseType>* reactor =
+                reactor_.load(std::memory_order_relaxed);
+            reactor->OnSendInitialMetadataDone(ok);
+            this->MaybeDone(/*inlineable_ondone=*/true);
+          },
+          &meta_ops_, /*can_inline=*/false);
      meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
                                    ctx_->initial_metadata_flags());
      if (ctx_->compression_level_set()) {
@@ -595,12 +601,13 @@ class CallbackServerStreamingHandler : public ::grpc::internal::MethodHandler {
      // The callback for this function should not be inlined because it invokes
      // a user-controlled reaction, but any resulting OnDone can be inlined in
      // the executor to which this callback is dispatched.
-      write_tag_.Set(call_.call(),
-                     [this, reactor](bool ok) {
-                       reactor->OnWriteDone(ok);
-                       this->MaybeDone(/*inlineable_ondone=*/true);
-                     },
-                     &write_ops_, /*can_inline=*/false);
+      write_tag_.Set(
+          call_.call(),
+          [this, reactor](bool ok) {
+            reactor->OnWriteDone(ok);
+            this->MaybeDone(/*inlineable_ondone=*/true);
+          },
+          &write_ops_, /*can_inline=*/false);
      write_ops_.set_core_cq_tag(&write_tag_);
      this->BindReactor(reactor);
      this->MaybeCallOnCancel(reactor);
@@ -707,14 +714,15 @@ class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
      // A finish tag with only MaybeDone can have its callback inlined
      // regardless even if OnDone is not inlineable because this callback just
      // checks a ref and then decides whether or not to dispatch OnDone.
-      finish_tag_.Set(call_.call(),
-                      [this](bool) {
-                        // Inlineable OnDone can be false here because there is
-                        // no bidi reactor that has an inlineable OnDone; this
-                        // only applies to the DefaultReactor (which is unary).
-                        this->MaybeDone(/*inlineable_ondone=*/false);
-                      },
-                      &finish_ops_, /*can_inline=*/true);
+      finish_tag_.Set(
+          call_.call(),
+          [this](bool) {
+            // Inlineable OnDone can be false here because there is
+            // no bidi reactor that has an inlineable OnDone; this
+            // only applies to the DefaultReactor (which is unary).
+            this->MaybeDone(/*inlineable_ondone=*/false);
+          },
+          &finish_ops_, /*can_inline=*/true);
      finish_ops_.set_core_cq_tag(&finish_tag_);

      if (!ctx_->sent_initial_metadata_) {
@@ -735,14 +743,15 @@ class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
      // The callback for this function should not be inlined because it invokes
      // a user-controlled reaction, but any resulting OnDone can be inlined in
      // the executor to which this callback is dispatched.
-      meta_tag_.Set(call_.call(),
-                    [this](bool ok) {
-                      ServerBidiReactor<RequestType, ResponseType>* reactor =
-                          reactor_.load(std::memory_order_relaxed);
-                      reactor->OnSendInitialMetadataDone(ok);
-                      this->MaybeDone(/*inlineable_ondone=*/true);
-                    },
-                    &meta_ops_, /*can_inline=*/false);
+      meta_tag_.Set(
+          call_.call(),
+          [this](bool ok) {
+            ServerBidiReactor<RequestType, ResponseType>* reactor =
+                reactor_.load(std::memory_order_relaxed);
+            reactor->OnSendInitialMetadataDone(ok);
+            this->MaybeDone(/*inlineable_ondone=*/true);
+          },
+          &meta_ops_, /*can_inline=*/false);
      meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
                                    ctx_->initial_metadata_flags());
      if (ctx_->compression_level_set()) {
@@ -798,19 +807,21 @@ class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
      // The callbacks for these functions should not be inlined because they
      // invoke user-controlled reactions, but any resulting OnDones can be
      // inlined in the executor to which a callback is dispatched.
-      write_tag_.Set(call_.call(),
-                     [this, reactor](bool ok) {
-                       reactor->OnWriteDone(ok);
-                       this->MaybeDone(/*inlineable_ondone=*/true);
-                     },
-                     &write_ops_, /*can_inline=*/false);
+      write_tag_.Set(
+          call_.call(),
+          [this, reactor](bool ok) {
+            reactor->OnWriteDone(ok);
+            this->MaybeDone(/*inlineable_ondone=*/true);
+          },
+          &write_ops_, /*can_inline=*/false);
      write_ops_.set_core_cq_tag(&write_tag_);
-      read_tag_.Set(call_.call(),
-                    [this, reactor](bool ok) {
-                      reactor->OnReadDone(ok);
-                      this->MaybeDone(/*inlineable_ondone=*/true);
-                    },
-                    &read_ops_, /*can_inline=*/false);
+      read_tag_.Set(
+          call_.call(),
+          [this, reactor](bool ok) {
+            reactor->OnReadDone(ok);
+            this->MaybeDone(/*inlineable_ondone=*/true);
+          },
+          &read_ops_, /*can_inline=*/false);
      read_ops_.set_core_cq_tag(&read_tag_);
      this->BindReactor(reactor);
      this->MaybeCallOnCancel(reactor);
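
The tags configured above ultimately drive the user-visible reactor hooks of the C++ callback API (OnSendInitialMetadataDone, OnReadDone, OnWriteDone, OnCancel, OnDone). For orientation, a hedged sketch of the consumer side of CallbackServerStreamingHandler; the Feed service, FeedRequest/FeedItem messages, and the set_note() field are hypothetical names, not part of this patch:

    // Hypothetical server-streaming method; assumes feed.proto generated
    // Feed::CallbackService, FeedRequest, and FeedItem.
    #include <grpcpp/grpcpp.h>

    class FeedService final : public Feed::CallbackService {
      grpc::ServerWriteReactor<FeedItem>* List(
          grpc::CallbackServerContext* /*ctx*/,
          const FeedRequest* /*req*/) override {
        class Writer : public grpc::ServerWriteReactor<FeedItem> {
         public:
          Writer() { NextWrite(); }
          void OnWriteDone(bool ok) override {
            // Delivered through write_tag_ above; the matching OnDone may be
            // inlined afterwards because we are already on an executor thread.
            if (!ok || --remaining_ == 0) {
              Finish(ok ? grpc::Status::OK : grpc::Status::CANCELLED);
              return;
            }
            NextWrite();
          }
          void OnCancel() override {}              // fed by MaybeCallOnCancel
          void OnDone() override { delete this; }  // last callback for this RPC
         private:
          void NextWrite() {
            item_.set_note("tick");  // hypothetical message field
            StartWrite(&item_);
          }
          FeedItem item_;
          int remaining_ = 3;
        };
        return new Writer();
      }
    };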

src/core/ext/filters/client_channel/subchannel.cc

@@ -391,17 +391,17 @@ class Subchannel::AsyncWatcherNotifierLocked {
     }
     watcher_->PushConnectivityStateChange(
         {state, status, std::move(connected_subchannel)});
-    ExecCtx::Run(
-        DEBUG_LOCATION,
-        GRPC_CLOSURE_INIT(&closure_,
-                          [](void* arg, grpc_error* /*error*/) {
-                            auto* self =
-                                static_cast<AsyncWatcherNotifierLocked*>(arg);
-                            self->watcher_->OnConnectivityStateChange();
-                            delete self;
-                          },
-                          this, nullptr),
-        GRPC_ERROR_NONE);
+    ExecCtx::Run(DEBUG_LOCATION,
+                 GRPC_CLOSURE_INIT(
+                     &closure_,
+                     [](void* arg, grpc_error* /*error*/) {
+                       auto* self =
+                           static_cast<AsyncWatcherNotifierLocked*>(arg);
+                       self->watcher_->OnConnectivityStateChange();
+                       delete self;
+                     },
+                     this, nullptr),
+                 GRPC_ERROR_NONE);
   }

  private:
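
The subchannel change above, and several .cc hunks below, re-wrap the same internal idiom: heap-allocate a struct that bundles a grpc_closure with its arguments, initialize it with GRPC_CLOSURE_INIT and a captureless lambda (which decays to the void(void*, grpc_error*) function pointer the closure expects), and have the lambda delete the bundle when it runs. A minimal sketch against gRPC-internal APIs, formatted in the new clang-format 8 style (NotifyArg and ScheduleNotify are illustrative names, not from the patch):

    // Compiles only inside the gRPC source tree (internal headers).
    #include <string>

    #include <grpc/support/log.h>

    #include "src/core/lib/iomgr/closure.h"
    #include "src/core/lib/iomgr/exec_ctx.h"

    struct NotifyArg {
      grpc_closure closure;
      std::string message;
    };

    void ScheduleNotify(std::string message) {
      NotifyArg* arg = new NotifyArg{{}, std::move(message)};
      GRPC_CLOSURE_INIT(
          &arg->closure,
          [](void* void_arg, grpc_error* /*error*/) {
            auto* arg = static_cast<NotifyArg*>(void_arg);
            gpr_log(GPR_INFO, "%s", arg->message.c_str());
            delete arg;  // the closure owns and frees the bundle
          },
          arg, grpc_schedule_on_exec_ctx);
      grpc_core::ExecCtx::Run(DEBUG_LOCATION, &arg->closure, GRPC_ERROR_NONE);
    }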

src/core/lib/gprpp/thd_posix.cc

@@ -105,45 +105,45 @@ class ThreadInternalsPosix : public internal::ThreadInternalsInterface {
       GPR_ASSERT(pthread_attr_setstacksize(&attr, stack_size) == 0);
     }

-    *success =
-        (pthread_create(&pthread_id_, &attr,
-                        [](void* v) -> void* {
-                          thd_arg arg = *static_cast<thd_arg*>(v);
-                          free(v);
-                          if (arg.name != nullptr) {
+    *success = (pthread_create(
+        &pthread_id_, &attr,
+        [](void* v) -> void* {
+          thd_arg arg = *static_cast<thd_arg*>(v);
+          free(v);
+          if (arg.name != nullptr) {
 #if GPR_APPLE_PTHREAD_NAME
-                            /* Apple supports 64 characters, and will
-                             * truncate if it's longer. */
-                            pthread_setname_np(arg.name);
+            /* Apple supports 64 characters, and will
+             * truncate if it's longer. */
+            pthread_setname_np(arg.name);
 #elif GPR_LINUX_PTHREAD_NAME
-                            /* Linux supports 16 characters max, and will
-                             * error if it's longer. */
-                            char buf[16];
-                            size_t buf_len = GPR_ARRAY_SIZE(buf) - 1;
-                            strncpy(buf, arg.name, buf_len);
-                            buf[buf_len] = '\0';
-                            pthread_setname_np(pthread_self(), buf);
+            /* Linux supports 16 characters max, and will
+             * error if it's longer. */
+            char buf[16];
+            size_t buf_len = GPR_ARRAY_SIZE(buf) - 1;
+            strncpy(buf, arg.name, buf_len);
+            buf[buf_len] = '\0';
+            pthread_setname_np(pthread_self(), buf);
 #endif  // GPR_APPLE_PTHREAD_NAME
-                          }
-                          gpr_mu_lock(&arg.thread->mu_);
-                          while (!arg.thread->started_) {
-                            gpr_cv_wait(&arg.thread->ready_, &arg.thread->mu_,
-                                        gpr_inf_future(GPR_CLOCK_MONOTONIC));
-                          }
-                          gpr_mu_unlock(&arg.thread->mu_);
-                          if (!arg.joinable) {
-                            delete arg.thread;
-                          }
-                          (*arg.body)(arg.arg);
-                          if (arg.tracked) {
-                            Fork::DecThreadCount();
-                          }
-                          return nullptr;
-                        },
-                        info) == 0);
+          }
+          gpr_mu_lock(&arg.thread->mu_);
+          while (!arg.thread->started_) {
+            gpr_cv_wait(&arg.thread->ready_, &arg.thread->mu_,
+                        gpr_inf_future(GPR_CLOCK_MONOTONIC));
+          }
+          gpr_mu_unlock(&arg.thread->mu_);
+          if (!arg.joinable) {
+            delete arg.thread;
+          }
+          (*arg.body)(arg.arg);
+          if (arg.tracked) {
+            Fork::DecThreadCount();
+          }
+          return nullptr;
+        },
+        info) == 0);

     GPR_ASSERT(pthread_attr_destroy(&attr) == 0);

src/core/lib/iomgr/executor/threadpool.h

@@ -65,9 +65,9 @@ class ThreadPoolWorker {
   ThreadPoolWorker(const char* thd_name, MPMCQueueInterface* queue,
                    Thread::Options& options, int index)
       : queue_(queue), thd_name_(thd_name), index_(index) {
-    thd_ = Thread(thd_name,
-                  [](void* th) { static_cast<ThreadPoolWorker*>(th)->Run(); },
-                  this, nullptr, options);
+    thd_ = Thread(
+        thd_name, [](void* th) { static_cast<ThreadPoolWorker*>(th)->Run(); },
+        this, nullptr, options);
   }

   ~ThreadPoolWorker() {}
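
grpc_core::Thread, as used here and implemented in thd_posix.cc above, takes a C-style body (a plain function pointer plus a void* argument), an optional bool* success out-parameter, and an Options struct, so a captureless lambda is the idiomatic way to trampoline into a member function. A hedged internal-API sketch (Worker is an illustrative name; per the thd_posix.cc hunk, the body blocks until Start() is called):

    // Compiles only inside the gRPC source tree (internal header).
    #include "src/core/lib/gprpp/thd.h"

    class Worker {
     public:
      void Start() {
        thd_ = grpc_core::Thread(
            "worker", [](void* self) { static_cast<Worker*>(self)->Run(); },
            this);
        thd_.Start();  // until this runs, the body waits on started_/ready_
      }
      void Join() { thd_.Join(); }

     private:
      void Run() { /* do work */ }
      grpc_core::Thread thd_;
    };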

src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc

@@ -394,9 +394,10 @@ static void on_handshaker_service_resp_recv_dedicated(void* arg,
                                                        grpc_error* /*error*/) {
   alts_shared_resource_dedicated* resource =
       grpc_alts_get_shared_resource_dedicated();
-  grpc_cq_end_op(resource->cq, arg, GRPC_ERROR_NONE,
-                 [](void* /*done_arg*/, grpc_cq_completion* /*storage*/) {},
-                 nullptr, &resource->storage);
+  grpc_cq_end_op(
+      resource->cq, arg, GRPC_ERROR_NONE,
+      [](void* /*done_arg*/, grpc_cq_completion* /*storage*/) {}, nullptr,
+      &resource->storage);
 }

 /* Returns TSI_OK if and only if no error is encountered. */

src/cpp/client/client_callback.cc

@@ -34,14 +34,15 @@ void ClientReactor::InternalScheduleOnDone(grpc::Status s) {
     const grpc::Status status;
     ClosureWithArg(ClientReactor* reactor_arg, grpc::Status s)
         : reactor(reactor_arg), status(std::move(s)) {
-      GRPC_CLOSURE_INIT(&closure,
-                        [](void* void_arg, grpc_error*) {
-                          ClosureWithArg* arg =
-                              static_cast<ClosureWithArg*>(void_arg);
-                          arg->reactor->OnDone(arg->status);
-                          delete arg;
-                        },
-                        this, grpc_schedule_on_exec_ctx);
+      GRPC_CLOSURE_INIT(
+          &closure,
+          [](void* void_arg, grpc_error*) {
+            ClosureWithArg* arg = static_cast<ClosureWithArg*>(void_arg);
+            arg->reactor->OnDone(arg->status);
+            delete arg;
+          },
+          this, grpc_schedule_on_exec_ctx);
     }
   };
   ClosureWithArg* arg = new ClosureWithArg(this, std::move(s));

src/cpp/common/alarm.cc

@@ -80,20 +80,20 @@ class AlarmImpl : public ::grpc::internal::CompletionQueueTag {
     // Don't use any CQ at all. Instead just use the timer to fire the function
     callback_ = std::move(f);
     Ref();
-    GRPC_CLOSURE_INIT(&on_alarm_,
-                      [](void* arg, grpc_error* error) {
-                        grpc_core::Executor::Run(
-                            GRPC_CLOSURE_CREATE(
-                                [](void* arg, grpc_error* error) {
-                                  AlarmImpl* alarm =
-                                      static_cast<AlarmImpl*>(arg);
-                                  alarm->callback_(error == GRPC_ERROR_NONE);
-                                  alarm->Unref();
-                                },
-                                arg, nullptr),
-                            error);
-                      },
-                      this, grpc_schedule_on_exec_ctx);
+    GRPC_CLOSURE_INIT(
+        &on_alarm_,
+        [](void* arg, grpc_error* error) {
+          grpc_core::Executor::Run(
+              GRPC_CLOSURE_CREATE(
+                  [](void* arg, grpc_error* error) {
+                    AlarmImpl* alarm = static_cast<AlarmImpl*>(arg);
+                    alarm->callback_(error == GRPC_ERROR_NONE);
+                    alarm->Unref();
+                  },
+                  arg, nullptr),
+              error);
+        },
+        this, grpc_schedule_on_exec_ctx);
     grpc_timer_init(&timer_, grpc_timespec_to_millis_round_up(deadline),
                     &on_alarm_);
   }
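
This is the implementation behind grpc::Alarm's callback overload: grpc_timer_init fires on a timer thread, and the closure bounces the user function to the executor so it never runs on timer internals. A hedged usage sketch (at the time of this patch the overload lived behind Alarm::experimental(); the deadline and sleep are illustrative):

    #include <grpcpp/alarm.h>

    #include <chrono>
    #include <iostream>
    #include <thread>

    int main() {
      grpc::Alarm alarm;
      // No completion queue involved: the lambda runs on gRPC's executor,
      // with ok == true when the deadline expired normally (not cancelled).
      alarm.experimental().Set(
          std::chrono::system_clock::now() + std::chrono::milliseconds(100),
          [](bool ok) { std::cout << "alarm fired, ok=" << ok << "\n"; });
      std::this_thread::sleep_for(std::chrono::milliseconds(200));
      return 0;
    }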

src/cpp/server/dynamic_thread_pool.cc

@@ -27,11 +27,12 @@ namespace grpc {
 DynamicThreadPool::DynamicThread::DynamicThread(DynamicThreadPool* pool)
     : pool_(pool),
-      thd_("grpcpp_dynamic_pool",
-           [](void* th) {
-             static_cast<DynamicThreadPool::DynamicThread*>(th)->ThreadFunc();
-           },
-           this) {
+      thd_(
+          "grpcpp_dynamic_pool",
+          [](void* th) {
+            static_cast<DynamicThreadPool::DynamicThread*>(th)->ThreadFunc();
+          },
+          this) {
   thd_.Start();
 }

 DynamicThreadPool::DynamicThread::~DynamicThread() { thd_.Join(); }

src/cpp/server/server_callback.cc

@@ -35,14 +35,14 @@ void ServerCallbackCall::ScheduleOnDone(bool inline_ondone) {
     grpc_closure closure;
     ServerCallbackCall* call;
     explicit ClosureWithArg(ServerCallbackCall* call_arg) : call(call_arg) {
-      GRPC_CLOSURE_INIT(&closure,
-                        [](void* void_arg, grpc_error*) {
-                          ClosureWithArg* arg =
-                              static_cast<ClosureWithArg*>(void_arg);
-                          arg->call->CallOnDone();
-                          delete arg;
-                        },
-                        this, grpc_schedule_on_exec_ctx);
+      GRPC_CLOSURE_INIT(
+          &closure,
+          [](void* void_arg, grpc_error*) {
+            ClosureWithArg* arg = static_cast<ClosureWithArg*>(void_arg);
+            arg->call->CallOnDone();
+            delete arg;
+          },
+          this, grpc_schedule_on_exec_ctx);
     }
   };
   ClosureWithArg* arg = new ClosureWithArg(this);
@@ -64,15 +64,15 @@ void ServerCallbackCall::CallOnCancel(ServerReactor* reactor) {
     ServerReactor* reactor;
     ClosureWithArg(ServerCallbackCall* call_arg, ServerReactor* reactor_arg)
         : call(call_arg), reactor(reactor_arg) {
-      GRPC_CLOSURE_INIT(&closure,
-                        [](void* void_arg, grpc_error*) {
-                          ClosureWithArg* arg =
-                              static_cast<ClosureWithArg*>(void_arg);
-                          arg->reactor->OnCancel();
-                          arg->call->MaybeDone();
-                          delete arg;
-                        },
-                        this, grpc_schedule_on_exec_ctx);
+      GRPC_CLOSURE_INIT(
+          &closure,
+          [](void* void_arg, grpc_error*) {
+            ClosureWithArg* arg = static_cast<ClosureWithArg*>(void_arg);
+            arg->reactor->OnCancel();
+            arg->call->MaybeDone();
+            delete arg;
+          },
+          this, grpc_schedule_on_exec_ctx);
     }
   };
   ClosureWithArg* arg = new ClosureWithArg(this, reactor);

templates/tools/dockerfile/grpc_clang_format/Dockerfile.template

@@ -16,7 +16,13 @@
 FROM debian:10

-RUN apt-get update && apt-get install -y clang-format
+# Add buster-backports for more recent clang packages
+RUN echo "deb http://deb.debian.org/debian buster-backports main" | tee /etc/apt/sources.list.d/buster-backports.list
+
+# Install clang-format
+RUN apt-get update && apt-get install -y clang-format-8
+ENV CLANG_FORMAT=clang-format-8

 ADD clang_format_all_the_things.sh /

 # When running locally, we'll be impersonating the current user, so we need

templates/tools/dockerfile/test/sanity/Dockerfile.template

@@ -37,8 +37,8 @@
 RUN echo "deb http://deb.debian.org/debian buster-backports main" | tee /etc/apt/sources.list.d/buster-backports.list
 # Install clang, clang-format, and clang-tidy
-RUN apt-get update && apt-get install -y clang clang-format-7 clang-tidy-8 jq
-ENV CLANG_FORMAT=clang-format-7
+RUN apt-get update && apt-get install -y clang clang-format-8 clang-tidy-8 jq
+ENV CLANG_FORMAT=clang-format-8
 ENV CLANG_TIDY=clang-tidy-8

test/cpp/microbenchmarks/bm_timer.cc

@@ -46,9 +46,9 @@ static void BM_InitCancelTimer(benchmark::State& state) {
   int i = 0;
   for (auto _ : state) {
     TimerClosure* timer_closure = &timer_closures[i++ % kTimerCount];
-    GRPC_CLOSURE_INIT(&timer_closure->closure,
-                      [](void* /*args*/, grpc_error* /*err*/) {}, nullptr,
-                      grpc_schedule_on_exec_ctx);
+    GRPC_CLOSURE_INIT(
+        &timer_closure->closure, [](void* /*args*/, grpc_error* /*err*/) {},
+        nullptr, grpc_schedule_on_exec_ctx);
     grpc_timer_init(&timer_closure->timer, GRPC_MILLIS_INF_FUTURE,
                     &timer_closure->closure);
     grpc_timer_cancel(&timer_closure->timer);
@@ -75,9 +75,9 @@ static void BM_TimerBatch(benchmark::State& state) {
   for (auto _ : state) {
     for (grpc_millis deadline = start; deadline != end; deadline += increment) {
       TimerClosure* timer_closure = &timer_closures[deadline % kTimerCount];
-      GRPC_CLOSURE_INIT(&timer_closure->closure,
-                        [](void* /*args*/, grpc_error* /*err*/) {}, nullptr,
-                        grpc_schedule_on_exec_ctx);
+      GRPC_CLOSURE_INIT(
+          &timer_closure->closure, [](void* /*args*/, grpc_error* /*err*/) {},
+          nullptr, grpc_schedule_on_exec_ctx);
       grpc_timer_init(&timer_closure->timer, deadline, &timer_closure->closure);
     }

test/cpp/util/slice_test.cc

@@ -94,13 +94,14 @@ TEST_F(SliceTest, SliceNewWithUserData) {
   auto* t = new stest;
   t->x = new char[strlen(kContent) + 1];
   strcpy(t->x, kContent);
-  Slice spp(t->x, strlen(t->x),
-            [](void* p) {
-              auto* t = static_cast<stest*>(p);
-              delete[] t->x;
-              delete t;
-            },
-            t);
+  Slice spp(
+      t->x, strlen(t->x),
+      [](void* p) {
+        auto* t = static_cast<stest*>(p);
+        delete[] t->x;
+        delete t;
+      },
+      t);
   CheckSlice(spp, kContent);
 }

tools/dockerfile/grpc_clang_format/Dockerfile

@@ -14,7 +14,13 @@
 FROM debian:10

-RUN apt-get update && apt-get install -y clang-format
+# Add buster-backports for more recent clang packages
+RUN echo "deb http://deb.debian.org/debian buster-backports main" | tee /etc/apt/sources.list.d/buster-backports.list
+
+# Install clang-format
+RUN apt-get update && apt-get install -y clang-format-8
+ENV CLANG_FORMAT=clang-format-8

 ADD clang_format_all_the_things.sh /

 # When running locally, we'll be impersonating the current user, so we need
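
With the regenerated Dockerfile pinning ENV CLANG_FORMAT=clang-format-8, a local formatting pass would look roughly like the following; the mount point and flags are assumptions for illustration (gRPC's tools/distrib scripts wrap this in practice), not commands taken from this patch:

    # Illustrative only; paths and flags are assumptions.
    docker build -t grpc_clang_format tools/dockerfile/grpc_clang_format
    docker run --rm -v "$(pwd)":/local-code grpc_clang_format \
        /clang_format_all_the_things.sh  # picks up CLANG_FORMAT=clang-format-8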

tools/dockerfile/test/sanity/Dockerfile

@@ -85,8 +85,8 @@ RUN python3 -m pip install simplejson mako virtualenv==16.7.9 lxml
 RUN echo "deb http://deb.debian.org/debian buster-backports main" | tee /etc/apt/sources.list.d/buster-backports.list
 # Install clang, clang-format, and clang-tidy
-RUN apt-get update && apt-get install -y clang clang-format-7 clang-tidy-8 jq
-ENV CLANG_FORMAT=clang-format-7
+RUN apt-get update && apt-get install -y clang clang-format-8 clang-tidy-8 jq
+ENV CLANG_FORMAT=clang-format-8
 ENV CLANG_TIDY=clang-tidy-8
