Update clang-format to 5.0

Branch: pull/13255/head
Author: Craig Tiller (8 years ago)
parent ef68fe7239
commit baa14a975e
Changed files (lines changed shown in parentheses):

  1. .clang-format (2)
  2. include/grpc++/impl/codegen/async_unary_call.h (2)
  3. include/grpc++/impl/codegen/completion_queue.h (3)
  4. include/grpc/impl/codegen/sync_generic.h (12)
  5. src/compiler/cpp_generator.cc (3)
  6. src/compiler/csharp_generator.cc (13)
  7. src/compiler/node_generator.cc (6)
  8. src/compiler/objective_c_generator.cc (4)
  9. src/compiler/objective_c_generator.h (2)
  10. src/compiler/objective_c_generator_helpers.h (2)
  11. src/compiler/objective_c_plugin.cc (6)
  12. src/compiler/php_generator.cc (6)
  13. src/compiler/php_plugin.cc (2)
  14. src/compiler/python_generator.cc (2)
  15. src/compiler/python_generator_helpers.h (6)
  16. src/compiler/ruby_generator.cc (22)
  17. src/core/ext/filters/client_channel/channel_connectivity.cc (3)
  18. src/core/ext/filters/client_channel/client_channel.cc (15)
  19. src/core/ext/filters/client_channel/lb_policy.cc (5)
  20. src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc (3)
  21. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (4)
  22. src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc (8)
  23. src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc (8)
  24. src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc (26)
  25. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc (10)
  26. src/core/ext/filters/client_channel/subchannel.cc (5)
  27. src/core/ext/filters/client_channel/subchannel_index.cc (3)
  28. src/core/ext/filters/http/message_compress/message_compress_filter.cc (3)
  29. src/core/ext/filters/http/server/http_server_filter.cc (10)
  30. src/core/ext/transport/chttp2/client/insecure/channel_create.cc (3)
  31. src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc (3)
  32. src/core/ext/transport/chttp2/server/chttp2_server.cc (5)
  33. src/core/ext/transport/chttp2/transport/chttp2_transport.cc (21)
  34. src/core/ext/transport/chttp2/transport/flow_control.cc (4)
  35. src/core/ext/transport/chttp2/transport/hpack_encoder.cc (5)
  36. src/core/ext/transport/chttp2/transport/incoming_metadata.cc (5)
  37. src/core/ext/transport/chttp2/transport/parsing.cc (10)
  38. src/core/ext/transport/cronet/transport/cronet_transport.cc (15)
  39. src/core/ext/transport/inproc/inproc_transport.cc (3)
  40. src/core/lib/channel/channel_stack.cc (5)
  41. src/core/lib/debug/stats.cc (4)
  42. src/core/lib/debug/stats_data.cc (9)
  43. src/core/lib/http/httpcli.cc (5)
  44. src/core/lib/iomgr/endpoint_pair_windows.cc (4)
  45. src/core/lib/iomgr/error.cc (4)
  46. src/core/lib/iomgr/ev_epoll1_linux.cc (10)
  47. src/core/lib/iomgr/ev_epollex_linux.cc (32)
  48. src/core/lib/iomgr/ev_epollsig_linux.cc (9)
  49. src/core/lib/iomgr/ev_poll_posix.cc (3)
  50. src/core/lib/iomgr/iomgr.cc (6)
  51. src/core/lib/iomgr/load_file.cc (4)
  52. src/core/lib/iomgr/resource_quota.cc (12)
  53. src/core/lib/iomgr/sockaddr_utils.cc (3)
  54. src/core/lib/iomgr/tcp_client_posix.cc (8)
  55. src/core/lib/iomgr/tcp_client_uv.cc (3)
  56. src/core/lib/iomgr/tcp_client_windows.cc (8)
  57. src/core/lib/iomgr/tcp_server_posix.cc (5)
  58. src/core/lib/iomgr/tcp_server_uv.cc (4)
  59. src/core/lib/iomgr/tcp_server_windows.cc (12)
  60. src/core/lib/iomgr/tcp_uv.cc (5)
  61. src/core/lib/iomgr/timer_generic.cc (8)
  62. src/core/lib/iomgr/timer_heap.cc (5)
  63. src/core/lib/iomgr/udp_server.cc (7)
  64. src/core/lib/security/context/security_context.cc (3)
  65. src/core/lib/security/credentials/fake/fake_credentials.cc (3)
  66. src/core/lib/security/credentials/oauth2/oauth2_credentials.cc (3)
  67. src/core/lib/security/credentials/plugin/plugin_credentials.cc (3)
  68. src/core/lib/security/credentials/ssl/ssl_credentials.cc (3)
  69. src/core/lib/security/transport/secure_endpoint.cc (3)
  70. src/core/lib/security/transport/security_connector.cc (11)
  71. src/core/lib/security/transport/security_handshaker.cc (6)
  72. src/core/lib/support/avl.cc (3)
  73. src/core/lib/support/histogram.cc (3)
  74. src/core/lib/support/log_posix.cc (1)
  75. src/core/lib/support/mpscq.h (4)
  76. src/core/lib/support/spinlock.h (4)
  77. src/core/lib/surface/call.cc (4)
  78. src/core/lib/surface/channel.cc (3)
  79. src/core/lib/surface/completion_queue.cc (8)
  80. src/core/lib/surface/server.cc (14)
  81. src/core/lib/transport/bdp_estimator.cc (3)
  82. src/core/lib/transport/metadata.h (6)
  83. src/core/tsi/fake_transport_security.cc (6)
  84. src/core/tsi/ssl_transport_security.cc (7)
  85. src/cpp/client/generic_stub.cc (3)
  86. src/cpp/client/secure_credentials.cc (6)
  87. src/cpp/common/channel_filter.h (3)
  88. src/cpp/common/version_cc.cc (2)
  89. src/cpp/ext/proto_server_reflection.cc (10)
  90. src/cpp/server/health/health_check_service.cc (2)
  91. src/cpp/util/time_cc.cc (2)
  92. src/csharp/ext/grpc_csharp_ext.c (3)
  93. src/node/ext/byte_buffer.cc (4)
  94. src/node/ext/call.cc (7)
  95. src/node/ext/call.h (2)
  96. src/node/ext/call_credentials.h (2)
  97. src/node/ext/node_grpc.cc (4)
  98. src/ruby/ext/grpc/rb_call.c (5)
  99. test/core/bad_client/tests/badreq.c (15)
  100. test/core/bad_client/tests/headers.c (135)
Some files were not shown because too many files have changed in this diff.

@@ -1,5 +1,7 @@
---
Language: Cpp
BasedOnStyle: Google
DerivePointerAlignment: false
PointerAlignment: Left
...
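
Note: the two options added above drive most of this diff. DerivePointerAlignment: false stops clang-format from inferring pointer style per file, and PointerAlignment: Left attaches * and & to the type. A minimal sketch of the effect on a hypothetical declaration (not code from this commit):

    /* old gRPC style (right-aligned pointers): */
    grpc_error *check_peer(grpc_security_connector *sc, const char *name);

    /* with DerivePointerAlignment: false and PointerAlignment: Left: */
    grpc_error* check_peer(grpc_security_connector* sc, const char* name);

This is why many hunks below do nothing but move a * or & from the variable name to the type.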

@@ -297,6 +297,6 @@ class default_delete<grpc::ClientAsyncResponseReaderInterface<R>> {
public:
void operator()(void* p) {}
};
}
} // namespace std
#endif // GRPCXX_IMPL_CODEGEN_ASYNC_UNARY_CALL_H

@@ -164,7 +164,8 @@ class CompletionQueue : private GrpcLibraryCodegen {
///
/// \return true if read a regular event, false if the queue is shutting down.
bool Next(void** tag, bool* ok) {
return (AsyncNextInternal(tag, ok, g_core_codegen_interface->gpr_inf_future(
return (AsyncNextInternal(tag, ok,
g_core_codegen_interface->gpr_inf_future(
GPR_CLOCK_REALTIME)) != SHUTDOWN);
}

@@ -23,16 +23,22 @@
#include <grpc/impl/codegen/atm.h>
/* gpr_event */
typedef struct { gpr_atm state; } gpr_event;
typedef struct {
gpr_atm state;
} gpr_event;
#define GPR_EVENT_INIT \
{ 0 }
/* gpr_refcount */
typedef struct { gpr_atm count; } gpr_refcount;
typedef struct {
gpr_atm count;
} gpr_refcount;
/* gpr_stats_counter */
typedef struct { gpr_atm value; } gpr_stats_counter;
typedef struct {
gpr_atm value;
} gpr_stats_counter;
#define GPR_STATS_INIT \
{ 0 }
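
Note: clang-format 5.0's Google style no longer keeps a struct with a single field on one line, which is what expands the three typedefs above. The same rule applied to a made-up type:

    /* accepted by clang-format 4.x: */
    typedef struct { gpr_atm counter; } my_counter;

    /* rewritten by clang-format 5.0: */
    typedef struct {
      gpr_atm counter;
    } my_counter;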

@@ -1566,7 +1566,8 @@ grpc::string GetMockIncludes(grpc_generator::File *file,
static const char* headers_strs[] = {
"grpc++/impl/codegen/async_stream.h",
"grpc++/impl/codegen/sync_stream.h", "gmock/gmock.h",
"grpc++/impl/codegen/sync_stream.h",
"gmock/gmock.h",
};
std::vector<grpc::string> headers(headers_strs, array_end(headers_strs));
PrintIncludes(printer.get(), headers, params);

@@ -23,24 +23,23 @@
#include "src/compiler/config.h"
#include "src/compiler/csharp_generator.h"
#include "src/compiler/csharp_generator.h"
#include "src/compiler/csharp_generator_helpers.h"
using google::protobuf::compiler::csharp::GetFileNamespace;
using google::protobuf::compiler::csharp::GetClassName;
using google::protobuf::compiler::csharp::GetFileNamespace;
using google::protobuf::compiler::csharp::GetReflectionClassName;
using grpc::protobuf::FileDescriptor;
using grpc::protobuf::Descriptor;
using grpc::protobuf::ServiceDescriptor;
using grpc::protobuf::FileDescriptor;
using grpc::protobuf::MethodDescriptor;
using grpc::protobuf::ServiceDescriptor;
using grpc::protobuf::io::Printer;
using grpc::protobuf::io::StringOutputStream;
using grpc_generator::MethodType;
using grpc_generator::GetMethodType;
using grpc_generator::METHODTYPE_NO_STREAMING;
using grpc_generator::METHODTYPE_BIDI_STREAMING;
using grpc_generator::METHODTYPE_CLIENT_STREAMING;
using grpc_generator::METHODTYPE_NO_STREAMING;
using grpc_generator::METHODTYPE_SERVER_STREAMING;
using grpc_generator::METHODTYPE_BIDI_STREAMING;
using grpc_generator::MethodType;
using grpc_generator::StringReplace;
using std::map;
using std::vector;
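
Note: the churn in the using-declarations above (and in the generator files below) comes from clang-format 5.0 sorting them alphabetically; SortUsingDeclarations is new in 5.0 and, as far as we can tell, on by default in the Google style. With hypothetical input:

    /* input order: */
    using grpc::protobuf::ServiceDescriptor;
    using grpc::protobuf::FileDescriptor;

    /* after clang-format 5.0 (sorted): */
    using grpc::protobuf::FileDescriptor;
    using grpc::protobuf::ServiceDescriptor;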

@@ -22,10 +22,10 @@
#include "src/compiler/generator_helpers.h"
#include "src/compiler/node_generator_helpers.h"
using grpc::protobuf::Descriptor;
using grpc::protobuf::FileDescriptor;
using grpc::protobuf::ServiceDescriptor;
using grpc::protobuf::MethodDescriptor;
using grpc::protobuf::Descriptor;
using grpc::protobuf::ServiceDescriptor;
using grpc::protobuf::io::Printer;
using grpc::protobuf::io::StringOutputStream;
using std::map;
@@ -234,7 +234,7 @@ void PrintServices(const FileDescriptor *file, Printer *out) {
PrintService(file->service(i), out);
}
}
}
} // namespace
grpc::string GenerateFile(const FileDescriptor* file) {
grpc::string output;

@@ -27,10 +27,10 @@
#include <google/protobuf/compiler/objectivec/objectivec_helpers.h>
using ::google::protobuf::compiler::objectivec::ClassName;
using ::grpc::protobuf::io::Printer;
using ::grpc::protobuf::FileDescriptor;
using ::grpc::protobuf::MethodDescriptor;
using ::grpc::protobuf::ServiceDescriptor;
using ::grpc::protobuf::FileDescriptor;
using ::grpc::protobuf::io::Printer;
using ::std::map;
using ::std::set;

@@ -23,8 +23,8 @@
namespace grpc_objective_c_generator {
using ::grpc::protobuf::ServiceDescriptor;
using ::grpc::protobuf::FileDescriptor;
using ::grpc::protobuf::ServiceDescriptor;
using ::grpc::string;
// Returns forward declaration of classes in the generated header file.

@@ -40,5 +40,5 @@ inline string ServiceClassName(const ServiceDescriptor *service) {
string prefix = file->options().objc_class_prefix();
return prefix + service->name();
}
}
} // namespace grpc_objective_c_generator
#endif // GRPC_INTERNAL_COMPILER_OBJECTIVE_C_GENERATOR_HELPERS_H

@@ -26,9 +26,9 @@
#include <google/protobuf/compiler/objectivec/objectivec_helpers.h>
using ::google::protobuf::compiler::objectivec::ProtobufLibraryFrameworkName;
using ::google::protobuf::compiler::objectivec::
IsProtobufLibraryBundledProtoFile;
using ::google::protobuf::compiler::objectivec::ProtobufLibraryFrameworkName;
class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
public:
@@ -96,8 +96,8 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
"\nNS_ASSUME_NONNULL_BEGIN\n\n";
static const ::grpc::string kNonNullEnd = "\nNS_ASSUME_NONNULL_END\n";
Write(context, file_name + ".pbrpc.h", imports + '\n' + proto_imports +
'\n' + kNonNullBegin +
Write(context, file_name + ".pbrpc.h",
imports + '\n' + proto_imports + '\n' + kNonNullBegin +
declarations + kNonNullEnd);
}

@@ -22,10 +22,10 @@
#include "src/compiler/generator_helpers.h"
#include "src/compiler/php_generator_helpers.h"
using grpc::protobuf::Descriptor;
using grpc::protobuf::FileDescriptor;
using grpc::protobuf::ServiceDescriptor;
using grpc::protobuf::MethodDescriptor;
using grpc::protobuf::Descriptor;
using grpc::protobuf::ServiceDescriptor;
using grpc::protobuf::io::Printer;
using grpc::protobuf::io::StringOutputStream;
using std::map;
@@ -148,7 +148,7 @@ void PrintService(const ServiceDescriptor *service,
out->Outdent();
out->Print("}\n");
}
}
} // namespace
grpc::string GenerateFile(const FileDescriptor* file,
const ServiceDescriptor* service,

@@ -24,9 +24,9 @@
#include "src/compiler/php_generator.h"
#include "src/compiler/php_generator_helpers.h"
using google::protobuf::compiler::ParseGeneratorParameter;
using grpc_php_generator::GenerateFile;
using grpc_php_generator::GetPHPServiceFilename;
using google::protobuf::compiler::ParseGeneratorParameter;
class PHPGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
public:

@@ -45,9 +45,9 @@ using std::make_pair;
using std::map;
using std::pair;
using std::replace;
using std::set;
using std::tuple;
using std::vector;
using std::set;
namespace grpc_python_generator {

@@ -29,9 +29,6 @@
#include "src/compiler/python_generator.h"
#include "src/compiler/python_private_generator.h"
using std::vector;
using grpc_generator::StringReplace;
using grpc_generator::StripProto;
using grpc::protobuf::Descriptor;
using grpc::protobuf::FileDescriptor;
using grpc::protobuf::MethodDescriptor;
@@ -41,6 +38,9 @@ using grpc::protobuf::io::CodedOutputStream;
using grpc::protobuf::io::Printer;
using grpc::protobuf::io::StringOutputStream;
using grpc::protobuf::io::ZeroCopyOutputStream;
using grpc_generator::StringReplace;
using grpc_generator::StripProto;
using std::vector;
namespace grpc_python_generator {

@@ -27,8 +27,8 @@
#include "src/compiler/ruby_generator_string-inl.h"
using grpc::protobuf::FileDescriptor;
using grpc::protobuf::ServiceDescriptor;
using grpc::protobuf::MethodDescriptor;
using grpc::protobuf::ServiceDescriptor;
using grpc::protobuf::io::Printer;
using grpc::protobuf::io::StringOutputStream;
using std::map;
@@ -51,7 +51,11 @@ void PrintMethod(const MethodDescriptor *method, const grpc::string &package,
output_type = "stream(" + output_type + ")";
}
std::map<grpc::string, grpc::string> method_vars = ListToDict({
"mth.name", method->name(), "input.type", input_type, "output.type",
"mth.name",
method->name(),
"input.type",
input_type,
"output.type",
output_type,
});
out->Print(GetRubyComments(method, true).c_str());
@@ -68,7 +72,8 @@ void PrintService(const ServiceDescriptor *service, const grpc::string &package,
// Begin the service module
std::map<grpc::string, grpc::string> module_vars = ListToDict({
"module.name", CapitalizeFirst(service->name()),
"module.name",
CapitalizeFirst(service->name()),
});
out->Print(module_vars, "module $module.name$\n");
out->Indent();
@@ -157,7 +162,10 @@ grpc::string GetServices(const FileDescriptor *file) {
// Write out a file header.
std::map<grpc::string, grpc::string> header_comment_vars = ListToDict({
"file.name", file->name(), "file.package", file->package(),
"file.name",
file->name(),
"file.package",
file->package(),
});
out.Print("# Generated by the protocol buffer compiler. DO NOT EDIT!\n");
out.Print(header_comment_vars,
@@ -175,7 +183,8 @@ grpc::string GetServices(const FileDescriptor *file) {
// that defines the messages used by the service. This is generated by the
// main ruby plugin.
std::map<grpc::string, grpc::string> dep_vars = ListToDict({
"dep.name", MessagesRequireName(file),
"dep.name",
MessagesRequireName(file),
});
out.Print(dep_vars, "require '$dep.name$'\n");
@@ -184,7 +193,8 @@ grpc::string GetServices(const FileDescriptor *file) {
std::vector<grpc::string> modules = Split(file->package(), '.');
for (size_t i = 0; i < modules.size(); ++i) {
std::map<grpc::string, grpc::string> module_vars = ListToDict({
"module.name", PackageToModule(modules[i]),
"module.name",
PackageToModule(modules[i]),
});
out.Print(module_vars, "module $module.name$\n");
out.Indent();
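
Note: the ruby_generator.cc hunks show another 5.0 behavior change: once a braced initializer list has to wrap, its elements are no longer bin-packed onto shared lines, so each key and value lands on its own line. A sketch with invented keys and values:

    /* clang-format 4.x: */
    std::map<grpc::string, grpc::string> vars = ListToDict({
        "key.one", value_one, "key.two", value_two,
    });

    /* clang-format 5.0: */
    std::map<grpc::string, grpc::string> vars = ListToDict({
        "key.one",
        value_one,
        "key.two",
        value_two,
    });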

@@ -213,7 +213,8 @@ void grpc_channel_watch_connectivity_state(
"deadline=gpr_timespec { tv_sec: %" PRId64
", tv_nsec: %d, clock_type: %d }, "
"cq=%p, tag=%p)",
7, (channel, (int)last_observed_state, deadline.tv_sec, deadline.tv_nsec,
7,
(channel, (int)last_observed_state, deadline.tv_sec, deadline.tv_nsec,
(int)deadline.clock_type, cq, tag));
GPR_ASSERT(grpc_cq_begin_op(cq, tag));

@@ -771,7 +771,8 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
channel_data* chand = (channel_data*)elem->channel_data;
if (chand->resolver != NULL) {
GRPC_CLOSURE_SCHED(
exec_ctx, GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver,
exec_ctx,
GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver,
grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
}
@@ -943,7 +944,8 @@ static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx,
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIuPTR
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: sending %" PRIuPTR
" pending batches to subchannel_call=%p",
chand, calld, calld->waiting_for_pick_batches_count,
calld->subchannel_call);
@@ -1176,8 +1178,7 @@ typedef struct {
static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx* exec_ctx,
void* arg,
grpc_error* error) {
pick_after_resolver_result_args *args =
(pick_after_resolver_result_args *)arg;
pick_after_resolver_result_args* args = (pick_after_resolver_result_args*)arg;
if (args->finished) {
gpr_free(args);
return;
@@ -1214,8 +1215,7 @@ static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
static void pick_after_resolver_result_done_locked(grpc_exec_ctx* exec_ctx,
void* arg,
grpc_error* error) {
pick_after_resolver_result_args *args =
(pick_after_resolver_result_args *)arg;
pick_after_resolver_result_args* args = (pick_after_resolver_result_args*)arg;
if (args->finished) {
/* cancelled, do nothing */
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
@@ -1549,7 +1549,8 @@ grpc_connectivity_state grpc_client_channel_check_connectivity_state(
if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "try_to_connect");
GRPC_CLOSURE_SCHED(
exec_ctx, GRPC_CLOSURE_CREATE(try_to_connect_locked, chand,
exec_ctx,
GRPC_CLOSURE_CREATE(try_to_connect_locked, chand,
grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
}
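
Note: most of the core-library hunks from here on are pure re-wrapping. clang-format 5.0 prefers to break right after the opening parenthesis and keep a long format string intact on its own line, rather than splitting the string literal and hanging the continuation under a nested call. A sketch with an invented log message:

    /* clang-format 4.x wrapping: */
    gpr_log(GPR_DEBUG, "chand=%p: sending %" PRIuPTR
                       " batches",
            chand, count);

    /* clang-format 5.0 wrapping: */
    gpr_log(GPR_DEBUG,
            "chand=%p: sending %" PRIuPTR " batches",
            chand, count);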

@@ -80,8 +80,9 @@ void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx,
gpr_atm mask = ~(gpr_atm)((1 << WEAK_REF_BITS) - 1);
gpr_atm check = 1 << WEAK_REF_BITS;
if ((old_val & mask) == check) {
GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(
shutdown_locked, policy,
GRPC_CLOSURE_SCHED(
exec_ctx,
GRPC_CLOSURE_CREATE(shutdown_locked, policy,
grpc_combiner_scheduler(policy->combiner)),
GRPC_ERROR_NONE);
} else {

@@ -75,8 +75,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(args->context != NULL);
GPR_ASSERT(args->context[GRPC_GRPCLB_CLIENT_STATS].value != NULL);
calld->client_stats = grpc_grpclb_client_stats_ref(
(grpc_grpclb_client_stats *)args->context[GRPC_GRPCLB_CLIENT_STATS]
.value);
(grpc_grpclb_client_stats*)args->context[GRPC_GRPCLB_CLIENT_STATS].value);
// Record call started.
grpc_grpclb_client_stats_add_call_started(calld->client_stats);
return GRPC_ERROR_NONE;

@@ -1169,8 +1169,8 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (glb_policy->rr_policy != NULL) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p",
(void *)glb_policy, (void *)glb_policy->rr_policy);
gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p", (void*)glb_policy,
(void*)glb_policy->rr_policy);
}
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");

@@ -200,8 +200,8 @@ grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
}
// Second pass: populate servers.
if (sl->num_servers > 0) {
sl->servers = (grpc_grpclb_server **)gpr_zalloc(
sizeof(grpc_grpclb_server *) * sl->num_servers);
sl->servers = (grpc_grpclb_server**)gpr_zalloc(sizeof(grpc_grpclb_server*) *
sl->num_servers);
decode_serverlist_arg decode_arg;
memset(&decode_arg, 0, sizeof(decode_arg));
decode_arg.serverlist = sl;
@@ -239,8 +239,8 @@ grpc_grpclb_serverlist *grpc_grpclb_serverlist_copy(
copy->num_servers = sl->num_servers;
memcpy(&copy->expiration_interval, &sl->expiration_interval,
sizeof(grpc_grpclb_duration));
copy->servers = (grpc_grpclb_server **)gpr_malloc(
sizeof(grpc_grpclb_server *) * sl->num_servers);
copy->servers = (grpc_grpclb_server**)gpr_malloc(sizeof(grpc_grpclb_server*) *
sl->num_servers);
for (size_t i = 0; i < sl->num_servers; i++) {
copy->servers[i] =
(grpc_grpclb_server*)gpr_malloc(sizeof(grpc_grpclb_server));

@@ -146,8 +146,7 @@ static void update_last_ready_subchannel_index_locked(round_robin_lb_policy *p,
GPR_ASSERT(last_ready_index < p->subchannel_list->num_subchannels);
p->last_ready_subchannel_index = last_ready_index;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(
GPR_DEBUG,
gpr_log(GPR_DEBUG,
"[RR %p] setting last_ready_subchannel_index=%lu (SC %p, CSC %p)",
(void*)p, (unsigned long)last_ready_index,
(void*)p->subchannel_list->subchannels[last_ready_index].subchannel,
@@ -561,8 +560,9 @@ static void rr_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_connected_subchannel_ping(exec_ctx, target, closure);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_ping");
} else {
GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Round Robin not connected"));
GRPC_CLOSURE_SCHED(
exec_ctx, closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Round Robin not connected"));
}
}

@@ -33,11 +33,12 @@ void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx *exec_ctx,
const char* reason) {
if (sd->subchannel != NULL) {
if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
gpr_log(
GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR
" of %" PRIuPTR " (subchannel %p): unreffing subchannel",
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): unreffing subchannel",
sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list,
(size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel);
}
GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, reason);
@@ -76,11 +77,12 @@ void grpc_lb_subchannel_data_start_connectivity_watch(
void grpc_lb_subchannel_data_stop_connectivity_watch(
grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd) {
if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
gpr_log(
GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): stopping connectivity watch",
sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list,
(size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel);
}
GPR_ASSERT(sd->connectivity_notification_pending);
@@ -140,7 +142,8 @@ grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
if (GRPC_TRACER_ON(*tracer)) {
char* address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
gpr_log(GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR
": Created subchannel %p for address uri %s",
tracer->name, p, subchannel_list, subchannel_index, subchannel,
address_uri);
@@ -229,11 +232,12 @@ void grpc_lb_subchannel_list_unref_for_connectivity_watch(
static void subchannel_data_cancel_connectivity_watch(
grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd, const char* reason) {
if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
gpr_log(
GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): canceling connectivity watch (%s)",
sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list,
(size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel, reason);
}
grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,

@@ -118,8 +118,9 @@ static void fd_node_shutdown(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
gpr_mu_unlock(&fdn->mu);
fd_node_destroy(exec_ctx, fdn);
} else {
grpc_fd_shutdown(exec_ctx, fdn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"c-ares fd shutdown"));
grpc_fd_shutdown(
exec_ctx, fdn->fd,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("c-ares fd shutdown"));
gpr_mu_unlock(&fdn->mu);
}
}
@@ -165,8 +166,9 @@ void grpc_ares_ev_driver_shutdown(grpc_exec_ctx *exec_ctx,
ev_driver->shutting_down = true;
fd_node* fn = ev_driver->fds;
while (fn != NULL) {
grpc_fd_shutdown(exec_ctx, fn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"grpc_ares_ev_driver_shutdown"));
grpc_fd_shutdown(
exec_ctx, fn->fd,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("grpc_ares_ev_driver_shutdown"));
fn = fn->next;
}
gpr_mu_unlock(&ev_driver->mu);

@@ -276,8 +276,9 @@ void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
gpr_atm old_refs;
old_refs = ref_mutate(c, -(gpr_atm)1, 1 REF_MUTATE_PURPOSE("WEAK_UNREF"));
if (old_refs == 1) {
GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(subchannel_destroy, c,
grpc_schedule_on_exec_ctx),
GRPC_CLOSURE_SCHED(
exec_ctx,
GRPC_CLOSURE_CREATE(subchannel_destroy, c, grpc_schedule_on_exec_ctx),
GRPC_ERROR_NONE);
}
}

@@ -104,8 +104,7 @@ static long sck_avl_compare(void *a, void *b, void *unused) {
static void scv_avl_destroy(void* p, void* user_data) {
grpc_exec_ctx* exec_ctx = (grpc_exec_ctx*)user_data;
GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, (grpc_subchannel *)p,
"subchannel_index");
GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, (grpc_subchannel*)p, "subchannel_index");
}
static void* scv_avl_copy(void* p, void* unused) {

@@ -250,7 +250,8 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
const float savings_ratio = 1.0f - (float)after_size / (float)before_size;
GPR_ASSERT(grpc_compression_algorithm_name(calld->compression_algorithm,
&algo_name));
gpr_log(GPR_DEBUG, "Compressed[%s] %" PRIuPTR " bytes vs. %" PRIuPTR
gpr_log(GPR_DEBUG,
"Compressed[%s] %" PRIuPTR " bytes vs. %" PRIuPTR
" bytes (%.2f%% savings)",
algo_name, before_size, after_size, 100 * savings_ratio);
}

@@ -62,7 +62,9 @@ typedef struct call_data {
grpc_closure hs_recv_message_ready;
} call_data;
typedef struct channel_data { uint8_t unused; } channel_data;
typedef struct channel_data {
uint8_t unused;
} channel_data;
static grpc_error* server_filter_outgoing_metadata(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
@@ -241,10 +243,10 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
grpc_linked_mdelem* el = b->idx.named.host;
grpc_mdelem md = GRPC_MDELEM_REF(el->md);
grpc_metadata_batch_remove(exec_ctx, b, el);
add_error(
error_name, &error,
add_error(error_name, &error,
grpc_metadata_batch_add_head(
exec_ctx, b, el, grpc_mdelem_from_slices(
exec_ctx, b, el,
grpc_mdelem_from_slices(
exec_ctx, GRPC_MDSTR_AUTHORITY,
grpc_slice_ref_internal(GRPC_MDVALUE(md)))));
GRPC_MDELEM_UNREF(exec_ctx, md);

@@ -98,7 +98,8 @@ grpc_channel *grpc_insecure_channel_create(const char *target,
// Clean up.
grpc_channel_args_destroy(&exec_ctx, new_args);
grpc_exec_ctx_finish(&exec_ctx);
return channel != NULL ? channel : grpc_lame_client_channel_create(
return channel != NULL ? channel
: grpc_lame_client_channel_create(
target, GRPC_STATUS_INTERNAL,
"Failed to create client channel");
}

@@ -62,7 +62,8 @@ grpc_channel *grpc_insecure_channel_create_from_fd(
grpc_exec_ctx_finish(&exec_ctx);
return channel != NULL ? channel : grpc_lame_client_channel_create(
return channel != NULL ? channel
: grpc_lame_client_channel_create(
target, GRPC_STATUS_INTERNAL,
"Failed to create client channel");
}

@@ -250,8 +250,9 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
goto error;
} else if (count != naddrs) {
char* msg;
gpr_asprintf(&msg, "Only %" PRIuPTR
" addresses added out of total %" PRIuPTR " resolved",
gpr_asprintf(&msg,
"Only %" PRIuPTR " addresses added out of total %" PRIuPTR
" resolved",
count, naddrs);
err = GRPC_ERROR_CREATE_REFERENCING_FROM_COPIED_STRING(msg, errors, naddrs);
gpr_free(msg);

@@ -789,7 +789,8 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
s->destroy_stream_arg = then_schedule_closure;
GRPC_CLOSURE_SCHED(
exec_ctx, GRPC_CLOSURE_INIT(&s->destroy_stream, destroy_stream_locked, s,
exec_ctx,
GRPC_CLOSURE_INIT(&s->destroy_stream, destroy_stream_locked, s,
grpc_combiner_scheduler(t->combiner)),
GRPC_ERROR_NONE);
GPR_TIMER_END("destroy_stream", 0);
@@ -1025,11 +1026,13 @@ static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt,
GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED(exec_ctx);
}
set_write_state(
exec_ctx, t, r.partial ? GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE
exec_ctx, t,
r.partial ? GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE
: GRPC_CHTTP2_WRITE_STATE_WRITING,
begin_writing_desc(r.partial, scheduler == grpc_schedule_on_exec_ctx));
GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_INIT(&t->write_action,
write_action, t, scheduler),
GRPC_CLOSURE_SCHED(
exec_ctx,
GRPC_CLOSURE_INIT(&t->write_action, write_action, t, scheduler),
GRPC_ERROR_NONE);
} else {
GRPC_STATS_INC_HTTP2_SPURIOUS_WRITES_BEGUN(exec_ctx);
@@ -1772,7 +1775,8 @@ void grpc_chttp2_add_ping_strike(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_ENHANCE_YOUR_CALM));
/*The transport will be closed after the write is done */
close_transport_locked(
exec_ctx, t, grpc_error_set_int(
exec_ctx, t,
grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Too many pings"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
}
@@ -2955,9 +2959,10 @@ static void incoming_byte_stream_destroy(grpc_exec_ctx *exec_ctx,
grpc_chttp2_incoming_byte_stream* bs =
(grpc_chttp2_incoming_byte_stream*)byte_stream;
GRPC_CLOSURE_SCHED(
exec_ctx, GRPC_CLOSURE_INIT(
&bs->destroy_action, incoming_byte_stream_destroy_locked,
bs, grpc_combiner_scheduler(bs->transport->combiner)),
exec_ctx,
GRPC_CLOSURE_INIT(&bs->destroy_action,
incoming_byte_stream_destroy_locked, bs,
grpc_combiner_scheduler(bs->transport->combiner)),
GRPC_ERROR_NONE);
GPR_TIMER_END("incoming_byte_stream_destroy", 0);
}

@@ -224,8 +224,8 @@ grpc_error* StreamFlowControl::RecvData(int64_t incoming_frame_size) {
incoming_frame_size, acked_stream_window, sent_stream_window);
} else {
char* msg;
gpr_asprintf(&msg, "frame of size %" PRId64
" overflows local window of %" PRId64,
gpr_asprintf(
&msg, "frame of size %" PRId64 " overflows local window of %" PRId64,
incoming_frame_size, acked_stream_window);
grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);

@@ -540,9 +540,8 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
decoder_space_usage < MAX_DECODER_SPACE_USAGE &&
c->filter_elems[HASH_FRAGMENT_1(elem_hash)] >=
c->filter_elems_sum / ONE_ON_ADD_PROBABILITY;
void (*maybe_add)(grpc_exec_ctx *, grpc_chttp2_hpack_compressor *,
grpc_mdelem, size_t) =
should_add_elem ? add_elem : add_nothing;
void (*maybe_add)(grpc_exec_ctx*, grpc_chttp2_hpack_compressor*, grpc_mdelem,
size_t) = should_add_elem ? add_elem : add_nothing;
void (*emit)(grpc_exec_ctx*, grpc_chttp2_hpack_compressor*, uint32_t,
grpc_mdelem, framer_state*) =
should_add_elem ? emit_lithdr_incidx : emit_lithdr_noidx;

@@ -42,8 +42,9 @@ grpc_error *grpc_chttp2_incoming_metadata_buffer_add(
grpc_mdelem elem) {
buffer->size += GRPC_MDELEM_LENGTH(elem);
return grpc_metadata_batch_add_tail(
exec_ctx, &buffer->batch, (grpc_linked_mdelem *)gpr_arena_alloc(
buffer->arena, sizeof(grpc_linked_mdelem)),
exec_ctx, &buffer->batch,
(grpc_linked_mdelem*)gpr_arena_alloc(buffer->arena,
sizeof(grpc_linked_mdelem)),
elem);
}

@@ -216,8 +216,9 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
case GRPC_DTS_FRAME:
GPR_ASSERT(cur < end);
if ((uint32_t)(end - cur) == t->incoming_frame_size) {
err = parse_frame_slice(
exec_ctx, t, grpc_slice_sub_no_ref(slice, (size_t)(cur - beg),
err =
parse_frame_slice(exec_ctx, t,
grpc_slice_sub_no_ref(slice, (size_t)(cur - beg),
(size_t)(end - beg)),
1);
if (err != GRPC_ERROR_NONE) {
@@ -240,8 +241,9 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
t->incoming_stream = NULL;
goto dts_fh_0; /* loop */
} else {
err = parse_frame_slice(
exec_ctx, t, grpc_slice_sub_no_ref(slice, (size_t)(cur - beg),
err =
parse_frame_slice(exec_ctx, t,
grpc_slice_sub_no_ref(slice, (size_t)(cur - beg),
(size_t)(end - beg)),
0);
if (err != GRPC_ERROR_NONE) {

@@ -526,12 +526,12 @@ static void on_response_headers_received(
grpc_chttp2_incoming_metadata_buffer_init(&s->state.rs.initial_metadata,
s->arena);
for (size_t i = 0; i < headers->count; i++) {
GRPC_LOG_IF_ERROR(
"on_response_headers_received",
GRPC_LOG_IF_ERROR("on_response_headers_received",
grpc_chttp2_incoming_metadata_buffer_add(
&exec_ctx, &s->state.rs.initial_metadata,
grpc_mdelem_from_slices(
&exec_ctx, grpc_slice_intern(grpc_slice_from_static_string(
&exec_ctx,
grpc_slice_intern(grpc_slice_from_static_string(
headers->headers[i].key)),
grpc_slice_intern(grpc_slice_from_static_string(
headers->headers[i].value)))));
@@ -636,12 +636,12 @@ static void on_response_trailers_received(
for (size_t i = 0; i < trailers->count; i++) {
CRONET_LOG(GPR_DEBUG, "trailer key=%s, value=%s", trailers->headers[i].key,
trailers->headers[i].value);
GRPC_LOG_IF_ERROR(
"on_response_trailers_received",
GRPC_LOG_IF_ERROR("on_response_trailers_received",
grpc_chttp2_incoming_metadata_buffer_add(
&exec_ctx, &s->state.rs.trailing_metadata,
grpc_mdelem_from_slices(
&exec_ctx, grpc_slice_intern(grpc_slice_from_static_string(
&exec_ctx,
grpc_slice_intern(grpc_slice_from_static_string(
trailers->headers[i].key)),
grpc_slice_intern(grpc_slice_from_static_string(
trailers->headers[i].value)))));
@@ -1207,8 +1207,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
if (stream_state->rs.compressed) {
stream_state->rs.sbs.base.flags |= GRPC_WRITE_INTERNAL_COMPRESS;
}
*((grpc_byte_buffer **)
stream_op->payload->recv_message.recv_message) =
*((grpc_byte_buffer**)stream_op->payload->recv_message.recv_message) =
(grpc_byte_buffer*)&stream_state->rs.sbs;
GRPC_CLOSURE_SCHED(
exec_ctx, stream_op->payload->recv_message.recv_message_ready,

@@ -972,7 +972,8 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
// 4. We want to receive a message and there is a message ready
// 5. There is trailing metadata, even if nothing specifically wants
// that because that can shut down the receive message as well
if ((op->send_message && other && ((other->recv_message_op != NULL) ||
if ((op->send_message && other &&
((other->recv_message_op != NULL) ||
(other->recv_trailing_md_op != NULL))) ||
(op->send_trailing_metadata && !op->send_message) ||
(op->recv_initial_metadata && s->to_read_initial_md_filled) ||

@@ -104,9 +104,8 @@ grpc_error *grpc_channel_stack_init(
GRPC_STREAM_REF_INIT(&stack->refcount, initial_refs, destroy, destroy_arg,
name);
elems = CHANNEL_ELEMS_FROM_STACK(stack);
user_data =
((char *)elems) +
ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_channel_element));
user_data = ((char*)elems) + ROUND_UP_TO_ALIGNMENT_SIZE(
filter_count * sizeof(grpc_channel_element));
/* init per-filter data */
grpc_error* first_error = GRPC_ERROR_NONE;

@@ -119,8 +119,8 @@ static double threshold_for_count_below(const gpr_atm *bucket_counts,
should lie */
lower_bound = bucket_boundaries[lower_idx];
upper_bound = bucket_boundaries[lower_idx + 1];
return upper_bound -
(upper_bound - lower_bound) * (count_so_far - count_below) /
return upper_bound - (upper_bound - lower_bound) *
(count_so_far - count_below) /
(double)bucket_counts[lower_idx];
}
}

@@ -123,8 +123,10 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
const char* grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of client side calls created by this process",
"Number of server side calls created by this process",
"Number of completion queues created", "Number of client channels created",
"Number of client subchannels created", "Number of server channels created",
"Number of completion queues created",
"Number of client channels created",
"Number of client subchannels created",
"Number of server channels created",
"Number of polling syscalls (epoll_wait, poll, etc) made by this process",
"Number of sleeping syscalls made by this process",
"How many polling wakeups were performed by the process (only valid for "
@@ -154,7 +156,8 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of batches containing receive initial metadata",
"Number of batches containing receive message",
"Number of batches containing receive trailing metadata",
"Number of settings frames sent", "Number of HTTP2 pings sent by process",
"Number of settings frames sent",
"Number of HTTP2 pings sent by process",
"Number of HTTP2 writes initiated",
"Number of HTTP2 writes offloaded to the executor from application threads",
"Number of HTTP2 writes that finished seeing more data needed to be "

@@ -178,8 +178,9 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
internal_request* req = (internal_request*)arg;
if (!ep) {
next_address(exec_ctx, req, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Unexplained handshake failure"));
next_address(
exec_ctx, req,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unexplained handshake failure"));
return;
}

@@ -54,8 +54,8 @@ static void create_sockets(SOCKET sv[2]) {
WSA_FLAG_OVERLAPPED);
GPR_ASSERT(cli_sock != INVALID_SOCKET);
GPR_ASSERT(WSAConnect(cli_sock, (struct sockaddr *)&addr, addr_len, NULL,
NULL, NULL, NULL) == 0);
GPR_ASSERT(WSAConnect(cli_sock, (struct sockaddr*)&addr, addr_len, NULL, NULL,
NULL, NULL) == 0);
svr_sock = accept(lst_sock, (struct sockaddr*)&addr, &addr_len);
GPR_ASSERT(svr_sock != INVALID_SOCKET);

@@ -712,8 +712,8 @@ static char *finish_kvs(kv_pairs *kvs) {
append_chr('{', &s, &sz, &cap);
for (size_t i = 0; i < kvs->num_kvs; i++) {
if (i != 0) append_chr(',', &s, &sz, &cap);
append_esc_str((const uint8_t *)kvs->kvs[i].key, strlen(kvs->kvs[i].key),
&s, &sz, &cap);
append_esc_str((const uint8_t*)kvs->kvs[i].key, strlen(kvs->kvs[i].key), &s,
&sz, &cap);
gpr_free(kvs->kvs[i].key);
append_chr(':', &s, &sz, &cap);
append_str(kvs->kvs[i].value, &s, &sz, &cap);

@@ -444,8 +444,8 @@ static grpc_error *pollset_global_init(void) {
return GRPC_OS_ERROR(errno, "epoll_ctl");
}
g_num_neighborhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBORHOODS);
g_neighborhoods = (pollset_neighborhood *)gpr_zalloc(
sizeof(*g_neighborhoods) * g_num_neighborhoods);
g_neighborhoods = (pollset_neighborhood*)gpr_zalloc(sizeof(*g_neighborhoods) *
g_num_neighborhoods);
for (size_t i = 0; i < g_num_neighborhoods; i++) {
gpr_mu_init(&g_neighborhoods[i].mu);
}
@@ -996,10 +996,10 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
gpr_strvec log;
gpr_strvec_init(&log);
char* tmp;
gpr_asprintf(
&tmp, "PS:%p KICK:%p curps=%p curworker=%p root=%p", pollset,
gpr_asprintf(&tmp, "PS:%p KICK:%p curps=%p curworker=%p root=%p", pollset,
specific_worker, (void*)gpr_tls_get(&g_current_thread_pollset),
(void *)gpr_tls_get(&g_current_thread_worker), pollset->root_worker);
(void*)gpr_tls_get(&g_current_thread_worker),
pollset->root_worker);
gpr_strvec_add(&log, tmp);
if (pollset->root_worker != NULL) {
gpr_asprintf(&tmp, " {kick_state=%s next=%p {kick_state=%s}}",

@@ -306,8 +306,9 @@ static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n) {
#endif
gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
if (old == n) {
GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(fd_destroy, fd,
grpc_schedule_on_exec_ctx),
GRPC_CLOSURE_SCHED(
exec_ctx,
GRPC_CLOSURE_CREATE(fd_destroy, fd, grpc_schedule_on_exec_ctx),
GRPC_ERROR_NONE);
} else {
GPR_ASSERT(old > n);
@@ -624,8 +625,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
"PS:%p kick %p tls_pollset=%p tls_worker=%p pollset.root_worker=%p",
pollset, specific_worker,
(void*)gpr_tls_get(&g_current_thread_pollset),
(void *)gpr_tls_get(&g_current_thread_worker),
pollset->root_worker);
(void*)gpr_tls_get(&g_current_thread_worker), pollset->root_worker);
}
if (specific_worker == NULL) {
if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
@@ -984,8 +984,9 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
WORKER_PTR->originator = gettid();
#endif
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p work hdl=%p worker=%p now=%" PRIdPTR
" deadline=%" PRIdPTR " kwp=%d pollable=%p",
gpr_log(GPR_DEBUG,
"PS:%p work hdl=%p worker=%p now=%" PRIdPTR " deadline=%" PRIdPTR
" kwp=%d pollable=%p",
pollset, worker_hdl, WORKER_PTR, grpc_exec_ctx_now(exec_ctx),
deadline, pollset->kicked_without_poller, pollset->active_pollable);
}
@@ -999,8 +1000,9 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
gpr_tls_set(&g_current_thread_worker, (intptr_t)WORKER_PTR);
if (WORKER_PTR->pollable_obj->event_cursor ==
WORKER_PTR->pollable_obj->event_count) {
append_error(&error, pollable_epoll(exec_ctx, WORKER_PTR->pollable_obj,
deadline),
append_error(
&error,
pollable_epoll(exec_ctx, WORKER_PTR->pollable_obj, deadline),
err_desc);
}
append_error(&error,
@@ -1368,13 +1370,15 @@ static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
}
size_t initial_a_fd_count = a->fd_count;
a->fd_count = 0;
append_error(&error, add_fds_to_pollsets(exec_ctx, a->fds, initial_a_fd_count,
b->pollsets, b->pollset_count,
"merge_a2b", a->fds, &a->fd_count),
append_error(
&error,
add_fds_to_pollsets(exec_ctx, a->fds, initial_a_fd_count, b->pollsets,
b->pollset_count, "merge_a2b", a->fds, &a->fd_count),
err_desc);
append_error(&error, add_fds_to_pollsets(exec_ctx, b->fds, b->fd_count,
a->pollsets, a->pollset_count,
"merge_b2a", a->fds, &a->fd_count),
append_error(
&error,
add_fds_to_pollsets(exec_ctx, b->fds, b->fd_count, a->pollsets,
a->pollset_count, "merge_b2a", a->fds, &a->fd_count),
err_desc);
if (a->pollset_capacity < a->pollset_count + b->pollset_count) {
a->pollset_capacity =

@@ -290,7 +290,8 @@ static void pi_add_ref_dbg(polling_island *pi, const char *reason,
const char* file, int line) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
gpr_log(GPR_DEBUG, "Add ref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
gpr_log(GPR_DEBUG,
"Add ref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
" (%s) - (%s, %d)",
pi, old_cnt, old_cnt + 1, reason, file, line);
}
@@ -301,7 +302,8 @@ static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi,
const char* reason, const char* file, int line) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
gpr_log(GPR_DEBUG, "Unref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
gpr_log(GPR_DEBUG,
"Unref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
" (%s) - (%s, %d)",
pi, old_cnt, (old_cnt - 1), reason, file, line);
}
@@ -1486,8 +1488,7 @@ retry:
} else {
GRPC_POLLING_TRACE(
"add_poll_object: Same polling island. pi: %p (%s, %s)",
(void *)pi_new, poll_obj_string(item_type),
poll_obj_string(bag_type));
(void*)pi_new, poll_obj_string(item_type), poll_obj_string(bag_type));
}
} else if (item->pi == NULL) {
/* GPR_ASSERT(bag->pi != NULL) */

@@ -1623,8 +1623,7 @@ static void global_cv_fd_table_init() {
gpr_cv_init(&g_cvfds.shutdown_cv);
gpr_ref_init(&g_cvfds.pollcount, 1);
g_cvfds.size = CV_DEFAULT_TABLE_SIZE;
g_cvfds.cvfds =
(fd_node *)gpr_malloc(sizeof(fd_node) * CV_DEFAULT_TABLE_SIZE);
g_cvfds.cvfds = (fd_node*)gpr_malloc(sizeof(fd_node) * CV_DEFAULT_TABLE_SIZE);
g_cvfds.free_fds = NULL;
thread_grace = gpr_time_from_millis(POLLCV_THREAD_GRACE_MS, GPR_TIMESPAN);
for (int i = 0; i < CV_DEFAULT_TABLE_SIZE; i++) {

@@ -109,7 +109,8 @@ void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx) {
}
if (g_root_object.next != &g_root_object) {
if (grpc_iomgr_abort_on_leaks()) {
gpr_log(GPR_DEBUG, "Failed to free %" PRIuPTR
gpr_log(GPR_DEBUG,
"Failed to free %" PRIuPTR
" iomgr objects before shutdown deadline: "
"memory leaks are likely",
count_objects());
@@ -121,7 +122,8 @@ void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx) {
if (gpr_cv_wait(&g_rcv, &g_mu, short_deadline)) {
if (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), shutdown_deadline) > 0) {
if (g_root_object.next != &g_root_object) {
gpr_log(GPR_DEBUG, "Failed to free %" PRIuPTR
gpr_log(GPR_DEBUG,
"Failed to free %" PRIuPTR
" iomgr objects before shutdown deadline: "
"memory leaks are likely",
count_objects());

@@ -47,8 +47,8 @@ grpc_error *grpc_load_file(const char *filename, int add_null_terminator,
/* Converting to size_t on the assumption that it will not fail */
contents_size = (size_t)ftell(file);
fseek(file, 0, SEEK_SET);
contents = (unsigned char *)gpr_malloc(contents_size +
(add_null_terminator ? 1 : 0));
contents =
(unsigned char*)gpr_malloc(contents_size + (add_null_terminator ? 1 : 0));
bytes_read = fread(contents, 1, contents_size, file);
if (bytes_read < contents_size) {
error = GRPC_OS_ERROR(errno, "fread");

@@ -277,8 +277,7 @@ static void rq_update_estimate(grpc_resource_quota *resource_quota) {
gpr_atm memory_usage_estimation = MEMORY_USAGE_ESTIMATION_MAX;
if (resource_quota->size != 0) {
memory_usage_estimation =
GPR_CLAMP((gpr_atm)((1.0 -
((double)resource_quota->free_pool) /
GPR_CLAMP((gpr_atm)((1.0 - ((double)resource_quota->free_pool) /
((double)resource_quota->size)) *
MEMORY_USAGE_ESTIMATION_MAX),
0, MEMORY_USAGE_ESTIMATION_MAX);
@@ -295,7 +294,8 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx,
GRPC_RULIST_AWAITING_ALLOCATION))) {
gpr_mu_lock(&resource_user->mu);
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
gpr_log(GPR_DEBUG, "RQ: check allocation for user %p shutdown=%" PRIdPTR
gpr_log(GPR_DEBUG,
"RQ: check allocation for user %p shutdown=%" PRIdPTR
" free_pool=%" PRId64,
resource_user, gpr_atm_no_barrier_load(&resource_user->shutdown),
resource_user->free_pool);
@@ -320,7 +320,8 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx,
resource_quota->free_pool -= amt;
rq_update_estimate(resource_quota);
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
gpr_log(GPR_DEBUG, "RQ %s %s: grant alloc %" PRId64
gpr_log(GPR_DEBUG,
"RQ %s %s: grant alloc %" PRId64
" bytes; rq_free_pool -> %" PRId64,
resource_quota->name, resource_user->name, amt,
resource_quota->free_pool);
@@ -357,7 +358,8 @@ static bool rq_reclaim_from_per_user_free_pool(
resource_quota->free_pool += amt;
rq_update_estimate(resource_quota);
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
gpr_log(GPR_DEBUG, "RQ %s %s: reclaim_from_per_user_free_pool %" PRId64
gpr_log(GPR_DEBUG,
"RQ %s %s: reclaim_from_per_user_free_pool %" PRId64
" bytes; rq_free_pool -> %" PRId64,
resource_quota->name, resource_user->name, amt,
resource_quota->free_pool);

@@ -132,8 +132,7 @@ void grpc_sockaddr_make_wildcard4(int port,
void grpc_sockaddr_make_wildcard6(int port,
grpc_resolved_address* resolved_wild_out) {
struct sockaddr_in6 *wild_out =
(struct sockaddr_in6 *)resolved_wild_out->addr;
struct sockaddr_in6* wild_out = (struct sockaddr_in6*)resolved_wild_out->addr;
GPR_ASSERT(port >= 0 && port < 65536);
memset(resolved_wild_out, 0, sizeof(*resolved_wild_out));
wild_out->sin6_family = AF_INET6;

@@ -106,8 +106,9 @@ static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
}
gpr_mu_lock(&ac->mu);
if (ac->fd != NULL) {
grpc_fd_shutdown(exec_ctx, ac->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"connect() timed out"));
grpc_fd_shutdown(
exec_ctx, ac->fd,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("connect() timed out"));
}
done = (--ac->refs == 0);
gpr_mu_unlock(&ac->mu);
@@ -279,8 +280,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
do {
GPR_ASSERT(addr->len < ~(socklen_t)0);
err =
connect(fd, (const struct sockaddr *)addr->addr, (socklen_t)addr->len);
err = connect(fd, (const struct sockaddr*)addr->addr, (socklen_t)addr->len);
} while (err < 0 && errno == EINTR);
addr_str = grpc_sockaddr_to_uri(addr);

@@ -154,8 +154,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
// TODO(murgatroid99): figure out what the return value here means
uv_tcp_connect(&connect->connect_req, connect->tcp_handle,
(const struct sockaddr *)resolved_addr->addr,
uv_tc_on_connect);
(const struct sockaddr*)resolved_addr->addr, uv_tc_on_connect);
GRPC_CLOSURE_INIT(&connect->on_alarm, uv_tc_on_alarm, connect,
grpc_schedule_on_exec_ctx);
grpc_timer_init(exec_ctx, &connect->alarm, deadline, &connect->on_alarm);

@@ -172,8 +172,8 @@ static void tcp_client_connect_impl(
grpc_sockaddr_make_wildcard6(0, &local_address);
status = bind(sock, (struct sockaddr *)&local_address.addr,
(int)local_address.len);
status =
bind(sock, (struct sockaddr*)&local_address.addr, (int)local_address.len);
if (status != 0) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "bind");
goto failure;
@@ -181,8 +181,8 @@ static void tcp_client_connect_impl(
socket = grpc_winsocket_create(sock, "client");
info = &socket->write_info;
success = ConnectEx(sock, (struct sockaddr *)&addr->addr, (int)addr->len,
NULL, 0, NULL, &info->overlapped);
success = ConnectEx(sock, (struct sockaddr*)&addr->addr, (int)addr->len, NULL,
0, NULL, &info->overlapped);
/* It wouldn't be unusual to get a success immediately. But we'll still get
an IOCP notification, so let's ignore it. */

@@ -185,8 +185,9 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
if (s->active_ports) {
grpc_tcp_listener* sp;
for (sp = s->head; sp; sp = sp->next) {
grpc_fd_shutdown(exec_ctx, sp->emfd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Server destroyed"));
grpc_fd_shutdown(
exec_ctx, sp->emfd,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server destroyed"));
}
gpr_mu_unlock(&s->mu);
} else {

@@ -356,8 +356,8 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
(int*)&sockname_temp.len)) {
*port = grpc_sockaddr_get_port(&sockname_temp);
if (*port > 0) {
allocated_addr = (grpc_resolved_address *)gpr_malloc(
sizeof(grpc_resolved_address));
allocated_addr =
(grpc_resolved_address*)gpr_malloc(sizeof(grpc_resolved_address));
memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
grpc_sockaddr_set_port(allocated_addr, *port);
addr = allocated_addr;

@@ -138,8 +138,9 @@ static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
}
GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(destroy_server, s,
grpc_schedule_on_exec_ctx),
GRPC_CLOSURE_SCHED(
exec_ctx,
GRPC_CLOSURE_CREATE(destroy_server, s, grpc_schedule_on_exec_ctx),
GRPC_ERROR_NONE);
}
@@ -346,8 +347,7 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
gpr_free(utf8_message);
}
int peer_name_len = (int)peer_name.len;
err =
getpeername(sock, (struct sockaddr *)peer_name.addr, &peer_name_len);
err = getpeername(sock, (struct sockaddr*)peer_name.addr, &peer_name_len);
peer_name.len = (size_t)peer_name_len;
if (!err) {
peer_name_string = grpc_sockaddr_to_uri(&peer_name);
@@ -475,8 +475,8 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
sockname_temp.len = (size_t)sockname_temp_len;
*port = grpc_sockaddr_get_port(&sockname_temp);
if (*port > 0) {
allocated_addr = (grpc_resolved_address *)gpr_malloc(
sizeof(grpc_resolved_address));
allocated_addr =
(grpc_resolved_address*)gpr_malloc(sizeof(grpc_resolved_address));
memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
grpc_sockaddr_set_port(allocated_addr, *port);
addr = allocated_addr;

@@ -252,8 +252,9 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
}
if (tcp->shutting_down) {
GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"TCP socket is shutting down"));
GRPC_CLOSURE_SCHED(
exec_ctx, cb,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP socket is shutting down"));
return;
}

@@ -368,7 +368,8 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
list_join(&shard->list, timer);
}
if (GRPC_TRACER_ON(grpc_timer_trace)) {
gpr_log(GPR_DEBUG, " .. add to shard %d with queue_deadline_cap=%" PRIdPTR
gpr_log(GPR_DEBUG,
" .. add to shard %d with queue_deadline_cap=%" PRIdPTR
" => is_first_timer=%s",
(int)(shard - g_shards), shard->queue_deadline_cap,
is_first_timer ? "true" : "false");
@@ -633,8 +634,9 @@ grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx,
} else {
gpr_asprintf(&next_str, "%" PRIdPTR, *next);
}
gpr_log(GPR_DEBUG, "TIMER CHECK BEGIN: now=%" PRIdPTR
" next=%s tls_min=%" PRIdPTR " glob_min=%" PRIdPTR,
gpr_log(GPR_DEBUG,
"TIMER CHECK BEGIN: now=%" PRIdPTR " next=%s tls_min=%" PRIdPTR
" glob_min=%" PRIdPTR,
now, next_str, gpr_tls_get(&g_last_seen_min_timer),
gpr_atm_no_barrier_load(&g_shared_mutables.min_timer));
gpr_free(next_str);

@@ -53,9 +53,8 @@ static void adjust_downwards(grpc_timer **first, uint32_t i, uint32_t length,
uint32_t left_child = 1u + 2u * i;
if (left_child >= length) break;
uint32_t right_child = left_child + 1;
uint32_t next_i =
right_child < length &&
first[left_child]->deadline > first[right_child]->deadline
uint32_t next_i = right_child < length && first[left_child]->deadline >
first[right_child]->deadline
? right_child
: left_child;
if (t->deadline <= first[next_i]->deadline) break;

@@ -258,8 +258,7 @@ static int bind_socket(grpc_socket_factory *socket_factory, int sockfd,
const grpc_resolved_address* addr) {
return (socket_factory != NULL)
? grpc_socket_factory_bind(socket_factory, sockfd, addr)
: bind(sockfd, (struct sockaddr *)addr->addr,
(socklen_t)addr->len);
: bind(sockfd, (struct sockaddr*)addr->addr, (socklen_t)addr->len);
}
/* Prepare a recently-created socket for listening. */
@@ -445,8 +444,8 @@ int grpc_udp_server_add_port(grpc_udp_server *s,
(socklen_t*)&sockname_temp.len)) {
port = grpc_sockaddr_get_port(&sockname_temp);
if (port > 0) {
allocated_addr = (grpc_resolved_address *)gpr_malloc(
sizeof(grpc_resolved_address));
allocated_addr =
(grpc_resolved_address*)gpr_malloc(sizeof(grpc_resolved_address));
memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
grpc_sockaddr_set_port(allocated_addr, port);
addr = allocated_addr;

@@ -273,7 +273,8 @@ void grpc_auth_context_add_property(grpc_auth_context *ctx, const char *name,
GRPC_API_TRACE(
"grpc_auth_context_add_property(ctx=%p, name=%s, value=%*.*s, "
"value_length=%lu)",
6, (ctx, name, (int)value_length, (int)value_length, value,
6,
(ctx, name, (int)value_length, (int)value_length, value,
(unsigned long)value_length));
ensure_auth_context_capacity(ctx);
prop = &ctx->properties.array[ctx->properties.count++];

@@ -131,8 +131,7 @@ static grpc_call_credentials_vtable md_only_test_vtable = {
grpc_call_credentials* grpc_md_only_test_credentials_create(
grpc_exec_ctx* exec_ctx, const char* md_key, const char* md_value,
bool is_async) {
grpc_md_only_test_credentials *c =
(grpc_md_only_test_credentials *)gpr_zalloc(
grpc_md_only_test_credentials* c = (grpc_md_only_test_credentials*)gpr_zalloc(
sizeof(grpc_md_only_test_credentials));
c->base.type = GRPC_CALL_CREDENTIALS_TYPE_OAUTH2;
c->base.vtable = &md_only_test_vtable;

@@ -511,8 +511,7 @@ static grpc_call_credentials_vtable access_token_vtable = {
grpc_call_credentials* grpc_access_token_credentials_create(
const char* access_token, void* reserved) {
grpc_access_token_credentials *c =
(grpc_access_token_credentials *)gpr_zalloc(
grpc_access_token_credentials* c = (grpc_access_token_credentials*)gpr_zalloc(
sizeof(grpc_access_token_credentials));
GRPC_API_TRACE(
"grpc_access_token_credentials_create(access_token=<redacted>, "

@@ -258,8 +258,7 @@ static grpc_call_credentials_vtable plugin_vtable = {
grpc_call_credentials* grpc_metadata_credentials_create_from_plugin(
grpc_metadata_credentials_plugin plugin, void* reserved) {
grpc_plugin_credentials *c =
(grpc_plugin_credentials *)gpr_zalloc(sizeof(*c));
grpc_plugin_credentials* c = (grpc_plugin_credentials*)gpr_zalloc(sizeof(*c));
GRPC_API_TRACE("grpc_metadata_credentials_create_from_plugin(reserved=%p)", 1,
(reserved));
GPR_ASSERT(reserved == NULL);

@@ -274,7 +274,8 @@ grpc_server_credentials *grpc_ssl_server_credentials_create_ex(
"grpc_ssl_server_credentials_create_ex("
"pem_root_certs=%s, pem_key_cert_pairs=%p, num_key_cert_pairs=%lu, "
"client_certificate_request=%d, reserved=%p)",
5, (pem_root_certs, pem_key_cert_pairs, (unsigned long)num_key_cert_pairs,
5,
(pem_root_certs, pem_key_cert_pairs, (unsigned long)num_key_cert_pairs,
client_certificate_request, reserved));
GPR_ASSERT(reserved == NULL);

@@ -156,7 +156,8 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
if (error != GRPC_ERROR_NONE) {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, ep->read_buffer);
call_read_cb(exec_ctx, ep, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
call_read_cb(exec_ctx, ep,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Secure read failed", &error, 1));
return;
}

@@ -252,8 +252,8 @@ static const grpc_arg_pointer_vtable connector_arg_vtable = {
connector_arg_copy, connector_arg_destroy, connector_cmp};
grpc_arg grpc_security_connector_to_arg(grpc_security_connector* sc) {
return grpc_channel_arg_pointer_create((char *)GRPC_ARG_SECURITY_CONNECTOR,
sc, &connector_arg_vtable);
return grpc_channel_arg_pointer_create((char*)GRPC_ARG_SECURITY_CONNECTOR, sc,
&connector_arg_vtable);
}
grpc_security_connector* grpc_security_connector_from_arg(const grpc_arg* arg) {
@@ -812,7 +812,8 @@ static void ssl_channel_check_peer(grpc_exec_ctx *exec_ctx,
grpc_closure* on_peer_checked) {
grpc_ssl_channel_security_connector* c =
(grpc_ssl_channel_security_connector*)sc;
-grpc_error *error = ssl_check_peer(sc, c->overridden_target_name != NULL
+grpc_error* error = ssl_check_peer(sc,
+c->overridden_target_name != NULL
? c->overridden_target_name
: c->target_name,
&peer, auth_context);
@@ -873,8 +874,8 @@ tsi_peer tsi_shallow_peer_from_ssl_auth_context(
while (grpc_auth_property_iterator_next(&it) != NULL) max_num_props++;
if (max_num_props > 0) {
-peer.properties = (tsi_peer_property *)gpr_malloc(
-max_num_props * sizeof(tsi_peer_property));
+peer.properties = (tsi_peer_property*)gpr_malloc(max_num_props *
+sizeof(tsi_peer_property));
it = grpc_auth_context_property_iterator(auth_context);
while ((prop = grpc_auth_property_iterator_next(&it)) != NULL) {
if (strcmp(prop->name, GRPC_X509_SAN_PROPERTY_NAME) == 0) {

@@ -304,7 +304,8 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&h->mu);
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(
-exec_ctx, h, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+exec_ctx, h,
+GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Handshake read failed", &error, 1));
gpr_mu_unlock(&h->mu);
security_handshaker_unref(exec_ctx, h);
@@ -347,7 +348,8 @@ static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_lock(&h->mu);
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(
-exec_ctx, h, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+exec_ctx, h,
+GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Handshake write failed", &error, 1));
gpr_mu_unlock(&h->mu);
security_handshaker_unref(exec_ctx, h);

@@ -59,7 +59,8 @@ static long node_height(gpr_avl_node *node) {
#ifndef NDEBUG
static long calculate_height(gpr_avl_node* node) {
-return node == NULL ? 0 : 1 + GPR_MAX(calculate_height(node->left),
+return node == NULL ? 0
+: 1 + GPR_MAX(calculate_height(node->left),
calculate_height(node->right));
}

@@ -183,8 +183,7 @@ static double threshold_for_count_below(gpr_histogram *h, double count_below) {
should lie */
lower_bound = bucket_start(h, (double)lower_idx);
upper_bound = bucket_start(h, (double)(lower_idx + 1));
-return GPR_CLAMP(upper_bound -
-(upper_bound - lower_bound) *
+return GPR_CLAMP(upper_bound - (upper_bound - lower_bound) *
(count_so_far - count_below) /
h->buckets[lower_idx],
h->min_seen, h->max_seen);

@@ -27,7 +27,6 @@
#include <pthread.h>
#include <stdarg.h>
#include <stdio.h>
-#include <stdio.h>
#include <string.h>
#include <time.h>

@@ -33,7 +33,9 @@ extern "C" {
// List node (include this in a data structure at the top, and add application
// fields after it - to simulate inheritance)
-typedef struct gpr_mpscq_node { gpr_atm next; } gpr_mpscq_node;
+typedef struct gpr_mpscq_node {
+gpr_atm next;
+} gpr_mpscq_node;
// Actual queue type
typedef struct gpr_mpscq {

@@ -23,7 +23,9 @@
/* Simple spinlock. No backoff strategy, gpr_spinlock_lock is almost always
a concurrency code smell. */
-typedef struct { gpr_atm atm; } gpr_spinlock;
+typedef struct {
+gpr_atm atm;
+} gpr_spinlock;
#ifdef __cplusplus
#define GPR_SPINLOCK_INITIALIZER (gpr_spinlock{0})

@@ -1639,8 +1639,8 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
}
} else {
/* Already received messages */
-saved_rsr_closure = GRPC_CLOSURE_CREATE(receiving_stream_ready,
-(batch_control *)rsr_bctlp,
+saved_rsr_closure =
+GRPC_CLOSURE_CREATE(receiving_stream_ready, (batch_control*)rsr_bctlp,
grpc_schedule_on_exec_ctx);
/* No need to modify recv_state */
break;

@@ -365,7 +365,8 @@ grpc_call *grpc_channel_create_registered_call(
"deadline=gpr_timespec { tv_sec: %" PRId64
", tv_nsec: %d, clock_type: %d }, "
"reserved=%p)",
-9, (channel, parent_call, (unsigned)propagation_mask, completion_queue,
+9,
+(channel, parent_call, (unsigned)propagation_mask, completion_queue,
registered_call_handle, deadline.tv_sec, deadline.tv_nsec,
(int)deadline.clock_type, reserved));
GPR_ASSERT(!reserved);

@@ -879,7 +879,8 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
"deadline=gpr_timespec { tv_sec: %" PRId64
", tv_nsec: %d, clock_type: %d }, "
"reserved=%p)",
-5, (cq, deadline.tv_sec, deadline.tv_nsec, (int)deadline.clock_type,
+5,
+(cq, deadline.tv_sec, deadline.tv_nsec, (int)deadline.clock_type,
reserved));
GPR_ASSERT(!reserved);
@@ -1115,8 +1116,9 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag,
"deadline=gpr_timespec { tv_sec: %" PRId64
", tv_nsec: %d, clock_type: %d }, "
"reserved=%p)",
-6, (cq, tag, deadline.tv_sec, deadline.tv_nsec,
-(int)deadline.clock_type, reserved));
+6,
+(cq, tag, deadline.tv_sec, deadline.tv_nsec, (int)deadline.clock_type,
+reserved));
}
GPR_ASSERT(!reserved);

@@ -1109,8 +1109,8 @@ void grpc_server_start(grpc_server *server) {
for (int j = 0; j < server->max_requested_calls_per_cq; j++) {
gpr_stack_lockfree_push(server->request_freelist_per_cq[i], j);
}
-server->requested_calls_per_cq[i] = (requested_call *)gpr_malloc(
-(size_t)server->max_requested_calls_per_cq *
+server->requested_calls_per_cq[i] =
+(requested_call*)gpr_malloc((size_t)server->max_requested_calls_per_cq *
sizeof(*server->requested_calls_per_cq[i]));
}
request_matcher_init(&server->unregistered_request_matcher,
@@ -1269,8 +1269,8 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
/* stay locked, and gather up some stuff to do */
GPR_ASSERT(grpc_cq_begin_op(cq, tag));
if (server->shutdown_published) {
-grpc_cq_end_op(
-&exec_ctx, cq, tag, GRPC_ERROR_NONE, done_published_shutdown, NULL,
+grpc_cq_end_op(&exec_ctx, cq, tag, GRPC_ERROR_NONE, done_published_shutdown,
+NULL,
(grpc_cq_completion*)gpr_malloc(sizeof(grpc_cq_completion)));
gpr_mu_unlock(&server->mu_global);
goto done;
@@ -1443,7 +1443,8 @@ grpc_call_error grpc_server_request_call(
"grpc_server_request_call("
"server=%p, call=%p, details=%p, initial_metadata=%p, "
"cq_bound_to_call=%p, cq_for_notification=%p, tag=%p)",
-7, (server, call, details, initial_metadata, cq_bound_to_call,
+7,
+(server, call, details, initial_metadata, cq_bound_to_call,
cq_for_notification, tag));
size_t cq_idx;
for (cq_idx = 0; cq_idx < server->cq_count; cq_idx++) {
@@ -1491,7 +1492,8 @@ grpc_call_error grpc_server_request_registered_call(
"server=%p, rmp=%p, call=%p, deadline=%p, initial_metadata=%p, "
"optional_payload=%p, cq_bound_to_call=%p, cq_for_notification=%p, "
"tag=%p)",
-9, (server, rmp, call, deadline, initial_metadata, optional_payload,
+9,
+(server, rmp, call, deadline, initial_metadata, optional_payload,
cq_bound_to_call, cq_for_notification, tag));
size_t cq_idx;

@@ -45,7 +45,8 @@ grpc_millis BdpEstimator::CompletePing(grpc_exec_ctx *exec_ctx) {
double bw = dt > 0 ? ((double)accumulator_ / dt) : 0;
int start_inter_ping_delay = inter_ping_delay_;
if (GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
-gpr_log(GPR_DEBUG, "bdp[%s]:complete acc=%" PRId64 " est=%" PRId64
+gpr_log(GPR_DEBUG,
+"bdp[%s]:complete acc=%" PRId64 " est=%" PRId64
" dt=%lf bw=%lfMbs bw_est=%lfMbs",
name_, accumulator_, estimate_, dt, bw / 125000.0,
bw_est_ / 125000.0);

@@ -98,8 +98,7 @@ struct grpc_mdelem {
uintptr_t payload;
};
-#define GRPC_MDELEM_DATA(md) \
-((grpc_mdelem_data *)((md).payload & ~(uintptr_t)3))
+#define GRPC_MDELEM_DATA(md) ((grpc_mdelem_data*)((md).payload & ~(uintptr_t)3))
#define GRPC_MDELEM_STORAGE(md) \
((grpc_mdelem_data_storage)((md).payload & (uintptr_t)3))
#ifdef __cplusplus
@@ -137,8 +136,7 @@ size_t grpc_mdelem_get_size_in_hpack_table(grpc_mdelem elem,
/* Mutator and accessor for grpc_mdelem user data. The destructor function
is used as a type tag and is checked during user_data fetch. */
-void *grpc_mdelem_get_user_data(grpc_mdelem md,
-void (*if_destroy_func)(void *));
+void* grpc_mdelem_get_user_data(grpc_mdelem md, void (*if_destroy_func)(void*));
void* grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void*),
void* user_data);

@@ -390,8 +390,10 @@ static void fake_protector_destroy(tsi_frame_protector *self) {
}
static const tsi_frame_protector_vtable frame_protector_vtable = {
-fake_protector_protect, fake_protector_protect_flush,
-fake_protector_unprotect, fake_protector_destroy,
+fake_protector_protect,
+fake_protector_protect_flush,
+fake_protector_unprotect,
+fake_protector_destroy,
};
/* --- tsi_zero_copy_grpc_protector methods implementation. ---*/

@@ -295,8 +295,7 @@ static tsi_result add_pem_certificate(X509 *cert, tsi_peer_property *property) {
return TSI_INTERNAL_ERROR;
}
tsi_result result = tsi_construct_string_peer_property(
-TSI_X509_PEM_CERT_PROPERTY, (const char *)contents, (size_t)len,
-property);
+TSI_X509_PEM_CERT_PROPERTY, (const char*)contents, (size_t)len, property);
BIO_free(bio);
return result;
}
@@ -853,7 +852,9 @@ static void ssl_protector_destroy(tsi_frame_protector *self) {
}
static const tsi_frame_protector_vtable frame_protector_vtable = {
-ssl_protector_protect, ssl_protector_protect_flush, ssl_protector_unprotect,
+ssl_protector_protect,
+ssl_protector_protect_flush,
+ssl_protector_unprotect,
ssl_protector_destroy,
};

@@ -28,7 +28,8 @@ std::unique_ptr<GenericClientAsyncReaderWriter> CallInternal(
const grpc::string& method, CompletionQueue* cq, bool start, void* tag) {
return std::unique_ptr<GenericClientAsyncReaderWriter>(
internal::ClientAsyncReaderWriterFactory<ByteBuffer, ByteBuffer>::Create(
-channel, cq, internal::RpcMethod(method.c_str(),
+channel, cq,
+internal::RpcMethod(method.c_str(),
internal::RpcMethod::BIDI_STREAMING),
context, start, tag));
}

@@ -56,13 +56,15 @@ bool SecureCallCredentials::ApplyToCall(grpc_call* call) {
namespace {
std::shared_ptr<ChannelCredentials> WrapChannelCredentials(
grpc_channel_credentials* creds) {
-return creds == nullptr ? nullptr : std::shared_ptr<ChannelCredentials>(
+return creds == nullptr ? nullptr
+: std::shared_ptr<ChannelCredentials>(
new SecureChannelCredentials(creds));
}
std::shared_ptr<CallCredentials> WrapCallCredentials(
grpc_call_credentials* creds) {
-return creds == nullptr ? nullptr : std::shared_ptr<CallCredentials>(
+return creds == nullptr ? nullptr
+: std::shared_ptr<CallCredentials>(
new SecureCallCredentials(creds));
}
} // namespace

@@ -171,8 +171,7 @@ class TransportStreamOpBatch {
}
uint32_t* send_initial_metadata_flags() const {
-return op_->send_initial_metadata
-? &op_->payload->send_initial_metadata
+return op_->send_initial_metadata ? &op_->payload->send_initial_metadata
.send_initial_metadata_flags
: nullptr;
}

@@ -23,4 +23,4 @@
namespace grpc {
grpc::string Version() { return "1.8.0-dev"; }
-}
+} // namespace grpc

@@ -25,14 +25,14 @@
using grpc::Status;
using grpc::StatusCode;
-using grpc::reflection::v1alpha::ServerReflectionRequest;
+using grpc::reflection::v1alpha::ErrorResponse;
+using grpc::reflection::v1alpha::ExtensionNumberResponse;
using grpc::reflection::v1alpha::ExtensionRequest;
-using grpc::reflection::v1alpha::ServerReflectionResponse;
+using grpc::reflection::v1alpha::FileDescriptorResponse;
using grpc::reflection::v1alpha::ListServiceResponse;
+using grpc::reflection::v1alpha::ServerReflectionRequest;
+using grpc::reflection::v1alpha::ServerReflectionResponse;
using grpc::reflection::v1alpha::ServiceResponse;
-using grpc::reflection::v1alpha::ExtensionNumberResponse;
-using grpc::reflection::v1alpha::ErrorResponse;
-using grpc::reflection::v1alpha::FileDescriptorResponse;
namespace grpc {

@@ -21,7 +21,7 @@
namespace grpc {
namespace {
bool g_grpc_default_health_check_service_enabled = false;
-} // namesapce
+} // namespace
bool DefaultHealthCheckServiceEnabled() {
return g_grpc_default_health_check_service_enabled;

@@ -21,10 +21,10 @@
#include <grpc/support/time.h>
using std::chrono::duration_cast;
+using std::chrono::high_resolution_clock;
using std::chrono::nanoseconds;
using std::chrono::seconds;
using std::chrono::system_clock;
-using std::chrono::high_resolution_clock;
namespace grpc {

@@ -290,8 +290,7 @@ grpcsharp_batch_context_recv_status_on_client_details(
const grpcsharp_batch_context* ctx, size_t* details_length) {
*details_length =
GRPC_SLICE_LENGTH(ctx->recv_status_on_client.status_details);
-return (char *)GRPC_SLICE_START_PTR(
-ctx->recv_status_on_client.status_details);
+return (char*)GRPC_SLICE_START_PTR(ctx->recv_status_on_client.status_details);
}
GPR_EXPORT const grpc_metadata_array* GPR_CALLTYPE

@@ -35,8 +35,8 @@ using Nan::MaybeLocal;
using v8::Function;
using v8::Local;
-using v8::Object;
using v8::Number;
+using v8::Object;
using v8::Value;
grpc_byte_buffer* BufferToByteBuffer(Local<Value> buffer) {
@@ -53,7 +53,7 @@ void delete_buffer(char *data, void *hint) {
grpc_slice_unref(*slice);
delete slice;
}
-}
+} // namespace
Local<Value> ByteBufferToBuffer(grpc_byte_buffer* buffer) {
Nan::EscapableHandleScope scope;

@@ -35,8 +35,8 @@
#include "slice.h"
#include "timeval.h"
-using std::unique_ptr;
using std::shared_ptr;
+using std::unique_ptr;
using std::vector;
namespace grpc {
@@ -62,8 +62,8 @@ using v8::Local;
using v8::Number;
using v8::Object;
using v8::ObjectTemplate;
-using v8::Uint32;
using v8::String;
+using v8::Uint32;
using v8::Value;
Callback* Call::constructor;
@@ -538,8 +538,7 @@ Local<Value> Call::WrapStruct(grpc_call *call) {
return scope.Escape(Nan::Null());
}
const int argc = 1;
-Local<Value> argv[argc] = {
-Nan::New<External>(reinterpret_cast<void *>(call))};
+Local<Value> argv[argc] = {Nan::New<External>(reinterpret_cast<void*>(call))};
MaybeLocal<Object> maybe_instance =
Nan::NewInstance(constructor->GetFunction(), argc, argv);
if (maybe_instance.IsEmpty()) {

@@ -32,8 +32,8 @@
namespace grpc {
namespace node {
-using std::unique_ptr;
using std::shared_ptr;
+using std::unique_ptr;
v8::Local<v8::Value> nanErrorWithCode(const char* msg, grpc_call_error code);

@@ -88,6 +88,6 @@ NAN_METHOD(PluginCallback);
NAUV_WORK_CB(SendPluginCallback);
} // namespace node
-} // namepsace grpc
+} // namespace grpc
#endif // GRPC_NODE_CALL_CREDENTIALS_H_

@@ -46,11 +46,11 @@ using grpc::node::CreateSliceFromString;
using v8::FunctionTemplate;
using v8::Local;
-using v8::Value;
using v8::Number;
using v8::Object;
-using v8::Uint32;
using v8::String;
+using v8::Uint32;
+using v8::Value;
typedef struct log_args {
gpr_log_func_args core_args;

@@ -24,7 +24,6 @@
#include <grpc/grpc.h>
#include <grpc/impl/codegen/compression_types.h>
#include <grpc/support/alloc.h>
-#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "rb_byte_buffer.h"
@@ -997,8 +996,8 @@ void Init_grpc_call() {
rb_define_method(grpc_rb_cCall, "metadata=", grpc_rb_call_set_metadata, 1);
rb_define_method(grpc_rb_cCall, "trailing_metadata",
grpc_rb_call_get_trailing_metadata, 0);
rb_define_method(grpc_rb_cCall, "trailing_metadata=",
grpc_rb_call_set_trailing_metadata, 1);
rb_define_method(grpc_rb_cCall,
"trailing_metadata=", grpc_rb_call_set_trailing_metadata, 1);
rb_define_method(grpc_rb_cCall, "write_flag", grpc_rb_call_get_write_flag, 0);
rb_define_method(grpc_rb_cCall, "write_flag=", grpc_rb_call_set_write_flag,
1);

@@ -41,7 +41,8 @@ int main(int argc, char **argv) {
/* invalid content type */
GRPC_RUN_BAD_CLIENT_TEST(
-verifier, NULL, PFX_STR
+verifier, NULL,
+PFX_STR
"\x00\x00\xc2\x01\x04\x00\x00\x00\x01"
"\x10\x05:path\x08/foo/bar"
"\x10\x07:scheme\x04http"
@@ -56,7 +57,8 @@ int main(int argc, char **argv) {
/* invalid te */
GRPC_RUN_BAD_CLIENT_TEST(
-verifier, NULL, PFX_STR
+verifier, NULL,
+PFX_STR
"\x00\x00\xcb\x01\x04\x00\x00\x00\x01"
"\x10\x05:path\x08/foo/bar"
"\x10\x07:scheme\x04http"
@@ -73,7 +75,8 @@ int main(int argc, char **argv) {
/* two path headers */
GRPC_RUN_BAD_CLIENT_TEST(
-verifier, NULL, PFX_STR
+verifier, NULL,
+PFX_STR
"\x00\x00\xd9\x01\x04\x00\x00\x00\x01"
"\x10\x05:path\x08/foo/bar"
"\x10\x05:path\x08/foo/bah"
@@ -90,7 +93,8 @@ int main(int argc, char **argv) {
/* bad accept-encoding algorithm */
GRPC_RUN_BAD_CLIENT_TEST(
-verifier, NULL, PFX_STR
+verifier, NULL,
+PFX_STR
"\x00\x00\xd2\x01\x04\x00\x00\x00\x01"
"\x10\x05:path\x08/foo/bar"
"\x10\x07:scheme\x04http"
@@ -106,7 +110,8 @@ int main(int argc, char **argv) {
/* bad grpc-encoding algorithm */
GRPC_RUN_BAD_CLIENT_TEST(
-verifier, NULL, PFX_STR
+verifier, NULL,
+PFX_STR
"\x00\x00\xf5\x01\x04\x00\x00\x00\x01"
"\x10\x05:path\x08/foo/bar"
"\x10\x07:scheme\x04http"

@@ -66,194 +66,236 @@ int main(int argc, char **argv) {
GRPC_BAD_CLIENT_DISCONNECT);
/* test adding prioritization data */
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x01\x01\x24\x00\x00\x00\x01"
"\x00",
0);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x02\x01\x24\x00\x00\x00\x01"
"\x00\x00",
0);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x03\x01\x24\x00\x00\x00\x01"
"\x00\x00\x00",
0);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x04\x01\x24\x00\x00\x00\x01"
"\x00\x00\x00\x00",
0);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x05\x01\x24\x00\x00\x00\x01"
"",
GRPC_BAD_CLIENT_DISCONNECT);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x05\x01\x24\x00\x00\x00\x01"
"\x00",
GRPC_BAD_CLIENT_DISCONNECT);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x05\x01\x24\x00\x00\x00\x01"
"\x00\x00",
GRPC_BAD_CLIENT_DISCONNECT);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x05\x01\x24\x00\x00\x00\x01"
"\x00\x00\x00",
GRPC_BAD_CLIENT_DISCONNECT);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x05\x01\x24\x00\x00\x00\x01"
"\x00\x00\x00\x00",
GRPC_BAD_CLIENT_DISCONNECT);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x05\x01\x24\x00\x00\x00\x01"
"\x00\x00\x00\x00\x00",
GRPC_BAD_CLIENT_DISCONNECT);
/* test looking up an invalid index */
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x01\x01\x04\x00\x00\x00\x01"
"\xfe",
0);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x04\x01\x04\x00\x00\x00\x01"
"\x7f\x7f\x01"
"a",
0);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x04\x01\x04\x00\x00\x00\x01"
"\x0f\x7f\x01"
"a",
0);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x04\x01\x04\x00\x00\x00\x01"
"\x1f\x7f\x01"
"a",
0);
/* test nvr, not indexed in static table */
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x03\x01\x04\x00\x00\x00\x01"
"\x01\x01"
"a",
GRPC_BAD_CLIENT_DISCONNECT);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x03\x01\x04\x00\x00\x00\x01"
"\x11\x01"
"a",
GRPC_BAD_CLIENT_DISCONNECT);
/* illegal op code */
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x01\x01\x04\x00\x00\x00\x01"
"\x80",
0);
/* parse some long indices */
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x02\x01\x04\x00\x00\x00\x01"
"\xff\x00",
0);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x03\x01\x04\x00\x00\x00\x01"
"\xff\x80\x00",
0);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x04\x01\x04\x00\x00\x00\x01"
"\xff\x80\x80\x00",
0);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x05\x01\x04\x00\x00\x00\x01"
"\xff\x80\x80\x80\x00",
0);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x06\x01\x04\x00\x00\x00\x01"
"\xff\x80\x80\x80\x80\x00",
0);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x07\x01\x04\x00\x00\x00\x01"
"\xff\x80\x80\x80\x80\x80\x00",
0);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x08\x01\x04\x00\x00\x00\x01"
"\xff",
GRPC_BAD_CLIENT_DISCONNECT);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x08\x01\x04\x00\x00\x00\x01"
"\xff\x80",
GRPC_BAD_CLIENT_DISCONNECT);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x08\x01\x04\x00\x00\x00\x01"
"\xff\x80\x80",
GRPC_BAD_CLIENT_DISCONNECT);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x08\x01\x04\x00\x00\x00\x01"
"\xff\x80\x80\x80",
GRPC_BAD_CLIENT_DISCONNECT);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x08\x01\x04\x00\x00\x00\x01"
"\xff\x80\x80\x80\x80",
GRPC_BAD_CLIENT_DISCONNECT);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x08\x01\x04\x00\x00\x00\x01"
"\xff\x80\x80\x80\x80\x80",
GRPC_BAD_CLIENT_DISCONNECT);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x08\x01\x04\x00\x00\x00\x01"
"\xff\x80\x80\x80\x80\x80\x80",
GRPC_BAD_CLIENT_DISCONNECT);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x08\x01\x04\x00\x00\x00\x01"
"\xff\x80\x80\x80\x80\x80\x80\x00",
0);
/* overflow on byte 4 */
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x06\x01\x04\x00\x00\x00\x01"
"\xff\x80\x80\x80\x80\x7f",
GRPC_BAD_CLIENT_DISCONNECT);
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x06\x01\x04\x00\x00\x00\x01"
"\xff\xff\xff\xff\xff\x0f",
GRPC_BAD_CLIENT_DISCONNECT);
/* overflow after byte 4 */
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x08\x01\x04\x00\x00\x00\x01"
"\xff\x80\x80\x80\x80\x80\x80\x02",
0);
/* end of headers mid-opcode */
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x01\x01\x04\x00\x00\x00\x01"
"\x01",
GRPC_BAD_CLIENT_DISCONNECT);
/* dynamic table size update: set to default */
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x03\x01\x04\x00\x00\x00\x01"
"\x3f\xe1\x1f",
GRPC_BAD_CLIENT_DISCONNECT);
/* dynamic table size update: set too large */
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x03\x01\x04\x00\x00\x00\x01"
"\x3f\xf1\x1f",
0);
/* dynamic table size update: set twice */
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x04\x01\x04\x00\x00\x00\x01"
"\x20\x3f\xe1\x1f",
GRPC_BAD_CLIENT_DISCONNECT);
/* dynamic table size update: set thrice */
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x03\x01\x04\x00\x00\x00\x01"
"\x20\x20\x20",
0);
/* non-ending header followed by continuation frame */
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x00\x01\x00\x00\x00\x00\x01"
"\x00\x00\x00\x09\x04\x00\x00\x00\x01",
GRPC_BAD_CLIENT_DISCONNECT);
/* non-ending header followed by non-continuation frame */
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x00\x01\x00\x00\x00\x00\x01"
"\x00\x00\x00\x00\x04\x00\x00\x00\x01",
0);
/* non-ending header followed by a continuation frame for a different stream
*/
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x00\x01\x04\x00\x00\x00\x01"
"\x00\x00\x00\x01\x00\x00\x00\x00\x03"
"\x00\x00\x00\x09\x04\x00\x00\x00\x01",
@@ -262,7 +304,8 @@ int main(int argc, char **argv) {
GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
PFX_STR "\x00\x00\x00\x09\x04\x00\x00\x00\x01", 0);
/* three header frames */
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x00\x01\x04\x00\x00\x00\x01"
"\x00\x00\x00\x01\x04\x00\x00\x00\x01"
"\x00\x00\x00\x01\x04\x00\x00\x00\x01",
@@ -274,13 +317,15 @@ int main(int argc, char **argv) {
GRPC_BAD_CLIENT_DISCONNECT);
/* a badly encoded timeout value */
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x19\x01\x04\x00\x00\x00\x01"
"\x10\x0cgrpc-timeout\x0a"
"15 seconds",
GRPC_BAD_CLIENT_DISCONNECT);
/* a badly encoded timeout value: twice (catches caching) */
-GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL, PFX_STR
+GRPC_RUN_BAD_CLIENT_TEST(verifier, NULL,
+PFX_STR
"\x00\x00\x19\x01\x04\x00\x00\x00\x01"
"\x10\x0cgrpc-timeout\x0a"
"15 seconds"
