Update clang-format to 5.0

Branch: pull/13255/head
Author: Craig Tiller, 8 years ago
Parent: ef68fe7239
Commit: baa14a975e
  1. .clang-format (2)
  2. include/grpc++/impl/channel_argument_option.h (4)
  3. include/grpc++/impl/codegen/async_unary_call.h (2)
  4. include/grpc++/impl/codegen/completion_queue.h (5)
  5. include/grpc++/impl/codegen/metadata_map.h (38)
  6. include/grpc/compression.h (18)
  7. include/grpc/grpc.h (214)
  8. include/grpc/grpc_cronet.h (6)
  9. include/grpc/grpc_posix.h (8)
  10. include/grpc/grpc_security.h (196)
  11. include/grpc/impl/codegen/atm.h (2)
  12. include/grpc/impl/codegen/atm_gcc_atomic.h (8)
  13. include/grpc/impl/codegen/atm_gcc_sync.h (10)
  14. include/grpc/impl/codegen/atm_windows.h (44)
  15. include/grpc/impl/codegen/byte_buffer.h (28)
  16. include/grpc/impl/codegen/byte_buffer_reader.h (4)
  17. include/grpc/impl/codegen/grpc_types.h (56)
  18. include/grpc/impl/codegen/slice.h (20)
  19. include/grpc/impl/codegen/sync_generic.h (12)
  20. include/grpc/slice.h (34)
  21. include/grpc/slice_buffer.h (44)
  22. include/grpc/support/alloc.h (20)
  23. include/grpc/support/avl.h (40)
  24. include/grpc/support/cmdline.h (26)
  25. include/grpc/support/histogram.h (34)
  26. include/grpc/support/host_port.h (4)
  27. include/grpc/support/log.h (16)
  28. include/grpc/support/log_windows.h (2)
  29. include/grpc/support/string_util.h (4)
  30. include/grpc/support/subprocess.h (10)
  31. include/grpc/support/sync.h (60)
  32. include/grpc/support/thd.h (12)
  33. include/grpc/support/tls_gcc.h (2)
  34. include/grpc/support/tls_pthread.h (2)
  35. src/compiler/cpp_generator.cc (149)
  36. src/compiler/cpp_generator.h (64)
  37. src/compiler/cpp_generator_helpers.h (12)
  38. src/compiler/cpp_plugin.cc (16)
  39. src/compiler/csharp_generator.cc (85)
  40. src/compiler/csharp_generator.h (2)
  41. src/compiler/csharp_generator_helpers.h (6)
  42. src/compiler/csharp_plugin.cc (10)
  43. src/compiler/generator_helpers.h (44)
  44. src/compiler/node_generator.cc (56)
  45. src/compiler/node_generator.h (2)
  46. src/compiler/node_plugin.cc (10)
  47. src/compiler/objective_c_generator.cc (36)
  48. src/compiler/objective_c_generator.h (8)
  49. src/compiler/objective_c_generator_helpers.h (8)
  50. src/compiler/objective_c_plugin.cc (30)
  51. src/compiler/php_generator.cc (30)
  52. src/compiler/php_generator.h (6)
  53. src/compiler/php_generator_helpers.h (16)
  54. src/compiler/php_plugin.cc (12)
  55. src/compiler/protobuf_plugin.h (30)
  56. src/compiler/python_generator.cc (2)
  57. src/compiler/python_generator_helpers.h (6)
  58. src/compiler/ruby_generator.cc (34)
  59. src/compiler/ruby_generator.h (2)
  60. src/compiler/ruby_generator_helpers-inl.h (8)
  61. src/compiler/ruby_generator_map-inl.h (2)
  62. src/compiler/ruby_generator_string-inl.h (22)
  63. src/compiler/ruby_plugin.cc (10)
  64. src/compiler/schema_interface.h (12)
  65. src/core/ext/census/grpc_context.cc (6)
  66. src/core/ext/filters/client_channel/channel_connectivity.cc (73)
  67. src/core/ext/filters/client_channel/client_channel.cc (499)
  68. src/core/ext/filters/client_channel/client_channel.h (14)
  69. src/core/ext/filters/client_channel/client_channel_factory.h (42)
  70. src/core/ext/filters/client_channel/client_channel_plugin.cc (22)
  71. src/core/ext/filters/client_channel/connector.h (40)
  72. src/core/ext/filters/client_channel/lb_policy.cc (87)
  73. src/core/ext/filters/client_channel/lb_policy.h (134)
  74. src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc (53)
  75. src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h (2)
  76. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (528)
  77. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h (2)
  78. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc (20)
  79. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h (18)
  80. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc (28)
  81. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h (2)
  82. src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc (108)
  83. src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h (40)
  84. src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc (166)
  85. src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc (208)
  86. src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc (108)
  87. src/core/ext/filters/client_channel/lb_policy/subchannel_list.h (52)
  88. src/core/ext/filters/client_channel/lb_policy_factory.h (80)
  89. src/core/ext/filters/client_channel/lb_policy_registry.cc (14)
  90. src/core/ext/filters/client_channel/lb_policy_registry.h (6)
  91. src/core/ext/filters/client_channel/parse_address.cc (42)
  92. src/core/ext/filters/client_channel/parse_address.h (12)
  93. src/core/ext/filters/client_channel/resolver.cc (34)
  94. src/core/ext/filters/client_channel/resolver.h (48)
  95. src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc (164)
  96. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h (18)
  97. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc (96)
  98. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc (180)
  99. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h (30)
  100. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc (42)
  Some files were not shown because too many files have changed in this diff.

@@ -1,5 +1,7 @@
---
Language: Cpp
BasedOnStyle: Google
DerivePointerAlignment: false
PointerAlignment: Left
...
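
For context, the two added settings (DerivePointerAlignment: false, PointerAlignment: Left) are what drive the bulk of this diff: clang-format 5.0 now attaches * and & to the type rather than to the variable name. A small illustrative snippet (not taken from any file in this commit) shows the effect:

// Before: pointer/reference bound to the name (right alignment)
const grpc::string &name;
grpc_channel *channel;

// After: pointer/reference bound to the type (PointerAlignment: Left)
const grpc::string& name;
grpc_channel* channel;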

@@ -28,9 +28,9 @@
namespace grpc {
std::unique_ptr<ServerBuilderOption> MakeChannelArgumentOption(
const grpc::string &name, const grpc::string &value);
const grpc::string& name, const grpc::string& value);
std::unique_ptr<ServerBuilderOption> MakeChannelArgumentOption(
const grpc::string &name, int value);
const grpc::string& name, int value);
} // namespace grpc

@@ -297,6 +297,6 @@ class default_delete<grpc::ClientAsyncResponseReaderInterface<R>> {
public:
void operator()(void* p) {}
};
}
} // namespace std
#endif // GRPCXX_IMPL_CODEGEN_ASYNC_UNARY_CALL_H

@@ -164,8 +164,9 @@ class CompletionQueue : private GrpcLibraryCodegen {
///
/// \return true if read a regular event, false if the queue is shutting down.
bool Next(void** tag, bool* ok) {
return (AsyncNextInternal(tag, ok, g_core_codegen_interface->gpr_inf_future(
GPR_CLOCK_REALTIME)) != SHUTDOWN);
return (AsyncNextInternal(tag, ok,
g_core_codegen_interface->gpr_inf_future(
GPR_CLOCK_REALTIME)) != SHUTDOWN);
}
/// Request the shutdown of the queue.
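
The CompletionQueue::Next() overload being re-wrapped above is the blocking poll used to drain a queue. A minimal usage sketch (assuming a grpc::CompletionQueue that operations have already been posted to; not part of this change):

#include <grpc++/completion_queue.h>

void DrainQueue(grpc::CompletionQueue* cq) {
  void* tag;
  bool ok;
  // Next() blocks until an event is available and returns false only once
  // the queue has been Shutdown() and fully drained.
  while (cq->Next(&tag, &ok)) {
    // Handle the completed operation identified by 'tag'; 'ok' indicates
    // whether it completed successfully.
  }
}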

@@ -1,20 +1,20 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPCXX_IMPL_CODEGEN_METADATA_MAP_H
#define GRPCXX_IMPL_CODEGEN_METADATA_MAP_H
@@ -41,11 +41,11 @@ class MetadataMap {
}
}
std::multimap<grpc::string_ref, grpc::string_ref> *map() { return &map_; }
const std::multimap<grpc::string_ref, grpc::string_ref> *map() const {
std::multimap<grpc::string_ref, grpc::string_ref>* map() { return &map_; }
const std::multimap<grpc::string_ref, grpc::string_ref>* map() const {
return &map_;
}
grpc_metadata_array *arr() { return &arr_; }
grpc_metadata_array* arr() { return &arr_; }
private:
grpc_metadata_array arr_;

@@ -33,24 +33,24 @@ extern "C" {
/** Parses the \a slice as a grpc_compression_algorithm instance and updating \a
* algorithm. Returns 1 upon success, 0 otherwise. */
GRPCAPI int grpc_compression_algorithm_parse(
grpc_slice value, grpc_compression_algorithm *algorithm);
grpc_slice value, grpc_compression_algorithm* algorithm);
/** Parses the \a slice as a grpc_stream_compression_algorithm instance and
* updating \a algorithm. Returns 1 upon success, 0 otherwise. */
int grpc_stream_compression_algorithm_parse(
grpc_slice name, grpc_stream_compression_algorithm *algorithm);
grpc_slice name, grpc_stream_compression_algorithm* algorithm);
/** Updates \a name with the encoding name corresponding to a valid \a
* algorithm. Note that \a name is statically allocated and must *not* be freed.
* Returns 1 upon success, 0 otherwise. */
GRPCAPI int grpc_compression_algorithm_name(
grpc_compression_algorithm algorithm, const char **name);
grpc_compression_algorithm algorithm, const char** name);
/** Updates \a name with the encoding name corresponding to a valid \a
* algorithm. Note that \a name is statically allocated and must *not* be freed.
* Returns 1 upon success, 0 otherwise. */
GRPCAPI int grpc_stream_compression_algorithm_name(
grpc_stream_compression_algorithm algorithm, const char **name);
grpc_stream_compression_algorithm algorithm, const char** name);
/** Returns the compression algorithm corresponding to \a level for the
* compression algorithms encoded in the \a accepted_encodings bitset.
@@ -66,23 +66,23 @@ GRPCAPI grpc_stream_compression_algorithm
grpc_stream_compression_algorithm_for_level(grpc_stream_compression_level level,
uint32_t accepted_stream_encodings);
GRPCAPI void grpc_compression_options_init(grpc_compression_options *opts);
GRPCAPI void grpc_compression_options_init(grpc_compression_options* opts);
/** Mark \a algorithm as enabled in \a opts. */
GRPCAPI void grpc_compression_options_enable_algorithm(
grpc_compression_options *opts, grpc_compression_algorithm algorithm);
grpc_compression_options* opts, grpc_compression_algorithm algorithm);
/** Mark \a algorithm as disabled in \a opts. */
GRPCAPI void grpc_compression_options_disable_algorithm(
grpc_compression_options *opts, grpc_compression_algorithm algorithm);
grpc_compression_options* opts, grpc_compression_algorithm algorithm);
/** Returns true if \a algorithm is marked as enabled in \a opts. */
GRPCAPI int grpc_compression_options_is_algorithm_enabled(
const grpc_compression_options *opts, grpc_compression_algorithm algorithm);
const grpc_compression_options* opts, grpc_compression_algorithm algorithm);
/** Returns true if \a algorithm is marked as enabled in \a opts. */
GRPCAPI int grpc_compression_options_is_stream_compression_algorithm_enabled(
const grpc_compression_options *opts,
const grpc_compression_options* opts,
grpc_stream_compression_algorithm algorithm);
#ifdef __cplusplus

@@ -40,11 +40,11 @@ extern "C" {
* functionality lives in grpc_security.h.
*/
GRPCAPI void grpc_metadata_array_init(grpc_metadata_array *array);
GRPCAPI void grpc_metadata_array_destroy(grpc_metadata_array *array);
GRPCAPI void grpc_metadata_array_init(grpc_metadata_array* array);
GRPCAPI void grpc_metadata_array_destroy(grpc_metadata_array* array);
GRPCAPI void grpc_call_details_init(grpc_call_details *details);
GRPCAPI void grpc_call_details_destroy(grpc_call_details *details);
GRPCAPI void grpc_call_details_init(grpc_call_details* details);
GRPCAPI void grpc_call_details_destroy(grpc_call_details* details);
/** Registers a plugin to be initialized and destroyed with the library.
@@ -73,31 +73,31 @@ GRPCAPI void grpc_init(void);
GRPCAPI void grpc_shutdown(void);
/** Return a string representing the current version of grpc */
GRPCAPI const char *grpc_version_string(void);
GRPCAPI const char* grpc_version_string(void);
/** Return a string specifying what the 'g' in gRPC stands for */
GRPCAPI const char *grpc_g_stands_for(void);
GRPCAPI const char* grpc_g_stands_for(void);
/** Returns the completion queue factory based on the attributes. MAY return a
NULL if no factory can be found */
GRPCAPI const grpc_completion_queue_factory *
GRPCAPI const grpc_completion_queue_factory*
grpc_completion_queue_factory_lookup(
const grpc_completion_queue_attributes *attributes);
const grpc_completion_queue_attributes* attributes);
/** Helper function to create a completion queue with grpc_cq_completion_type
of GRPC_CQ_NEXT and grpc_cq_polling_type of GRPC_CQ_DEFAULT_POLLING */
GRPCAPI grpc_completion_queue *grpc_completion_queue_create_for_next(
void *reserved);
GRPCAPI grpc_completion_queue* grpc_completion_queue_create_for_next(
void* reserved);
/** Helper function to create a completion queue with grpc_cq_completion_type
of GRPC_CQ_PLUCK and grpc_cq_polling_type of GRPC_CQ_DEFAULT_POLLING */
GRPCAPI grpc_completion_queue *grpc_completion_queue_create_for_pluck(
void *reserved);
GRPCAPI grpc_completion_queue* grpc_completion_queue_create_for_pluck(
void* reserved);
/** Create a completion queue */
GRPCAPI grpc_completion_queue *grpc_completion_queue_create(
const grpc_completion_queue_factory *factory,
const grpc_completion_queue_attributes *attributes, void *reserved);
GRPCAPI grpc_completion_queue* grpc_completion_queue_create(
const grpc_completion_queue_factory* factory,
const grpc_completion_queue_attributes* attributes, void* reserved);
/** Blocks until an event is available, the completion queue is being shut down,
or deadline is reached.
@@ -107,9 +107,9 @@ GRPCAPI grpc_completion_queue *grpc_completion_queue_create(
Callers must not call grpc_completion_queue_next and
grpc_completion_queue_pluck simultaneously on the same completion queue. */
GRPCAPI grpc_event grpc_completion_queue_next(grpc_completion_queue *cq,
GRPCAPI grpc_event grpc_completion_queue_next(grpc_completion_queue* cq,
gpr_timespec deadline,
void *reserved);
void* reserved);
/** Blocks until an event with tag 'tag' is available, the completion queue is
being shutdown or deadline is reached.
@@ -122,9 +122,9 @@ GRPCAPI grpc_event grpc_completion_queue_next(grpc_completion_queue *cq,
Completion queues support a maximum of GRPC_MAX_COMPLETION_QUEUE_PLUCKERS
concurrently executing plucks at any time. */
GRPCAPI grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cq,
void *tag, gpr_timespec deadline,
void *reserved);
GRPCAPI grpc_event grpc_completion_queue_pluck(grpc_completion_queue* cq,
void* tag, gpr_timespec deadline,
void* reserved);
/** Maximum number of outstanding grpc_completion_queue_pluck executions per
completion queue */
@@ -137,31 +137,31 @@ GRPCAPI grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cq,
After calling this function applications should ensure that no
NEW work is added to be published on this completion queue. */
GRPCAPI void grpc_completion_queue_shutdown(grpc_completion_queue *cq);
GRPCAPI void grpc_completion_queue_shutdown(grpc_completion_queue* cq);
/** Destroy a completion queue. The caller must ensure that the queue is
drained and no threads are executing grpc_completion_queue_next */
GRPCAPI void grpc_completion_queue_destroy(grpc_completion_queue *cq);
GRPCAPI void grpc_completion_queue_destroy(grpc_completion_queue* cq);
/*********** EXPERIMENTAL API ************/
/** Initializes a thread local cache for \a cq.
* grpc_flush_cq_tls_cache() MUST be called on the same thread,
* with the same cq.
*/
* grpc_flush_cq_tls_cache() MUST be called on the same thread,
* with the same cq.
*/
GRPCAPI void grpc_completion_queue_thread_local_cache_init(
grpc_completion_queue *cq);
grpc_completion_queue* cq);
/*********** EXPERIMENTAL API ************/
/** Flushes the thread local cache for \a cq.
* Returns 1 if there was contents in the cache. If there was an event
* in \a cq tls cache, its tag is placed in tag, and ok is set to the
* event success.
*/
* Returns 1 if there was contents in the cache. If there was an event
* in \a cq tls cache, its tag is placed in tag, and ok is set to the
* event success.
*/
GRPCAPI int grpc_completion_queue_thread_local_cache_flush(
grpc_completion_queue *cq, void **tag, int *ok);
grpc_completion_queue* cq, void** tag, int* ok);
/** Create a completion queue alarm instance */
GRPCAPI grpc_alarm *grpc_alarm_create(void *reserved);
GRPCAPI grpc_alarm* grpc_alarm_create(void* reserved);
/** Set a completion queue alarm instance associated to \a cq.
*
@@ -169,25 +169,25 @@ GRPCAPI grpc_alarm *grpc_alarm_create(void *reserved);
* grpc_alarm_cancel), an event with tag \a tag will be added to \a cq. If the
* alarm expired, the event's success bit will be true, false otherwise (ie,
* upon cancellation). */
GRPCAPI void grpc_alarm_set(grpc_alarm *alarm, grpc_completion_queue *cq,
gpr_timespec deadline, void *tag, void *reserved);
GRPCAPI void grpc_alarm_set(grpc_alarm* alarm, grpc_completion_queue* cq,
gpr_timespec deadline, void* tag, void* reserved);
/** Cancel a completion queue alarm. Calling this function over an alarm that
* has already fired has no effect. */
GRPCAPI void grpc_alarm_cancel(grpc_alarm *alarm, void *reserved);
GRPCAPI void grpc_alarm_cancel(grpc_alarm* alarm, void* reserved);
/** Destroy the given completion queue alarm, cancelling it in the process. */
GRPCAPI void grpc_alarm_destroy(grpc_alarm *alarm, void *reserved);
GRPCAPI void grpc_alarm_destroy(grpc_alarm* alarm, void* reserved);
/** Check the connectivity state of a channel. */
GRPCAPI grpc_connectivity_state grpc_channel_check_connectivity_state(
grpc_channel *channel, int try_to_connect);
grpc_channel* channel, int try_to_connect);
/** Number of active "external connectivity state watchers" attached to a
* channel.
* Useful for testing. **/
GRPCAPI int grpc_channel_num_external_connectivity_watchers(
grpc_channel *channel);
grpc_channel* channel);
/** Watch for a change in connectivity state.
Once the channel connectivity state is different from last_observed_state,
@@ -195,11 +195,11 @@ GRPCAPI int grpc_channel_num_external_connectivity_watchers(
If deadline expires BEFORE the state is changed, tag will be enqueued on cq
with success=0. */
GRPCAPI void grpc_channel_watch_connectivity_state(
grpc_channel *channel, grpc_connectivity_state last_observed_state,
gpr_timespec deadline, grpc_completion_queue *cq, void *tag);
grpc_channel* channel, grpc_connectivity_state last_observed_state,
gpr_timespec deadline, grpc_completion_queue* cq, void* tag);
/** Check whether a grpc channel supports connectivity watcher */
GRPCAPI int grpc_channel_support_connectivity_watcher(grpc_channel *channel);
GRPCAPI int grpc_channel_support_connectivity_watcher(grpc_channel* channel);
/** Create a call given a grpc_channel, in order to call 'method'. All
completions are sent to 'completion_queue'. 'method' and 'host' need only
@@ -208,31 +208,31 @@ GRPCAPI int grpc_channel_support_connectivity_watcher(grpc_channel *channel);
to propagate properties from the server call to this new client call,
depending on the value of \a propagation_mask (see propagation_bits.h for
possible values). */
GRPCAPI grpc_call *grpc_channel_create_call(
grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
grpc_completion_queue *completion_queue, grpc_slice method,
const grpc_slice *host, gpr_timespec deadline, void *reserved);
GRPCAPI grpc_call* grpc_channel_create_call(
grpc_channel* channel, grpc_call* parent_call, uint32_t propagation_mask,
grpc_completion_queue* completion_queue, grpc_slice method,
const grpc_slice* host, gpr_timespec deadline, void* reserved);
/** Ping the channels peer (load balanced channels will select one sub-channel
to ping); if the channel is not connected, posts a failed. */
GRPCAPI void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq,
void *tag, void *reserved);
GRPCAPI void grpc_channel_ping(grpc_channel* channel, grpc_completion_queue* cq,
void* tag, void* reserved);
/** Pre-register a method/host pair on a channel. */
GRPCAPI void *grpc_channel_register_call(grpc_channel *channel,
const char *method, const char *host,
void *reserved);
GRPCAPI void* grpc_channel_register_call(grpc_channel* channel,
const char* method, const char* host,
void* reserved);
/** Create a call given a handle returned from grpc_channel_register_call.
\sa grpc_channel_create_call. */
GRPCAPI grpc_call *grpc_channel_create_registered_call(
grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
grpc_completion_queue *completion_queue, void *registered_call_handle,
gpr_timespec deadline, void *reserved);
GRPCAPI grpc_call* grpc_channel_create_registered_call(
grpc_channel* channel, grpc_call* parent_call, uint32_t propagation_mask,
grpc_completion_queue* completion_queue, void* registered_call_handle,
gpr_timespec deadline, void* reserved);
/** Allocate memory in the grpc_call arena: this memory is automatically
discarded at call completion */
GRPCAPI void *grpc_call_arena_alloc(grpc_call *call, size_t size);
GRPCAPI void* grpc_call_arena_alloc(grpc_call* call, size_t size);
/** Start a batch of operations defined in the array ops; when complete, post a
completion of type 'tag' to the completion queue bound to the call.
@@ -251,9 +251,9 @@ GRPCAPI void *grpc_call_arena_alloc(grpc_call *call, size_t size);
needs to be synchronized. As an optimization, you may synchronize batches
containing just send operations independently from batches containing just
receive operations. */
GRPCAPI grpc_call_error grpc_call_start_batch(grpc_call *call,
const grpc_op *ops, size_t nops,
void *tag, void *reserved);
GRPCAPI grpc_call_error grpc_call_start_batch(grpc_call* call,
const grpc_op* ops, size_t nops,
void* tag, void* reserved);
/** Returns a newly allocated string representing the endpoint to which this
call is communicating with. The string is in the uri format accepted by
@@ -263,43 +263,43 @@ GRPCAPI grpc_call_error grpc_call_start_batch(grpc_call *call,
WARNING: this value is never authenticated or subject to any security
related code. It must not be used for any authentication related
functionality. Instead, use grpc_auth_context. */
GRPCAPI char *grpc_call_get_peer(grpc_call *call);
GRPCAPI char* grpc_call_get_peer(grpc_call* call);
struct census_context;
/** Set census context for a call; Must be called before first call to
grpc_call_start_batch(). */
GRPCAPI void grpc_census_call_set_context(grpc_call *call,
struct census_context *context);
GRPCAPI void grpc_census_call_set_context(grpc_call* call,
struct census_context* context);
/** Retrieve the calls current census context. */
GRPCAPI struct census_context *grpc_census_call_get_context(grpc_call *call);
GRPCAPI struct census_context* grpc_census_call_get_context(grpc_call* call);
/** Return a newly allocated string representing the target a channel was
created for. */
GRPCAPI char *grpc_channel_get_target(grpc_channel *channel);
GRPCAPI char* grpc_channel_get_target(grpc_channel* channel);
/** Request info about the channel.
\a channel_info indicates what information is being requested and
how that information will be returned.
\a channel_info is owned by the caller. */
GRPCAPI void grpc_channel_get_info(grpc_channel *channel,
const grpc_channel_info *channel_info);
GRPCAPI void grpc_channel_get_info(grpc_channel* channel,
const grpc_channel_info* channel_info);
/** Create a client channel to 'target'. Additional channel level configuration
MAY be provided by grpc_channel_args, though the expectation is that most
clients will want to simply pass NULL. See grpc_channel_args definition for
more on this. The data in 'args' need only live through the invocation of
this function. */
GRPCAPI grpc_channel *grpc_insecure_channel_create(
const char *target, const grpc_channel_args *args, void *reserved);
GRPCAPI grpc_channel* grpc_insecure_channel_create(
const char* target, const grpc_channel_args* args, void* reserved);
/** Create a lame client: this client fails every operation attempted on it. */
GRPCAPI grpc_channel *grpc_lame_client_channel_create(
const char *target, grpc_status_code error_code, const char *error_message);
GRPCAPI grpc_channel* grpc_lame_client_channel_create(
const char* target, grpc_status_code error_code, const char* error_message);
/** Close and destroy a grpc channel */
GRPCAPI void grpc_channel_destroy(grpc_channel *channel);
GRPCAPI void grpc_channel_destroy(grpc_channel* channel);
/** Error handling for grpc_call
Most grpc_call functions return a grpc_error. If the error is not GRPC_OK
@@ -312,7 +312,7 @@ GRPCAPI void grpc_channel_destroy(grpc_channel *channel);
THREAD-SAFETY grpc_call_cancel and grpc_call_cancel_with_status
are thread-safe, and can be called at any point before grpc_call_unref
is called.*/
GRPCAPI grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved);
GRPCAPI grpc_call_error grpc_call_cancel(grpc_call* call, void* reserved);
/** Called by clients to cancel an RPC on the server.
Can be called multiple times, from any thread.
@@ -324,18 +324,18 @@ GRPCAPI grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved);
It doesn't need to be alive after the call to
grpc_call_cancel_with_status completes.
*/
GRPCAPI grpc_call_error grpc_call_cancel_with_status(grpc_call *call,
GRPCAPI grpc_call_error grpc_call_cancel_with_status(grpc_call* call,
grpc_status_code status,
const char *description,
void *reserved);
const char* description,
void* reserved);
/** Ref a call.
THREAD SAFETY: grpc_call_ref is thread-compatible */
GRPCAPI void grpc_call_ref(grpc_call *call);
GRPCAPI void grpc_call_ref(grpc_call* call);
/** Unref a call.
THREAD SAFETY: grpc_call_unref is thread-compatible */
GRPCAPI void grpc_call_unref(grpc_call *call);
GRPCAPI void grpc_call_unref(grpc_call* call);
/** Request notification of a new call.
Once a call is received, a notification tagged with \a tag_new is added to
@@ -346,10 +346,10 @@ GRPCAPI void grpc_call_unref(grpc_call *call);
Note that \a cq_for_notification must have been registered to the server via
\a grpc_server_register_completion_queue. */
GRPCAPI grpc_call_error grpc_server_request_call(
grpc_server *server, grpc_call **call, grpc_call_details *details,
grpc_metadata_array *request_metadata,
grpc_completion_queue *cq_bound_to_call,
grpc_completion_queue *cq_for_notification, void *tag_new);
grpc_server* server, grpc_call** call, grpc_call_details* details,
grpc_metadata_array* request_metadata,
grpc_completion_queue* cq_bound_to_call,
grpc_completion_queue* cq_for_notification, void* tag_new);
/** How to handle payloads for a registered method */
typedef enum {
@@ -366,8 +366,8 @@ typedef enum {
registered_method (as returned by this function).
Must be called before grpc_server_start.
Returns NULL on failure. */
GRPCAPI void *grpc_server_register_method(
grpc_server *server, const char *method, const char *host,
GRPCAPI void* grpc_server_register_method(
grpc_server* server, const char* method, const char* host,
grpc_server_register_method_payload_handling payload_handling,
uint32_t flags);
@@ -375,35 +375,35 @@ GRPCAPI void *grpc_server_register_method(
must have been registered to the server via
grpc_server_register_completion_queue. */
GRPCAPI grpc_call_error grpc_server_request_registered_call(
grpc_server *server, void *registered_method, grpc_call **call,
gpr_timespec *deadline, grpc_metadata_array *request_metadata,
grpc_byte_buffer **optional_payload,
grpc_completion_queue *cq_bound_to_call,
grpc_completion_queue *cq_for_notification, void *tag_new);
grpc_server* server, void* registered_method, grpc_call** call,
gpr_timespec* deadline, grpc_metadata_array* request_metadata,
grpc_byte_buffer** optional_payload,
grpc_completion_queue* cq_bound_to_call,
grpc_completion_queue* cq_for_notification, void* tag_new);
/** Create a server. Additional configuration for each incoming channel can
be specified with args. If no additional configuration is needed, args can
be NULL. See grpc_channel_args for more. The data in 'args' need only live
through the invocation of this function. */
GRPCAPI grpc_server *grpc_server_create(const grpc_channel_args *args,
void *reserved);
GRPCAPI grpc_server* grpc_server_create(const grpc_channel_args* args,
void* reserved);
/** Register a completion queue with the server. Must be done for any
notification completion queue that is passed to grpc_server_request_*_call
and to grpc_server_shutdown_and_notify. Must be performed prior to
grpc_server_start. */
GRPCAPI void grpc_server_register_completion_queue(grpc_server *server,
grpc_completion_queue *cq,
void *reserved);
GRPCAPI void grpc_server_register_completion_queue(grpc_server* server,
grpc_completion_queue* cq,
void* reserved);
/** Add a HTTP2 over plaintext over tcp listener.
Returns bound port number on success, 0 on failure.
REQUIRES: server not started */
GRPCAPI int grpc_server_add_insecure_http2_port(grpc_server *server,
const char *addr);
GRPCAPI int grpc_server_add_insecure_http2_port(grpc_server* server,
const char* addr);
/** Start a server - tells all listeners to start listening */
GRPCAPI void grpc_server_start(grpc_server *server);
GRPCAPI void grpc_server_start(grpc_server* server);
/** Begin shutting down a server.
After completion, no new calls or connections will be admitted.
@@ -412,19 +412,19 @@ GRPCAPI void grpc_server_start(grpc_server *server);
Shutdown is idempotent, and all tags will be notified at once if multiple
grpc_server_shutdown_and_notify calls are made. 'cq' must have been
registered to this server via grpc_server_register_completion_queue. */
GRPCAPI void grpc_server_shutdown_and_notify(grpc_server *server,
grpc_completion_queue *cq,
void *tag);
GRPCAPI void grpc_server_shutdown_and_notify(grpc_server* server,
grpc_completion_queue* cq,
void* tag);
/** Cancel all in-progress calls.
Only usable after shutdown. */
GRPCAPI void grpc_server_cancel_all_calls(grpc_server *server);
GRPCAPI void grpc_server_cancel_all_calls(grpc_server* server);
/** Destroy a server.
Shutdown must have completed beforehand (i.e. all tags generated by
grpc_server_shutdown_and_notify must have been received, and at least
one call to grpc_server_shutdown_and_notify must have been made). */
GRPCAPI void grpc_server_destroy(grpc_server *server);
GRPCAPI void grpc_server_destroy(grpc_server* server);
/** Enable or disable a tracer.
@@ -434,7 +434,7 @@ GRPCAPI void grpc_server_destroy(grpc_server *server);
Use of this function is not strictly thread-safe, but the
thread-safety issues raised by it should not be of concern. */
GRPCAPI int grpc_tracer_set_enabled(const char *name, int enabled);
GRPCAPI int grpc_tracer_set_enabled(const char* name, int enabled);
/** Check whether a metadata key is legal (will be accepted by core) */
GRPCAPI int grpc_header_key_is_legal(grpc_slice slice);
@@ -447,24 +447,24 @@ GRPCAPI int grpc_header_nonbin_value_is_legal(grpc_slice slice);
GRPCAPI int grpc_is_binary_header(grpc_slice slice);
/** Convert grpc_call_error values to a string */
GRPCAPI const char *grpc_call_error_to_string(grpc_call_error error);
GRPCAPI const char* grpc_call_error_to_string(grpc_call_error error);
/** Create a buffer pool */
GRPCAPI grpc_resource_quota *grpc_resource_quota_create(const char *trace_name);
GRPCAPI grpc_resource_quota* grpc_resource_quota_create(const char* trace_name);
/** Add a reference to a buffer pool */
GRPCAPI void grpc_resource_quota_ref(grpc_resource_quota *resource_quota);
GRPCAPI void grpc_resource_quota_ref(grpc_resource_quota* resource_quota);
/** Drop a reference to a buffer pool */
GRPCAPI void grpc_resource_quota_unref(grpc_resource_quota *resource_quota);
GRPCAPI void grpc_resource_quota_unref(grpc_resource_quota* resource_quota);
/** Update the size of a buffer pool */
GRPCAPI void grpc_resource_quota_resize(grpc_resource_quota *resource_quota,
GRPCAPI void grpc_resource_quota_resize(grpc_resource_quota* resource_quota,
size_t new_size);
/** Fetch a vtable for a grpc_channel_arg that points to a grpc_resource_quota
*/
GRPCAPI const grpc_arg_pointer_vtable *grpc_resource_quota_arg_vtable(void);
GRPCAPI const grpc_arg_pointer_vtable* grpc_resource_quota_arg_vtable(void);
#ifdef __cplusplus
}
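
Several of the grpc.h declarations reformatted above document the completion queue lifecycle (shutdown, drain, destroy). A minimal sketch of that flow, based only on the documented semantics and not on code in this commit:

#include <grpc/grpc.h>
#include <grpc/support/time.h>

static void shutdown_and_drain(grpc_completion_queue* cq) {
  // After this call, no new work may be published on the queue.
  grpc_completion_queue_shutdown(cq);
  // Keep pulling events until the queue reports GRPC_QUEUE_SHUTDOWN.
  grpc_event ev;
  do {
    ev = grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                    NULL);
  } while (ev.type != GRPC_QUEUE_SHUTDOWN);
  grpc_completion_queue_destroy(cq);
}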

@@ -25,9 +25,9 @@
extern "C" {
#endif
GRPCAPI grpc_channel *grpc_cronet_secure_channel_create(
void *engine, const char *target, const grpc_channel_args *args,
void *reserved);
GRPCAPI grpc_channel* grpc_cronet_secure_channel_create(
void* engine, const char* target, const grpc_channel_args* args,
void* reserved);
#ifdef __cplusplus
}

@@ -37,8 +37,8 @@ extern "C" {
/** Create a client channel to 'target' using file descriptor 'fd'. The 'target'
argument will be used to indicate the name for this channel. See the comment
for grpc_insecure_channel_create for description of 'args' argument. */
GRPCAPI grpc_channel *grpc_insecure_channel_create_from_fd(
const char *target, int fd, const grpc_channel_args *args);
GRPCAPI grpc_channel* grpc_insecure_channel_create_from_fd(
const char* target, int fd, const grpc_channel_args* args);
/** Add the connected communication channel based on file descriptor 'fd' to the
'server'. The 'fd' must be an open file descriptor corresponding to a
@@ -48,8 +48,8 @@ GRPCAPI grpc_channel *grpc_insecure_channel_create_from_fd(
The 'reserved' pointer MUST be NULL.
*/
GRPCAPI void grpc_server_add_insecure_channel_from_fd(grpc_server *server,
void *reserved, int fd);
GRPCAPI void grpc_server_add_insecure_channel_from_fd(grpc_server* server,
void* reserved, int fd);
/** GRPC Core POSIX library may internally use signals to optimize some work.
The library uses (SIGRTMIN + 6) signal by default. Use this API to instruct

@@ -32,51 +32,51 @@ extern "C" {
typedef struct grpc_auth_context grpc_auth_context;
typedef struct grpc_auth_property_iterator {
const grpc_auth_context *ctx;
const grpc_auth_context* ctx;
size_t index;
const char *name;
const char* name;
} grpc_auth_property_iterator;
/** value, if not NULL, is guaranteed to be NULL terminated. */
typedef struct grpc_auth_property {
char *name;
char *value;
char* name;
char* value;
size_t value_length;
} grpc_auth_property;
/** Returns NULL when the iterator is at the end. */
GRPCAPI const grpc_auth_property *grpc_auth_property_iterator_next(
grpc_auth_property_iterator *it);
GRPCAPI const grpc_auth_property* grpc_auth_property_iterator_next(
grpc_auth_property_iterator* it);
/** Iterates over the auth context. */
GRPCAPI grpc_auth_property_iterator
grpc_auth_context_property_iterator(const grpc_auth_context *ctx);
grpc_auth_context_property_iterator(const grpc_auth_context* ctx);
/** Gets the peer identity. Returns an empty iterator (first _next will return
NULL) if the peer is not authenticated. */
GRPCAPI grpc_auth_property_iterator
grpc_auth_context_peer_identity(const grpc_auth_context *ctx);
grpc_auth_context_peer_identity(const grpc_auth_context* ctx);
/** Finds a property in the context. May return an empty iterator (first _next
will return NULL) if no property with this name was found in the context. */
GRPCAPI grpc_auth_property_iterator grpc_auth_context_find_properties_by_name(
const grpc_auth_context *ctx, const char *name);
const grpc_auth_context* ctx, const char* name);
/** Gets the name of the property that indicates the peer identity. Will return
NULL if the peer is not authenticated. */
GRPCAPI const char *grpc_auth_context_peer_identity_property_name(
const grpc_auth_context *ctx);
GRPCAPI const char* grpc_auth_context_peer_identity_property_name(
const grpc_auth_context* ctx);
/** Returns 1 if the peer is authenticated, 0 otherwise. */
GRPCAPI int grpc_auth_context_peer_is_authenticated(
const grpc_auth_context *ctx);
const grpc_auth_context* ctx);
/** Gets the auth context from the call. Caller needs to call
grpc_auth_context_release on the returned context. */
GRPCAPI grpc_auth_context *grpc_call_auth_context(grpc_call *call);
GRPCAPI grpc_auth_context* grpc_call_auth_context(grpc_call* call);
/** Releases the auth context returned from grpc_call_auth_context. */
GRPCAPI void grpc_auth_context_release(grpc_auth_context *context);
GRPCAPI void grpc_auth_context_release(grpc_auth_context* context);
/** --
The following auth context methods should only be called by a server metadata
@@ -84,19 +84,19 @@ GRPCAPI void grpc_auth_context_release(grpc_auth_context *context);
-- */
/** Add a property. */
GRPCAPI void grpc_auth_context_add_property(grpc_auth_context *ctx,
const char *name, const char *value,
GRPCAPI void grpc_auth_context_add_property(grpc_auth_context* ctx,
const char* name, const char* value,
size_t value_length);
/** Add a C string property. */
GRPCAPI void grpc_auth_context_add_cstring_property(grpc_auth_context *ctx,
const char *name,
const char *value);
GRPCAPI void grpc_auth_context_add_cstring_property(grpc_auth_context* ctx,
const char* name,
const char* value);
/** Sets the property name. Returns 1 if successful or 0 in case of failure
(which means that no property with this name exists). */
GRPCAPI int grpc_auth_context_set_peer_identity_property_name(
grpc_auth_context *ctx, const char *name);
grpc_auth_context* ctx, const char* name);
/** --- grpc_channel_credentials object. ---
@@ -107,12 +107,12 @@ typedef struct grpc_channel_credentials grpc_channel_credentials;
/** Releases a channel credentials object.
The creator of the credentials object is responsible for its release. */
GRPCAPI void grpc_channel_credentials_release(grpc_channel_credentials *creds);
GRPCAPI void grpc_channel_credentials_release(grpc_channel_credentials* creds);
/** Creates default credentials to connect to a google gRPC service.
WARNING: Do NOT use this credentials to connect to a non-google service as
this could result in an oauth2 token leak. */
GRPCAPI grpc_channel_credentials *grpc_google_default_credentials_create(void);
GRPCAPI grpc_channel_credentials* grpc_google_default_credentials_create(void);
/** Callback for getting the SSL roots override from the application.
In case of success, *pem_roots_certs must be set to a NULL terminated string
@@ -121,7 +121,7 @@ GRPCAPI grpc_channel_credentials *grpc_google_default_credentials_create(void);
If this function fails and GRPC_DEFAULT_SSL_ROOTS_FILE_PATH environment is
set to a valid path, it will override the roots specified this func */
typedef grpc_ssl_roots_override_result (*grpc_ssl_roots_override_callback)(
char **pem_root_certs);
char** pem_root_certs);
/** Setup a callback to override the default TLS/SSL roots.
This function is not thread-safe and must be called at initialization time
@@ -135,11 +135,11 @@ GRPCAPI void grpc_set_ssl_roots_override_callback(
typedef struct {
/** private_key is the NULL-terminated string containing the PEM encoding of
the client's private key. */
const char *private_key;
const char* private_key;
/** cert_chain is the NULL-terminated string containing the PEM encoding of
the client's certificate chain. */
const char *cert_chain;
const char* cert_chain;
} grpc_ssl_pem_key_cert_pair;
/** Creates an SSL credentials object.
@@ -153,9 +153,9 @@ typedef struct {
- pem_key_cert_pair is a pointer on the object containing client's private
key and certificate chain. This parameter can be NULL if the client does
not have such a key/cert pair. */
GRPCAPI grpc_channel_credentials *grpc_ssl_credentials_create(
const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pair,
void *reserved);
GRPCAPI grpc_channel_credentials* grpc_ssl_credentials_create(
const char* pem_root_certs, grpc_ssl_pem_key_cert_pair* pem_key_cert_pair,
void* reserved);
/** --- grpc_call_credentials object.
@@ -167,23 +167,23 @@ typedef struct grpc_call_credentials grpc_call_credentials;
/** Releases a call credentials object.
The creator of the credentials object is responsible for its release. */
GRPCAPI void grpc_call_credentials_release(grpc_call_credentials *creds);
GRPCAPI void grpc_call_credentials_release(grpc_call_credentials* creds);
/** Creates a composite channel credentials object. */
GRPCAPI grpc_channel_credentials *grpc_composite_channel_credentials_create(
grpc_channel_credentials *channel_creds, grpc_call_credentials *call_creds,
void *reserved);
GRPCAPI grpc_channel_credentials* grpc_composite_channel_credentials_create(
grpc_channel_credentials* channel_creds, grpc_call_credentials* call_creds,
void* reserved);
/** Creates a composite call credentials object. */
GRPCAPI grpc_call_credentials *grpc_composite_call_credentials_create(
grpc_call_credentials *creds1, grpc_call_credentials *creds2,
void *reserved);
GRPCAPI grpc_call_credentials* grpc_composite_call_credentials_create(
grpc_call_credentials* creds1, grpc_call_credentials* creds2,
void* reserved);
/** Creates a compute engine credentials object for connecting to Google.
WARNING: Do NOT use this credentials to connect to a non-google service as
this could result in an oauth2 token leak. */
GRPCAPI grpc_call_credentials *grpc_google_compute_engine_credentials_create(
void *reserved);
GRPCAPI grpc_call_credentials* grpc_google_compute_engine_credentials_create(
void* reserved);
GRPCAPI gpr_timespec grpc_max_auth_token_lifetime();
@@ -192,10 +192,10 @@ GRPCAPI gpr_timespec grpc_max_auth_token_lifetime();
- token_lifetime is the lifetime of each Json Web Token (JWT) created with
this credentials. It should not exceed grpc_max_auth_token_lifetime or
will be cropped to this value. */
GRPCAPI grpc_call_credentials *
grpc_service_account_jwt_access_credentials_create(const char *json_key,
GRPCAPI grpc_call_credentials*
grpc_service_account_jwt_access_credentials_create(const char* json_key,
gpr_timespec token_lifetime,
void *reserved);
void* reserved);
/** Creates an Oauth2 Refresh Token credentials object for connecting to Google.
May return NULL if the input is invalid.
@@ -203,18 +203,18 @@ grpc_service_account_jwt_access_credentials_create(const char *json_key,
this could result in an oauth2 token leak.
- json_refresh_token is the JSON string containing the refresh token itself
along with a client_id and client_secret. */
GRPCAPI grpc_call_credentials *grpc_google_refresh_token_credentials_create(
const char *json_refresh_token, void *reserved);
GRPCAPI grpc_call_credentials* grpc_google_refresh_token_credentials_create(
const char* json_refresh_token, void* reserved);
/** Creates an Oauth2 Access Token credentials with an access token that was
aquired by an out of band mechanism. */
GRPCAPI grpc_call_credentials *grpc_access_token_credentials_create(
const char *access_token, void *reserved);
GRPCAPI grpc_call_credentials* grpc_access_token_credentials_create(
const char* access_token, void* reserved);
/** Creates an IAM credentials object for connecting to Google. */
GRPCAPI grpc_call_credentials *grpc_google_iam_credentials_create(
const char *authorization_token, const char *authority_selector,
void *reserved);
GRPCAPI grpc_call_credentials* grpc_google_iam_credentials_create(
const char* authorization_token, const char* authority_selector,
void* reserved);
/** Callback function to be called by the metadata credentials plugin
implementation when the metadata is ready.
@@ -228,25 +228,25 @@ GRPCAPI grpc_call_credentials *grpc_google_iam_credentials_create(
- error_details contains details about the error if any. In case of success
it should be NULL and will be otherwise ignored. */
typedef void (*grpc_credentials_plugin_metadata_cb)(
void *user_data, const grpc_metadata *creds_md, size_t num_creds_md,
grpc_status_code status, const char *error_details);
void* user_data, const grpc_metadata* creds_md, size_t num_creds_md,
grpc_status_code status, const char* error_details);
/** Context that can be used by metadata credentials plugin in order to create
auth related metadata. */
typedef struct {
/** The fully qualifed service url. */
const char *service_url;
const char* service_url;
/** The method name of the RPC being called (not fully qualified).
The fully qualified method name can be built from the service_url:
full_qualified_method_name = ctx->service_url + '/' + ctx->method_name. */
const char *method_name;
const char* method_name;
/** The auth_context of the channel which gives the server's identity. */
const grpc_auth_context *channel_auth_context;
const grpc_auth_context* channel_auth_context;
/** Reserved for future use. */
void *reserved;
void* reserved;
} grpc_auth_metadata_context;
/** Maximum number of metadata entries returnable by a credentials plugin via
@@ -278,32 +278,32 @@ typedef struct {
\a context is the information that can be used by the plugin to create
auth metadata. */
int (*get_metadata)(
void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void *user_data,
void* state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void* user_data,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
size_t *num_creds_md, grpc_status_code *status,
const char **error_details);
size_t* num_creds_md, grpc_status_code* status,
const char** error_details);
/** Destroys the plugin state. */
void (*destroy)(void *state);
void (*destroy)(void* state);
/** State that will be set as the first parameter of the methods above. */
void *state;
void* state;
/** Type of credentials that this plugin is implementing. */
const char *type;
const char* type;
} grpc_metadata_credentials_plugin;
/** Creates a credentials object from a plugin. */
GRPCAPI grpc_call_credentials *grpc_metadata_credentials_create_from_plugin(
grpc_metadata_credentials_plugin plugin, void *reserved);
GRPCAPI grpc_call_credentials* grpc_metadata_credentials_create_from_plugin(
grpc_metadata_credentials_plugin plugin, void* reserved);
/** --- Secure channel creation. --- */
/** Creates a secure channel using the passed-in credentials. */
GRPCAPI grpc_channel *grpc_secure_channel_create(
grpc_channel_credentials *creds, const char *target,
const grpc_channel_args *args, void *reserved);
GRPCAPI grpc_channel* grpc_secure_channel_create(
grpc_channel_credentials* creds, const char* target,
const grpc_channel_args* args, void* reserved);
/** --- grpc_server_credentials object. ---
@@ -314,7 +314,7 @@ typedef struct grpc_server_credentials grpc_server_credentials;
/** Releases a server_credentials object.
The creator of the server_credentials object is responsible for its release.
*/
GRPCAPI void grpc_server_credentials_release(grpc_server_credentials *creds);
GRPCAPI void grpc_server_credentials_release(grpc_server_credentials* creds);
/** Server certificate config object holds the server's public certificates and
associated private keys, as well as any CA certificates needed for client
@@ -333,15 +333,15 @@ typedef struct grpc_ssl_server_certificate_config
and cert_chain_files parameters. It must be at least 1.
- It is the caller's responsibility to free this object via
grpc_ssl_server_certificate_config_destroy(). */
GRPCAPI grpc_ssl_server_certificate_config *
GRPCAPI grpc_ssl_server_certificate_config*
grpc_ssl_server_certificate_config_create(
const char *pem_root_certs,
const grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
const char* pem_root_certs,
const grpc_ssl_pem_key_cert_pair* pem_key_cert_pairs,
size_t num_key_cert_pairs);
/** Destroys a grpc_ssl_server_certificate_config object. */
GRPCAPI void grpc_ssl_server_certificate_config_destroy(
grpc_ssl_server_certificate_config *config);
grpc_ssl_server_certificate_config* config);
/** Callback to retrieve updated SSL server certificates, private keys, and
trusted CAs (for client authentication).
@@ -351,7 +351,7 @@ GRPCAPI void grpc_ssl_server_certificate_config_destroy(
- The caller assumes ownership of the config. */
typedef grpc_ssl_certificate_config_reload_status (
*grpc_ssl_server_certificate_config_callback)(
void *user_data, grpc_ssl_server_certificate_config **config);
void* user_data, grpc_ssl_server_certificate_config** config);
/** Deprecated in favor of grpc_ssl_server_credentials_create_ex.
Creates an SSL server_credentials object.
@@ -365,19 +365,19 @@ typedef grpc_ssl_certificate_config_reload_status (
- force_client_auth, if set to non-zero will force the client to authenticate
with an SSL cert. Note that this option is ignored if pem_root_certs is
NULL. */
GRPCAPI grpc_server_credentials *grpc_ssl_server_credentials_create(
const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
size_t num_key_cert_pairs, int force_client_auth, void *reserved);
GRPCAPI grpc_server_credentials* grpc_ssl_server_credentials_create(
const char* pem_root_certs, grpc_ssl_pem_key_cert_pair* pem_key_cert_pairs,
size_t num_key_cert_pairs, int force_client_auth, void* reserved);
/** Deprecated in favor of grpc_ssl_server_credentials_create_with_options.
Same as grpc_ssl_server_credentials_create method except uses
grpc_ssl_client_certificate_request_type enum to support more ways to
authenticate client cerificates.*/
GRPCAPI grpc_server_credentials *grpc_ssl_server_credentials_create_ex(
const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
GRPCAPI grpc_server_credentials* grpc_ssl_server_credentials_create_ex(
const char* pem_root_certs, grpc_ssl_pem_key_cert_pair* pem_key_cert_pairs,
size_t num_key_cert_pairs,
grpc_ssl_client_certificate_request_type client_certificate_request,
void *reserved);
void* reserved);
typedef struct grpc_ssl_server_credentials_options
grpc_ssl_server_credentials_options;
@@ -386,10 +386,10 @@ typedef struct grpc_ssl_server_credentials_options
the certificates and keys of the SSL server will not change during the
server's lifetime.
- Takes ownership of the certificate_config parameter. */
GRPCAPI grpc_ssl_server_credentials_options *
GRPCAPI grpc_ssl_server_credentials_options*
grpc_ssl_server_credentials_create_options_using_config(
grpc_ssl_client_certificate_request_type client_certificate_request,
grpc_ssl_server_certificate_config *certificate_config);
grpc_ssl_server_certificate_config* certificate_config);
/** Creates an options object using a certificate config fetcher. Use this
method to reload the certificates and keys of the SSL server without
@@ -398,36 +398,36 @@ grpc_ssl_server_credentials_create_options_using_config(
- user_data parameter, if not NULL, contains opaque data which will be passed
to the fetcher (see definition of
grpc_ssl_server_certificate_config_callback). */
GRPCAPI grpc_ssl_server_credentials_options *
GRPCAPI grpc_ssl_server_credentials_options*
grpc_ssl_server_credentials_create_options_using_config_fetcher(
grpc_ssl_client_certificate_request_type client_certificate_request,
grpc_ssl_server_certificate_config_callback cb, void *user_data);
grpc_ssl_server_certificate_config_callback cb, void* user_data);
/** Destroys a grpc_ssl_server_credentials_options object. */
GRPCAPI void grpc_ssl_server_credentials_options_destroy(
grpc_ssl_server_credentials_options *options);
grpc_ssl_server_credentials_options* options);
/** Creates an SSL server_credentials object using the provided options struct.
- Takes ownership of the options parameter. */
GRPCAPI grpc_server_credentials *
GRPCAPI grpc_server_credentials*
grpc_ssl_server_credentials_create_with_options(
grpc_ssl_server_credentials_options *options);
grpc_ssl_server_credentials_options* options);
/** --- Server-side secure ports. --- */
/** Add a HTTP2 over an encrypted link over tcp listener.
Returns bound port number on success, 0 on failure.
REQUIRES: server not started */
GRPCAPI int grpc_server_add_secure_http2_port(grpc_server *server,
const char *addr,
grpc_server_credentials *creds);
GRPCAPI int grpc_server_add_secure_http2_port(grpc_server* server,
const char* addr,
grpc_server_credentials* creds);
/** --- Call specific credentials. --- */
/** Sets a credentials to a call. Can only be called on the client side before
grpc_call_start_batch. */
GRPCAPI grpc_call_error grpc_call_set_credentials(grpc_call *call,
grpc_call_credentials *creds);
GRPCAPI grpc_call_error grpc_call_set_credentials(grpc_call* call,
grpc_call_credentials* creds);
/** --- Auth Metadata Processing --- */
@@ -441,9 +441,9 @@ GRPCAPI grpc_call_error grpc_call_set_credentials(grpc_call *call,
GRPC_STATUS PERMISSION_DENIED in case of an authorization failure.
- error_details gives details about the error. May be NULL. */
typedef void (*grpc_process_auth_metadata_done_cb)(
void *user_data, const grpc_metadata *consumed_md, size_t num_consumed_md,
const grpc_metadata *response_md, size_t num_response_md,
grpc_status_code status, const char *error_details);
void* user_data, const grpc_metadata* consumed_md, size_t num_consumed_md,
const grpc_metadata* response_md, size_t num_response_md,
grpc_status_code status, const char* error_details);
/** Pluggable server-side metadata processor object. */
typedef struct {
@@ -451,15 +451,15 @@ typedef struct {
channel peer and it is the job of the process function to augment it with
properties derived from the passed-in metadata.
The lifetime of these objects is guaranteed until cb is invoked. */
void (*process)(void *state, grpc_auth_context *context,
const grpc_metadata *md, size_t num_md,
grpc_process_auth_metadata_done_cb cb, void *user_data);
void (*destroy)(void *state);
void *state;
void (*process)(void* state, grpc_auth_context* context,
const grpc_metadata* md, size_t num_md,
grpc_process_auth_metadata_done_cb cb, void* user_data);
void (*destroy)(void* state);
void* state;
} grpc_auth_metadata_processor;
GRPCAPI void grpc_server_credentials_set_auth_metadata_processor(
grpc_server_credentials *creds, grpc_auth_metadata_processor processor);
grpc_server_credentials* creds, grpc_auth_metadata_processor processor);
#ifdef __cplusplus
}
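
To make the reformatted credentials signatures concrete, here is a minimal, hypothetical client setup built from the declarations above (the target address is a placeholder; this code is not part of the diff):

#include <grpc/grpc.h>
#include <grpc/grpc_security.h>

static grpc_channel* make_secure_channel(void) {
  // NULL root certs fall back to the default roots (or the roots override
  // callback); a NULL key/cert pair means no client authentication.
  grpc_channel_credentials* creds =
      grpc_ssl_credentials_create(NULL, NULL, NULL);
  grpc_channel* channel = grpc_secure_channel_create(
      creds, "greeter.example.com:443", NULL, NULL);
  // The channel keeps the references it needs; the creator releases its handle.
  grpc_channel_credentials_release(creds);
  return channel;
}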

@@ -81,7 +81,7 @@
/** Adds \a delta to \a *value, clamping the result to the range specified
by \a min and \a max. Returns the new value. */
gpr_atm gpr_atm_no_barrier_clamped_add(gpr_atm *value, gpr_atm delta,
gpr_atm gpr_atm_no_barrier_clamped_add(gpr_atm* value, gpr_atm delta,
gpr_atm min, gpr_atm max);
#endif /* GRPC_IMPL_CODEGEN_ATM_H */

@@ -57,22 +57,22 @@ extern gpr_atm gpr_counter_atm_add;
GPR_ATM_INC_ADD_THEN( \
__atomic_fetch_add((p), (intptr_t)(delta), __ATOMIC_ACQ_REL))
static __inline int gpr_atm_no_barrier_cas(gpr_atm *p, gpr_atm o, gpr_atm n) {
static __inline int gpr_atm_no_barrier_cas(gpr_atm* p, gpr_atm o, gpr_atm n) {
return GPR_ATM_INC_CAS_THEN(__atomic_compare_exchange_n(
p, &o, n, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED));
}
static __inline int gpr_atm_acq_cas(gpr_atm *p, gpr_atm o, gpr_atm n) {
static __inline int gpr_atm_acq_cas(gpr_atm* p, gpr_atm o, gpr_atm n) {
return GPR_ATM_INC_CAS_THEN(__atomic_compare_exchange_n(
p, &o, n, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
}
static __inline int gpr_atm_rel_cas(gpr_atm *p, gpr_atm o, gpr_atm n) {
static __inline int gpr_atm_rel_cas(gpr_atm* p, gpr_atm o, gpr_atm n) {
return GPR_ATM_INC_CAS_THEN(__atomic_compare_exchange_n(
p, &o, n, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED));
}
static __inline int gpr_atm_full_cas(gpr_atm *p, gpr_atm o, gpr_atm n) {
static __inline int gpr_atm_full_cas(gpr_atm* p, gpr_atm o, gpr_atm n) {
return GPR_ATM_INC_CAS_THEN(__atomic_compare_exchange_n(
p, &o, n, 0, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED));
}

@@ -38,24 +38,24 @@ typedef intptr_t gpr_atm;
#define gpr_atm_full_barrier() (__sync_synchronize())
static __inline gpr_atm gpr_atm_acq_load(const gpr_atm *p) {
static __inline gpr_atm gpr_atm_acq_load(const gpr_atm* p) {
gpr_atm value = *p;
GPR_ATM_LS_BARRIER_();
return value;
}
static __inline gpr_atm gpr_atm_no_barrier_load(const gpr_atm *p) {
static __inline gpr_atm gpr_atm_no_barrier_load(const gpr_atm* p) {
gpr_atm value = *p;
GPR_ATM_COMPILE_BARRIER_();
return value;
}
static __inline void gpr_atm_rel_store(gpr_atm *p, gpr_atm value) {
static __inline void gpr_atm_rel_store(gpr_atm* p, gpr_atm value) {
GPR_ATM_LS_BARRIER_();
*p = value;
}
static __inline void gpr_atm_no_barrier_store(gpr_atm *p, gpr_atm value) {
static __inline void gpr_atm_no_barrier_store(gpr_atm* p, gpr_atm value) {
GPR_ATM_COMPILE_BARRIER_();
*p = value;
}
@@ -72,7 +72,7 @@ static __inline void gpr_atm_no_barrier_store(gpr_atm *p, gpr_atm value) {
#define gpr_atm_rel_cas(p, o, n) gpr_atm_acq_cas((p), (o), (n))
#define gpr_atm_full_cas(p, o, n) gpr_atm_acq_cas((p), (o), (n))
static __inline gpr_atm gpr_atm_full_xchg(gpr_atm *p, gpr_atm n) {
static __inline gpr_atm gpr_atm_full_xchg(gpr_atm* p, gpr_atm n) {
gpr_atm cur;
do {
cur = gpr_atm_acq_load(p);

@ -28,70 +28,70 @@ typedef intptr_t gpr_atm;
#define gpr_atm_full_barrier MemoryBarrier
static __inline gpr_atm gpr_atm_acq_load(const gpr_atm *p) {
static __inline gpr_atm gpr_atm_acq_load(const gpr_atm* p) {
gpr_atm result = *p;
gpr_atm_full_barrier();
return result;
}
static __inline gpr_atm gpr_atm_no_barrier_load(const gpr_atm *p) {
static __inline gpr_atm gpr_atm_no_barrier_load(const gpr_atm* p) {
/* TODO(dklempner): Can we implement something better here? */
return gpr_atm_acq_load(p);
}
static __inline void gpr_atm_rel_store(gpr_atm *p, gpr_atm value) {
static __inline void gpr_atm_rel_store(gpr_atm* p, gpr_atm value) {
gpr_atm_full_barrier();
*p = value;
}
static __inline void gpr_atm_no_barrier_store(gpr_atm *p, gpr_atm value) {
static __inline void gpr_atm_no_barrier_store(gpr_atm* p, gpr_atm value) {
/* TODO(ctiller): Can we implement something better here? */
gpr_atm_rel_store(p, value);
}
static __inline int gpr_atm_no_barrier_cas(gpr_atm *p, gpr_atm o, gpr_atm n) {
static __inline int gpr_atm_no_barrier_cas(gpr_atm* p, gpr_atm o, gpr_atm n) {
/** InterlockedCompareExchangePointerNoFence() not available on vista or
windows7 */
#ifdef GPR_ARCH_64
return o == (gpr_atm)InterlockedCompareExchangeAcquire64(
(volatile LONGLONG *)p, (LONGLONG)n, (LONGLONG)o);
(volatile LONGLONG*)p, (LONGLONG)n, (LONGLONG)o);
#else
return o == (gpr_atm)InterlockedCompareExchangeAcquire((volatile LONG *)p,
return o == (gpr_atm)InterlockedCompareExchangeAcquire((volatile LONG*)p,
(LONG)n, (LONG)o);
#endif
}
static __inline int gpr_atm_acq_cas(gpr_atm *p, gpr_atm o, gpr_atm n) {
static __inline int gpr_atm_acq_cas(gpr_atm* p, gpr_atm o, gpr_atm n) {
#ifdef GPR_ARCH_64
return o == (gpr_atm)InterlockedCompareExchangeAcquire64(
(volatile LONGLONG *)p, (LONGLONG)n, (LONGLONG)o);
(volatile LONGLONG*)p, (LONGLONG)n, (LONGLONG)o);
#else
return o == (gpr_atm)InterlockedCompareExchangeAcquire((volatile LONG *)p,
return o == (gpr_atm)InterlockedCompareExchangeAcquire((volatile LONG*)p,
(LONG)n, (LONG)o);
#endif
}
static __inline int gpr_atm_rel_cas(gpr_atm *p, gpr_atm o, gpr_atm n) {
static __inline int gpr_atm_rel_cas(gpr_atm* p, gpr_atm o, gpr_atm n) {
#ifdef GPR_ARCH_64
return o == (gpr_atm)InterlockedCompareExchangeRelease64(
(volatile LONGLONG *)p, (LONGLONG)n, (LONGLONG)o);
(volatile LONGLONG*)p, (LONGLONG)n, (LONGLONG)o);
#else
return o == (gpr_atm)InterlockedCompareExchangeRelease((volatile LONG *)p,
return o == (gpr_atm)InterlockedCompareExchangeRelease((volatile LONG*)p,
(LONG)n, (LONG)o);
#endif
}
static __inline int gpr_atm_full_cas(gpr_atm *p, gpr_atm o, gpr_atm n) {
static __inline int gpr_atm_full_cas(gpr_atm* p, gpr_atm o, gpr_atm n) {
#ifdef GPR_ARCH_64
return o == (gpr_atm)InterlockedCompareExchange64((volatile LONGLONG *)p,
return o == (gpr_atm)InterlockedCompareExchange64((volatile LONGLONG*)p,
(LONGLONG)n, (LONGLONG)o);
#else
return o == (gpr_atm)InterlockedCompareExchange((volatile LONG *)p, (LONG)n,
return o == (gpr_atm)InterlockedCompareExchange((volatile LONG*)p, (LONG)n,
(LONG)o);
#endif
}
static __inline gpr_atm gpr_atm_no_barrier_fetch_add(gpr_atm *p,
static __inline gpr_atm gpr_atm_no_barrier_fetch_add(gpr_atm* p,
gpr_atm delta) {
/** Use the CAS operation to get pointer-sized fetch and add */
gpr_atm old;
@ -101,26 +101,26 @@ static __inline gpr_atm gpr_atm_no_barrier_fetch_add(gpr_atm *p,
return old;
}
static __inline gpr_atm gpr_atm_full_fetch_add(gpr_atm *p, gpr_atm delta) {
static __inline gpr_atm gpr_atm_full_fetch_add(gpr_atm* p, gpr_atm delta) {
/** Use a CAS operation to get pointer-sized fetch and add */
gpr_atm old;
#ifdef GPR_ARCH_64
do {
old = *p;
} while (old != (gpr_atm)InterlockedCompareExchange64((volatile LONGLONG *)p,
} while (old != (gpr_atm)InterlockedCompareExchange64((volatile LONGLONG*)p,
(LONGLONG)old + delta,
(LONGLONG)old));
#else
do {
old = *p;
} while (old != (gpr_atm)InterlockedCompareExchange(
(volatile LONG *)p, (LONG)old + delta, (LONG)old));
(volatile LONG*)p, (LONG)old + delta, (LONG)old));
#endif
return old;
}
static __inline gpr_atm gpr_atm_full_xchg(gpr_atm *p, gpr_atm n) {
return (gpr_atm)InterlockedExchangePointer((PVOID *)p, (PVOID)n);
static __inline gpr_atm gpr_atm_full_xchg(gpr_atm* p, gpr_atm n) {
return (gpr_atm)InterlockedExchangePointer((PVOID*)p, (PVOID)n);
}
#endif /* GRPC_IMPL_CODEGEN_ATM_WINDOWS_H */
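
The Windows backend above builds fetch-and-add out of a CAS loop; the same pattern works against the portable gpr_atm API. A sketch under those assumptions (try_acquire_slot and release_slot are hypothetical names, and <grpc/support/atm.h> is assumed to select the right platform header):

#include <grpc/support/atm.h>

// Sketch: take a slot only while the count stays below a limit, using the
// same acquire-CAS retry loop the Windows backend uses internally.
static gpr_atm g_active = 0;

bool try_acquire_slot(gpr_atm limit) {
  for (;;) {
    gpr_atm cur = gpr_atm_acq_load(&g_active);
    if (cur >= limit) return false;  // no slot available
    if (gpr_atm_acq_cas(&g_active, cur, cur + 1)) return true;
  }
}

void release_slot(void) { gpr_atm_full_fetch_add(&g_active, -1); }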

@ -29,7 +29,7 @@ extern "C" {
*
* Increases the reference count for all \a slices processed. The user is
* responsible for invoking grpc_byte_buffer_destroy on the returned instance.*/
GRPCAPI grpc_byte_buffer *grpc_raw_byte_buffer_create(grpc_slice *slices,
GRPCAPI grpc_byte_buffer* grpc_raw_byte_buffer_create(grpc_slice* slices,
size_t nslices);
/** Returns a *compressed* RAW byte buffer instance over the given slices (up to
@ -38,20 +38,20 @@ GRPCAPI grpc_byte_buffer *grpc_raw_byte_buffer_create(grpc_slice *slices,
*
* Increases the reference count for all \a slices processed. The user is
* responsible for invoking grpc_byte_buffer_destroy on the returned instance.*/
GRPCAPI grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create(
grpc_slice *slices, size_t nslices, grpc_compression_algorithm compression);
GRPCAPI grpc_byte_buffer* grpc_raw_compressed_byte_buffer_create(
grpc_slice* slices, size_t nslices, grpc_compression_algorithm compression);
/** Copies input byte buffer \a bb.
*
* Increases the reference count of all the source slices. The user is
* responsible for calling grpc_byte_buffer_destroy over the returned copy. */
GRPCAPI grpc_byte_buffer *grpc_byte_buffer_copy(grpc_byte_buffer *bb);
GRPCAPI grpc_byte_buffer* grpc_byte_buffer_copy(grpc_byte_buffer* bb);
/** Returns the size of the given byte buffer, in bytes. */
GRPCAPI size_t grpc_byte_buffer_length(grpc_byte_buffer *bb);
GRPCAPI size_t grpc_byte_buffer_length(grpc_byte_buffer* bb);
/** Destroys \a byte_buffer deallocating all its memory. */
GRPCAPI void grpc_byte_buffer_destroy(grpc_byte_buffer *byte_buffer);
GRPCAPI void grpc_byte_buffer_destroy(grpc_byte_buffer* byte_buffer);
/** Reader for byte buffers. Iterates over slices in the byte buffer */
struct grpc_byte_buffer_reader;
@ -59,25 +59,25 @@ typedef struct grpc_byte_buffer_reader grpc_byte_buffer_reader;
/** Initialize \a reader to read over \a buffer.
* Returns 1 upon success, 0 otherwise. */
GRPCAPI int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
grpc_byte_buffer *buffer);
GRPCAPI int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader,
grpc_byte_buffer* buffer);
/** Cleanup and destroy \a reader */
GRPCAPI void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader);
GRPCAPI void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader* reader);
/** Updates \a slice with the next piece of data from \a reader and returns
* 1. Returns 0 at the end of the stream. Caller is responsible for calling
* grpc_slice_unref on the result. */
GRPCAPI int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader,
grpc_slice *slice);
GRPCAPI int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader* reader,
grpc_slice* slice);
/** Merge all data from \a reader into single slice */
GRPCAPI grpc_slice
grpc_byte_buffer_reader_readall(grpc_byte_buffer_reader *reader);
grpc_byte_buffer_reader_readall(grpc_byte_buffer_reader* reader);
/** Returns a RAW byte buffer instance from the output of \a reader. */
GRPCAPI grpc_byte_buffer *grpc_raw_byte_buffer_from_reader(
grpc_byte_buffer_reader *reader);
GRPCAPI grpc_byte_buffer* grpc_raw_byte_buffer_from_reader(
grpc_byte_buffer_reader* reader);
#ifdef __cplusplus
}
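
A short sketch of the byte-buffer lifecycle these declarations describe: create a buffer from a slice, read it back, and destroy it. Assumes the public <grpc/byte_buffer.h> and <grpc/byte_buffer_reader.h> headers; byte_buffer_roundtrip is a hypothetical name.

#include <grpc/byte_buffer.h>
#include <grpc/byte_buffer_reader.h>
#include <grpc/slice.h>

// Sketch: wrap a string in a byte buffer, then read it back as one slice.
void byte_buffer_roundtrip(void) {
  grpc_slice payload = grpc_slice_from_copied_string("hello");
  grpc_byte_buffer* bb = grpc_raw_byte_buffer_create(&payload, 1);
  grpc_slice_unref(payload);  // bb now holds its own reference

  grpc_byte_buffer_reader reader;
  if (grpc_byte_buffer_reader_init(&reader, bb)) {  // returns 1 on success
    grpc_slice all = grpc_byte_buffer_reader_readall(&reader);
    // ... inspect GRPC_SLICE_START_PTR(all) / GRPC_SLICE_LENGTH(all) ...
    grpc_slice_unref(all);
    grpc_byte_buffer_reader_destroy(&reader);
  }
  grpc_byte_buffer_destroy(bb);
}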

@ -26,8 +26,8 @@ extern "C" {
struct grpc_byte_buffer;
struct grpc_byte_buffer_reader {
struct grpc_byte_buffer *buffer_in;
struct grpc_byte_buffer *buffer_out;
struct grpc_byte_buffer* buffer_in;
struct grpc_byte_buffer* buffer_out;
/** Different current objects correspond to different types of byte buffers */
union grpc_byte_buffer_reader_current {
/** Index into a slice buffer's array of slices */

@ -39,11 +39,11 @@ typedef enum {
} grpc_byte_buffer_type;
typedef struct grpc_byte_buffer {
void *reserved;
void* reserved;
grpc_byte_buffer_type type;
union grpc_byte_buffer_data {
struct /* internal */ {
void *reserved[8];
void* reserved[8];
} reserved;
struct grpc_compressed_buffer {
grpc_compression_algorithm compression;
@ -84,9 +84,9 @@ typedef enum {
} grpc_arg_type;
typedef struct grpc_arg_pointer_vtable {
void *(*copy)(void *p);
void (*destroy)(grpc_exec_ctx *exec_ctx, void *p);
int (*cmp)(void *p, void *q);
void* (*copy)(void* p);
void (*destroy)(grpc_exec_ctx* exec_ctx, void* p);
int (*cmp)(void* p, void* q);
} grpc_arg_pointer_vtable;
/** A single argument... each argument has a key and a value
@ -103,13 +103,13 @@ typedef struct grpc_arg_pointer_vtable {
their keys so that it's possible to change them in the future. */
typedef struct {
grpc_arg_type type;
char *key;
char* key;
union grpc_arg_value {
char *string;
char* string;
int integer;
struct grpc_arg_pointer {
void *p;
const grpc_arg_pointer_vtable *vtable;
void* p;
const grpc_arg_pointer_vtable* vtable;
} pointer;
} value;
} grpc_arg;
@ -127,7 +127,7 @@ typedef struct {
details. */
typedef struct {
size_t num_args;
grpc_arg *args;
grpc_arg* args;
} grpc_channel_args;
/** \defgroup grpc_arg_keys
@ -400,7 +400,7 @@ typedef struct grpc_metadata {
There is no need to initialize them, and they will be set to garbage
during calls to grpc. */
struct /* internal */ {
void *obfuscated[4];
void* obfuscated[4];
} internal_data;
} grpc_metadata;
@ -428,13 +428,13 @@ typedef struct grpc_event {
int success;
/** The tag passed to grpc_call_start_batch etc to start this operation.
Only GRPC_OP_COMPLETE has a tag. */
void *tag;
void* tag;
} grpc_event;
typedef struct {
size_t count;
size_t capacity;
grpc_metadata *metadata;
grpc_metadata* metadata;
} grpc_metadata_array;
typedef struct {
@ -442,7 +442,7 @@ typedef struct {
grpc_slice host;
gpr_timespec deadline;
uint32_t flags;
void *reserved;
void* reserved;
} grpc_call_details;
typedef enum {
@ -498,15 +498,15 @@ typedef struct grpc_op {
/** Write flags bitset for grpc_begin_messages */
uint32_t flags;
/** Reserved for future usage */
void *reserved;
void* reserved;
union grpc_op_data {
/** Reserved for future usage */
struct /* internal */ {
void *reserved[8];
void* reserved[8];
} reserved;
struct grpc_op_send_initial_metadata {
size_t count;
grpc_metadata *metadata;
grpc_metadata* metadata;
/** If \a is_set, \a compression_level will be used for the call.
* Otherwise, \a compression_level won't be considered */
struct grpc_op_send_initial_metadata_maybe_compression_level {
@ -524,16 +524,16 @@ typedef struct grpc_op {
* and likely empty. The original owner should still call
* grpc_byte_buffer_destroy() on this object however.
*/
struct grpc_byte_buffer *send_message;
struct grpc_byte_buffer* send_message;
} send_message;
struct grpc_op_send_status_from_server {
size_t trailing_metadata_count;
grpc_metadata *trailing_metadata;
grpc_metadata* trailing_metadata;
grpc_status_code status;
/** optional: set to NULL if no details need sending, non-NULL if they do
* pointer will not be retained past the start_batch call
*/
grpc_slice *status_details;
grpc_slice* status_details;
} send_status_from_server;
/** ownership of the array is with the caller, but ownership of the elements
stays with the call object (ie key, value members are owned by the call
@ -541,13 +541,13 @@ typedef struct grpc_op {
After the operation completes, call grpc_metadata_array_destroy on this
value, or reuse it in a future op. */
struct grpc_op_recv_initial_metadata {
grpc_metadata_array *recv_initial_metadata;
grpc_metadata_array* recv_initial_metadata;
} recv_initial_metadata;
/** ownership of the byte buffer is moved to the caller; the caller must
call grpc_byte_buffer_destroy on this value, or reuse it in a future op.
*/
struct grpc_op_recv_message {
struct grpc_byte_buffer **recv_message;
struct grpc_byte_buffer** recv_message;
} recv_message;
struct grpc_op_recv_status_on_client {
/** ownership of the array is with the caller, but ownership of the
@ -555,14 +555,14 @@ typedef struct grpc_op {
by the call object, trailing_metadata->array is owned by the caller).
After the operation completes, call grpc_metadata_array_destroy on
this value, or reuse it in a future op. */
grpc_metadata_array *trailing_metadata;
grpc_status_code *status;
grpc_slice *status_details;
grpc_metadata_array* trailing_metadata;
grpc_status_code* status;
grpc_slice* status_details;
} recv_status_on_client;
struct grpc_op_recv_close_on_server {
/** out argument, set to 1 if the call failed in any way (seen as a
cancellation on the server), or 0 if the call succeeded */
int *cancelled;
int* cancelled;
} recv_close_on_server;
} data;
} grpc_op;
@ -571,10 +571,10 @@ typedef struct grpc_op {
typedef struct {
/** If non-NULL, will be set to point to a string indicating the LB
* policy name. Caller takes ownership. */
char **lb_policy_name;
char** lb_policy_name;
/** If non-NULL, will be set to point to a string containing the
* service config used by the channel in JSON form. */
char **service_config_json;
char** service_config_json;
} grpc_channel_info;
typedef struct grpc_resource_quota grpc_resource_quota;
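
To make the grpc_arg/grpc_channel_args shapes above concrete, a sketch that passes one integer and one string argument when creating a channel. The literal key strings are what the GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH and GRPC_ARG_PRIMARY_USER_AGENT macros are believed to expand to; prefer the macros in real code, and make_channel is a hypothetical name.

#include <grpc/grpc.h>

grpc_channel* make_channel(const char* target) {
  grpc_arg args_array[2];
  args_array[0].type = GRPC_ARG_INTEGER;
  args_array[0].key = const_cast<char*>("grpc.max_receive_message_length");
  args_array[0].value.integer = 16 * 1024 * 1024;
  args_array[1].type = GRPC_ARG_STRING;
  args_array[1].key = const_cast<char*>("grpc.primary_user_agent");
  args_array[1].value.string = const_cast<char*>("demo/1.0");
  grpc_channel_args channel_args = {2, args_array};  // num_args, args
  return grpc_insecure_channel_create(target, &channel_args, nullptr);
}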

@ -42,8 +42,8 @@ typedef struct grpc_slice grpc_slice;
constraints (is the callee allowed to modify the slice?) */
typedef struct grpc_slice_refcount_vtable {
void (*ref)(void *);
void (*unref)(grpc_exec_ctx *exec_ctx, void *);
void (*ref)(void*);
void (*unref)(grpc_exec_ctx* exec_ctx, void*);
int (*eq)(grpc_slice a, grpc_slice b);
uint32_t (*hash)(grpc_slice slice);
} grpc_slice_refcount_vtable;
@ -54,20 +54,20 @@ typedef struct grpc_slice_refcount_vtable {
Typically client code should not touch this, and use grpc_slice_malloc,
grpc_slice_new, or grpc_slice_new_with_len instead. */
typedef struct grpc_slice_refcount {
const grpc_slice_refcount_vtable *vtable;
const grpc_slice_refcount_vtable* vtable;
/** If a subset of this slice is taken, use this pointer for the refcount.
Typically points back to the refcount itself; however, interning
implementations can use this to avoid a verification step on each hash
or equality check */
struct grpc_slice_refcount *sub_refcount;
struct grpc_slice_refcount* sub_refcount;
} grpc_slice_refcount;
/* Inlined half of grpc_slice is allowed to expand the size of the overall type
by this many bytes */
#define GRPC_SLICE_INLINE_EXTRA_SIZE sizeof(void *)
#define GRPC_SLICE_INLINE_EXTRA_SIZE sizeof(void*)
#define GRPC_SLICE_INLINED_SIZE \
(sizeof(size_t) + sizeof(uint8_t *) - 1 + GRPC_SLICE_INLINE_EXTRA_SIZE)
(sizeof(size_t) + sizeof(uint8_t*) - 1 + GRPC_SLICE_INLINE_EXTRA_SIZE)
/** A grpc_slice s, if initialized, represents the byte range
s.bytes[0..s.length-1].
@ -79,10 +79,10 @@ typedef struct grpc_slice_refcount {
If the slice does not have a refcount, it represents an inlined small piece
of data that is copied by value. */
struct grpc_slice {
struct grpc_slice_refcount *refcount;
struct grpc_slice_refcount* refcount;
union grpc_slice_data {
struct grpc_slice_refcounted {
uint8_t *bytes;
uint8_t* bytes;
size_t length;
} refcounted;
struct grpc_slice_inlined {
@ -99,10 +99,10 @@ struct grpc_slice {
typedef struct {
/** This is for internal use only. External users (i.e any code outside grpc
* core) MUST NOT use this field */
grpc_slice *base_slices;
grpc_slice* base_slices;
/** slices in the array (Points to the first valid grpc_slice in the array) */
grpc_slice *slices;
grpc_slice* slices;
/** the number of slices in the array */
size_t count;
/** the number of slices allocated in the array. External users (i.e any code

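The refcounted-versus-inlined split described above can be observed directly: a slice whose refcount is NULL is the inlined, copied-by-value form. A sketch using the public macros (describe is a hypothetical name):

#include <grpc/slice.h>
#include <stdio.h>

// Sketch: the accessors differ for inlined vs refcounted slices; the public
// GRPC_SLICE_LENGTH / GRPC_SLICE_START_PTR macros hide that distinction.
void describe(grpc_slice s) {
  printf("%zu bytes, %s\n", GRPC_SLICE_LENGTH(s),
         s.refcount == nullptr ? "inlined" : "refcounted");
}
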
@ -23,16 +23,22 @@
#include <grpc/impl/codegen/atm.h>
/* gpr_event */
typedef struct { gpr_atm state; } gpr_event;
typedef struct {
gpr_atm state;
} gpr_event;
#define GPR_EVENT_INIT \
{ 0 }
/* gpr_refcount */
typedef struct { gpr_atm count; } gpr_refcount;
typedef struct {
gpr_atm count;
} gpr_refcount;
/* gpr_stats_counter */
typedef struct { gpr_atm value; } gpr_stats_counter;
typedef struct {
gpr_atm value;
} gpr_stats_counter;
#define GPR_STATS_INIT \
{ 0 }

@ -44,20 +44,20 @@ GPRAPI grpc_slice grpc_slice_copy(grpc_slice s);
/** Create a slice pointing at some data. Calls malloc to allocate a refcount
for the object, and arranges that destroy will be called with the pointer
passed in at destruction. */
GPRAPI grpc_slice grpc_slice_new(void *p, size_t len, void (*destroy)(void *));
GPRAPI grpc_slice grpc_slice_new(void* p, size_t len, void (*destroy)(void*));
/** Equivalent to grpc_slice_new, but with a separate pointer that is
passed to the destroy function. This function can be useful when
the data is part of a larger structure that must be destroyed when
the data is no longer needed. */
GPRAPI grpc_slice grpc_slice_new_with_user_data(void *p, size_t len,
void (*destroy)(void *),
void *user_data);
GPRAPI grpc_slice grpc_slice_new_with_user_data(void* p, size_t len,
void (*destroy)(void*),
void* user_data);
/** Equivalent to grpc_slice_new, but with a two argument destroy function that
also takes the slice length. */
GPRAPI grpc_slice grpc_slice_new_with_len(void *p, size_t len,
void (*destroy)(void *, size_t));
GPRAPI grpc_slice grpc_slice_new_with_len(void* p, size_t len,
void (*destroy)(void*, size_t));
/** Equivalent to grpc_slice_new(malloc(len), len, free), but saves one malloc()
call.
@ -79,19 +79,19 @@ GPRAPI grpc_slice grpc_slice_intern(grpc_slice slice);
size_t len = strlen(source);
grpc_slice slice = grpc_slice_malloc(len);
memcpy(slice->data, source, len); */
GPRAPI grpc_slice grpc_slice_from_copied_string(const char *source);
GPRAPI grpc_slice grpc_slice_from_copied_string(const char* source);
/** Create a slice by copying a buffer.
Equivalent to:
grpc_slice slice = grpc_slice_malloc(len);
memcpy(slice->data, source, len); */
GPRAPI grpc_slice grpc_slice_from_copied_buffer(const char *source, size_t len);
GPRAPI grpc_slice grpc_slice_from_copied_buffer(const char* source, size_t len);
/** Create a slice pointing to constant memory */
GPRAPI grpc_slice grpc_slice_from_static_string(const char *source);
GPRAPI grpc_slice grpc_slice_from_static_string(const char* source);
/** Create a slice pointing to constant memory */
GPRAPI grpc_slice grpc_slice_from_static_buffer(const void *source, size_t len);
GPRAPI grpc_slice grpc_slice_from_static_buffer(const void* source, size_t len);
/** Return a result slice derived from s, which shares a ref count with \a s,
where result.data==s.data+begin, and result.length==end-begin. The ref count
@ -106,7 +106,7 @@ GPRAPI grpc_slice grpc_slice_sub_no_ref(grpc_slice s, size_t begin, size_t end);
/** Splits s into two: modifies s to be s[0:split], and returns a new slice,
sharing a refcount with s, that contains s[split:s.length].
Requires s initialized, split <= s.length */
GPRAPI grpc_slice grpc_slice_split_tail(grpc_slice *s, size_t split);
GPRAPI grpc_slice grpc_slice_split_tail(grpc_slice* s, size_t split);
typedef enum {
GRPC_SLICE_REF_TAIL = 1,
@ -117,13 +117,13 @@ typedef enum {
/** The same as grpc_slice_split_tail, but with an option to skip altering
* refcounts (grpc_slice_split_tail_maybe_ref(..., true) is equivalent to
* grpc_slice_split_tail(...)) */
GPRAPI grpc_slice grpc_slice_split_tail_maybe_ref(grpc_slice *s, size_t split,
GPRAPI grpc_slice grpc_slice_split_tail_maybe_ref(grpc_slice* s, size_t split,
grpc_slice_ref_whom ref_whom);
/** Splits s into two: modifies s to be s[split:s.length], and returns a new
slice, sharing a refcount with s, that contains s[0:split].
Requires s initialized, split <= s.length */
GPRAPI grpc_slice grpc_slice_split_head(grpc_slice *s, size_t split);
GPRAPI grpc_slice grpc_slice_split_head(grpc_slice* s, size_t split);
GPRAPI grpc_slice grpc_empty_slice(void);
@ -136,11 +136,11 @@ GPRAPI int grpc_slice_eq(grpc_slice a, grpc_slice b);
The order is arbitrary, and is not guaranteed to be stable across different
versions of the API. */
GPRAPI int grpc_slice_cmp(grpc_slice a, grpc_slice b);
GPRAPI int grpc_slice_str_cmp(grpc_slice a, const char *b);
GPRAPI int grpc_slice_buf_cmp(grpc_slice a, const void *b, size_t blen);
GPRAPI int grpc_slice_str_cmp(grpc_slice a, const char* b);
GPRAPI int grpc_slice_buf_cmp(grpc_slice a, const void* b, size_t blen);
/** return non-zero if the first blen bytes of a are equal to b */
GPRAPI int grpc_slice_buf_start_eq(grpc_slice a, const void *b, size_t blen);
GPRAPI int grpc_slice_buf_start_eq(grpc_slice a, const void* b, size_t blen);
/** return the index of the last instance of \a c in \a s, or -1 if not found */
GPRAPI int grpc_slice_rchr(grpc_slice s, char c);
@ -162,7 +162,7 @@ GPRAPI grpc_slice grpc_slice_dup(grpc_slice a);
/** Return a copy of slice as a C string. Offers no protection against embedded
NULL's. Returned string must be freed with gpr_free. */
GPRAPI char *grpc_slice_to_c_string(grpc_slice s);
GPRAPI char* grpc_slice_to_c_string(grpc_slice s);
#ifdef __cplusplus
}
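
A sketch of the slice API above: split "key=value" into two slices that share one refcount, then flatten the tail to a C string (split_demo is a hypothetical name).

#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <stdio.h>

void split_demo(void) {
  grpc_slice s = grpc_slice_from_copied_string("key=value");
  // After the split: s == "key=", tail == "value", one shared refcount.
  grpc_slice tail = grpc_slice_split_tail(&s, 4);
  char* c = grpc_slice_to_c_string(tail);
  printf("%s\n", c);
  gpr_free(c);  // grpc_slice_to_c_string allocates with gpr_malloc
  grpc_slice_unref(tail);
  grpc_slice_unref(s);
}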

@ -26,13 +26,13 @@ extern "C" {
#endif
/** initialize a slice buffer */
GPRAPI void grpc_slice_buffer_init(grpc_slice_buffer *sb);
GPRAPI void grpc_slice_buffer_init(grpc_slice_buffer* sb);
/** destroy a slice buffer - unrefs any held elements */
GPRAPI void grpc_slice_buffer_destroy(grpc_slice_buffer *sb);
GPRAPI void grpc_slice_buffer_destroy(grpc_slice_buffer* sb);
/** Add an element to a slice buffer - takes ownership of the slice.
This function is allowed to concatenate the passed in slice to the end of
some other slice if desired by the slice buffer. */
GPRAPI void grpc_slice_buffer_add(grpc_slice_buffer *sb, grpc_slice slice);
GPRAPI void grpc_slice_buffer_add(grpc_slice_buffer* sb, grpc_slice slice);
/** add an element to a slice buffer - takes ownership of the slice and returns
the index of the slice.
Guarantees that the slice will not be concatenated at the end of another
@ -40,40 +40,40 @@ GPRAPI void grpc_slice_buffer_add(grpc_slice_buffer *sb, grpc_slice slice);
slice at the returned index in sb->slices)
The implementation MAY decide to concatenate data at the end of a small
slice added in this fashion. */
GPRAPI size_t grpc_slice_buffer_add_indexed(grpc_slice_buffer *sb,
GPRAPI size_t grpc_slice_buffer_add_indexed(grpc_slice_buffer* sb,
grpc_slice slice);
GPRAPI void grpc_slice_buffer_addn(grpc_slice_buffer *sb, grpc_slice *slices,
GPRAPI void grpc_slice_buffer_addn(grpc_slice_buffer* sb, grpc_slice* slices,
size_t n);
/** add a very small (less than 8 bytes) amount of data to the end of a slice
buffer: returns a pointer into which to add the data */
GPRAPI uint8_t *grpc_slice_buffer_tiny_add(grpc_slice_buffer *sb, size_t len);
GPRAPI uint8_t* grpc_slice_buffer_tiny_add(grpc_slice_buffer* sb, size_t len);
/** pop the last buffer, but don't unref it */
GPRAPI void grpc_slice_buffer_pop(grpc_slice_buffer *sb);
GPRAPI void grpc_slice_buffer_pop(grpc_slice_buffer* sb);
/** clear a slice buffer, unref all elements */
GPRAPI void grpc_slice_buffer_reset_and_unref(grpc_slice_buffer *sb);
GPRAPI void grpc_slice_buffer_reset_and_unref(grpc_slice_buffer* sb);
/** swap the contents of two slice buffers */
GPRAPI void grpc_slice_buffer_swap(grpc_slice_buffer *a, grpc_slice_buffer *b);
GPRAPI void grpc_slice_buffer_swap(grpc_slice_buffer* a, grpc_slice_buffer* b);
/** move all of the elements of src into dst */
GPRAPI void grpc_slice_buffer_move_into(grpc_slice_buffer *src,
grpc_slice_buffer *dst);
GPRAPI void grpc_slice_buffer_move_into(grpc_slice_buffer* src,
grpc_slice_buffer* dst);
/** remove n bytes from the end of a slice buffer */
GPRAPI void grpc_slice_buffer_trim_end(grpc_slice_buffer *src, size_t n,
grpc_slice_buffer *garbage);
GPRAPI void grpc_slice_buffer_trim_end(grpc_slice_buffer* src, size_t n,
grpc_slice_buffer* garbage);
/** move the first n bytes of src into dst */
GPRAPI void grpc_slice_buffer_move_first(grpc_slice_buffer *src, size_t n,
grpc_slice_buffer *dst);
GPRAPI void grpc_slice_buffer_move_first(grpc_slice_buffer* src, size_t n,
grpc_slice_buffer* dst);
/** move the first n bytes of src into dst without adding references */
GPRAPI void grpc_slice_buffer_move_first_no_ref(grpc_slice_buffer *src,
GPRAPI void grpc_slice_buffer_move_first_no_ref(grpc_slice_buffer* src,
size_t n,
grpc_slice_buffer *dst);
grpc_slice_buffer* dst);
/** move the first n bytes of src into dst (copying them) */
GPRAPI void grpc_slice_buffer_move_first_into_buffer(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer *src,
size_t n, void *dst);
GPRAPI void grpc_slice_buffer_move_first_into_buffer(grpc_exec_ctx* exec_ctx,
grpc_slice_buffer* src,
size_t n, void* dst);
/** take the first slice in the slice buffer */
GPRAPI grpc_slice grpc_slice_buffer_take_first(grpc_slice_buffer *src);
GPRAPI grpc_slice grpc_slice_buffer_take_first(grpc_slice_buffer* src);
/** undo the above with (a possibly different) \a slice */
GPRAPI void grpc_slice_buffer_undo_take_first(grpc_slice_buffer *src,
GPRAPI void grpc_slice_buffer_undo_take_first(grpc_slice_buffer* src,
grpc_slice slice);
#ifdef __cplusplus
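
A sketch of the slice-buffer lifecycle declared above: accumulate slices, drain them with take_first, then destroy (buffer_demo is a hypothetical name; sb.count is the field documented in the slice header earlier in this diff).

#include <grpc/slice.h>
#include <grpc/slice_buffer.h>

void buffer_demo(void) {
  grpc_slice_buffer sb;
  grpc_slice_buffer_init(&sb);
  // The buffer takes ownership of each added slice.
  grpc_slice_buffer_add(&sb, grpc_slice_from_copied_string("hello "));
  grpc_slice_buffer_add(&sb, grpc_slice_from_copied_string("world"));
  while (sb.count > 0) {
    grpc_slice first = grpc_slice_buffer_take_first(&sb);
    // ... consume first ...
    grpc_slice_unref(first);
  }
  grpc_slice_buffer_destroy(&sb);  // unrefs anything still held
}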

@ -28,10 +28,10 @@ extern "C" {
#endif
typedef struct gpr_allocation_functions {
void *(*malloc_fn)(size_t size);
void *(*zalloc_fn)(size_t size); /** if NULL, uses malloc_fn then memset */
void *(*realloc_fn)(void *ptr, size_t size);
void (*free_fn)(void *ptr);
void* (*malloc_fn)(size_t size);
void* (*zalloc_fn)(size_t size); /** if NULL, uses malloc_fn then memset */
void* (*realloc_fn)(void* ptr, size_t size);
void (*free_fn)(void* ptr);
} gpr_allocation_functions;
/** malloc.
@ -39,17 +39,17 @@ typedef struct gpr_allocation_functions {
* The pointer returned is suitably aligned for any kind of variable it could
* contain.
*/
GPRAPI void *gpr_malloc(size_t size);
GPRAPI void* gpr_malloc(size_t size);
/** like malloc, but zero all bytes before returning them */
GPRAPI void *gpr_zalloc(size_t size);
GPRAPI void* gpr_zalloc(size_t size);
/** free */
GPRAPI void gpr_free(void *ptr);
GPRAPI void gpr_free(void* ptr);
/** realloc, never returns NULL */
GPRAPI void *gpr_realloc(void *p, size_t size);
GPRAPI void* gpr_realloc(void* p, size_t size);
/** aligned malloc, never returns NULL, will align to 1 << alignment_log */
GPRAPI void *gpr_malloc_aligned(size_t size, size_t alignment_log);
GPRAPI void* gpr_malloc_aligned(size_t size, size_t alignment_log);
/** free memory allocated by gpr_malloc_aligned */
GPRAPI void gpr_free_aligned(void *ptr);
GPRAPI void gpr_free_aligned(void* ptr);
/** Request the family of allocation functions in \a functions be used. NOTE
 * that this request will be honored on a *best effort* basis and that no

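A sketch of the allocator contract above: gpr_malloc never returns NULL, and every allocation is released with gpr_free (dup_bytes is a hypothetical helper).

#include <grpc/support/alloc.h>
#include <string.h>

char* dup_bytes(const char* src, size_t len) {
  char* out = static_cast<char*>(gpr_malloc(len + 1));  // never NULL
  memcpy(out, src, len);
  out[len] = '\0';
  return out;  // caller releases with gpr_free(out)
}
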
@ -28,10 +28,10 @@ extern "C" {
/** internal node of an AVL tree */
typedef struct gpr_avl_node {
gpr_refcount refs;
void *key;
void *value;
struct gpr_avl_node *left;
struct gpr_avl_node *right;
void* key;
void* value;
struct gpr_avl_node* left;
struct gpr_avl_node* right;
long height;
} gpr_avl_node;
@ -42,56 +42,56 @@ typedef struct gpr_avl_node {
*/
typedef struct gpr_avl_vtable {
/** destroy a key */
void (*destroy_key)(void *key, void *user_data);
void (*destroy_key)(void* key, void* user_data);
/** copy a key, returning new value */
void *(*copy_key)(void *key, void *user_data);
void* (*copy_key)(void* key, void* user_data);
/** compare key1, key2; return <0 if key1 < key2,
>0 if key1 > key2, 0 if key1 == key2 */
long (*compare_keys)(void *key1, void *key2, void *user_data);
long (*compare_keys)(void* key1, void* key2, void* user_data);
/** destroy a value */
void (*destroy_value)(void *value, void *user_data);
void (*destroy_value)(void* value, void* user_data);
/** copy a value */
void *(*copy_value)(void *value, void *user_data);
void* (*copy_value)(void* value, void* user_data);
} gpr_avl_vtable;
/** "pointer" to an AVL tree - this is a reference
counted object - use gpr_avl_ref to add a reference,
gpr_avl_unref when done with a reference */
typedef struct gpr_avl {
const gpr_avl_vtable *vtable;
gpr_avl_node *root;
const gpr_avl_vtable* vtable;
gpr_avl_node* root;
} gpr_avl;
/** Create an immutable AVL tree. */
GPRAPI gpr_avl gpr_avl_create(const gpr_avl_vtable *vtable);
GPRAPI gpr_avl gpr_avl_create(const gpr_avl_vtable* vtable);
/** Add a reference to an existing tree - returns
the tree as a convenience. The optional user_data will be passed to vtable
functions. */
GPRAPI gpr_avl gpr_avl_ref(gpr_avl avl, void *user_data);
GPRAPI gpr_avl gpr_avl_ref(gpr_avl avl, void* user_data);
/** Remove a reference to a tree - destroying it if there
are no references left. The optional user_data will be passed to vtable
functions. */
GPRAPI void gpr_avl_unref(gpr_avl avl, void *user_data);
GPRAPI void gpr_avl_unref(gpr_avl avl, void* user_data);
/** Return a new tree with (key, value) added to avl.
implicitly unrefs avl to allow easy chaining.
if key exists in avl, the new tree's key entry is updated
(i.e. a duplicate is not created). The optional user_data will be passed to
vtable functions. */
GPRAPI gpr_avl gpr_avl_add(gpr_avl avl, void *key, void *value,
void *user_data);
GPRAPI gpr_avl gpr_avl_add(gpr_avl avl, void* key, void* value,
void* user_data);
/** Return a new tree with key deleted
implicitly unrefs avl to allow easy chaining. The optional user_data will be
passed to vtable functions. */
GPRAPI gpr_avl gpr_avl_remove(gpr_avl avl, void *key, void *user_data);
GPRAPI gpr_avl gpr_avl_remove(gpr_avl avl, void* key, void* user_data);
/** Lookup key, and return the associated value.
Does not mutate avl.
Returns NULL if key is not found. The optional user_data will be passed to
vtable functions.*/
GPRAPI void *gpr_avl_get(gpr_avl avl, void *key, void *user_data);
GPRAPI void* gpr_avl_get(gpr_avl avl, void* key, void* user_data);
/** Return 1 if avl contains key, 0 otherwise; if it has the key, sets *value to
its value. The optional user_data will be passed to vtable functions. */
GPRAPI int gpr_avl_maybe_get(gpr_avl avl, void *key, void **value,
void *user_data);
GPRAPI int gpr_avl_maybe_get(gpr_avl avl, void* key, void** value,
void* user_data);
/** Return 1 if avl is empty, 0 otherwise */
GPRAPI int gpr_avl_is_empty(gpr_avl avl);
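
A sketch of filling in the gpr_avl_vtable above for an integer map where keys and values live directly in the void* slots, so copy is identity and destroy is a no-op (all names below are hypothetical).

#include <grpc/support/avl.h>
#include <stdint.h>

static void noop_destroy(void* /*p*/, void* /*user_data*/) {}
static void* identity_copy(void* p, void* /*user_data*/) { return p; }
static long compare_ints(void* a, void* b, void* /*user_data*/) {
  return (long)((intptr_t)a - (intptr_t)b);
}
// Field order per the vtable above: destroy_key, copy_key, compare_keys,
// destroy_value, copy_value.
static const gpr_avl_vtable int_vtable = {noop_destroy, identity_copy,
                                          compare_ints, noop_destroy,
                                          identity_copy};

void avl_demo(void) {
  gpr_avl map = gpr_avl_create(&int_vtable);
  map = gpr_avl_add(map, (void*)1, (void*)100, nullptr);  // unrefs the old tree
  intptr_t v = (intptr_t)gpr_avl_get(map, (void*)1, nullptr);  // 100
  (void)v;
  gpr_avl_unref(map, nullptr);
}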

@ -55,31 +55,31 @@ typedef struct gpr_cmdline gpr_cmdline;
/** Construct a command line parser: takes a short description of the tool
doing the parsing */
GPRAPI gpr_cmdline *gpr_cmdline_create(const char *description);
GPRAPI gpr_cmdline* gpr_cmdline_create(const char* description);
/** Add an integer parameter, with a name (used on the command line) and some
helpful text (used in the command usage) */
GPRAPI void gpr_cmdline_add_int(gpr_cmdline *cl, const char *name,
const char *help, int *value);
GPRAPI void gpr_cmdline_add_int(gpr_cmdline* cl, const char* name,
const char* help, int* value);
/** The same, for a boolean flag */
GPRAPI void gpr_cmdline_add_flag(gpr_cmdline *cl, const char *name,
const char *help, int *value);
GPRAPI void gpr_cmdline_add_flag(gpr_cmdline* cl, const char* name,
const char* help, int* value);
/** And for a string */
GPRAPI void gpr_cmdline_add_string(gpr_cmdline *cl, const char *name,
const char *help, char **value);
GPRAPI void gpr_cmdline_add_string(gpr_cmdline* cl, const char* name,
const char* help, char** value);
/** Set a callback for non-named arguments */
GPRAPI void gpr_cmdline_on_extra_arg(
gpr_cmdline *cl, const char *name, const char *help,
void (*on_extra_arg)(void *user_data, const char *arg), void *user_data);
gpr_cmdline* cl, const char* name, const char* help,
void (*on_extra_arg)(void* user_data, const char* arg), void* user_data);
/** Enable surviving failure: default behavior is to exit the process */
GPRAPI void gpr_cmdline_set_survive_failure(gpr_cmdline *cl);
GPRAPI void gpr_cmdline_set_survive_failure(gpr_cmdline* cl);
/** Parse the command line; returns 1 on success, on failure either dies
(by default) or returns 0 if gpr_cmdline_set_survive_failure() has been
called */
GPRAPI int gpr_cmdline_parse(gpr_cmdline *cl, int argc, char **argv);
GPRAPI int gpr_cmdline_parse(gpr_cmdline* cl, int argc, char** argv);
/** Destroy the parser */
GPRAPI void gpr_cmdline_destroy(gpr_cmdline *cl);
GPRAPI void gpr_cmdline_destroy(gpr_cmdline* cl);
/** Get a string describing usage */
GPRAPI char *gpr_cmdline_usage_string(gpr_cmdline *cl, const char *argv0);
GPRAPI char* gpr_cmdline_usage_string(gpr_cmdline* cl, const char* argv0);
#ifdef __cplusplus
}
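
A sketch of the parser above handling "--port 8080 --verbose" style flags (demo values only).

#include <grpc/support/cmdline.h>
#include <stdio.h>

int main(int argc, char** argv) {
  int port = 50051;
  int verbose = 0;
  gpr_cmdline* cl = gpr_cmdline_create("demo tool");
  gpr_cmdline_add_int(cl, "port", "Port to listen on", &port);
  gpr_cmdline_add_flag(cl, "verbose", "Enable verbose logging", &verbose);
  gpr_cmdline_parse(cl, argc, argv);  // dies on bad input by default
  gpr_cmdline_destroy(cl);
  printf("port=%d verbose=%d\n", port, verbose);
  return 0;
}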

@ -28,31 +28,31 @@ extern "C" {
typedef struct gpr_histogram gpr_histogram;
GPRAPI gpr_histogram *gpr_histogram_create(double resolution,
GPRAPI gpr_histogram* gpr_histogram_create(double resolution,
double max_bucket_start);
GPRAPI void gpr_histogram_destroy(gpr_histogram *h);
GPRAPI void gpr_histogram_add(gpr_histogram *h, double x);
GPRAPI void gpr_histogram_destroy(gpr_histogram* h);
GPRAPI void gpr_histogram_add(gpr_histogram* h, double x);
/** The following merges the second histogram into the first. It only works
if they have the same buckets and resolution. Returns 0 on failure, 1
on success */
GPRAPI int gpr_histogram_merge(gpr_histogram *dst, const gpr_histogram *src);
GPRAPI int gpr_histogram_merge(gpr_histogram* dst, const gpr_histogram* src);
GPRAPI double gpr_histogram_percentile(gpr_histogram *histogram,
GPRAPI double gpr_histogram_percentile(gpr_histogram* histogram,
double percentile);
GPRAPI double gpr_histogram_mean(gpr_histogram *histogram);
GPRAPI double gpr_histogram_stddev(gpr_histogram *histogram);
GPRAPI double gpr_histogram_variance(gpr_histogram *histogram);
GPRAPI double gpr_histogram_maximum(gpr_histogram *histogram);
GPRAPI double gpr_histogram_minimum(gpr_histogram *histogram);
GPRAPI double gpr_histogram_count(gpr_histogram *histogram);
GPRAPI double gpr_histogram_sum(gpr_histogram *histogram);
GPRAPI double gpr_histogram_sum_of_squares(gpr_histogram *histogram);
GPRAPI double gpr_histogram_mean(gpr_histogram* histogram);
GPRAPI double gpr_histogram_stddev(gpr_histogram* histogram);
GPRAPI double gpr_histogram_variance(gpr_histogram* histogram);
GPRAPI double gpr_histogram_maximum(gpr_histogram* histogram);
GPRAPI double gpr_histogram_minimum(gpr_histogram* histogram);
GPRAPI double gpr_histogram_count(gpr_histogram* histogram);
GPRAPI double gpr_histogram_sum(gpr_histogram* histogram);
GPRAPI double gpr_histogram_sum_of_squares(gpr_histogram* histogram);
GPRAPI const uint32_t *gpr_histogram_get_contents(gpr_histogram *histogram,
size_t *count);
GPRAPI void gpr_histogram_merge_contents(gpr_histogram *histogram,
const uint32_t *data,
GPRAPI const uint32_t* gpr_histogram_get_contents(gpr_histogram* histogram,
size_t* count);
GPRAPI void gpr_histogram_merge_contents(gpr_histogram* histogram,
const uint32_t* data,
size_t data_count, double min_seen,
double max_seen, double sum,
double sum_of_squares, double count);
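
A sketch of the histogram API above recording latency samples and reporting percentiles; the 0.01 resolution and 60e9 max bucket are illustrative values, and report_latencies is a hypothetical name.

#include <grpc/support/histogram.h>
#include <stdio.h>

void report_latencies(const double* samples, size_t n) {
  gpr_histogram* h = gpr_histogram_create(/*resolution=*/0.01,
                                          /*max_bucket_start=*/60e9);
  for (size_t i = 0; i < n; i++) gpr_histogram_add(h, samples[i]);
  printf("p50=%.0f p99=%.0f mean=%.0f\n", gpr_histogram_percentile(h, 50),
         gpr_histogram_percentile(h, 99), gpr_histogram_mean(h));
  gpr_histogram_destroy(h);
}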

@ -35,14 +35,14 @@ extern "C" {
destroyed using gpr_free().
In the unlikely event of an error, returns -1 and sets *out to NULL. */
GPRAPI int gpr_join_host_port(char **out, const char *host, int port);
GPRAPI int gpr_join_host_port(char** out, const char* host, int port);
/** Given a name in the form "host:port" or "[ho:st]:port", split into hostname
and port number, into newly allocated strings, which must later be
destroyed using gpr_free().
Return 1 on success, 0 on failure. Guarantees *host and *port == NULL on
failure. */
GPRAPI int gpr_split_host_port(const char *name, char **host, char **port);
GPRAPI int gpr_split_host_port(const char* name, char** host, char** port);
#ifdef __cplusplus
}
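
A sketch of round-tripping a host/port pair with the helpers above; both allocate with gpr_malloc, so the caller frees with gpr_free (host_port_demo is a hypothetical name).

#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <stdio.h>

void host_port_demo(void) {
  char* joined = nullptr;
  gpr_join_host_port(&joined, "::1", 50051);  // IPv6 hosts get bracketed
  char* host = nullptr;
  char* port = nullptr;
  if (gpr_split_host_port(joined, &host, &port)) {
    printf("host=%s port=%s\n", host, port);
  }
  gpr_free(host);
  gpr_free(port);
  gpr_free(joined);
}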

@ -50,7 +50,7 @@ typedef enum gpr_log_severity {
#define GPR_LOG_VERBOSITY_UNSET -1
/** Returns a string representation of the log severity */
GPRAPI const char *gpr_log_severity_string(gpr_log_severity severity);
GPRAPI const char* gpr_log_severity_string(gpr_log_severity severity);
/** Macros to build log contexts at various severity levels */
#define GPR_DEBUG __FILE__, __LINE__, GPR_LOG_SEVERITY_DEBUG
@ -59,11 +59,11 @@ GPRAPI const char *gpr_log_severity_string(gpr_log_severity severity);
/** Log a message. It's advised to use GPR_xxx above to generate the context
* for each message */
GPRAPI void gpr_log(const char *file, int line, gpr_log_severity severity,
const char *format, ...) GPR_PRINT_FORMAT_CHECK(4, 5);
GPRAPI void gpr_log(const char* file, int line, gpr_log_severity severity,
const char* format, ...) GPR_PRINT_FORMAT_CHECK(4, 5);
GPRAPI void gpr_log_message(const char *file, int line,
gpr_log_severity severity, const char *message);
GPRAPI void gpr_log_message(const char* file, int line,
gpr_log_severity severity, const char* message);
/** Set global log verbosity */
GPRAPI void gpr_set_log_verbosity(gpr_log_severity min_severity_to_print);
@ -74,13 +74,13 @@ GPRAPI void gpr_log_verbosity_init();
and use their own implementations */
typedef struct {
const char *file;
const char* file;
int line;
gpr_log_severity severity;
const char *message;
const char* message;
} gpr_log_func_args;
typedef void (*gpr_log_func)(gpr_log_func_args *args);
typedef void (*gpr_log_func)(gpr_log_func_args* args);
GPRAPI void gpr_set_log_function(gpr_log_func func);
/** abort() the process if x is zero, having written a line to the log.

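A sketch of the logging hooks above: install a custom sink, raise verbosity, and emit a message through the GPR_DEBUG context macro (my_log_sink and logging_demo are hypothetical names).

#include <grpc/support/log.h>
#include <stdio.h>

// Route gpr log output through a custom sink.
static void my_log_sink(gpr_log_func_args* args) {
  fprintf(stderr, "[%s] %s:%d: %s\n",
          gpr_log_severity_string(args->severity), args->file, args->line,
          args->message);
}

void logging_demo(void) {
  gpr_set_log_function(my_log_sink);
  gpr_set_log_verbosity(GPR_LOG_SEVERITY_DEBUG);
  // GPR_DEBUG expands to __FILE__, __LINE__, GPR_LOG_SEVERITY_DEBUG.
  gpr_log(GPR_DEBUG, "starting demo, answer = %d", 42);
}
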
@ -29,7 +29,7 @@ extern "C" {
* formatted error message, corresponding to the error messageid.
* Use in conjunction with GetLastError() et al.
*/
GPRAPI char *gpr_format_message(int messageid);
GPRAPI char* gpr_format_message(int messageid);
#ifdef __cplusplus
}

@ -29,7 +29,7 @@ extern "C" {
/** Returns a copy of src that can be passed to gpr_free().
If allocation fails or if src is NULL, returns NULL. */
GPRAPI char *gpr_strdup(const char *src);
GPRAPI char* gpr_strdup(const char* src);
/** printf to a newly-allocated string. The set of supported formats may vary
between platforms.
@ -39,7 +39,7 @@ GPRAPI char *gpr_strdup(const char *src);
On error, returns -1 and sets *strp to NULL. If the format string is bad,
the result is undefined. */
GPRAPI int gpr_asprintf(char **strp, const char *format, ...)
GPRAPI int gpr_asprintf(char** strp, const char* format, ...)
GPR_PRINT_FORMAT_CHECK(2, 3);
#ifdef __cplusplus
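
A sketch of gpr_asprintf above; on success the string is gpr_malloc'd and must be released with gpr_free (make_target is a hypothetical helper).

#include <grpc/support/string_util.h>

char* make_target(const char* host, int port) {
  char* target = nullptr;
  if (gpr_asprintf(&target, "%s:%d", host, port) < 0) return nullptr;
  return target;  // caller: gpr_free(target)
}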

@ -28,14 +28,14 @@ extern "C" {
typedef struct gpr_subprocess gpr_subprocess;
/** .exe on windows, empty on unices */
GPRAPI const char *gpr_subprocess_binary_extension();
GPRAPI const char* gpr_subprocess_binary_extension();
GPRAPI gpr_subprocess *gpr_subprocess_create(int argc, const char **argv);
GPRAPI gpr_subprocess* gpr_subprocess_create(int argc, const char** argv);
/** if subprocess has not been joined, kill it */
GPRAPI void gpr_subprocess_destroy(gpr_subprocess *p);
GPRAPI void gpr_subprocess_destroy(gpr_subprocess* p);
/** returns exit status; can be called at most once */
GPRAPI int gpr_subprocess_join(gpr_subprocess *p);
GPRAPI void gpr_subprocess_interrupt(gpr_subprocess *p);
GPRAPI int gpr_subprocess_join(gpr_subprocess* p);
GPRAPI void gpr_subprocess_interrupt(gpr_subprocess* p);
#ifdef __cplusplus
} // extern "C"
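
A sketch of the subprocess API above, assuming it is valid to destroy a subprocess after joining it; run_helper and the "helper" binary name are hypothetical.

#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
#include <grpc/support/subprocess.h>

int run_helper(void) {
  char* path = nullptr;
  // The extension helper makes the same code work on Windows and POSIX.
  gpr_asprintf(&path, "helper%s", gpr_subprocess_binary_extension());
  const char* argv[] = {path};
  gpr_subprocess* p = gpr_subprocess_create(1, argv);
  int status = p != nullptr ? gpr_subprocess_join(p) : -1;
  if (p != nullptr) gpr_subprocess_destroy(p);
  gpr_free(path);
  return status;
}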

@ -34,26 +34,26 @@ extern "C" {
gpr_mu are uninitialized when first declared. */
/** Initialize *mu. Requires: *mu uninitialized. */
GPRAPI void gpr_mu_init(gpr_mu *mu);
GPRAPI void gpr_mu_init(gpr_mu* mu);
/** Cause *mu no longer to be initialized, freeing any memory in use. Requires:
*mu initialized; no other concurrent operation on *mu. */
GPRAPI void gpr_mu_destroy(gpr_mu *mu);
*mu initialized; no other concurrent operation on *mu. */
GPRAPI void gpr_mu_destroy(gpr_mu* mu);
/** Wait until no thread has a lock on *mu, cause the calling thread to own an
exclusive lock on *mu, then return. May block indefinitely or crash if the
calling thread has a lock on *mu. Requires: *mu initialized. */
GPRAPI void gpr_mu_lock(gpr_mu *mu);
GPRAPI void gpr_mu_lock(gpr_mu* mu);
/** Release an exclusive lock on *mu held by the calling thread. Requires: *mu
initialized; the calling thread holds an exclusive lock on *mu. */
GPRAPI void gpr_mu_unlock(gpr_mu *mu);
GPRAPI void gpr_mu_unlock(gpr_mu* mu);
/** Without blocking, attempt to acquire an exclusive lock on *mu for the
calling thread, then return non-zero iff success. Fail, if any thread holds
the lock; succeeds with high probability if no thread holds the lock.
Requires: *mu initialized. */
GPRAPI int gpr_mu_trylock(gpr_mu *mu);
GPRAPI int gpr_mu_trylock(gpr_mu* mu);
/** --- Condition variable interface ---
@ -62,11 +62,11 @@ GPRAPI int gpr_mu_trylock(gpr_mu *mu);
uninitialized when first declared. */
/** Initialize *cv. Requires: *cv uninitialized. */
GPRAPI void gpr_cv_init(gpr_cv *cv);
GPRAPI void gpr_cv_init(gpr_cv* cv);
/** Cause *cv no longer to be initialized, freeing any memory in use. Requires:
*cv initialized; no other concurrent operation on *cv.*/
GPRAPI void gpr_cv_destroy(gpr_cv *cv);
*cv initialized; no other concurrent operation on *cv.*/
GPRAPI void gpr_cv_destroy(gpr_cv* cv);
/** Atomically release *mu and wait on *cv. When the calling thread is woken
from *cv or the deadline abs_deadline is exceeded, execute gpr_mu_lock(mu)
@ -75,16 +75,16 @@ GPRAPI void gpr_cv_destroy(gpr_cv *cv);
an absolute deadline, or a GPR_TIMESPAN. May return even when not
woken explicitly. Requires: *mu and *cv initialized; the calling thread
holds an exclusive lock on *mu. */
GPRAPI int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline);
GPRAPI int gpr_cv_wait(gpr_cv* cv, gpr_mu* mu, gpr_timespec abs_deadline);
/** If any threads are waiting on *cv, wake at least one.
Clients may treat this as an optimization of gpr_cv_broadcast()
for use in the case where waking more than one waiter is not useful.
Requires: *cv initialized. */
GPRAPI void gpr_cv_signal(gpr_cv *cv);
GPRAPI void gpr_cv_signal(gpr_cv* cv);
/** Wake all threads waiting on *cv. Requires: *cv initialized. */
GPRAPI void gpr_cv_broadcast(gpr_cv *cv);
GPRAPI void gpr_cv_broadcast(gpr_cv* cv);
/** --- One-time initialization ---
@ -97,7 +97,7 @@ GPRAPI void gpr_cv_broadcast(gpr_cv *cv);
If multiple threads call gpr_once() on the same gpr_once instance, one of
them will call (*init_routine)(), and the others will block until that call
finishes.*/
GPRAPI void gpr_once_init(gpr_once *once, void (*init_routine)(void));
GPRAPI void gpr_once_init(gpr_once* once, void (*init_routine)(void));
/** --- One-time event notification ---
@ -107,51 +107,51 @@ GPRAPI void gpr_once_init(gpr_once *once, void (*init_routine)(void));
It requires no destruction. */
/** Initialize *ev. */
GPRAPI void gpr_event_init(gpr_event *ev);
GPRAPI void gpr_event_init(gpr_event* ev);
/** Set *ev so that gpr_event_get() and gpr_event_wait() will return value.
Requires: *ev initialized; value != NULL; no prior or concurrent calls to
gpr_event_set(ev, ...) since initialization. */
GPRAPI void gpr_event_set(gpr_event *ev, void *value);
GPRAPI void gpr_event_set(gpr_event* ev, void* value);
/** Return the value set by gpr_event_set(ev, ...), or NULL if no such call has
completed. If the result is non-NULL, all operations that occurred prior to
the gpr_event_set(ev, ...) set will be visible after this call returns.
Requires: *ev initialized. This operation is faster than acquiring a mutex
on most platforms. */
GPRAPI void *gpr_event_get(gpr_event *ev);
GPRAPI void* gpr_event_get(gpr_event* ev);
/** Wait until *ev is set by gpr_event_set(ev, ...), or abs_deadline is
exceeded, then return gpr_event_get(ev). Requires: *ev initialized. Use
abs_deadline==gpr_inf_future for no deadline. When the event has been
signalled before the call, this operation is faster than acquiring a mutex
on most platforms. */
GPRAPI void *gpr_event_wait(gpr_event *ev, gpr_timespec abs_deadline);
GPRAPI void* gpr_event_wait(gpr_event* ev, gpr_timespec abs_deadline);
/** --- Reference counting ---
These calls act on the type gpr_refcount. It requires no destruction. */
/** Initialize *r to value n. */
GPRAPI void gpr_ref_init(gpr_refcount *r, int n);
GPRAPI void gpr_ref_init(gpr_refcount* r, int n);
/** Increment the reference count *r. Requires *r initialized. */
GPRAPI void gpr_ref(gpr_refcount *r);
GPRAPI void gpr_ref(gpr_refcount* r);
/** Increment the reference count *r. Requires *r initialized.
Crashes if refcount is zero */
GPRAPI void gpr_ref_non_zero(gpr_refcount *r);
GPRAPI void gpr_ref_non_zero(gpr_refcount* r);
/** Increment the reference count *r by n. Requires *r initialized, n > 0. */
GPRAPI void gpr_refn(gpr_refcount *r, int n);
GPRAPI void gpr_refn(gpr_refcount* r, int n);
/** Decrement the reference count *r and return non-zero iff it has reached
zero. Requires *r initialized. */
GPRAPI int gpr_unref(gpr_refcount *r);
GPRAPI int gpr_unref(gpr_refcount* r);
/** Return non-zero iff the reference count of *r is one, and thus is owned
by exactly one object. */
GPRAPI int gpr_ref_is_unique(gpr_refcount *r);
GPRAPI int gpr_ref_is_unique(gpr_refcount* r);
/** --- Stats counters ---
@ -162,13 +162,13 @@ GPRAPI int gpr_ref_is_unique(gpr_refcount *r);
synchronize other events. */
/** Initialize *c to the value n. */
GPRAPI void gpr_stats_init(gpr_stats_counter *c, intptr_t n);
GPRAPI void gpr_stats_init(gpr_stats_counter* c, intptr_t n);
/** *c += inc. Requires: *c initialized. */
GPRAPI void gpr_stats_inc(gpr_stats_counter *c, intptr_t inc);
GPRAPI void gpr_stats_inc(gpr_stats_counter* c, intptr_t inc);
/** Return *c. Requires: *c initialized. */
GPRAPI intptr_t gpr_stats_read(const gpr_stats_counter *c);
GPRAPI intptr_t gpr_stats_read(const gpr_stats_counter* c);
/** ==================Example use of interface===================
A producer-consumer queue of up to N integers,
@ -280,14 +280,14 @@ namespace grpc_core {
class mu_guard {
public:
mu_guard(gpr_mu *mu) : mu_(mu) { gpr_mu_lock(mu); }
mu_guard(gpr_mu* mu) : mu_(mu) { gpr_mu_lock(mu); }
~mu_guard() { gpr_mu_unlock(mu_); }
mu_guard(const mu_guard &) = delete;
mu_guard &operator=(const mu_guard &) = delete;
mu_guard(const mu_guard&) = delete;
mu_guard& operator=(const mu_guard&) = delete;
private:
gpr_mu *const mu_;
gpr_mu* const mu_;
};
} // namespace grpc_core
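
A sketch of the mutex/condition-variable pattern the comments above describe: wait up to one second for a flag, assuming gpr_cv_wait returns non-zero when the deadline is exceeded (Flagged and wait_ready are hypothetical names; <grpc/support/time.h> supplies the deadline helpers).

#include <grpc/support/sync.h>
#include <grpc/support/time.h>

struct Flagged {
  gpr_mu mu;   // assumed initialized with gpr_mu_init
  gpr_cv cv;   // assumed initialized with gpr_cv_init
  int ready;   // guarded by mu
};

bool wait_ready(Flagged* f) {
  gpr_timespec deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(1, GPR_TIMESPAN));
  gpr_mu_lock(&f->mu);
  while (!f->ready) {
    // Assumed: non-zero return means the deadline was exceeded.
    if (gpr_cv_wait(&f->cv, &f->mu, deadline)) break;
  }
  bool ok = f->ready != 0;
  gpr_mu_unlock(&f->mu);
  return ok;
}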

@ -44,23 +44,23 @@ typedef struct {
in *t, and return true. If there are insufficient resources, return false.
If options==NULL, default options are used.
The thread is immediately runnable, and exits when (*thd_body)() returns. */
GPRAPI int gpr_thd_new(gpr_thd_id *t, void (*thd_body)(void *arg), void *arg,
const gpr_thd_options *options);
GPRAPI int gpr_thd_new(gpr_thd_id* t, void (*thd_body)(void* arg), void* arg,
const gpr_thd_options* options);
/** Return a gpr_thd_options struct with all fields set to defaults. */
GPRAPI gpr_thd_options gpr_thd_options_default(void);
/** Set the thread to become detached on startup - this is the default. */
GPRAPI void gpr_thd_options_set_detached(gpr_thd_options *options);
GPRAPI void gpr_thd_options_set_detached(gpr_thd_options* options);
/** Set the thread to become joinable - mutually exclusive with detached. */
GPRAPI void gpr_thd_options_set_joinable(gpr_thd_options *options);
GPRAPI void gpr_thd_options_set_joinable(gpr_thd_options* options);
/** Returns non-zero if the option detached is set. */
GPRAPI int gpr_thd_options_is_detached(const gpr_thd_options *options);
GPRAPI int gpr_thd_options_is_detached(const gpr_thd_options* options);
/** Returns non-zero if the option joinable is set. */
GPRAPI int gpr_thd_options_is_joinable(const gpr_thd_options *options);
GPRAPI int gpr_thd_options_is_joinable(const gpr_thd_options* options);
/** Returns the identifier of the current thread. */
GPRAPI gpr_thd_id gpr_thd_currentid(void);
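
A sketch of starting a joinable thread with the options above and waiting for it via a gpr_event; gpr_thd_join is assumed to be declared in the same header even though it does not appear in this hunk (worker and thread_demo are hypothetical names).

#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include <grpc/support/time.h>

static void worker(void* arg) {
  // Signal completion; the value just needs to be non-NULL.
  gpr_event_set(static_cast<gpr_event*>(arg), (void*)1);
}

void thread_demo(void) {
  gpr_event done;
  gpr_event_init(&done);
  gpr_thd_options options = gpr_thd_options_default();
  gpr_thd_options_set_joinable(&options);
  gpr_thd_id tid;
  if (gpr_thd_new(&tid, worker, &done, &options)) {
    gpr_event_wait(&done, gpr_inf_future(GPR_CLOCK_REALTIME));
    gpr_thd_join(tid);
  }
}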

@ -30,7 +30,7 @@
struct gpr_gcc_thread_local {
intptr_t value;
bool *inited;
bool* inited;
};
#define GPR_TLS_DECL(name) \

@ -37,7 +37,7 @@ struct gpr_pthread_thread_local {
#ifdef __cplusplus
extern "C" {
#endif
intptr_t gpr_tls_set(struct gpr_pthread_thread_local *tls, intptr_t value);
intptr_t gpr_tls_set(struct gpr_pthread_thread_local* tls, intptr_t value);
#ifdef __cplusplus
}
#endif

@ -32,15 +32,15 @@ grpc::string as_string(T x) {
return out.str();
}
inline bool ClientOnlyStreaming(const grpc_generator::Method *method) {
inline bool ClientOnlyStreaming(const grpc_generator::Method* method) {
return method->ClientStreaming() && !method->ServerStreaming();
}
inline bool ServerOnlyStreaming(const grpc_generator::Method *method) {
inline bool ServerOnlyStreaming(const grpc_generator::Method* method) {
return !method->ClientStreaming() && method->ServerStreaming();
}
grpc::string FilenameIdentifier(const grpc::string &filename) {
grpc::string FilenameIdentifier(const grpc::string& filename) {
grpc::string result;
for (unsigned i = 0; i < filename.size(); i++) {
char c = filename[i];
@ -58,19 +58,19 @@ grpc::string FilenameIdentifier(const grpc::string &filename) {
} // namespace
template <class T, size_t N>
T *array_end(T (&array)[N]) {
T* array_end(T (&array)[N]) {
return array + N;
}
void PrintIncludes(grpc_generator::Printer *printer,
const std::vector<grpc::string> &headers,
const Parameters &params) {
void PrintIncludes(grpc_generator::Printer* printer,
const std::vector<grpc::string>& headers,
const Parameters& params) {
std::map<grpc::string, grpc::string> vars;
vars["l"] = params.use_system_headers ? '<' : '"';
vars["r"] = params.use_system_headers ? '>' : '"';
auto &s = params.grpc_search_path;
auto& s = params.grpc_search_path;
if (!s.empty()) {
vars["l"] += s;
if (s[s.size() - 1] != '/') {
@ -84,8 +84,8 @@ void PrintIncludes(grpc_generator::Printer *printer,
}
}
grpc::string GetHeaderPrologue(grpc_generator::File *file,
const Parameters & /*params*/) {
grpc::string GetHeaderPrologue(grpc_generator::File* file,
const Parameters& /*params*/) {
grpc::string output;
{
// Scope the output stream so it closes and finalizes output to the string.
@ -116,15 +116,15 @@ grpc::string GetHeaderPrologue(grpc_generator::File *file,
return output;
}
grpc::string GetHeaderIncludes(grpc_generator::File *file,
const Parameters &params) {
grpc::string GetHeaderIncludes(grpc_generator::File* file,
const Parameters& params) {
grpc::string output;
{
// Scope the output stream so it closes and finalizes output to the string.
auto printer = file->CreatePrinter(&output);
std::map<grpc::string, grpc::string> vars;
static const char *headers_strs[] = {
static const char* headers_strs[] = {
"grpc++/impl/codegen/async_stream.h",
"grpc++/impl/codegen/async_unary_call.h",
"grpc++/impl/codegen/method_handler_impl.h",
@ -158,8 +158,8 @@ grpc::string GetHeaderIncludes(grpc_generator::File *file,
}
void PrintHeaderClientMethodInterfaces(
grpc_generator::Printer *printer, const grpc_generator::Method *method,
std::map<grpc::string, grpc::string> *vars, bool is_public) {
grpc_generator::Printer* printer, const grpc_generator::Method* method,
std::map<grpc::string, grpc::string>* vars, bool is_public) {
(*vars)["Method"] = method->name();
(*vars)["Request"] = method->input_type_name();
(*vars)["Response"] = method->output_type_name();
@ -355,9 +355,9 @@ void PrintHeaderClientMethodInterfaces(
}
}
void PrintHeaderClientMethod(grpc_generator::Printer *printer,
const grpc_generator::Method *method,
std::map<grpc::string, grpc::string> *vars,
void PrintHeaderClientMethod(grpc_generator::Printer* printer,
const grpc_generator::Method* method,
std::map<grpc::string, grpc::string>* vars,
bool is_public) {
(*vars)["Method"] = method->name();
(*vars)["Request"] = method->input_type_name();
@ -542,17 +542,17 @@ void PrintHeaderClientMethod(grpc_generator::Printer *printer,
}
}
void PrintHeaderClientMethodData(grpc_generator::Printer *printer,
const grpc_generator::Method *method,
std::map<grpc::string, grpc::string> *vars) {
void PrintHeaderClientMethodData(grpc_generator::Printer* printer,
const grpc_generator::Method* method,
std::map<grpc::string, grpc::string>* vars) {
(*vars)["Method"] = method->name();
printer->Print(*vars,
"const ::grpc::internal::RpcMethod rpcmethod_$Method$_;\n");
}
void PrintHeaderServerMethodSync(grpc_generator::Printer *printer,
const grpc_generator::Method *method,
std::map<grpc::string, grpc::string> *vars) {
void PrintHeaderServerMethodSync(grpc_generator::Printer* printer,
const grpc_generator::Method* method,
std::map<grpc::string, grpc::string>* vars) {
(*vars)["Method"] = method->name();
(*vars)["Request"] = method->input_type_name();
(*vars)["Response"] = method->output_type_name();
@ -584,9 +584,9 @@ void PrintHeaderServerMethodSync(grpc_generator::Printer *printer,
printer->Print(method->GetTrailingComments("//").c_str());
}
void PrintHeaderServerMethodAsync(grpc_generator::Printer *printer,
const grpc_generator::Method *method,
std::map<grpc::string, grpc::string> *vars) {
void PrintHeaderServerMethodAsync(grpc_generator::Printer* printer,
const grpc_generator::Method* method,
std::map<grpc::string, grpc::string>* vars) {
(*vars)["Method"] = method->name();
(*vars)["Request"] = method->input_type_name();
(*vars)["Response"] = method->output_type_name();
@ -700,8 +700,8 @@ void PrintHeaderServerMethodAsync(grpc_generator::Printer *printer,
}
void PrintHeaderServerMethodStreamedUnary(
grpc_generator::Printer *printer, const grpc_generator::Method *method,
std::map<grpc::string, grpc::string> *vars) {
grpc_generator::Printer* printer, const grpc_generator::Method* method,
std::map<grpc::string, grpc::string>* vars) {
(*vars)["Method"] = method->name();
(*vars)["Request"] = method->input_type_name();
(*vars)["Response"] = method->output_type_name();
@ -751,8 +751,8 @@ void PrintHeaderServerMethodStreamedUnary(
}
void PrintHeaderServerMethodSplitStreaming(
grpc_generator::Printer *printer, const grpc_generator::Method *method,
std::map<grpc::string, grpc::string> *vars) {
grpc_generator::Printer* printer, const grpc_generator::Method* method,
std::map<grpc::string, grpc::string>* vars) {
(*vars)["Method"] = method->name();
(*vars)["Request"] = method->input_type_name();
(*vars)["Response"] = method->output_type_name();
@ -804,8 +804,8 @@ void PrintHeaderServerMethodSplitStreaming(
}
void PrintHeaderServerMethodGeneric(
grpc_generator::Printer *printer, const grpc_generator::Method *method,
std::map<grpc::string, grpc::string> *vars) {
grpc_generator::Printer* printer, const grpc_generator::Method* method,
std::map<grpc::string, grpc::string>* vars) {
(*vars)["Method"] = method->name();
(*vars)["Request"] = method->input_type_name();
(*vars)["Response"] = method->output_type_name();
@ -873,9 +873,9 @@ void PrintHeaderServerMethodGeneric(
printer->Print(*vars, "};\n");
}
void PrintHeaderService(grpc_generator::Printer *printer,
const grpc_generator::Service *service,
std::map<grpc::string, grpc::string> *vars) {
void PrintHeaderService(grpc_generator::Printer* printer,
const grpc_generator::Service* service,
std::map<grpc::string, grpc::string>* vars) {
(*vars)["Service"] = service->name();
printer->Print(service->GetLeadingComments("//").c_str());
@ -1050,8 +1050,8 @@ void PrintHeaderService(grpc_generator::Printer *printer,
printer->Print(service->GetTrailingComments("//").c_str());
}
grpc::string GetHeaderServices(grpc_generator::File *file,
const Parameters &params) {
grpc::string GetHeaderServices(grpc_generator::File* file,
const Parameters& params) {
grpc::string output;
{
// Scope the output stream so it closes and finalizes output to the string.
@ -1081,8 +1081,8 @@ grpc::string GetHeaderServices(grpc_generator::File *file,
return output;
}
grpc::string GetHeaderEpilogue(grpc_generator::File *file,
const Parameters & /*params*/) {
grpc::string GetHeaderEpilogue(grpc_generator::File* file,
const Parameters& /*params*/) {
grpc::string output;
{
// Scope the output stream so it closes and finalizes output to the string.
@ -1110,8 +1110,8 @@ grpc::string GetHeaderEpilogue(grpc_generator::File *file,
return output;
}
grpc::string GetSourcePrologue(grpc_generator::File *file,
const Parameters & /*params*/) {
grpc::string GetSourcePrologue(grpc_generator::File* file,
const Parameters& /*params*/) {
grpc::string output;
{
// Scope the output stream so it closes and finalizes output to the string.
@ -1135,15 +1135,15 @@ grpc::string GetSourcePrologue(grpc_generator::File *file,
return output;
}
grpc::string GetSourceIncludes(grpc_generator::File *file,
const Parameters &params) {
grpc::string GetSourceIncludes(grpc_generator::File* file,
const Parameters& params) {
grpc::string output;
{
// Scope the output stream so it closes and finalizes output to the string.
auto printer = file->CreatePrinter(&output);
std::map<grpc::string, grpc::string> vars;
static const char *headers_strs[] = {
static const char* headers_strs[] = {
"grpc++/impl/codegen/async_stream.h",
"grpc++/impl/codegen/async_unary_call.h",
"grpc++/impl/codegen/channel_interface.h",
@ -1169,9 +1169,9 @@ grpc::string GetSourceIncludes(grpc_generator::File *file,
return output;
}
void PrintSourceClientMethod(grpc_generator::Printer *printer,
const grpc_generator::Method *method,
std::map<grpc::string, grpc::string> *vars) {
void PrintSourceClientMethod(grpc_generator::Printer* printer,
const grpc_generator::Method* method,
std::map<grpc::string, grpc::string>* vars) {
(*vars)["Method"] = method->name();
(*vars)["Request"] = method->input_type_name();
(*vars)["Response"] = method->output_type_name();
@ -1305,9 +1305,9 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
}
}
void PrintSourceServerMethod(grpc_generator::Printer *printer,
const grpc_generator::Method *method,
std::map<grpc::string, grpc::string> *vars) {
void PrintSourceServerMethod(grpc_generator::Printer* printer,
const grpc_generator::Method* method,
std::map<grpc::string, grpc::string>* vars) {
(*vars)["Method"] = method->name();
(*vars)["Request"] = method->input_type_name();
(*vars)["Response"] = method->output_type_name();
@ -1364,9 +1364,9 @@ void PrintSourceServerMethod(grpc_generator::Printer *printer,
}
}
void PrintSourceService(grpc_generator::Printer *printer,
const grpc_generator::Service *service,
std::map<grpc::string, grpc::string> *vars) {
void PrintSourceService(grpc_generator::Printer* printer,
const grpc_generator::Service* service,
std::map<grpc::string, grpc::string>* vars) {
(*vars)["Service"] = service->name();
if (service->method_count() > 0) {
@ -1481,8 +1481,8 @@ void PrintSourceService(grpc_generator::Printer *printer,
}
}
grpc::string GetSourceServices(grpc_generator::File *file,
const Parameters &params) {
grpc::string GetSourceServices(grpc_generator::File* file,
const Parameters& params) {
grpc::string output;
{
// Scope the output stream so it closes and finalizes output to the string.
@ -1510,8 +1510,8 @@ grpc::string GetSourceServices(grpc_generator::File *file,
return output;
}
grpc::string GetSourceEpilogue(grpc_generator::File *file,
const Parameters & /*params*/) {
grpc::string GetSourceEpilogue(grpc_generator::File* file,
const Parameters& /*params*/) {
grpc::string temp;
if (!file->package().empty()) {
@ -1529,8 +1529,8 @@ grpc::string GetSourceEpilogue(grpc_generator::File *file,
}
// TODO(mmukhi): Check whether we need parameters or not.
grpc::string GetMockPrologue(grpc_generator::File *file,
const Parameters & /*params*/) {
grpc::string GetMockPrologue(grpc_generator::File* file,
const Parameters& /*params*/) {
grpc::string output;
{
// Scope the output stream so it closes and finalizes output to the string.
@ -1556,17 +1556,18 @@ grpc::string GetMockPrologue(grpc_generator::File *file,
}
// TODO(mmukhi): Add client-stream and completion-queue headers.
grpc::string GetMockIncludes(grpc_generator::File *file,
const Parameters &params) {
grpc::string GetMockIncludes(grpc_generator::File* file,
const Parameters& params) {
grpc::string output;
{
// Scope the output stream so it closes and finalizes output to the string.
auto printer = file->CreatePrinter(&output);
std::map<grpc::string, grpc::string> vars;
static const char *headers_strs[] = {
static const char* headers_strs[] = {
"grpc++/impl/codegen/async_stream.h",
"grpc++/impl/codegen/sync_stream.h", "gmock/gmock.h",
"grpc++/impl/codegen/sync_stream.h",
"gmock/gmock.h",
};
std::vector<grpc::string> headers(headers_strs, array_end(headers_strs));
PrintIncludes(printer.get(), headers, params);
@ -1585,9 +1586,9 @@ grpc::string GetMockIncludes(grpc_generator::File *file,
return output;
}
void PrintMockClientMethods(grpc_generator::Printer *printer,
const grpc_generator::Method *method,
std::map<grpc::string, grpc::string> *vars) {
void PrintMockClientMethods(grpc_generator::Printer* printer,
const grpc_generator::Method* method,
std::map<grpc::string, grpc::string>* vars) {
(*vars)["Method"] = method->name();
(*vars)["Request"] = method->input_type_name();
(*vars)["Response"] = method->output_type_name();
@ -1668,9 +1669,9 @@ void PrintMockClientMethods(grpc_generator::Printer *printer,
}
}
void PrintMockService(grpc_generator::Printer *printer,
const grpc_generator::Service *service,
std::map<grpc::string, grpc::string> *vars) {
void PrintMockService(grpc_generator::Printer* printer,
const grpc_generator::Service* service,
std::map<grpc::string, grpc::string>* vars) {
(*vars)["Service"] = service->name();
printer->Print(*vars,
@ -1684,8 +1685,8 @@ void PrintMockService(grpc_generator::Printer *printer,
printer->Print("};\n");
}
grpc::string GetMockServices(grpc_generator::File *file,
const Parameters &params) {
grpc::string GetMockServices(grpc_generator::File* file,
const Parameters& params) {
grpc::string output;
{
// Scope the output stream so it closes and finalizes output to the string.
@ -1715,8 +1716,8 @@ grpc::string GetMockServices(grpc_generator::File *file,
return output;
}
grpc::string GetMockEpilogue(grpc_generator::File *file,
const Parameters & /*params*/) {
grpc::string GetMockEpilogue(grpc_generator::File* file,
const Parameters& /*params*/) {
grpc::string temp;
if (!file->package().empty()) {

@ -55,68 +55,68 @@ struct Parameters {
};
// Return the prologue of the generated header file.
grpc::string GetHeaderPrologue(grpc_generator::File *file,
const Parameters &params);
grpc::string GetHeaderPrologue(grpc_generator::File* file,
const Parameters& params);
// Return the includes needed for generated header file.
grpc::string GetHeaderIncludes(grpc_generator::File *file,
const Parameters &params);
grpc::string GetHeaderIncludes(grpc_generator::File* file,
const Parameters& params);
// Return the includes needed for generated source file.
grpc::string GetSourceIncludes(grpc_generator::File *file,
const Parameters &params);
grpc::string GetSourceIncludes(grpc_generator::File* file,
const Parameters& params);
// Return the epilogue of the generated header file.
grpc::string GetHeaderEpilogue(grpc_generator::File *file,
const Parameters &params);
grpc::string GetHeaderEpilogue(grpc_generator::File* file,
const Parameters& params);
// Return the prologue of the generated source file.
grpc::string GetSourcePrologue(grpc_generator::File *file,
const Parameters &params);
grpc::string GetSourcePrologue(grpc_generator::File* file,
const Parameters& params);
// Return the services for generated header file.
grpc::string GetHeaderServices(grpc_generator::File *file,
const Parameters &params);
grpc::string GetHeaderServices(grpc_generator::File* file,
const Parameters& params);
// Return the services for generated source file.
grpc::string GetSourceServices(grpc_generator::File *file,
const Parameters &params);
grpc::string GetSourceServices(grpc_generator::File* file,
const Parameters& params);
// Return the epilogue of the generated source file.
grpc::string GetSourceEpilogue(grpc_generator::File *file,
const Parameters &params);
grpc::string GetSourceEpilogue(grpc_generator::File* file,
const Parameters& params);
// Return the prologue of the generated mock file.
grpc::string GetMockPrologue(grpc_generator::File *file,
const Parameters &params);
grpc::string GetMockPrologue(grpc_generator::File* file,
const Parameters& params);
// Return the includes needed for generated mock file.
grpc::string GetMockIncludes(grpc_generator::File *file,
const Parameters &params);
grpc::string GetMockIncludes(grpc_generator::File* file,
const Parameters& params);
// Return the services for generated mock file.
grpc::string GetMockServices(grpc_generator::File *file,
const Parameters &params);
grpc::string GetMockServices(grpc_generator::File* file,
const Parameters& params);
// Return the epilogue of generated mock file.
grpc::string GetMockEpilogue(grpc_generator::File *file,
const Parameters &params);
grpc::string GetMockEpilogue(grpc_generator::File* file,
const Parameters& params);
// Return the prologue of the generated mock file.
grpc::string GetMockPrologue(grpc_generator::File *file,
const Parameters &params);
grpc::string GetMockPrologue(grpc_generator::File* file,
const Parameters& params);
// Return the includes needed for generated mock file.
grpc::string GetMockIncludes(grpc_generator::File *file,
const Parameters &params);
grpc::string GetMockIncludes(grpc_generator::File* file,
const Parameters& params);
// Return the services for generated mock file.
grpc::string GetMockServices(grpc_generator::File *file,
const Parameters &params);
grpc::string GetMockServices(grpc_generator::File* file,
const Parameters& params);
// Return the epilogue of generated mock file.
grpc::string GetMockEpilogue(grpc_generator::File *file,
const Parameters &params);
grpc::string GetMockEpilogue(grpc_generator::File* file,
const Parameters& params);
} // namespace grpc_cpp_generator

@ -26,22 +26,22 @@
namespace grpc_cpp_generator {
inline grpc::string DotsToColons(const grpc::string &name) {
inline grpc::string DotsToColons(const grpc::string& name) {
return grpc_generator::StringReplace(name, ".", "::");
}
inline grpc::string DotsToUnderscores(const grpc::string &name) {
inline grpc::string DotsToUnderscores(const grpc::string& name) {
return grpc_generator::StringReplace(name, ".", "_");
}
inline grpc::string ClassName(const grpc::protobuf::Descriptor *descriptor,
inline grpc::string ClassName(const grpc::protobuf::Descriptor* descriptor,
bool qualified) {
// Find "outer", the descriptor of the top-level message in which
// "descriptor" is embedded.
const grpc::protobuf::Descriptor *outer = descriptor;
const grpc::protobuf::Descriptor* outer = descriptor;
while (outer->containing_type() != NULL) outer = outer->containing_type();
const grpc::string &outer_name = outer->full_name();
const grpc::string& outer_name = outer->full_name();
grpc::string inner_name = descriptor->full_name().substr(outer_name.size());
if (qualified) {
@ -54,7 +54,7 @@ inline grpc::string ClassName(const grpc::protobuf::Descriptor *descriptor,
// Get leading or trailing comments in a string. Comment lines start with "// ".
// Leading detached comments are put in front of leading comments.
template <typename DescriptorType>
inline grpc::string GetCppComments(const DescriptorType *desc, bool leading) {
inline grpc::string GetCppComments(const DescriptorType* desc, bool leading) {
return grpc_generator::GetPrefixedComments(desc, leading, "//");
}

@ -33,10 +33,10 @@ class CppGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
CppGrpcGenerator() {}
virtual ~CppGrpcGenerator() {}
virtual bool Generate(const grpc::protobuf::FileDescriptor *file,
const grpc::string &parameter,
grpc::protobuf::compiler::GeneratorContext *context,
grpc::string *error) const {
virtual bool Generate(const grpc::protobuf::FileDescriptor* file,
const grpc::string& parameter,
grpc::protobuf::compiler::GeneratorContext* context,
grpc::string* error) const {
if (file->options().cc_generic_services()) {
*error =
"cpp grpc proto compiler plugin does not work with generic "
@ -125,9 +125,9 @@ class CppGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
private:
// Insert the given code into the given file at the given insertion point.
void Insert(grpc::protobuf::compiler::GeneratorContext *context,
const grpc::string &filename, const grpc::string &insertion_point,
const grpc::string &code) const {
void Insert(grpc::protobuf::compiler::GeneratorContext* context,
const grpc::string& filename, const grpc::string& insertion_point,
const grpc::string& code) const {
std::unique_ptr<grpc::protobuf::io::ZeroCopyOutputStream> output(
context->OpenForInsert(filename, insertion_point));
grpc::protobuf::io::CodedOutputStream coded_out(output.get());
@ -135,7 +135,7 @@ class CppGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
}
};
int main(int argc, char *argv[]) {
int main(int argc, char* argv[]) {
CppGrpcGenerator generator;
return grpc::protobuf::compiler::PluginMain(argc, argv, &generator);
}

@ -23,24 +23,23 @@
#include "src/compiler/config.h"
#include "src/compiler/csharp_generator.h"
#include "src/compiler/csharp_generator.h"
#include "src/compiler/csharp_generator_helpers.h"
using google::protobuf::compiler::csharp::GetFileNamespace;
using google::protobuf::compiler::csharp::GetClassName;
using google::protobuf::compiler::csharp::GetFileNamespace;
using google::protobuf::compiler::csharp::GetReflectionClassName;
using grpc::protobuf::FileDescriptor;
using grpc::protobuf::Descriptor;
using grpc::protobuf::ServiceDescriptor;
using grpc::protobuf::FileDescriptor;
using grpc::protobuf::MethodDescriptor;
using grpc::protobuf::ServiceDescriptor;
using grpc::protobuf::io::Printer;
using grpc::protobuf::io::StringOutputStream;
using grpc_generator::MethodType;
using grpc_generator::GetMethodType;
using grpc_generator::METHODTYPE_NO_STREAMING;
using grpc_generator::METHODTYPE_BIDI_STREAMING;
using grpc_generator::METHODTYPE_CLIENT_STREAMING;
using grpc_generator::METHODTYPE_NO_STREAMING;
using grpc_generator::METHODTYPE_SERVER_STREAMING;
using grpc_generator::METHODTYPE_BIDI_STREAMING;
using grpc_generator::MethodType;
using grpc_generator::StringReplace;
using std::map;
using std::vector;
@ -53,7 +52,7 @@ namespace {
// Currently, we cannot easily reuse the functionality as
// google/protobuf/compiler/csharp/csharp_doc_comment.h is not a public header.
// TODO(jtattermusch): reuse the functionality from google/protobuf.
bool GenerateDocCommentBodyImpl(grpc::protobuf::io::Printer *printer,
bool GenerateDocCommentBodyImpl(grpc::protobuf::io::Printer* printer,
grpc::protobuf::SourceLocation location) {
grpc::string comments = location.leading_comments.empty()
? location.trailing_comments
@ -100,8 +99,8 @@ bool GenerateDocCommentBodyImpl(grpc::protobuf::io::Printer *printer,
}
template <typename DescriptorType>
bool GenerateDocCommentBody(grpc::protobuf::io::Printer *printer,
const DescriptorType *descriptor) {
bool GenerateDocCommentBody(grpc::protobuf::io::Printer* printer,
const DescriptorType* descriptor) {
grpc::protobuf::SourceLocation location;
if (!descriptor->GetSourceLocation(&location)) {
return false;
@ -109,8 +108,8 @@ bool GenerateDocCommentBody(grpc::protobuf::io::Printer *printer,
return GenerateDocCommentBodyImpl(printer, location);
}
void GenerateDocCommentServerMethod(grpc::protobuf::io::Printer *printer,
const MethodDescriptor *method) {
void GenerateDocCommentServerMethod(grpc::protobuf::io::Printer* printer,
const MethodDescriptor* method) {
if (GenerateDocCommentBody(printer, method)) {
if (method->client_streaming()) {
printer->Print(
@ -141,8 +140,8 @@ void GenerateDocCommentServerMethod(grpc::protobuf::io::Printer *printer,
}
}
void GenerateDocCommentClientMethod(grpc::protobuf::io::Printer *printer,
const MethodDescriptor *method,
void GenerateDocCommentClientMethod(grpc::protobuf::io::Printer* printer,
const MethodDescriptor* method,
bool is_sync, bool use_call_options) {
if (GenerateDocCommentBody(printer, method)) {
if (!method->client_streaming()) {
@ -173,15 +172,15 @@ void GenerateDocCommentClientMethod(grpc::protobuf::io::Printer *printer,
}
}
std::string GetServiceClassName(const ServiceDescriptor *service) {
std::string GetServiceClassName(const ServiceDescriptor* service) {
return service->name();
}
std::string GetClientClassName(const ServiceDescriptor *service) {
std::string GetClientClassName(const ServiceDescriptor* service) {
return service->name() + "Client";
}
std::string GetServerClassName(const ServiceDescriptor *service) {
std::string GetServerClassName(const ServiceDescriptor* service) {
return service->name() + "Base";
}
@ -202,15 +201,15 @@ std::string GetCSharpMethodType(MethodType method_type) {
std::string GetServiceNameFieldName() { return "__ServiceName"; }
std::string GetMarshallerFieldName(const Descriptor *message) {
std::string GetMarshallerFieldName(const Descriptor* message) {
return "__Marshaller_" + message->name();
}
std::string GetMethodFieldName(const MethodDescriptor *method) {
std::string GetMethodFieldName(const MethodDescriptor* method) {
return "__Method_" + method->name();
}
std::string GetMethodRequestParamMaybe(const MethodDescriptor *method,
std::string GetMethodRequestParamMaybe(const MethodDescriptor* method,
bool invocation_param = false) {
if (method->client_streaming()) {
return "";
@ -225,7 +224,7 @@ std::string GetAccessLevel(bool internal_access) {
return internal_access ? "internal" : "public";
}
std::string GetMethodReturnTypeClient(const MethodDescriptor *method) {
std::string GetMethodReturnTypeClient(const MethodDescriptor* method) {
switch (GetMethodType(method)) {
case METHODTYPE_NO_STREAMING:
return "grpc::AsyncUnaryCall<" + GetClassName(method->output_type()) +
@ -246,7 +245,7 @@ std::string GetMethodReturnTypeClient(const MethodDescriptor *method) {
return "";
}
std::string GetMethodRequestParamServer(const MethodDescriptor *method) {
std::string GetMethodRequestParamServer(const MethodDescriptor* method) {
switch (GetMethodType(method)) {
case METHODTYPE_NO_STREAMING:
case METHODTYPE_SERVER_STREAMING:
@ -260,7 +259,7 @@ std::string GetMethodRequestParamServer(const MethodDescriptor *method) {
return "";
}
std::string GetMethodReturnTypeServer(const MethodDescriptor *method) {
std::string GetMethodReturnTypeServer(const MethodDescriptor* method) {
switch (GetMethodType(method)) {
case METHODTYPE_NO_STREAMING:
case METHODTYPE_CLIENT_STREAMING:
@ -274,7 +273,7 @@ std::string GetMethodReturnTypeServer(const MethodDescriptor *method) {
return "";
}
std::string GetMethodResponseStreamMaybe(const MethodDescriptor *method) {
std::string GetMethodResponseStreamMaybe(const MethodDescriptor* method) {
switch (GetMethodType(method)) {
case METHODTYPE_NO_STREAMING:
case METHODTYPE_CLIENT_STREAMING:
@ -289,13 +288,13 @@ std::string GetMethodResponseStreamMaybe(const MethodDescriptor *method) {
}
// Gets vector of all messages used as input or output types.
std::vector<const Descriptor *> GetUsedMessages(
const ServiceDescriptor *service) {
std::set<const Descriptor *> descriptor_set;
std::vector<const Descriptor *>
std::vector<const Descriptor*> GetUsedMessages(
const ServiceDescriptor* service) {
std::set<const Descriptor*> descriptor_set;
std::vector<const Descriptor*>
result; // vector is to maintain stable ordering
for (int i = 0; i < service->method_count(); i++) {
const MethodDescriptor *method = service->method(i);
const MethodDescriptor* method = service->method(i);
if (descriptor_set.find(method->input_type()) == descriptor_set.end()) {
descriptor_set.insert(method->input_type());
result.push_back(method->input_type());
@ -308,10 +307,10 @@ std::vector<const Descriptor *> GetUsedMessages(
return result;
}
void GenerateMarshallerFields(Printer *out, const ServiceDescriptor *service) {
std::vector<const Descriptor *> used_messages = GetUsedMessages(service);
void GenerateMarshallerFields(Printer* out, const ServiceDescriptor* service) {
std::vector<const Descriptor*> used_messages = GetUsedMessages(service);
for (size_t i = 0; i < used_messages.size(); i++) {
const Descriptor *message = used_messages[i];
const Descriptor* message = used_messages[i];
out->Print(
"static readonly grpc::Marshaller<$type$> $fieldname$ = "
"grpc::Marshallers.Create((arg) => "
@ -323,7 +322,7 @@ void GenerateMarshallerFields(Printer *out, const ServiceDescriptor *service) {
out->Print("\n");
}
void GenerateStaticMethodField(Printer *out, const MethodDescriptor *method) {
void GenerateStaticMethodField(Printer* out, const MethodDescriptor* method) {
out->Print(
"static readonly grpc::Method<$request$, $response$> $fieldname$ = new "
"grpc::Method<$request$, $response$>(\n",
@ -346,8 +345,8 @@ void GenerateStaticMethodField(Printer *out, const MethodDescriptor *method) {
out->Outdent();
}
void GenerateServiceDescriptorProperty(Printer *out,
const ServiceDescriptor *service) {
void GenerateServiceDescriptorProperty(Printer* out,
const ServiceDescriptor* service) {
std::ostringstream index;
index << service->index();
out->Print("/// <summary>Service descriptor</summary>\n");
@ -362,7 +361,7 @@ void GenerateServiceDescriptorProperty(Printer *out,
out->Print("\n");
}
void GenerateServerClass(Printer *out, const ServiceDescriptor *service) {
void GenerateServerClass(Printer* out, const ServiceDescriptor* service) {
out->Print(
"/// <summary>Base class for server-side implementations of "
"$servicename$</summary>\n",
@ -372,7 +371,7 @@ void GenerateServerClass(Printer *out, const ServiceDescriptor *service) {
out->Print("{\n");
out->Indent();
for (int i = 0; i < service->method_count(); i++) {
const MethodDescriptor *method = service->method(i);
const MethodDescriptor* method = service->method(i);
GenerateDocCommentServerMethod(out, method);
out->Print(
"public virtual $returntype$ "
@ -395,7 +394,7 @@ void GenerateServerClass(Printer *out, const ServiceDescriptor *service) {
out->Print("\n");
}
void GenerateClientStub(Printer *out, const ServiceDescriptor *service) {
void GenerateClientStub(Printer* out, const ServiceDescriptor* service) {
out->Print("/// <summary>Client for $servicename$</summary>\n", "servicename",
GetServiceClassName(service));
out->Print("public partial class $name$ : grpc::ClientBase<$name$>\n", "name",
@ -443,7 +442,7 @@ void GenerateClientStub(Printer *out, const ServiceDescriptor *service) {
out->Print("}\n\n");
for (int i = 0; i < service->method_count(); i++) {
const MethodDescriptor *method = service->method(i);
const MethodDescriptor* method = service->method(i);
MethodType method_type = GetMethodType(method);
if (method_type == METHODTYPE_NO_STREAMING) {
@ -573,7 +572,7 @@ void GenerateClientStub(Printer *out, const ServiceDescriptor *service) {
out->Print("\n");
}
void GenerateBindServiceMethod(Printer *out, const ServiceDescriptor *service) {
void GenerateBindServiceMethod(Printer* out, const ServiceDescriptor* service) {
out->Print(
"/// <summary>Creates service definition that can be registered with a "
"server</summary>\n");
@ -591,7 +590,7 @@ void GenerateBindServiceMethod(Printer *out, const ServiceDescriptor *service) {
out->Indent();
out->Indent();
for (int i = 0; i < service->method_count(); i++) {
const MethodDescriptor *method = service->method(i);
const MethodDescriptor* method = service->method(i);
out->Print(".AddMethod($methodfield$, serviceImpl.$methodname$)",
"methodfield", GetMethodFieldName(method), "methodname",
method->name());
@ -608,7 +607,7 @@ void GenerateBindServiceMethod(Printer *out, const ServiceDescriptor *service) {
out->Print("\n");
}
void GenerateService(Printer *out, const ServiceDescriptor *service,
void GenerateService(Printer* out, const ServiceDescriptor* service,
bool generate_client, bool generate_server,
bool internal_access) {
GenerateDocCommentBody(out, service);
@ -644,7 +643,7 @@ void GenerateService(Printer *out, const ServiceDescriptor *service,
} // anonymous namespace
grpc::string GetServices(const FileDescriptor *file, bool generate_client,
grpc::string GetServices(const FileDescriptor* file, bool generate_client,
bool generate_server, bool internal_access) {
grpc::string output;
{

@ -25,7 +25,7 @@
namespace grpc_csharp_generator {
grpc::string GetServices(const grpc::protobuf::FileDescriptor *file,
grpc::string GetServices(const grpc::protobuf::FileDescriptor* file,
bool generate_client, bool generate_server,
bool internal_access);

@ -24,8 +24,8 @@
namespace grpc_csharp_generator {
inline bool ServicesFilename(const grpc::protobuf::FileDescriptor *file,
grpc::string *file_name_or_error) {
inline bool ServicesFilename(const grpc::protobuf::FileDescriptor* file,
grpc::string* file_name_or_error) {
*file_name_or_error =
grpc_generator::FileNameInUpperCamel(file, false) + "Grpc.cs";
return true;
@ -34,7 +34,7 @@ inline bool ServicesFilename(const grpc::protobuf::FileDescriptor *file,
// Get leading or trailing comments in a string. Comment lines start with "// ".
// Leading detached comments are put in front of leading comments.
template <typename DescriptorType>
inline grpc::string GetCsharpComments(const DescriptorType *desc,
inline grpc::string GetCsharpComments(const DescriptorType* desc,
bool leading) {
return grpc_generator::GetPrefixedComments(desc, leading, "//");
}

@ -29,10 +29,10 @@ class CSharpGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
CSharpGrpcGenerator() {}
~CSharpGrpcGenerator() {}
bool Generate(const grpc::protobuf::FileDescriptor *file,
const grpc::string &parameter,
grpc::protobuf::compiler::GeneratorContext *context,
grpc::string *error) const {
bool Generate(const grpc::protobuf::FileDescriptor* file,
const grpc::string& parameter,
grpc::protobuf::compiler::GeneratorContext* context,
grpc::string* error) const {
std::vector<std::pair<grpc::string, grpc::string> > options;
grpc::protobuf::compiler::ParseGeneratorParameter(parameter, &options);
@ -71,7 +71,7 @@ class CSharpGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
}
};
int main(int argc, char *argv[]) {
int main(int argc, char* argv[]) {
CSharpGrpcGenerator generator;
return grpc::protobuf::compiler::PluginMain(argc, argv, &generator);
}

@ -29,7 +29,7 @@
namespace grpc_generator {
inline bool StripSuffix(grpc::string *filename, const grpc::string &suffix) {
inline bool StripSuffix(grpc::string* filename, const grpc::string& suffix) {
if (filename->length() >= suffix.length()) {
size_t suffix_pos = filename->length() - suffix.length();
if (filename->compare(suffix_pos, grpc::string::npos, suffix) == 0) {
@ -41,7 +41,7 @@ inline bool StripSuffix(grpc::string *filename, const grpc::string &suffix) {
return false;
}
inline bool StripPrefix(grpc::string *name, const grpc::string &prefix) {
inline bool StripPrefix(grpc::string* name, const grpc::string& prefix) {
if (name->length() >= prefix.length()) {
if (name->substr(0, prefix.size()) == prefix) {
*name = name->substr(prefix.size());
@ -58,8 +58,8 @@ inline grpc::string StripProto(grpc::string filename) {
return filename;
}
inline grpc::string StringReplace(grpc::string str, const grpc::string &from,
const grpc::string &to, bool replace_all) {
inline grpc::string StringReplace(grpc::string str, const grpc::string& from,
const grpc::string& to, bool replace_all) {
size_t pos = 0;
do {
@ -74,13 +74,13 @@ inline grpc::string StringReplace(grpc::string str, const grpc::string &from,
return str;
}
inline grpc::string StringReplace(grpc::string str, const grpc::string &from,
const grpc::string &to) {
inline grpc::string StringReplace(grpc::string str, const grpc::string& from,
const grpc::string& to) {
return StringReplace(str, from, to, true);
}
inline std::vector<grpc::string> tokenize(const grpc::string &input,
const grpc::string &delimiters) {
inline std::vector<grpc::string> tokenize(const grpc::string& input,
const grpc::string& delimiters) {
std::vector<grpc::string> tokens;
size_t pos, last_pos = 0;
@ -125,7 +125,7 @@ inline grpc::string LowerUnderscoreToUpperCamel(grpc::string str) {
}
inline grpc::string FileNameInUpperCamel(
const grpc::protobuf::FileDescriptor *file, bool include_package_path) {
const grpc::protobuf::FileDescriptor* file, bool include_package_path) {
std::vector<grpc::string> tokens = tokenize(StripProto(file->name()), "/");
grpc::string result = "";
if (include_package_path) {
@ -138,7 +138,7 @@ inline grpc::string FileNameInUpperCamel(
}
inline grpc::string FileNameInUpperCamel(
const grpc::protobuf::FileDescriptor *file) {
const grpc::protobuf::FileDescriptor* file) {
return FileNameInUpperCamel(file, true);
}
@ -150,7 +150,7 @@ enum MethodType {
};
inline MethodType GetMethodType(
const grpc::protobuf::MethodDescriptor *method) {
const grpc::protobuf::MethodDescriptor* method) {
if (method->client_streaming()) {
if (method->server_streaming()) {
return METHODTYPE_BIDI_STREAMING;
@ -166,8 +166,8 @@ inline MethodType GetMethodType(
}
}
inline void Split(const grpc::string &s, char delim,
std::vector<grpc::string> *append_to) {
inline void Split(const grpc::string& s, char delim,
std::vector<grpc::string>* append_to) {
std::istringstream iss(s);
grpc::string piece;
while (std::getline(iss, piece)) {
@ -183,14 +183,14 @@ enum CommentType {
// Get all the raw comments and append each line without newline to out.
template <typename DescriptorType>
inline void GetComment(const DescriptorType *desc, CommentType type,
std::vector<grpc::string> *out) {
inline void GetComment(const DescriptorType* desc, CommentType type,
std::vector<grpc::string>* out) {
grpc::protobuf::SourceLocation location;
if (!desc->GetSourceLocation(&location)) {
return;
}
if (type == COMMENTTYPE_LEADING || type == COMMENTTYPE_TRAILING) {
const grpc::string &comments = type == COMMENTTYPE_LEADING
const grpc::string& comments = type == COMMENTTYPE_LEADING
? location.leading_comments
: location.trailing_comments;
Split(comments, '\n', out);
@ -210,8 +210,8 @@ inline void GetComment(const DescriptorType *desc, CommentType type,
// For file level leading and detached leading comments, we return comments
// above syntax line. Return nothing for trailing comments.
template <>
inline void GetComment(const grpc::protobuf::FileDescriptor *desc,
CommentType type, std::vector<grpc::string> *out) {
inline void GetComment(const grpc::protobuf::FileDescriptor* desc,
CommentType type, std::vector<grpc::string>* out) {
if (type == COMMENTTYPE_TRAILING) {
return;
}
@ -238,10 +238,10 @@ inline void GetComment(const grpc::protobuf::FileDescriptor *desc,
// Add prefix and newline to each comment line and concatenate them together.
// Make sure there is a space after the prefix unless the line is empty.
inline grpc::string GenerateCommentsWithPrefix(
const std::vector<grpc::string> &in, const grpc::string &prefix) {
const std::vector<grpc::string>& in, const grpc::string& prefix) {
std::ostringstream oss;
for (auto it = in.begin(); it != in.end(); it++) {
const grpc::string &elem = *it;
const grpc::string& elem = *it;
if (elem.empty()) {
oss << prefix << "\n";
} else if (elem[0] == ' ') {
@ -254,9 +254,9 @@ inline grpc::string GenerateCommentsWithPrefix(
}
template <typename DescriptorType>
inline grpc::string GetPrefixedComments(const DescriptorType *desc,
inline grpc::string GetPrefixedComments(const DescriptorType* desc,
bool leading,
const grpc::string &prefix) {
const grpc::string& prefix) {
std::vector<grpc::string> out;
if (leading) {
grpc_generator::GetComment(

@ -22,10 +22,10 @@
#include "src/compiler/generator_helpers.h"
#include "src/compiler/node_generator_helpers.h"
using grpc::protobuf::Descriptor;
using grpc::protobuf::FileDescriptor;
using grpc::protobuf::ServiceDescriptor;
using grpc::protobuf::MethodDescriptor;
using grpc::protobuf::Descriptor;
using grpc::protobuf::ServiceDescriptor;
using grpc::protobuf::io::Printer;
using grpc::protobuf::io::StringOutputStream;
using std::map;
@ -53,15 +53,15 @@ grpc::string ModuleAlias(const grpc::string filename) {
// Given a filename like foo/bar/baz.proto, returns the corresponding JavaScript
// message file foo/bar/baz.js
grpc::string GetJSMessageFilename(const grpc::string &filename) {
grpc::string GetJSMessageFilename(const grpc::string& filename) {
grpc::string name = filename;
return grpc_generator::StripProto(name) + "_pb.js";
}
// Given a filename like foo/bar/baz.proto, returns the root directory
// path ../../
grpc::string GetRootPath(const grpc::string &from_filename,
const grpc::string &to_filename) {
grpc::string GetRootPath(const grpc::string& from_filename,
const grpc::string& to_filename) {
if (to_filename.find("google/protobuf") == 0) {
// Well-known types (.proto files in the google/protobuf directory) are
// assumed to come from the 'google-protobuf' npm package. We may want to
@ -82,24 +82,24 @@ grpc::string GetRootPath(const grpc::string &from_filename,
// Return the relative path to load to_file from the directory containing
// from_file, assuming that both paths are relative to the same directory
grpc::string GetRelativePath(const grpc::string &from_file,
const grpc::string &to_file) {
grpc::string GetRelativePath(const grpc::string& from_file,
const grpc::string& to_file) {
return GetRootPath(from_file, to_file) + to_file;
}
/* Finds all message types used in all services in the file, and returns them
* as a map of fully qualified message type name to message descriptor */
map<grpc::string, const Descriptor *> GetAllMessages(
const FileDescriptor *file) {
map<grpc::string, const Descriptor *> message_types;
map<grpc::string, const Descriptor*> GetAllMessages(
const FileDescriptor* file) {
map<grpc::string, const Descriptor*> message_types;
for (int service_num = 0; service_num < file->service_count();
service_num++) {
const ServiceDescriptor *service = file->service(service_num);
const ServiceDescriptor* service = file->service(service_num);
for (int method_num = 0; method_num < service->method_count();
method_num++) {
const MethodDescriptor *method = service->method(method_num);
const Descriptor *input_type = method->input_type();
const Descriptor *output_type = method->output_type();
const MethodDescriptor* method = service->method(method_num);
const Descriptor* input_type = method->input_type();
const Descriptor* output_type = method->output_type();
message_types[input_type->full_name()] = input_type;
message_types[output_type->full_name()] = output_type;
}
@ -107,11 +107,11 @@ map<grpc::string, const Descriptor *> GetAllMessages(
return message_types;
}
grpc::string MessageIdentifierName(const grpc::string &name) {
grpc::string MessageIdentifierName(const grpc::string& name) {
return grpc_generator::StringReplace(name, ".", "_");
}
grpc::string NodeObjectPath(const Descriptor *descriptor) {
grpc::string NodeObjectPath(const Descriptor* descriptor) {
grpc::string module_alias = ModuleAlias(descriptor->file()->name());
grpc::string name = descriptor->full_name();
grpc_generator::StripPrefix(&name, descriptor->file()->package() + ".");
@ -119,7 +119,7 @@ grpc::string NodeObjectPath(const Descriptor *descriptor) {
}
// Prints out the message serializer and deserializer functions
void PrintMessageTransformer(const Descriptor *descriptor, Printer *out) {
void PrintMessageTransformer(const Descriptor* descriptor, Printer* out) {
map<grpc::string, grpc::string> template_vars;
grpc::string full_name = descriptor->full_name();
template_vars["identifier_name"] = MessageIdentifierName(full_name);
@ -149,9 +149,9 @@ void PrintMessageTransformer(const Descriptor *descriptor, Printer *out) {
out->Print("}\n\n");
}
void PrintMethod(const MethodDescriptor *method, Printer *out) {
const Descriptor *input_type = method->input_type();
const Descriptor *output_type = method->output_type();
void PrintMethod(const MethodDescriptor* method, Printer* out) {
const Descriptor* input_type = method->input_type();
const Descriptor* output_type = method->output_type();
map<grpc::string, grpc::string> vars;
vars["service_name"] = method->service()->full_name();
vars["name"] = method->name();
@ -177,7 +177,7 @@ void PrintMethod(const MethodDescriptor *method, Printer *out) {
}
// Prints out the service descriptor object
void PrintService(const ServiceDescriptor *service, Printer *out) {
void PrintService(const ServiceDescriptor* service, Printer* out) {
map<grpc::string, grpc::string> template_vars;
out->Print(GetNodeComments(service, true).c_str());
template_vars["name"] = service->name();
@ -200,7 +200,7 @@ void PrintService(const ServiceDescriptor *service, Printer *out) {
out->Print(GetNodeComments(service, false).c_str());
}
void PrintImports(const FileDescriptor *file, Printer *out) {
void PrintImports(const FileDescriptor* file, Printer* out) {
out->Print("var grpc = require('grpc');\n");
if (file->message_type_count() > 0) {
grpc::string file_path =
@ -219,9 +219,9 @@ void PrintImports(const FileDescriptor *file, Printer *out) {
out->Print("\n");
}
void PrintTransformers(const FileDescriptor *file, Printer *out) {
map<grpc::string, const Descriptor *> messages = GetAllMessages(file);
for (std::map<grpc::string, const Descriptor *>::iterator it =
void PrintTransformers(const FileDescriptor* file, Printer* out) {
map<grpc::string, const Descriptor*> messages = GetAllMessages(file);
for (std::map<grpc::string, const Descriptor*>::iterator it =
messages.begin();
it != messages.end(); it++) {
PrintMessageTransformer(it->second, out);
@ -229,14 +229,14 @@ void PrintTransformers(const FileDescriptor *file, Printer *out) {
out->Print("\n");
}
void PrintServices(const FileDescriptor *file, Printer *out) {
void PrintServices(const FileDescriptor* file, Printer* out) {
for (int i = 0; i < file->service_count(); i++) {
PrintService(file->service(i), out);
}
}
}
} // namespace
grpc::string GenerateFile(const FileDescriptor *file) {
grpc::string GenerateFile(const FileDescriptor* file) {
grpc::string output;
{
StringOutputStream output_stream(&output);

@ -23,7 +23,7 @@
namespace grpc_node_generator {
grpc::string GenerateFile(const grpc::protobuf::FileDescriptor *file);
grpc::string GenerateFile(const grpc::protobuf::FileDescriptor* file);
} // namespace grpc_node_generator

@ -32,10 +32,10 @@ class NodeGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
NodeGrpcGenerator() {}
~NodeGrpcGenerator() {}
bool Generate(const grpc::protobuf::FileDescriptor *file,
const grpc::string &parameter,
grpc::protobuf::compiler::GeneratorContext *context,
grpc::string *error) const {
bool Generate(const grpc::protobuf::FileDescriptor* file,
const grpc::string& parameter,
grpc::protobuf::compiler::GeneratorContext* context,
grpc::string* error) const {
grpc::string code = GenerateFile(file);
if (code.size() == 0) {
return true;
@ -52,7 +52,7 @@ class NodeGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
}
};
int main(int argc, char *argv[]) {
int main(int argc, char* argv[]) {
NodeGrpcGenerator generator;
return grpc::protobuf::compiler::PluginMain(argc, argv, &generator);
}

@ -27,10 +27,10 @@
#include <google/protobuf/compiler/objectivec/objectivec_helpers.h>
using ::google::protobuf::compiler::objectivec::ClassName;
using ::grpc::protobuf::io::Printer;
using ::grpc::protobuf::FileDescriptor;
using ::grpc::protobuf::MethodDescriptor;
using ::grpc::protobuf::ServiceDescriptor;
using ::grpc::protobuf::FileDescriptor;
using ::grpc::protobuf::io::Printer;
using ::std::map;
using ::std::set;
@ -38,7 +38,7 @@ namespace grpc_objective_c_generator {
namespace {
void PrintProtoRpcDeclarationAsPragma(
Printer *printer, const MethodDescriptor *method,
Printer* printer, const MethodDescriptor* method,
map< ::grpc::string, ::grpc::string> vars) {
vars["client_stream"] = method->client_streaming() ? "stream " : "";
vars["server_stream"] = method->server_streaming() ? "stream " : "";
@ -49,7 +49,7 @@ void PrintProtoRpcDeclarationAsPragma(
}
template <typename DescriptorType>
static void PrintAllComments(const DescriptorType *desc, Printer *printer) {
static void PrintAllComments(const DescriptorType* desc, Printer* printer) {
std::vector<grpc::string> comments;
grpc_generator::GetComment(desc, grpc_generator::COMMENTTYPE_LEADING_DETACHED,
&comments);
@ -72,8 +72,8 @@ static void PrintAllComments(const DescriptorType *desc, Printer *printer) {
printer->Print(" */\n");
}
void PrintMethodSignature(Printer *printer, const MethodDescriptor *method,
const map< ::grpc::string, ::grpc::string> &vars) {
void PrintMethodSignature(Printer* printer, const MethodDescriptor* method,
const map< ::grpc::string, ::grpc::string>& vars) {
// Print comment
PrintAllComments(method, printer);
@ -97,7 +97,7 @@ void PrintMethodSignature(Printer *printer, const MethodDescriptor *method,
}
}
void PrintSimpleSignature(Printer *printer, const MethodDescriptor *method,
void PrintSimpleSignature(Printer* printer, const MethodDescriptor* method,
map< ::grpc::string, ::grpc::string> vars) {
vars["method_name"] =
grpc_generator::LowercaseFirstLetter(vars["method_name"]);
@ -105,7 +105,7 @@ void PrintSimpleSignature(Printer *printer, const MethodDescriptor *method,
PrintMethodSignature(printer, method, vars);
}
void PrintAdvancedSignature(Printer *printer, const MethodDescriptor *method,
void PrintAdvancedSignature(Printer* printer, const MethodDescriptor* method,
map< ::grpc::string, ::grpc::string> vars) {
vars["method_name"] = "RPCTo" + vars["method_name"];
vars["return_type"] = "GRPCProtoCall *";
@ -113,7 +113,7 @@ void PrintAdvancedSignature(Printer *printer, const MethodDescriptor *method,
}
inline map< ::grpc::string, ::grpc::string> GetMethodVars(
const MethodDescriptor *method) {
const MethodDescriptor* method) {
map< ::grpc::string, ::grpc::string> res;
res["method_name"] = method->name();
res["request_type"] = method->input_type()->name();
@ -123,7 +123,7 @@ inline map< ::grpc::string, ::grpc::string> GetMethodVars(
return res;
}
void PrintMethodDeclarations(Printer *printer, const MethodDescriptor *method) {
void PrintMethodDeclarations(Printer* printer, const MethodDescriptor* method) {
map< ::grpc::string, ::grpc::string> vars = GetMethodVars(method);
PrintProtoRpcDeclarationAsPragma(printer, method, vars);
@ -134,7 +134,7 @@ void PrintMethodDeclarations(Printer *printer, const MethodDescriptor *method) {
printer->Print(";\n\n\n");
}
void PrintSimpleImplementation(Printer *printer, const MethodDescriptor *method,
void PrintSimpleImplementation(Printer* printer, const MethodDescriptor* method,
map< ::grpc::string, ::grpc::string> vars) {
printer->Print("{\n");
printer->Print(vars, " [[self RPCTo$method_name$With");
@ -151,8 +151,8 @@ void PrintSimpleImplementation(Printer *printer, const MethodDescriptor *method,
printer->Print("}\n");
}
void PrintAdvancedImplementation(Printer *printer,
const MethodDescriptor *method,
void PrintAdvancedImplementation(Printer* printer,
const MethodDescriptor* method,
map< ::grpc::string, ::grpc::string> vars) {
printer->Print("{\n");
printer->Print(vars, " return [self RPCToMethod:@\"$method_name$\"\n");
@ -176,8 +176,8 @@ void PrintAdvancedImplementation(Printer *printer,
printer->Print("}\n");
}
void PrintMethodImplementations(Printer *printer,
const MethodDescriptor *method) {
void PrintMethodImplementations(Printer* printer,
const MethodDescriptor* method) {
map< ::grpc::string, ::grpc::string> vars = GetMethodVars(method);
PrintProtoRpcDeclarationAsPragma(printer, method, vars);
@ -193,7 +193,7 @@ void PrintMethodImplementations(Printer *printer,
} // namespace
::grpc::string GetAllMessageClasses(const FileDescriptor *file) {
::grpc::string GetAllMessageClasses(const FileDescriptor* file) {
::grpc::string output;
set< ::grpc::string> classes;
for (int i = 0; i < file->service_count(); i++) {
@ -211,7 +211,7 @@ void PrintMethodImplementations(Printer *printer,
return output;
}
::grpc::string GetHeader(const ServiceDescriptor *service) {
::grpc::string GetHeader(const ServiceDescriptor* service) {
::grpc::string output;
{
// Scope the output stream so it closes and finalizes output to the string.
@ -245,7 +245,7 @@ void PrintMethodImplementations(Printer *printer,
return output;
}
::grpc::string GetSource(const ServiceDescriptor *service) {
::grpc::string GetSource(const ServiceDescriptor* service) {
::grpc::string output;
{
// Scope the output stream so it closes and finalizes output to the string.

@ -23,20 +23,20 @@
namespace grpc_objective_c_generator {
using ::grpc::protobuf::ServiceDescriptor;
using ::grpc::protobuf::FileDescriptor;
using ::grpc::protobuf::ServiceDescriptor;
using ::grpc::string;
// Returns forward declaration of classes in the generated header file.
string GetAllMessageClasses(const FileDescriptor *file);
string GetAllMessageClasses(const FileDescriptor* file);
// Returns the content to be included in the "global_scope" insertion point of
// the generated header file.
string GetHeader(const ServiceDescriptor *service);
string GetHeader(const ServiceDescriptor* service);
// Returns the content to be included in the "global_scope" insertion point of
// the generated implementation file.
string GetSource(const ServiceDescriptor *service);
string GetSource(const ServiceDescriptor* service);
} // namespace grpc_objective_c_generator

@ -31,14 +31,14 @@ using ::grpc::protobuf::FileDescriptor;
using ::grpc::protobuf::ServiceDescriptor;
using ::grpc::string;
inline string MessageHeaderName(const FileDescriptor *file) {
inline string MessageHeaderName(const FileDescriptor* file) {
return google::protobuf::compiler::objectivec::FilePath(file) + ".pbobjc.h";
}
inline string ServiceClassName(const ServiceDescriptor *service) {
const FileDescriptor *file = service->file();
inline string ServiceClassName(const ServiceDescriptor* service) {
const FileDescriptor* file = service->file();
string prefix = file->options().objc_class_prefix();
return prefix + service->name();
}
}
} // namespace grpc_objective_c_generator
#endif // GRPC_INTERNAL_COMPILER_OBJECTIVE_C_GENERATOR_HELPERS_H

@ -26,19 +26,19 @@
#include <google/protobuf/compiler/objectivec/objectivec_helpers.h>
using ::google::protobuf::compiler::objectivec::ProtobufLibraryFrameworkName;
using ::google::protobuf::compiler::objectivec::
IsProtobufLibraryBundledProtoFile;
using ::google::protobuf::compiler::objectivec::ProtobufLibraryFrameworkName;
class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
public:
ObjectiveCGrpcGenerator() {}
virtual ~ObjectiveCGrpcGenerator() {}
virtual bool Generate(const grpc::protobuf::FileDescriptor *file,
const ::grpc::string &parameter,
grpc::protobuf::compiler::GeneratorContext *context,
::grpc::string *error) const {
virtual bool Generate(const grpc::protobuf::FileDescriptor* file,
const ::grpc::string& parameter,
grpc::protobuf::compiler::GeneratorContext* context,
::grpc::string* error) const {
if (file->service_count() == 0) {
// No services. Do nothing.
return true;
@ -65,7 +65,7 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
for (int i = 0; i < file->dependency_count(); i++) {
::grpc::string header =
grpc_objective_c_generator::MessageHeaderName(file->dependency(i));
const grpc::protobuf::FileDescriptor *dependency = file->dependency(i);
const grpc::protobuf::FileDescriptor* dependency = file->dependency(i);
if (IsProtobufLibraryBundledProtoFile(dependency)) {
::grpc::string base_name = header;
grpc_generator::StripPrefix(&base_name, "google/protobuf/");
@ -88,7 +88,7 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
::grpc::string declarations;
for (int i = 0; i < file->service_count(); i++) {
const grpc::protobuf::ServiceDescriptor *service = file->service(i);
const grpc::protobuf::ServiceDescriptor* service = file->service(i);
declarations += grpc_objective_c_generator::GetHeader(service);
}
@ -96,9 +96,9 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
"\nNS_ASSUME_NONNULL_BEGIN\n\n";
static const ::grpc::string kNonNullEnd = "\nNS_ASSUME_NONNULL_END\n";
Write(context, file_name + ".pbrpc.h", imports + '\n' + proto_imports +
'\n' + kNonNullBegin +
declarations + kNonNullEnd);
Write(context, file_name + ".pbrpc.h",
imports + '\n' + proto_imports + '\n' + kNonNullBegin +
declarations + kNonNullEnd);
}
{
@ -111,7 +111,7 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
for (int i = 0; i < file->dependency_count(); i++) {
::grpc::string header =
grpc_objective_c_generator::MessageHeaderName(file->dependency(i));
const grpc::protobuf::FileDescriptor *dependency = file->dependency(i);
const grpc::protobuf::FileDescriptor* dependency = file->dependency(i);
if (IsProtobufLibraryBundledProtoFile(dependency)) {
::grpc::string base_name = header;
grpc_generator::StripPrefix(&base_name, "google/protobuf/");
@ -133,7 +133,7 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
::grpc::string definitions;
for (int i = 0; i < file->service_count(); i++) {
const grpc::protobuf::ServiceDescriptor *service = file->service(i);
const grpc::protobuf::ServiceDescriptor* service = file->service(i);
definitions += grpc_objective_c_generator::GetSource(service);
}
@ -145,8 +145,8 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
private:
// Write the given code into the given file.
void Write(grpc::protobuf::compiler::GeneratorContext *context,
const ::grpc::string &filename, const ::grpc::string &code) const {
void Write(grpc::protobuf::compiler::GeneratorContext* context,
const ::grpc::string& filename, const ::grpc::string& code) const {
std::unique_ptr<grpc::protobuf::io::ZeroCopyOutputStream> output(
context->Open(filename));
grpc::protobuf::io::CodedOutputStream coded_out(output.get());
@ -154,7 +154,7 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
}
};
int main(int argc, char *argv[]) {
int main(int argc, char* argv[]) {
ObjectiveCGrpcGenerator generator;
return grpc::protobuf::compiler::PluginMain(argc, argv, &generator);
}

@ -22,10 +22,10 @@
#include "src/compiler/generator_helpers.h"
#include "src/compiler/php_generator_helpers.h"
using grpc::protobuf::Descriptor;
using grpc::protobuf::FileDescriptor;
using grpc::protobuf::ServiceDescriptor;
using grpc::protobuf::MethodDescriptor;
using grpc::protobuf::Descriptor;
using grpc::protobuf::ServiceDescriptor;
using grpc::protobuf::io::Printer;
using grpc::protobuf::io::StringOutputStream;
using std::map;
@ -33,7 +33,7 @@ using std::map;
namespace grpc_php_generator {
namespace {
grpc::string ConvertToPhpNamespace(const grpc::string &name) {
grpc::string ConvertToPhpNamespace(const grpc::string& name) {
std::vector<grpc::string> tokens = grpc_generator::tokenize(name, ".");
std::ostringstream oss;
for (unsigned int i = 0; i < tokens.size(); i++) {
@ -43,7 +43,7 @@ grpc::string ConvertToPhpNamespace(const grpc::string &name) {
return oss.str();
}
grpc::string PackageName(const FileDescriptor *file) {
grpc::string PackageName(const FileDescriptor* file) {
if (file->options().has_php_namespace()) {
return file->options().php_namespace();
} else {
@ -51,8 +51,8 @@ grpc::string PackageName(const FileDescriptor *file) {
}
}
grpc::string MessageIdentifierName(const grpc::string &name,
const FileDescriptor *file) {
grpc::string MessageIdentifierName(const grpc::string& name,
const FileDescriptor* file) {
std::vector<grpc::string> tokens = grpc_generator::tokenize(name, ".");
std::ostringstream oss;
oss << PackageName(file) << "\\"
@ -60,9 +60,9 @@ grpc::string MessageIdentifierName(const grpc::string &name,
return oss.str();
}
void PrintMethod(const MethodDescriptor *method, Printer *out) {
const Descriptor *input_type = method->input_type();
const Descriptor *output_type = method->output_type();
void PrintMethod(const MethodDescriptor* method, Printer* out) {
const Descriptor* input_type = method->input_type();
const Descriptor* output_type = method->output_type();
map<grpc::string, grpc::string> vars;
vars["service_name"] = method->service()->full_name();
vars["name"] = method->name();
@ -116,8 +116,8 @@ void PrintMethod(const MethodDescriptor *method, Printer *out) {
}
// Prints out the service descriptor object
void PrintService(const ServiceDescriptor *service,
const grpc::string &class_suffix, Printer *out) {
void PrintService(const ServiceDescriptor* service,
const grpc::string& class_suffix, Printer* out) {
map<grpc::string, grpc::string> vars;
out->Print("/**\n");
out->Print(GetPHPComments(service, " *").c_str());
@ -148,11 +148,11 @@ void PrintService(const ServiceDescriptor *service,
out->Outdent();
out->Print("}\n");
}
}
} // namespace
grpc::string GenerateFile(const FileDescriptor *file,
const ServiceDescriptor *service,
const grpc::string &class_suffix) {
grpc::string GenerateFile(const FileDescriptor* file,
const ServiceDescriptor* service,
const grpc::string& class_suffix) {
grpc::string output;
{
StringOutputStream output_stream(&output);

@ -23,9 +23,9 @@
namespace grpc_php_generator {
grpc::string GenerateFile(const grpc::protobuf::FileDescriptor *file,
const grpc::protobuf::ServiceDescriptor *service,
const grpc::string &class_suffix);
grpc::string GenerateFile(const grpc::protobuf::FileDescriptor* file,
const grpc::protobuf::ServiceDescriptor* service,
const grpc::string& class_suffix);
} // namespace grpc_php_generator

@ -27,14 +27,14 @@
namespace grpc_php_generator {
inline grpc::string GetPHPServiceClassname(
const grpc::protobuf::ServiceDescriptor *service,
const grpc::string &class_suffix) {
const grpc::protobuf::ServiceDescriptor* service,
const grpc::string& class_suffix) {
return service->name() + (class_suffix == "" ? "Client" : class_suffix);
}
// ReplaceAll replaces all instances of search with replace in s.
inline grpc::string ReplaceAll(grpc::string s, const grpc::string &search,
const grpc::string &replace) {
inline grpc::string ReplaceAll(grpc::string s, const grpc::string& search,
const grpc::string& replace) {
size_t pos = 0;
while ((pos = s.find(search, pos)) != grpc::string::npos) {
s.replace(pos, search.length(), replace);
@ -44,9 +44,9 @@ inline grpc::string ReplaceAll(grpc::string s, const grpc::string &search,
}
inline grpc::string GetPHPServiceFilename(
const grpc::protobuf::FileDescriptor *file,
const grpc::protobuf::ServiceDescriptor *service,
const grpc::string &class_suffix) {
const grpc::protobuf::FileDescriptor* file,
const grpc::protobuf::ServiceDescriptor* service,
const grpc::string& class_suffix) {
std::ostringstream oss;
if (file->options().has_php_namespace()) {
oss << ReplaceAll(file->options().php_namespace(), "\\", "/");
@ -65,7 +65,7 @@ inline grpc::string GetPHPServiceFilename(
// Get leading or trailing comments in a string. Comment lines start with "// ".
// Leading detached comments are put in front of leading comments.
template <typename DescriptorType>
inline grpc::string GetPHPComments(const DescriptorType *desc,
inline grpc::string GetPHPComments(const DescriptorType* desc,
grpc::string prefix) {
return ReplaceAll(grpc_generator::GetPrefixedComments(desc, true, prefix),
"*/", "&#42;/");

@ -24,19 +24,19 @@
#include "src/compiler/php_generator.h"
#include "src/compiler/php_generator_helpers.h"
using google::protobuf::compiler::ParseGeneratorParameter;
using grpc_php_generator::GenerateFile;
using grpc_php_generator::GetPHPServiceFilename;
using google::protobuf::compiler::ParseGeneratorParameter;
class PHPGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
public:
PHPGrpcGenerator() {}
~PHPGrpcGenerator() {}
bool Generate(const grpc::protobuf::FileDescriptor *file,
const grpc::string &parameter,
grpc::protobuf::compiler::GeneratorContext *context,
grpc::string *error) const {
bool Generate(const grpc::protobuf::FileDescriptor* file,
const grpc::string& parameter,
grpc::protobuf::compiler::GeneratorContext* context,
grpc::string* error) const {
if (file->service_count() == 0) {
return true;
}
@ -71,7 +71,7 @@ class PHPGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
}
};
int main(int argc, char *argv[]) {
int main(int argc, char* argv[]) {
PHPGrpcGenerator generator;
return grpc::protobuf::compiler::PluginMain(argc, argv, &generator);
}

@ -29,14 +29,14 @@
// Get leading or trailing comments in a string.
template <typename DescriptorType>
inline grpc::string GetCommentsHelper(const DescriptorType *desc, bool leading,
const grpc::string &prefix) {
inline grpc::string GetCommentsHelper(const DescriptorType* desc, bool leading,
const grpc::string& prefix) {
return grpc_generator::GetPrefixedComments(desc, leading, prefix);
}
class ProtoBufMethod : public grpc_generator::Method {
public:
ProtoBufMethod(const grpc::protobuf::MethodDescriptor *method)
ProtoBufMethod(const grpc::protobuf::MethodDescriptor* method)
: method_(method) {}
grpc::string name() const { return method_->name(); }
@ -55,7 +55,7 @@ class ProtoBufMethod : public grpc_generator::Method {
return method_->output_type()->file()->name();
}
bool get_module_and_message_path_input(grpc::string *str,
bool get_module_and_message_path_input(grpc::string* str,
grpc::string generator_file_name,
bool generate_in_pb2_grpc,
grpc::string import_prefix) const {
@ -64,7 +64,7 @@ class ProtoBufMethod : public grpc_generator::Method {
import_prefix);
}
bool get_module_and_message_path_output(grpc::string *str,
bool get_module_and_message_path_output(grpc::string* str,
grpc::string generator_file_name,
bool generate_in_pb2_grpc,
grpc::string import_prefix) const {
@ -98,12 +98,12 @@ class ProtoBufMethod : public grpc_generator::Method {
}
private:
const grpc::protobuf::MethodDescriptor *method_;
const grpc::protobuf::MethodDescriptor* method_;
};
class ProtoBufService : public grpc_generator::Service {
public:
ProtoBufService(const grpc::protobuf::ServiceDescriptor *service)
ProtoBufService(const grpc::protobuf::ServiceDescriptor* service)
: service_(service) {}
grpc::string name() const { return service_->name(); }
@ -127,20 +127,20 @@ class ProtoBufService : public grpc_generator::Service {
}
private:
const grpc::protobuf::ServiceDescriptor *service_;
const grpc::protobuf::ServiceDescriptor* service_;
};
class ProtoBufPrinter : public grpc_generator::Printer {
public:
ProtoBufPrinter(grpc::string *str)
ProtoBufPrinter(grpc::string* str)
: output_stream_(str), printer_(&output_stream_, '$') {}
void Print(const std::map<grpc::string, grpc::string> &vars,
const char *string_template) {
void Print(const std::map<grpc::string, grpc::string>& vars,
const char* string_template) {
printer_.Print(vars, string_template);
}
void Print(const char *string) { printer_.Print(string); }
void Print(const char* string) { printer_.Print(string); }
void Indent() { printer_.Indent(); }
void Outdent() { printer_.Outdent(); }
@ -151,7 +151,7 @@ class ProtoBufPrinter : public grpc_generator::Printer {
class ProtoBufFile : public grpc_generator::File {
public:
ProtoBufFile(const grpc::protobuf::FileDescriptor *file) : file_(file) {}
ProtoBufFile(const grpc::protobuf::FileDescriptor* file) : file_(file) {}
grpc::string filename() const { return file_->name(); }
grpc::string filename_without_ext() const {
@ -172,7 +172,7 @@ class ProtoBufFile : public grpc_generator::File {
}
std::unique_ptr<grpc_generator::Printer> CreatePrinter(
grpc::string *str) const {
grpc::string* str) const {
return std::unique_ptr<grpc_generator::Printer>(new ProtoBufPrinter(str));
}
@ -189,7 +189,7 @@ class ProtoBufFile : public grpc_generator::File {
}
private:
const grpc::protobuf::FileDescriptor *file_;
const grpc::protobuf::FileDescriptor* file_;
};
#endif // GRPC_INTERNAL_COMPILER_PROTOBUF_PLUGIN_H
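ProtoBufPrinter above adapts protobuf's io::Printer (writing into a std::string through a StringOutputStream, with '$'-delimited variables) to the generator-neutral grpc_generator::Printer interface, so the per-language generators never include protobuf headers directly. Below is a toy sketch of that adapter shape, deliberately independent of protobuf: every class name is illustrative, and the naive '$var$' substitution only stands in for the real io::Printer.

#include <iostream>
#include <map>
#include <memory>
#include <string>

// Language-neutral printing interface, analogous to grpc_generator::Printer.
struct Printer {
  virtual ~Printer() {}
  virtual void Print(const std::map<std::string, std::string>& vars,
                     const char* tmpl) = 0;
};

// Backs the interface with a plain string plus "$var$" substitution.
class StringPrinter : public Printer {
 public:
  explicit StringPrinter(std::string* out) : out_(out) {}
  void Print(const std::map<std::string, std::string>& vars,
             const char* tmpl) override {
    std::string s(tmpl);
    size_t start;
    while ((start = s.find('$')) != std::string::npos) {
      size_t end = s.find('$', start + 1);
      if (end == std::string::npos) break;
      const std::string key = s.substr(start + 1, end - start - 1);
      auto it = vars.find(key);
      s.replace(start, end - start + 1, it == vars.end() ? "" : it->second);
    }
    out_->append(s);
  }

 private:
  std::string* out_;
};

int main() {
  std::string code;
  std::unique_ptr<Printer> printer(new StringPrinter(&code));
  printer->Print({{"Service", "Greeter"}}, "class $Service$Client {}\n");
  std::cout << code;  // class GreeterClient {}
}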

@ -45,9 +45,9 @@ using std::make_pair;
using std::map;
using std::pair;
using std::replace;
using std::set;
using std::tuple;
using std::vector;
using std::set;
namespace grpc_python_generator {

@ -29,9 +29,6 @@
#include "src/compiler/python_generator.h"
#include "src/compiler/python_private_generator.h"
using std::vector;
using grpc_generator::StringReplace;
using grpc_generator::StripProto;
using grpc::protobuf::Descriptor;
using grpc::protobuf::FileDescriptor;
using grpc::protobuf::MethodDescriptor;
@ -41,6 +38,9 @@ using grpc::protobuf::io::CodedOutputStream;
using grpc::protobuf::io::Printer;
using grpc::protobuf::io::StringOutputStream;
using grpc::protobuf::io::ZeroCopyOutputStream;
using grpc_generator::StringReplace;
using grpc_generator::StripProto;
using std::vector;
namespace grpc_python_generator {

@ -27,8 +27,8 @@
#include "src/compiler/ruby_generator_string-inl.h"
using grpc::protobuf::FileDescriptor;
using grpc::protobuf::ServiceDescriptor;
using grpc::protobuf::MethodDescriptor;
using grpc::protobuf::ServiceDescriptor;
using grpc::protobuf::io::Printer;
using grpc::protobuf::io::StringOutputStream;
using std::map;
@ -38,8 +38,8 @@ namespace grpc_ruby_generator {
namespace {
// Prints out the method using the ruby gRPC DSL.
void PrintMethod(const MethodDescriptor *method, const grpc::string &package,
Printer *out) {
void PrintMethod(const MethodDescriptor* method, const grpc::string& package,
Printer* out) {
grpc::string input_type =
RubyTypeOf(method->input_type()->full_name(), package);
if (method->client_streaming()) {
@ -51,7 +51,11 @@ void PrintMethod(const MethodDescriptor *method, const grpc::string &package,
output_type = "stream(" + output_type + ")";
}
std::map<grpc::string, grpc::string> method_vars = ListToDict({
"mth.name", method->name(), "input.type", input_type, "output.type",
"mth.name",
method->name(),
"input.type",
input_type,
"output.type",
output_type,
});
out->Print(GetRubyComments(method, true).c_str());
@ -60,15 +64,16 @@ void PrintMethod(const MethodDescriptor *method, const grpc::string &package,
}
// Prints out the service using the ruby gRPC DSL.
void PrintService(const ServiceDescriptor *service, const grpc::string &package,
Printer *out) {
void PrintService(const ServiceDescriptor* service, const grpc::string& package,
Printer* out) {
if (service->method_count() == 0) {
return;
}
// Begin the service module
std::map<grpc::string, grpc::string> module_vars = ListToDict({
"module.name", CapitalizeFirst(service->name()),
"module.name",
CapitalizeFirst(service->name()),
});
out->Print(module_vars, "module $module.name$\n");
out->Indent();
@ -119,7 +124,7 @@ char ToUpper(char ch) { return IsLower(ch) ? (ch - 'a' + 'A') : ch; }
// names must be PascalCased.
//
// foo_bar_baz -> FooBarBaz
grpc::string PackageToModule(const grpc::string &name) {
grpc::string PackageToModule(const grpc::string& name) {
bool next_upper = true;
grpc::string result;
result.reserve(name.size());
@ -141,7 +146,7 @@ grpc::string PackageToModule(const grpc::string &name) {
}
// end copying of protoc generator for ruby code
grpc::string GetServices(const FileDescriptor *file) {
grpc::string GetServices(const FileDescriptor* file) {
grpc::string output;
{
// Scope the output stream so it closes and finalizes output to the string.
@ -157,7 +162,10 @@ grpc::string GetServices(const FileDescriptor *file) {
// Write out a file header.
std::map<grpc::string, grpc::string> header_comment_vars = ListToDict({
"file.name", file->name(), "file.package", file->package(),
"file.name",
file->name(),
"file.package",
file->package(),
});
out.Print("# Generated by the protocol buffer compiler. DO NOT EDIT!\n");
out.Print(header_comment_vars,
@ -175,7 +183,8 @@ grpc::string GetServices(const FileDescriptor *file) {
// that defines the messages used by the service. This is generated by the
// main ruby plugin.
std::map<grpc::string, grpc::string> dep_vars = ListToDict({
"dep.name", MessagesRequireName(file),
"dep.name",
MessagesRequireName(file),
});
out.Print(dep_vars, "require '$dep.name$'\n");
@ -184,7 +193,8 @@ grpc::string GetServices(const FileDescriptor *file) {
std::vector<grpc::string> modules = Split(file->package(), '.');
for (size_t i = 0; i < modules.size(); ++i) {
std::map<grpc::string, grpc::string> module_vars = ListToDict({
"module.name", PackageToModule(modules[i]),
"module.name",
PackageToModule(modules[i]),
});
out.Print(module_vars, "module $module.name$\n");
out.Indent();

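PackageToModule above converts one snake_cased package segment into the PascalCase module name Ruby expects ("foo_bar_baz -> FooBarBaz"), and GetServices then opens one nested module per dot-separated package segment. Below is a self-contained sketch of that conversion; the example package name is made up.

#include <cctype>
#include <iostream>
#include <sstream>
#include <string>

// "foo_bar_baz" -> "FooBarBaz": drop underscores, capitalize what follows.
static std::string PackageToModule(const std::string& name) {
  bool next_upper = true;
  std::string result;
  result.reserve(name.size());
  for (char c : name) {
    if (c == '_') {
      next_upper = true;
    } else {
      result += next_upper
                    ? static_cast<char>(std::toupper(static_cast<unsigned char>(c)))
                    : c;
      next_upper = false;
    }
  }
  return result;
}

int main() {
  const std::string package = "routeguide.v1.route_guide";  // made-up package
  std::stringstream ss(package);
  std::string segment;
  while (std::getline(ss, segment, '.')) {
    std::cout << "module " << PackageToModule(segment) << "\n";
  }
  // module Routeguide / module V1 / module RouteGuide
}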
@ -23,7 +23,7 @@
namespace grpc_ruby_generator {
grpc::string GetServices(const grpc::protobuf::FileDescriptor *file);
grpc::string GetServices(const grpc::protobuf::FileDescriptor* file);
} // namespace grpc_ruby_generator

@ -25,8 +25,8 @@
namespace grpc_ruby_generator {
inline bool ServicesFilename(const grpc::protobuf::FileDescriptor *file,
grpc::string *file_name_or_error) {
inline bool ServicesFilename(const grpc::protobuf::FileDescriptor* file,
grpc::string* file_name_or_error) {
// Get output file name.
static const unsigned proto_suffix_length = 6; // length of ".proto"
if (file->name().size() > proto_suffix_length &&
@ -42,14 +42,14 @@ inline bool ServicesFilename(const grpc::protobuf::FileDescriptor *file,
}
inline grpc::string MessagesRequireName(
const grpc::protobuf::FileDescriptor *file) {
const grpc::protobuf::FileDescriptor* file) {
return Replace(file->name(), ".proto", "_pb");
}
// Get leading or trailing comments in a string. Comment lines start with "# ".
// Leading detached comments are put in front of leading comments.
template <typename DescriptorType>
inline grpc::string GetRubyComments(const DescriptorType *desc, bool leading) {
inline grpc::string GetRubyComments(const DescriptorType* desc, bool leading) {
return grpc_generator::GetPrefixedComments(desc, leading, "#");
}
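The helpers above derive the generated Ruby file's names from the .proto name: the input filename must carry a ".proto" suffix, and the companion messages file is required as "<name>_pb". Below is a small standalone sketch of just those two visible steps; the sample filename is hypothetical.

#include <iostream>
#include <string>

// True only if name ends in ".proto", mirroring the suffix-length check above.
static bool HasProtoSuffix(const std::string& name) {
  static const size_t kSuffixLen = 6;  // length of ".proto"
  return name.size() > kSuffixLen &&
         name.compare(name.size() - kSuffixLen, kSuffixLen, ".proto") == 0;
}

// "<name>.proto" -> "<name>_pb", the require target for the messages file.
static std::string MessagesRequireName(const std::string& proto_file) {
  std::string s = proto_file;
  size_t pos = s.find(".proto");
  if (pos != std::string::npos) s.replace(pos, 6, "_pb");
  return s;
}

int main() {
  const std::string file = "helloworld.proto";  // hypothetical input file
  std::cout << std::boolalpha << HasProtoSuffix(file) << "\n";   // true
  std::cout << "require '" << MessagesRequireName(file) << "'\n";
  // require 'helloworld_pb'
}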

@ -36,7 +36,7 @@ namespace grpc_ruby_generator {
// Converts an initializer list of the form { key0, value0, key1, value1, ... }
// into a map of key* to value*. Is merely a readability helper for later code.
inline std::map<grpc::string, grpc::string> ListToDict(
const initializer_list<grpc::string> &values) {
const initializer_list<grpc::string>& values) {
if (values.size() % 2 != 0) {
std::cerr << "Not every 'key' has a value in `values`." << std::endl;
}

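ListToDict above folds an even-length brace list { key0, value0, key1, value1, ... } into a std::map, which is what keeps the Print(...) call sites in the generators terse. Below is a minimal sketch of the same pairing with no generator dependencies; the sample keys simply echo the "mth.name"/"input.type" variables used earlier in this diff.

#include <initializer_list>
#include <iostream>
#include <map>
#include <string>

// Fold { k0, v0, k1, v1, ... } into a key -> value map.
static std::map<std::string, std::string> ListToDict(
    const std::initializer_list<std::string>& values) {
  if (values.size() % 2 != 0) {
    std::cerr << "Not every 'key' has a value in `values`.\n";
  }
  std::map<std::string, std::string> dict;
  for (auto it = values.begin(); it + 1 < values.end(); it += 2) {
    dict.emplace(*it, *(it + 1));  // even position = key, odd position = value
  }
  return dict;
}

int main() {
  auto vars =
      ListToDict({"mth.name", "SayHello", "input.type", "HelloRequest"});
  for (const auto& kv : vars) {
    std::cout << kv.first << " => " << kv.second << "\n";
  }
}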
@ -31,8 +31,8 @@ using std::transform;
namespace grpc_ruby_generator {
// Split splits a string using char into elems.
inline std::vector<grpc::string> &Split(const grpc::string &s, char delim,
std::vector<grpc::string> *elems) {
inline std::vector<grpc::string>& Split(const grpc::string& s, char delim,
std::vector<grpc::string>* elems) {
std::stringstream ss(s);
grpc::string item;
while (getline(ss, item, delim)) {
@ -42,15 +42,15 @@ inline std::vector<grpc::string> &Split(const grpc::string &s, char delim,
}
// Split splits a string using char, returning the result in a vector.
inline std::vector<grpc::string> Split(const grpc::string &s, char delim) {
inline std::vector<grpc::string> Split(const grpc::string& s, char delim) {
std::vector<grpc::string> elems;
Split(s, delim, &elems);
return elems;
}
// Replace replaces from with to in s.
inline grpc::string Replace(grpc::string s, const grpc::string &from,
const grpc::string &to) {
inline grpc::string Replace(grpc::string s, const grpc::string& from,
const grpc::string& to) {
size_t start_pos = s.find(from);
if (start_pos == grpc::string::npos) {
return s;
@ -60,8 +60,8 @@ inline grpc::string Replace(grpc::string s, const grpc::string &from,
}
// ReplaceAll replaces all instances of search with replace in s.
inline grpc::string ReplaceAll(grpc::string s, const grpc::string &search,
const grpc::string &replace) {
inline grpc::string ReplaceAll(grpc::string s, const grpc::string& search,
const grpc::string& replace) {
size_t pos = 0;
while ((pos = s.find(search, pos)) != grpc::string::npos) {
s.replace(pos, search.length(), replace);
@ -71,8 +71,8 @@ inline grpc::string ReplaceAll(grpc::string s, const grpc::string &search,
}
// ReplacePrefix replaces from with to in s if from is a prefix of s.
inline bool ReplacePrefix(grpc::string *s, const grpc::string &from,
const grpc::string &to) {
inline bool ReplacePrefix(grpc::string* s, const grpc::string& from,
const grpc::string& to) {
size_t start_pos = s->find(from);
if (start_pos == grpc::string::npos || start_pos != 0) {
return false;
@ -91,8 +91,8 @@ inline grpc::string CapitalizeFirst(grpc::string s) {
}
// RubyTypeOf updates a proto type to the required ruby equivalent.
inline grpc::string RubyTypeOf(const grpc::string &a_type,
const grpc::string &package) {
inline grpc::string RubyTypeOf(const grpc::string& a_type,
const grpc::string& package) {
grpc::string res(a_type);
ReplacePrefix(&res, package, ""); // remove the leading package if present
ReplacePrefix(&res, ".", ""); // remove the leading . (no package)

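RubyTypeOf above strips the file's package prefix (and any leading '.') from a fully qualified proto type name so the generated Ruby can refer to the type locally; the rest of the function, outside this hunk, goes on to rewrite the remaining dots. Below is a standalone sketch of only the prefix-stripping shown here; the sample type and package are made up.

#include <iostream>
#include <string>

// Replace from with to in *s only when from is a prefix of *s.
static bool ReplacePrefix(std::string* s, const std::string& from,
                          const std::string& to) {
  if (s->compare(0, from.size(), from) != 0) return false;  // not a prefix
  s->replace(0, from.size(), to);
  return true;
}

static std::string RubyTypeOf(const std::string& a_type,
                              const std::string& package) {
  std::string res(a_type);
  ReplacePrefix(&res, package, "");  // remove the leading package if present
  ReplacePrefix(&res, ".", "");      // remove the leading . (no package)
  return res;
}

int main() {
  std::cout << RubyTypeOf("helloworld.HelloRequest", "helloworld") << "\n";
  // prints "HelloRequest"
}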
@ -29,10 +29,10 @@ class RubyGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
RubyGrpcGenerator() {}
~RubyGrpcGenerator() {}
bool Generate(const grpc::protobuf::FileDescriptor *file,
const grpc::string &parameter,
grpc::protobuf::compiler::GeneratorContext *context,
grpc::string *error) const {
bool Generate(const grpc::protobuf::FileDescriptor* file,
const grpc::string& parameter,
grpc::protobuf::compiler::GeneratorContext* context,
grpc::string* error) const {
grpc::string code = grpc_ruby_generator::GetServices(file);
if (code.size() == 0) {
return true; // don't generate a file if there are no services
@ -51,7 +51,7 @@ class RubyGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
}
};
int main(int argc, char *argv[]) {
int main(int argc, char* argv[]) {
RubyGrpcGenerator generator;
return grpc::protobuf::compiler::PluginMain(argc, argv, &generator);
}

@ -56,10 +56,10 @@ struct Method : public CommentHolder {
virtual grpc::string output_type_name() const = 0;
virtual bool get_module_and_message_path_input(
grpc::string *str, grpc::string generator_file_name,
grpc::string* str, grpc::string generator_file_name,
bool generate_in_pb2_grpc, grpc::string import_prefix) const = 0;
virtual bool get_module_and_message_path_output(
grpc::string *str, grpc::string generator_file_name,
grpc::string* str, grpc::string generator_file_name,
bool generate_in_pb2_grpc, grpc::string import_prefix) const = 0;
virtual grpc::string get_input_type_name() const = 0;
@ -83,9 +83,9 @@ struct Service : public CommentHolder {
struct Printer {
virtual ~Printer() {}
virtual void Print(const std::map<grpc::string, grpc::string> &vars,
const char *template_string) = 0;
virtual void Print(const char *string) = 0;
virtual void Print(const std::map<grpc::string, grpc::string>& vars,
const char* template_string) = 0;
virtual void Print(const char* string) = 0;
virtual void Indent() = 0;
virtual void Outdent() = 0;
};
@ -104,7 +104,7 @@ struct File : public CommentHolder {
virtual int service_count() const = 0;
virtual std::unique_ptr<const Service> service(int i) const = 0;
virtual std::unique_ptr<Printer> CreatePrinter(grpc::string *str) const = 0;
virtual std::unique_ptr<Printer> CreatePrinter(grpc::string* str) const = 0;
};
} // namespace grpc_generator

@ -21,7 +21,7 @@
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/call.h"
void grpc_census_call_set_context(grpc_call *call, census_context *context) {
void grpc_census_call_set_context(grpc_call* call, census_context* context) {
GRPC_API_TRACE("grpc_census_call_set_context(call=%p, census_context=%p)", 2,
(call, context));
if (context != NULL) {
@ -29,7 +29,7 @@ void grpc_census_call_set_context(grpc_call *call, census_context *context) {
}
}
census_context *grpc_census_call_get_context(grpc_call *call) {
census_context* grpc_census_call_get_context(grpc_call* call) {
GRPC_API_TRACE("grpc_census_call_get_context(call=%p)", 1, (call));
return (census_context *)grpc_call_context_get(call, GRPC_CONTEXT_TRACING);
return (census_context*)grpc_call_context_get(call, GRPC_CONTEXT_TRACING);
}

@ -29,9 +29,9 @@
#include "src/core/lib/surface/completion_queue.h"
grpc_connectivity_state grpc_channel_check_connectivity_state(
grpc_channel *channel, int try_to_connect) {
grpc_channel* channel, int try_to_connect) {
/* forward through to the underlying client channel */
grpc_channel_element *client_channel_elem =
grpc_channel_element* client_channel_elem =
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_connectivity_state state;
@ -66,15 +66,15 @@ typedef struct {
grpc_closure watcher_timer_init;
grpc_timer alarm;
grpc_connectivity_state state;
grpc_completion_queue *cq;
grpc_completion_queue* cq;
grpc_cq_completion completion_storage;
grpc_channel *channel;
grpc_error *error;
void *tag;
grpc_channel* channel;
grpc_error* error;
void* tag;
} state_watcher;
static void delete_state_watcher(grpc_exec_ctx *exec_ctx, state_watcher *w) {
grpc_channel_element *client_channel_elem = grpc_channel_stack_last_element(
static void delete_state_watcher(grpc_exec_ctx* exec_ctx, state_watcher* w) {
grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
grpc_channel_get_channel_stack(w->channel));
if (client_channel_elem->filter == &grpc_client_channel_filter) {
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, w->channel,
@ -86,10 +86,10 @@ static void delete_state_watcher(grpc_exec_ctx *exec_ctx, state_watcher *w) {
gpr_free(w);
}
static void finished_completion(grpc_exec_ctx *exec_ctx, void *pw,
grpc_cq_completion *ignored) {
static void finished_completion(grpc_exec_ctx* exec_ctx, void* pw,
grpc_cq_completion* ignored) {
bool should_delete = false;
state_watcher *w = (state_watcher *)pw;
state_watcher* w = (state_watcher*)pw;
gpr_mu_lock(&w->mu);
switch (w->phase) {
case WAITING:
@ -106,12 +106,12 @@ static void finished_completion(grpc_exec_ctx *exec_ctx, void *pw,
}
}
static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w,
bool due_to_completion, grpc_error *error) {
static void partly_done(grpc_exec_ctx* exec_ctx, state_watcher* w,
bool due_to_completion, grpc_error* error) {
if (due_to_completion) {
grpc_timer_cancel(exec_ctx, &w->alarm);
} else {
grpc_channel_element *client_channel_elem = grpc_channel_stack_last_element(
grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
grpc_channel_get_channel_stack(w->channel));
grpc_client_channel_watch_connectivity_state(
exec_ctx, client_channel_elem,
@ -161,31 +161,31 @@ static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w,
GRPC_ERROR_UNREF(error);
}
static void watch_complete(grpc_exec_ctx *exec_ctx, void *pw,
grpc_error *error) {
partly_done(exec_ctx, (state_watcher *)pw, true, GRPC_ERROR_REF(error));
static void watch_complete(grpc_exec_ctx* exec_ctx, void* pw,
grpc_error* error) {
partly_done(exec_ctx, (state_watcher*)pw, true, GRPC_ERROR_REF(error));
}
static void timeout_complete(grpc_exec_ctx *exec_ctx, void *pw,
grpc_error *error) {
partly_done(exec_ctx, (state_watcher *)pw, false, GRPC_ERROR_REF(error));
static void timeout_complete(grpc_exec_ctx* exec_ctx, void* pw,
grpc_error* error) {
partly_done(exec_ctx, (state_watcher*)pw, false, GRPC_ERROR_REF(error));
}
int grpc_channel_num_external_connectivity_watchers(grpc_channel *channel) {
grpc_channel_element *client_channel_elem =
int grpc_channel_num_external_connectivity_watchers(grpc_channel* channel) {
grpc_channel_element* client_channel_elem =
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
return grpc_client_channel_num_external_connectivity_watchers(
client_channel_elem);
}
typedef struct watcher_timer_init_arg {
state_watcher *w;
state_watcher* w;
gpr_timespec deadline;
} watcher_timer_init_arg;
static void watcher_timer_init(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_ignored) {
watcher_timer_init_arg *wa = (watcher_timer_init_arg *)arg;
static void watcher_timer_init(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error_ignored) {
watcher_timer_init_arg* wa = (watcher_timer_init_arg*)arg;
grpc_timer_init(exec_ctx, &wa->w->alarm,
grpc_timespec_to_millis_round_up(wa->deadline),
@ -193,19 +193,19 @@ static void watcher_timer_init(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(wa);
}
int grpc_channel_support_connectivity_watcher(grpc_channel *channel) {
grpc_channel_element *client_channel_elem =
int grpc_channel_support_connectivity_watcher(grpc_channel* channel) {
grpc_channel_element* client_channel_elem =
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
return client_channel_elem->filter != &grpc_client_channel_filter ? 0 : 1;
}
void grpc_channel_watch_connectivity_state(
grpc_channel *channel, grpc_connectivity_state last_observed_state,
gpr_timespec deadline, grpc_completion_queue *cq, void *tag) {
grpc_channel_element *client_channel_elem =
grpc_channel* channel, grpc_connectivity_state last_observed_state,
gpr_timespec deadline, grpc_completion_queue* cq, void* tag) {
grpc_channel_element* client_channel_elem =
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
state_watcher *w = (state_watcher *)gpr_malloc(sizeof(*w));
state_watcher* w = (state_watcher*)gpr_malloc(sizeof(*w));
GRPC_API_TRACE(
"grpc_channel_watch_connectivity_state("
@ -213,8 +213,9 @@ void grpc_channel_watch_connectivity_state(
"deadline=gpr_timespec { tv_sec: %" PRId64
", tv_nsec: %d, clock_type: %d }, "
"cq=%p, tag=%p)",
7, (channel, (int)last_observed_state, deadline.tv_sec, deadline.tv_nsec,
(int)deadline.clock_type, cq, tag));
7,
(channel, (int)last_observed_state, deadline.tv_sec, deadline.tv_nsec,
(int)deadline.clock_type, cq, tag));
GPR_ASSERT(grpc_cq_begin_op(cq, tag));
@ -230,8 +231,8 @@ void grpc_channel_watch_connectivity_state(
w->channel = channel;
w->error = NULL;
watcher_timer_init_arg *wa =
(watcher_timer_init_arg *)gpr_malloc(sizeof(watcher_timer_init_arg));
watcher_timer_init_arg* wa =
(watcher_timer_init_arg*)gpr_malloc(sizeof(watcher_timer_init_arg));
wa->w = w;
wa->deadline = deadline;
GRPC_CLOSURE_INIT(&w->watcher_timer_init, watcher_timer_init, wa,

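The connectivity watcher above races two events, the state-change notification and the deadline alarm, and partly_done(...) cancels whichever side did not fire; the watcher is only torn down once both sides have reported in. Below is a toy model of that "both halves must check in before cleanup" rule, using a plain std::atomic counter in place of the exec_ctx/combiner/completion-queue machinery; none of the names below are the real gRPC API.

#include <atomic>
#include <iostream>
#include <string>

struct Watcher {
  std::atomic<int> pending{2};      // the timer path and the watch path
  std::string result = "unknown";
};

static void partly_done(Watcher* w, bool due_to_completion) {
  if (due_to_completion) {
    w->result = "state changed";       // real code: grpc_timer_cancel(...)
  } else if (w->result == "unknown") {
    w->result = "deadline exceeded";   // real code: cancel the pending watch
  }
  if (w->pending.fetch_sub(1) == 1) {
    // Second (last) caller: only now is it safe to publish and free.
    std::cout << "watch finished: " << w->result << "\n";
    delete w;
  }
}

int main() {
  Watcher* w = new Watcher;
  partly_done(w, /*due_to_completion=*/true);   // the watch fired first...
  partly_done(w, /*due_to_completion=*/false);  // ...then the cancelled timer
}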
@ -76,24 +76,24 @@ typedef struct {
wait_for_ready_value wait_for_ready;
} method_parameters;
static method_parameters *method_parameters_ref(
method_parameters *method_params) {
static method_parameters* method_parameters_ref(
method_parameters* method_params) {
gpr_ref(&method_params->refs);
return method_params;
}
static void method_parameters_unref(method_parameters *method_params) {
static void method_parameters_unref(method_parameters* method_params) {
if (gpr_unref(&method_params->refs)) {
gpr_free(method_params);
}
}
static void method_parameters_free(grpc_exec_ctx *exec_ctx, void *value) {
method_parameters_unref((method_parameters *)value);
static void method_parameters_free(grpc_exec_ctx* exec_ctx, void* value) {
method_parameters_unref((method_parameters*)value);
}
static bool parse_wait_for_ready(grpc_json *field,
wait_for_ready_value *wait_for_ready) {
static bool parse_wait_for_ready(grpc_json* field,
wait_for_ready_value* wait_for_ready) {
if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) {
return false;
}
@ -102,13 +102,13 @@ static bool parse_wait_for_ready(grpc_json *field,
return true;
}
static bool parse_timeout(grpc_json *field, grpc_millis *timeout) {
static bool parse_timeout(grpc_json* field, grpc_millis* timeout) {
if (field->type != GRPC_JSON_STRING) return false;
size_t len = strlen(field->value);
if (field->value[len - 1] != 's') return false;
char *buf = gpr_strdup(field->value);
char* buf = gpr_strdup(field->value);
buf[len - 1] = '\0'; // Remove trailing 's'.
char *decimal_point = strchr(buf, '.');
char* decimal_point = strchr(buf, '.');
int nanos = 0;
if (decimal_point != NULL) {
*decimal_point = '\0';
@ -141,10 +141,10 @@ static bool parse_timeout(grpc_json *field, grpc_millis *timeout) {
return true;
}
static void *method_parameters_create_from_json(const grpc_json *json) {
static void* method_parameters_create_from_json(const grpc_json* json) {
wait_for_ready_value wait_for_ready = WAIT_FOR_READY_UNSET;
grpc_millis timeout = 0;
for (grpc_json *field = json->child; field != NULL; field = field->next) {
for (grpc_json* field = json->child; field != NULL; field = field->next) {
if (field->key == NULL) continue;
if (strcmp(field->key, "waitForReady") == 0) {
if (wait_for_ready != WAIT_FOR_READY_UNSET) return NULL; // Duplicate.
@ -154,8 +154,8 @@ static void *method_parameters_create_from_json(const grpc_json *json) {
if (!parse_timeout(field, &timeout)) return NULL;
}
}
method_parameters *value =
(method_parameters *)gpr_malloc(sizeof(method_parameters));
method_parameters* value =
(method_parameters*)gpr_malloc(sizeof(method_parameters));
gpr_ref_init(&value->refs, 1);
value->timeout = timeout;
value->wait_for_ready = wait_for_ready;
@ -170,24 +170,24 @@ struct external_connectivity_watcher;
typedef struct client_channel_channel_data {
/** resolver for this channel */
grpc_resolver *resolver;
grpc_resolver* resolver;
/** have we started resolving this channel */
bool started_resolving;
/** is deadline checking enabled? */
bool deadline_checking_enabled;
/** client channel factory */
grpc_client_channel_factory *client_channel_factory;
grpc_client_channel_factory* client_channel_factory;
/** combiner protecting all variables below in this data structure */
grpc_combiner *combiner;
grpc_combiner* combiner;
/** currently active load balancer */
grpc_lb_policy *lb_policy;
grpc_lb_policy* lb_policy;
/** retry throttle data */
grpc_server_retry_throttle_data *retry_throttle_data;
grpc_server_retry_throttle_data* retry_throttle_data;
/** maps method names to method_parameters structs */
grpc_slice_hash_table *method_params_table;
grpc_slice_hash_table* method_params_table;
/** incoming resolver result - set by resolver.next() */
grpc_channel_args *resolver_result;
grpc_channel_args* resolver_result;
/** a list of closures that are all waiting for resolver result to come in */
grpc_closure_list waiting_for_resolver_result_closures;
/** resolver callback */
@ -197,42 +197,42 @@ typedef struct client_channel_channel_data {
/** when an lb_policy arrives, should we try to exit idle */
bool exit_idle_when_lb_policy_arrives;
/** owning stack */
grpc_channel_stack *owning_stack;
grpc_channel_stack* owning_stack;
/** interested parties (owned) */
grpc_pollset_set *interested_parties;
grpc_pollset_set* interested_parties;
/* external_connectivity_watcher_list head is guarded by its own mutex, since
* counts need to be grabbed immediately without polling on a cq */
gpr_mu external_connectivity_watcher_list_mu;
struct external_connectivity_watcher *external_connectivity_watcher_list_head;
struct external_connectivity_watcher* external_connectivity_watcher_list_head;
/* the following properties are guarded by a mutex since APIs require them
to be instantaneously available */
gpr_mu info_mu;
char *info_lb_policy_name;
char* info_lb_policy_name;
/** service config in JSON form */
char *info_service_config_json;
char* info_service_config_json;
} channel_data;
/** We create one watcher for each new lb_policy that is returned from a
resolver, to watch for state changes from the lb_policy. When a state
change is seen, we update the channel, and create a new watcher. */
typedef struct {
channel_data *chand;
channel_data* chand;
grpc_closure on_changed;
grpc_connectivity_state state;
grpc_lb_policy *lb_policy;
grpc_lb_policy* lb_policy;
} lb_policy_connectivity_watcher;
static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
grpc_lb_policy *lb_policy,
static void watch_lb_policy_locked(grpc_exec_ctx* exec_ctx, channel_data* chand,
grpc_lb_policy* lb_policy,
grpc_connectivity_state current_state);
static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
channel_data *chand,
static void set_channel_connectivity_state_locked(grpc_exec_ctx* exec_ctx,
channel_data* chand,
grpc_connectivity_state state,
grpc_error *error,
const char *reason) {
grpc_error* error,
const char* reason) {
/* TODO: Improve failure handling:
* - Make it possible for policies to return GRPC_CHANNEL_TRANSIENT_FAILURE.
* - Hand over pending picks from old policies during the switch that happens
@ -259,9 +259,9 @@ static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
reason);
}
static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
lb_policy_connectivity_watcher *w = (lb_policy_connectivity_watcher *)arg;
static void on_lb_policy_state_changed_locked(grpc_exec_ctx* exec_ctx,
void* arg, grpc_error* error) {
lb_policy_connectivity_watcher* w = (lb_policy_connectivity_watcher*)arg;
grpc_connectivity_state publish_state = w->state;
/* check if the notification is for the latest policy */
if (w->lb_policy == w->chand->lb_policy) {
@ -285,11 +285,11 @@ static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
gpr_free(w);
}
static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
grpc_lb_policy *lb_policy,
static void watch_lb_policy_locked(grpc_exec_ctx* exec_ctx, channel_data* chand,
grpc_lb_policy* lb_policy,
grpc_connectivity_state current_state) {
lb_policy_connectivity_watcher *w =
(lb_policy_connectivity_watcher *)gpr_malloc(sizeof(*w));
lb_policy_connectivity_watcher* w =
(lb_policy_connectivity_watcher*)gpr_malloc(sizeof(*w));
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
w->chand = chand;
GRPC_CLOSURE_INIT(&w->on_changed, on_lb_policy_state_changed_locked, w,
@ -300,8 +300,8 @@ static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
&w->on_changed);
}
static void start_resolving_locked(grpc_exec_ctx *exec_ctx,
channel_data *chand) {
static void start_resolving_locked(grpc_exec_ctx* exec_ctx,
channel_data* chand) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p: starting name resolution", chand);
}
@ -313,19 +313,19 @@ static void start_resolving_locked(grpc_exec_ctx *exec_ctx,
}
typedef struct {
char *server_name;
grpc_server_retry_throttle_data *retry_throttle_data;
char* server_name;
grpc_server_retry_throttle_data* retry_throttle_data;
} service_config_parsing_state;
static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
service_config_parsing_state *parsing_state =
(service_config_parsing_state *)arg;
static void parse_retry_throttle_params(const grpc_json* field, void* arg) {
service_config_parsing_state* parsing_state =
(service_config_parsing_state*)arg;
if (strcmp(field->key, "retryThrottling") == 0) {
if (parsing_state->retry_throttle_data != NULL) return; // Duplicate.
if (field->type != GRPC_JSON_OBJECT) return;
int max_milli_tokens = 0;
int milli_token_ratio = 0;
for (grpc_json *sub_field = field->child; sub_field != NULL;
for (grpc_json* sub_field = field->child; sub_field != NULL;
sub_field = sub_field->next) {
if (sub_field->key == NULL) return;
if (strcmp(sub_field->key, "maxTokens") == 0) {
@ -341,7 +341,7 @@ static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
size_t whole_len = strlen(sub_field->value);
uint32_t multiplier = 1;
uint32_t decimal_value = 0;
const char *decimal_point = strchr(sub_field->value, '.');
const char* decimal_point = strchr(sub_field->value, '.');
if (decimal_point != NULL) {
whole_len = (size_t)(decimal_point - sub_field->value);
multiplier = 1000;
@ -372,25 +372,25 @@ static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
}
}
static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
channel_data *chand = (channel_data *)arg;
static void on_resolver_result_changed_locked(grpc_exec_ctx* exec_ctx,
void* arg, grpc_error* error) {
channel_data* chand = (channel_data*)arg;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p: got resolver result: error=%s", chand,
grpc_error_string(error));
}
// Extract the following fields from the resolver result, if non-NULL.
bool lb_policy_updated = false;
char *lb_policy_name_dup = NULL;
char* lb_policy_name_dup = NULL;
bool lb_policy_name_changed = false;
grpc_lb_policy *new_lb_policy = NULL;
char *service_config_json = NULL;
grpc_server_retry_throttle_data *retry_throttle_data = NULL;
grpc_slice_hash_table *method_params_table = NULL;
grpc_lb_policy* new_lb_policy = NULL;
char* service_config_json = NULL;
grpc_server_retry_throttle_data* retry_throttle_data = NULL;
grpc_slice_hash_table* method_params_table = NULL;
if (chand->resolver_result != NULL) {
// Find LB policy name.
const char *lb_policy_name = NULL;
const grpc_arg *channel_arg =
const char* lb_policy_name = NULL;
const grpc_arg* channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
if (channel_arg != NULL) {
GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
@ -401,8 +401,8 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
if (channel_arg != NULL && channel_arg->type == GRPC_ARG_POINTER) {
grpc_lb_addresses *addresses =
(grpc_lb_addresses *)channel_arg->value.pointer.p;
grpc_lb_addresses* addresses =
(grpc_lb_addresses*)channel_arg->value.pointer.p;
bool found_balancer_address = false;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) {
@ -453,14 +453,14 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
if (channel_arg != NULL) {
GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
service_config_json = gpr_strdup(channel_arg->value.string);
grpc_service_config *service_config =
grpc_service_config* service_config =
grpc_service_config_create(service_config_json);
if (service_config != NULL) {
channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVER_URI);
GPR_ASSERT(channel_arg != NULL);
GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
grpc_uri *uri =
grpc_uri* uri =
grpc_uri_parse(exec_ctx, channel_arg->value.string, true);
GPR_ASSERT(uri->path[0] != '\0');
service_config_parsing_state parsing_state;
@ -563,7 +563,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
&chand->waiting_for_resolver_result_closures);
} else { // Not shutting down.
grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
grpc_error *state_error =
grpc_error* state_error =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
if (new_lb_policy != NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
@ -595,12 +595,12 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
}
}
static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_ignored) {
grpc_transport_op *op = (grpc_transport_op *)arg;
grpc_channel_element *elem =
(grpc_channel_element *)op->handler_private.extra_arg;
channel_data *chand = (channel_data *)elem->channel_data;
static void start_transport_op_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error_ignored) {
grpc_transport_op* op = (grpc_transport_op*)arg;
grpc_channel_element* elem =
(grpc_channel_element*)op->handler_private.extra_arg;
channel_data* chand = (channel_data*)elem->channel_data;
if (op->on_connectivity_state_change != NULL) {
grpc_connectivity_state_notify_on_state_change(
@ -651,10 +651,10 @@ static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_CLOSURE_SCHED(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
}
static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_transport_op *op) {
channel_data *chand = (channel_data *)elem->channel_data;
static void cc_start_transport_op(grpc_exec_ctx* exec_ctx,
grpc_channel_element* elem,
grpc_transport_op* op) {
channel_data* chand = (channel_data*)elem->channel_data;
GPR_ASSERT(op->set_accept_stream == false);
if (op->bind_pollset != NULL) {
@ -671,10 +671,10 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_NONE);
}
static void cc_get_channel_info(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
const grpc_channel_info *info) {
channel_data *chand = (channel_data *)elem->channel_data;
static void cc_get_channel_info(grpc_exec_ctx* exec_ctx,
grpc_channel_element* elem,
const grpc_channel_info* info) {
channel_data* chand = (channel_data*)elem->channel_data;
gpr_mu_lock(&chand->info_mu);
if (info->lb_policy_name != NULL) {
*info->lb_policy_name = chand->info_lb_policy_name == NULL
@ -691,10 +691,10 @@ static void cc_get_channel_info(grpc_exec_ctx *exec_ctx,
}
/* Constructor for channel_data */
static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
channel_data *chand = (channel_data *)elem->channel_data;
static grpc_error* cc_init_channel_elem(grpc_exec_ctx* exec_ctx,
grpc_channel_element* elem,
grpc_channel_element_args* args) {
channel_data* chand = (channel_data*)elem->channel_data;
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
// Initialize data members.
@ -715,7 +715,7 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
"client_channel");
grpc_client_channel_start_backup_polling(exec_ctx, chand->interested_parties);
// Record client channel factory.
const grpc_arg *arg = grpc_channel_args_find(args->channel_args,
const grpc_arg* arg = grpc_channel_args_find(args->channel_args,
GRPC_ARG_CLIENT_CHANNEL_FACTORY);
if (arg == NULL) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
@ -726,9 +726,9 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
"client channel factory arg must be a pointer");
}
grpc_client_channel_factory_ref(
(grpc_client_channel_factory *)arg->value.pointer.p);
(grpc_client_channel_factory*)arg->value.pointer.p);
chand->client_channel_factory =
(grpc_client_channel_factory *)arg->value.pointer.p;
(grpc_client_channel_factory*)arg->value.pointer.p;
// Get server name to resolve, using proxy mapper if needed.
arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI);
if (arg == NULL) {
@ -739,8 +739,8 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"server uri arg must be a string");
}
char *proxy_name = NULL;
grpc_channel_args *new_args = NULL;
char* proxy_name = NULL;
grpc_channel_args* new_args = NULL;
grpc_proxy_mappers_map_name(exec_ctx, arg->value.string, args->channel_args,
&proxy_name, &new_args);
// Instantiate resolver.
@ -758,21 +758,22 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
static void shutdown_resolver_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_resolver *resolver = (grpc_resolver *)arg;
static void shutdown_resolver_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
grpc_resolver* resolver = (grpc_resolver*)arg;
grpc_resolver_shutdown_locked(exec_ctx, resolver);
GRPC_RESOLVER_UNREF(exec_ctx, resolver, "channel");
}
/* Destructor for channel_data */
static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
channel_data *chand = (channel_data *)elem->channel_data;
static void cc_destroy_channel_elem(grpc_exec_ctx* exec_ctx,
grpc_channel_element* elem) {
channel_data* chand = (channel_data*)elem->channel_data;
if (chand->resolver != NULL) {
GRPC_CLOSURE_SCHED(
exec_ctx, GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver,
grpc_combiner_scheduler(chand->combiner)),
exec_ctx,
GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver,
grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
}
if (chand->client_channel_factory != NULL) {
@ -832,45 +833,45 @@ typedef struct client_channel_call_data {
grpc_slice path; // Request path.
gpr_timespec call_start_time;
grpc_millis deadline;
gpr_arena *arena;
grpc_call_stack *owning_call;
grpc_call_combiner *call_combiner;
gpr_arena* arena;
grpc_call_stack* owning_call;
grpc_call_combiner* call_combiner;
grpc_server_retry_throttle_data *retry_throttle_data;
method_parameters *method_params;
grpc_server_retry_throttle_data* retry_throttle_data;
method_parameters* method_params;
grpc_subchannel_call *subchannel_call;
grpc_error *error;
grpc_subchannel_call* subchannel_call;
grpc_error* error;
grpc_lb_policy *lb_policy; // Holds ref while LB pick is pending.
grpc_lb_policy* lb_policy; // Holds ref while LB pick is pending.
grpc_closure lb_pick_closure;
grpc_closure lb_pick_cancel_closure;
grpc_connected_subchannel *connected_subchannel;
grpc_connected_subchannel* connected_subchannel;
grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
grpc_polling_entity *pollent;
grpc_polling_entity* pollent;
grpc_transport_stream_op_batch *waiting_for_pick_batches[MAX_WAITING_BATCHES];
grpc_transport_stream_op_batch* waiting_for_pick_batches[MAX_WAITING_BATCHES];
size_t waiting_for_pick_batches_count;
grpc_closure handle_pending_batch_in_call_combiner[MAX_WAITING_BATCHES];
grpc_transport_stream_op_batch *initial_metadata_batch;
grpc_transport_stream_op_batch* initial_metadata_batch;
grpc_linked_mdelem lb_token_mdelem;
grpc_closure on_complete;
grpc_closure *original_on_complete;
grpc_closure* original_on_complete;
} call_data;
grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
grpc_call_element *elem) {
call_data *calld = (call_data *)elem->call_data;
grpc_subchannel_call* grpc_client_channel_get_subchannel_call(
grpc_call_element* elem) {
call_data* calld = (call_data*)elem->call_data;
return calld->subchannel_call;
}
// This is called via the call combiner, so access to calld is synchronized.
static void waiting_for_pick_batches_add(
call_data *calld, grpc_transport_stream_op_batch *batch) {
call_data* calld, grpc_transport_stream_op_batch* batch) {
if (batch->send_initial_metadata) {
GPR_ASSERT(calld->initial_metadata_batch == NULL);
calld->initial_metadata_batch = batch;
@ -882,9 +883,9 @@ static void waiting_for_pick_batches_add(
}
// This is called via the call combiner, so access to calld is synchronized.
static void fail_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
call_data *calld = (call_data *)arg;
static void fail_pending_batch_in_call_combiner(grpc_exec_ctx* exec_ctx,
void* arg, grpc_error* error) {
call_data* calld = (call_data*)arg;
if (calld->waiting_for_pick_batches_count > 0) {
--calld->waiting_for_pick_batches_count;
grpc_transport_stream_op_batch_finish_with_failure(
@ -895,10 +896,10 @@ static void fail_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
}
// This is called via the call combiner, so access to calld is synchronized.
static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_error *error) {
call_data *calld = (call_data *)elem->call_data;
static void waiting_for_pick_batches_fail(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
grpc_error* error) {
call_data* calld = (call_data*)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
@ -926,9 +927,9 @@ static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx,
}
// This is called via the call combiner, so access to calld is synchronized.
static void run_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *ignored) {
call_data *calld = (call_data *)arg;
static void run_pending_batch_in_call_combiner(grpc_exec_ctx* exec_ctx,
void* arg, grpc_error* ignored) {
call_data* calld = (call_data*)arg;
if (calld->waiting_for_pick_batches_count > 0) {
--calld->waiting_for_pick_batches_count;
grpc_subchannel_call_process_op(
@ -938,13 +939,14 @@ static void run_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
}
// This is called via the call combiner, so access to calld is synchronized.
static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
static void waiting_for_pick_batches_resume(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem) {
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIuPTR
" pending batches to subchannel_call=%p",
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: sending %" PRIuPTR
" pending batches to subchannel_call=%p",
chand, calld, calld->waiting_for_pick_batches_count,
calld->subchannel_call);
}
@ -964,10 +966,10 @@ static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx,
// Applies service config to the call. Must be invoked once we know
// that the resolver has returned results to the channel.
static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
static void apply_service_config_to_call_locked(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem) {
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
chand, calld);
@ -977,7 +979,7 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
grpc_server_retry_throttle_data_ref(chand->retry_throttle_data);
}
if (chand->method_params_table != NULL) {
calld->method_params = (method_parameters *)grpc_method_config_table_get(
calld->method_params = (method_parameters*)grpc_method_config_table_get(
exec_ctx, chand->method_params_table, calld->path);
if (calld->method_params != NULL) {
method_parameters_ref(calld->method_params);
@ -997,11 +999,11 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
}
}
static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_error *error) {
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
static void create_subchannel_call_locked(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
grpc_error* error) {
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
const grpc_connected_subchannel_call_args call_args = {
calld->pollent, // pollent
calld->path, // path
@ -1011,7 +1013,7 @@ static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
calld->subchannel_call_context, // context
calld->call_combiner // call_combiner
};
grpc_error *new_error = grpc_connected_subchannel_create_call(
grpc_error* new_error = grpc_connected_subchannel_create_call(
exec_ctx, calld->connected_subchannel, &call_args,
&calld->subchannel_call);
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
@ -1028,10 +1030,10 @@ static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
}
// Invoked when a pick is completed, on both success or failure.
static void pick_done_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_error *error) {
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
static void pick_done_locked(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_error* error) {
call_data* calld = (call_data*)elem->call_data;
channel_data* chand = (channel_data*)elem->channel_data;
if (calld->connected_subchannel == NULL) {
// Failed to create subchannel.
GRPC_ERROR_UNREF(calld->error);
@ -1057,10 +1059,10 @@ static void pick_done_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
// either (a) the pick was deferred pending a resolver result or (b) the
// pick was done asynchronously. Removes the call's polling entity from
// chand->interested_parties before invoking pick_done_locked().
static void async_pick_done_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem, grpc_error *error) {
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
static void async_pick_done_locked(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem, grpc_error* error) {
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
chand->interested_parties);
pick_done_locked(exec_ctx, elem, error);
@ -1068,11 +1070,11 @@ static void async_pick_done_locked(grpc_exec_ctx *exec_ctx,
// Note: This runs under the client_channel combiner, but will NOT be
// holding the call combiner.
static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_call_element *elem = (grpc_call_element *)arg;
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
static void pick_callback_cancel_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
grpc_call_element* elem = (grpc_call_element*)arg;
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
if (calld->lb_policy != NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
@ -1087,11 +1089,11 @@ static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg,
// Callback invoked by grpc_lb_policy_pick_locked() for async picks.
// Unrefs the LB policy and invokes async_pick_done_locked().
static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_call_element *elem = (grpc_call_element *)arg;
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
static void pick_callback_done_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
grpc_call_element* elem = (grpc_call_element*)arg;
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
chand, calld);
@ -1105,10 +1107,10 @@ static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
// Takes a ref to chand->lb_policy and calls grpc_lb_policy_pick_locked().
// If the pick was completed synchronously, unrefs the LB policy and
// returns true.
static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
static bool pick_callback_start_locked(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem) {
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
chand, calld, chand->lb_policy);
@ -1165,7 +1167,7 @@ static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
}
typedef struct {
grpc_call_element *elem;
grpc_call_element* elem;
bool finished;
grpc_closure closure;
grpc_closure cancel_closure;
@ -1173,11 +1175,10 @@ typedef struct {
// Note: This runs under the client_channel combiner, but will NOT be
// holding the call combiner.
static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_error *error) {
pick_after_resolver_result_args *args =
(pick_after_resolver_result_args *)arg;
static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx* exec_ctx,
void* arg,
grpc_error* error) {
pick_after_resolver_result_args* args = (pick_after_resolver_result_args*)arg;
if (args->finished) {
gpr_free(args);
return;
@ -1190,9 +1191,9 @@ static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
// is called, it will be a no-op. We also immediately invoke
// async_pick_done_locked() to propagate the error back to the caller.
args->finished = true;
grpc_call_element *elem = args->elem;
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
grpc_call_element* elem = args->elem;
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: cancelling pick waiting for resolver result",
@ -1208,14 +1209,13 @@ static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
"Pick cancelled", &error, 1));
}
static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem);
static void pick_after_resolver_result_start_locked(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem);
static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_error *error) {
pick_after_resolver_result_args *args =
(pick_after_resolver_result_args *)arg;
static void pick_after_resolver_result_done_locked(grpc_exec_ctx* exec_ctx,
void* arg,
grpc_error* error) {
pick_after_resolver_result_args* args = (pick_after_resolver_result_args*)arg;
if (args->finished) {
/* cancelled, do nothing */
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
@ -1225,9 +1225,9 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
return;
}
args->finished = true;
grpc_call_element *elem = args->elem;
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
grpc_call_element* elem = args->elem;
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
if (error != GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
@ -1274,17 +1274,17 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
}
}
static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
static void pick_after_resolver_result_start_locked(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem) {
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: deferring pick pending resolver result", chand,
calld);
}
pick_after_resolver_result_args *args =
(pick_after_resolver_result_args *)gpr_zalloc(sizeof(*args));
pick_after_resolver_result_args* args =
(pick_after_resolver_result_args*)gpr_zalloc(sizeof(*args));
args->elem = elem;
GRPC_CLOSURE_INIT(&args->closure, pick_after_resolver_result_done_locked,
args, grpc_combiner_scheduler(chand->combiner));
@ -1297,11 +1297,11 @@ static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
grpc_combiner_scheduler(chand->combiner)));
}
static void start_pick_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *ignored) {
grpc_call_element *elem = (grpc_call_element *)arg;
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
static void start_pick_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* ignored) {
grpc_call_element* elem = (grpc_call_element*)arg;
call_data* calld = (call_data*)elem->call_data;
channel_data* chand = (channel_data*)elem->channel_data;
GPR_ASSERT(calld->connected_subchannel == NULL);
if (chand->lb_policy != NULL) {
// We already have an LB policy, so ask it for a pick.
@ -1331,9 +1331,9 @@ static void start_pick_locked(grpc_exec_ctx *exec_ctx, void *arg,
chand->interested_parties);
}
static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
grpc_call_element *elem = (grpc_call_element *)arg;
call_data *calld = (call_data *)elem->call_data;
static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
grpc_call_element* elem = (grpc_call_element*)arg;
call_data* calld = (call_data*)elem->call_data;
if (calld->retry_throttle_data != NULL) {
if (error == GRPC_ERROR_NONE) {
grpc_server_retry_throttle_data_record_success(
@ -1352,10 +1352,10 @@ static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
static void cc_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_transport_stream_op_batch* batch) {
call_data* calld = (call_data*)elem->call_data;
channel_data* chand = (channel_data*)elem->channel_data;
if (chand->deadline_checking_enabled) {
grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
batch);
@ -1446,11 +1446,11 @@ done:
}
/* Constructor for call_data */
static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
static grpc_error* cc_init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
const grpc_call_element_args* args) {
call_data* calld = (call_data*)elem->call_data;
channel_data* chand = (channel_data*)elem->channel_data;
// Initialize data members.
calld->path = grpc_slice_ref_internal(args->path);
calld->call_start_time = args->start_time;
@ -1466,12 +1466,12 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
}
/* Destructor for call_data */
static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *then_schedule_closure) {
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
static void cc_destroy_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* then_schedule_closure) {
call_data* calld = (call_data*)elem->call_data;
channel_data* chand = (channel_data*)elem->channel_data;
if (chand->deadline_checking_enabled) {
grpc_deadline_state_destroy(exec_ctx, elem);
}
@ -1502,10 +1502,10 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
GRPC_CLOSURE_SCHED(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
}
static void cc_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_polling_entity *pollent) {
call_data *calld = (call_data *)elem->call_data;
static void cc_set_pollset_or_pollset_set(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
grpc_polling_entity* pollent) {
call_data* calld = (call_data*)elem->call_data;
calld->pollent = pollent;
}
@ -1527,9 +1527,9 @@ const grpc_channel_filter grpc_client_channel_filter = {
"client-channel",
};
static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_ignored) {
channel_data *chand = (channel_data *)arg;
static void try_to_connect_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error_ignored) {
channel_data* chand = (channel_data*)arg;
if (chand->lb_policy != NULL) {
grpc_lb_policy_exit_idle_locked(exec_ctx, chand->lb_policy);
} else {
@ -1542,34 +1542,35 @@ static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
grpc_connectivity_state grpc_client_channel_check_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) {
channel_data *chand = (channel_data *)elem->channel_data;
grpc_exec_ctx* exec_ctx, grpc_channel_element* elem, int try_to_connect) {
channel_data* chand = (channel_data*)elem->channel_data;
grpc_connectivity_state out =
grpc_connectivity_state_check(&chand->state_tracker);
if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "try_to_connect");
GRPC_CLOSURE_SCHED(
exec_ctx, GRPC_CLOSURE_CREATE(try_to_connect_locked, chand,
grpc_combiner_scheduler(chand->combiner)),
exec_ctx,
GRPC_CLOSURE_CREATE(try_to_connect_locked, chand,
grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
}
return out;
}
typedef struct external_connectivity_watcher {
channel_data *chand;
channel_data* chand;
grpc_polling_entity pollent;
grpc_closure *on_complete;
grpc_closure *watcher_timer_init;
grpc_connectivity_state *state;
grpc_closure* on_complete;
grpc_closure* watcher_timer_init;
grpc_connectivity_state* state;
grpc_closure my_closure;
struct external_connectivity_watcher *next;
struct external_connectivity_watcher* next;
} external_connectivity_watcher;
static external_connectivity_watcher *lookup_external_connectivity_watcher(
channel_data *chand, grpc_closure *on_complete) {
static external_connectivity_watcher* lookup_external_connectivity_watcher(
channel_data* chand, grpc_closure* on_complete) {
gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
external_connectivity_watcher *w =
external_connectivity_watcher* w =
chand->external_connectivity_watcher_list_head;
while (w != NULL && w->on_complete != on_complete) {
w = w->next;
@ -1579,7 +1580,7 @@ static external_connectivity_watcher *lookup_external_connectivity_watcher(
}
static void external_connectivity_watcher_list_append(
channel_data *chand, external_connectivity_watcher *w) {
channel_data* chand, external_connectivity_watcher* w) {
GPR_ASSERT(!lookup_external_connectivity_watcher(chand, w->on_complete));
gpr_mu_lock(&w->chand->external_connectivity_watcher_list_mu);
@ -1590,7 +1591,7 @@ static void external_connectivity_watcher_list_append(
}
static void external_connectivity_watcher_list_remove(
channel_data *chand, external_connectivity_watcher *too_remove) {
channel_data* chand, external_connectivity_watcher* too_remove) {
GPR_ASSERT(
lookup_external_connectivity_watcher(chand, too_remove->on_complete));
gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
@ -1599,7 +1600,7 @@ static void external_connectivity_watcher_list_remove(
gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);
return;
}
external_connectivity_watcher *w =
external_connectivity_watcher* w =
chand->external_connectivity_watcher_list_head;
while (w != NULL) {
if (w->next == too_remove) {
@ -1613,12 +1614,12 @@ static void external_connectivity_watcher_list_remove(
}
int grpc_client_channel_num_external_connectivity_watchers(
grpc_channel_element *elem) {
channel_data *chand = (channel_data *)elem->channel_data;
grpc_channel_element* elem) {
channel_data* chand = (channel_data*)elem->channel_data;
int count = 0;
gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
external_connectivity_watcher *w =
external_connectivity_watcher* w =
chand->external_connectivity_watcher_list_head;
while (w != NULL) {
count++;
@ -1629,10 +1630,10 @@ int grpc_client_channel_num_external_connectivity_watchers(
return count;
}
static void on_external_watch_complete_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
external_connectivity_watcher *w = (external_connectivity_watcher *)arg;
grpc_closure *follow_up = w->on_complete;
static void on_external_watch_complete_locked(grpc_exec_ctx* exec_ctx,
void* arg, grpc_error* error) {
external_connectivity_watcher* w = (external_connectivity_watcher*)arg;
grpc_closure* follow_up = w->on_complete;
grpc_polling_entity_del_from_pollset_set(exec_ctx, &w->pollent,
w->chand->interested_parties);
GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack,
@ -1642,10 +1643,10 @@ static void on_external_watch_complete_locked(grpc_exec_ctx *exec_ctx,
GRPC_CLOSURE_RUN(exec_ctx, follow_up, GRPC_ERROR_REF(error));
}
static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_ignored) {
external_connectivity_watcher *w = (external_connectivity_watcher *)arg;
external_connectivity_watcher *found = NULL;
static void watch_connectivity_state_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error_ignored) {
external_connectivity_watcher* w = (external_connectivity_watcher*)arg;
external_connectivity_watcher* found = NULL;
if (w->state != NULL) {
external_connectivity_watcher_list_append(w->chand, w);
GRPC_CLOSURE_RUN(exec_ctx, w->watcher_timer_init, GRPC_ERROR_NONE);
@ -1670,12 +1671,12 @@ static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
void grpc_client_channel_watch_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
grpc_polling_entity pollent, grpc_connectivity_state *state,
grpc_closure *closure, grpc_closure *watcher_timer_init) {
channel_data *chand = (channel_data *)elem->channel_data;
external_connectivity_watcher *w =
(external_connectivity_watcher *)gpr_zalloc(sizeof(*w));
grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
grpc_polling_entity pollent, grpc_connectivity_state* state,
grpc_closure* closure, grpc_closure* watcher_timer_init) {
channel_data* chand = (channel_data*)elem->channel_data;
external_connectivity_watcher* w =
(external_connectivity_watcher*)gpr_zalloc(sizeof(*w));
w->chand = chand;
w->pollent = pollent;
w->on_complete = closure;

@ -42,19 +42,19 @@ extern "C" {
extern const grpc_channel_filter grpc_client_channel_filter;
grpc_connectivity_state grpc_client_channel_check_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect);
grpc_exec_ctx* exec_ctx, grpc_channel_element* elem, int try_to_connect);
int grpc_client_channel_num_external_connectivity_watchers(
grpc_channel_element *elem);
grpc_channel_element* elem);
void grpc_client_channel_watch_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
grpc_polling_entity pollent, grpc_connectivity_state *state,
grpc_closure *on_complete, grpc_closure *watcher_timer_init);
grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
grpc_polling_entity pollent, grpc_connectivity_state* state,
grpc_closure* on_complete, grpc_closure* watcher_timer_init);
/* Debug helper: pull the subchannel call from a call stack element */
grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
grpc_call_element *elem);
grpc_subchannel_call* grpc_client_channel_get_subchannel_call(
grpc_call_element* elem);
#ifdef __cplusplus
}
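The three declarations above are the client channel's entire connectivity-state surface. As a hedged illustration only (not part of this change), a caller holding the client-channel element could drive it roughly as below; `exec_ctx` and `elem` are assumed to come from the surrounding channel stack.

```c
/* Sketch only: poll the channel's state, asking it to start connecting if it
   is idle, and log how many external watchers are currently registered. */
grpc_connectivity_state state = grpc_client_channel_check_connectivity_state(
    exec_ctx, elem, 1 /* try_to_connect */);
if (state != GRPC_CHANNEL_READY) {
  gpr_log(GPR_DEBUG, "channel state=%d, external watchers=%d", state,
          grpc_client_channel_num_external_connectivity_watchers(elem));
}
```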

@ -44,39 +44,39 @@ typedef enum {
/** Constructor for new configured channels.
Creating decorators around this type is encouraged to adapt behavior. */
struct grpc_client_channel_factory {
const grpc_client_channel_factory_vtable *vtable;
const grpc_client_channel_factory_vtable* vtable;
};
struct grpc_client_channel_factory_vtable {
void (*ref)(grpc_client_channel_factory *factory);
void (*unref)(grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *factory);
grpc_subchannel *(*create_subchannel)(grpc_exec_ctx *exec_ctx,
grpc_client_channel_factory *factory,
const grpc_subchannel_args *args);
grpc_channel *(*create_client_channel)(grpc_exec_ctx *exec_ctx,
grpc_client_channel_factory *factory,
const char *target,
void (*ref)(grpc_client_channel_factory* factory);
void (*unref)(grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* factory);
grpc_subchannel* (*create_subchannel)(grpc_exec_ctx* exec_ctx,
grpc_client_channel_factory* factory,
const grpc_subchannel_args* args);
grpc_channel* (*create_client_channel)(grpc_exec_ctx* exec_ctx,
grpc_client_channel_factory* factory,
const char* target,
grpc_client_channel_type type,
const grpc_channel_args *args);
const grpc_channel_args* args);
};
void grpc_client_channel_factory_ref(grpc_client_channel_factory *factory);
void grpc_client_channel_factory_unref(grpc_exec_ctx *exec_ctx,
grpc_client_channel_factory *factory);
void grpc_client_channel_factory_ref(grpc_client_channel_factory* factory);
void grpc_client_channel_factory_unref(grpc_exec_ctx* exec_ctx,
grpc_client_channel_factory* factory);
/** Create a new grpc_subchannel */
grpc_subchannel *grpc_client_channel_factory_create_subchannel(
grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *factory,
const grpc_subchannel_args *args);
grpc_subchannel* grpc_client_channel_factory_create_subchannel(
grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* factory,
const grpc_subchannel_args* args);
/** Create a new grpc_channel */
grpc_channel *grpc_client_channel_factory_create_channel(
grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *factory,
const char *target, grpc_client_channel_type type,
const grpc_channel_args *args);
grpc_channel* grpc_client_channel_factory_create_channel(
grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* factory,
const char* target, grpc_client_channel_type type,
const grpc_channel_args* args);
grpc_arg grpc_client_channel_factory_create_channel_arg(
grpc_client_channel_factory *factory);
grpc_client_channel_factory* factory);
#ifdef __cplusplus
}
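Everything in this header dispatches through `vtable`, so decorators only have to wrap the calls shown above. A minimal, hedged usage sketch (not part of this change; `exec_ctx`, `factory` and `subchannel_args` are assumed to be supplied by the caller):

```c
/* Sketch only: take a ref on the factory, build a subchannel through the
   vtable-dispatching wrapper, create a grpc_arg that can carry the factory
   inside channel args, then drop our ref. */
grpc_client_channel_factory_ref(factory);
grpc_subchannel* subchannel = grpc_client_channel_factory_create_subchannel(
    exec_ctx, factory, subchannel_args);
grpc_arg factory_arg = grpc_client_channel_factory_create_channel_arg(factory);
grpc_client_channel_factory_unref(exec_ctx, factory);
```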

@ -34,16 +34,16 @@
#include "src/core/ext/filters/client_channel/subchannel_index.h"
#include "src/core/lib/surface/channel_init.h"
static bool append_filter(grpc_exec_ctx *exec_ctx,
grpc_channel_stack_builder *builder, void *arg) {
static bool append_filter(grpc_exec_ctx* exec_ctx,
grpc_channel_stack_builder* builder, void* arg) {
return grpc_channel_stack_builder_append_filter(
builder, (const grpc_channel_filter *)arg, NULL, NULL);
builder, (const grpc_channel_filter*)arg, NULL, NULL);
}
static bool set_default_host_if_unset(grpc_exec_ctx *exec_ctx,
grpc_channel_stack_builder *builder,
void *unused) {
const grpc_channel_args *args =
static bool set_default_host_if_unset(grpc_exec_ctx* exec_ctx,
grpc_channel_stack_builder* builder,
void* unused) {
const grpc_channel_args* args =
grpc_channel_stack_builder_get_channel_arguments(builder);
for (size_t i = 0; i < args->num_args; i++) {
if (0 == strcmp(args->args[i].key, GRPC_ARG_DEFAULT_AUTHORITY) ||
@ -51,12 +51,12 @@ static bool set_default_host_if_unset(grpc_exec_ctx *exec_ctx,
return true;
}
}
char *default_authority = grpc_get_default_authority(
char* default_authority = grpc_get_default_authority(
exec_ctx, grpc_channel_stack_builder_get_target(builder));
if (default_authority != NULL) {
grpc_arg arg = grpc_channel_arg_string_create(
(char *)GRPC_ARG_DEFAULT_AUTHORITY, default_authority);
grpc_channel_args *new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
(char*)GRPC_ARG_DEFAULT_AUTHORITY, default_authority);
grpc_channel_args* new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
grpc_channel_stack_builder_set_channel_arguments(exec_ctx, builder,
new_args);
gpr_free(default_authority);
@ -76,7 +76,7 @@ extern "C" void grpc_client_channel_init(void) {
set_default_host_if_unset, NULL);
grpc_channel_init_register_stage(
GRPC_CLIENT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, append_filter,
(void *)&grpc_client_channel_filter);
(void*)&grpc_client_channel_filter);
grpc_http_connect_register_handshaker_factory();
grpc_register_tracer(&grpc_client_channel_trace);
#ifndef NDEBUG

@ -31,48 +31,48 @@ typedef struct grpc_connector grpc_connector;
typedef struct grpc_connector_vtable grpc_connector_vtable;
struct grpc_connector {
const grpc_connector_vtable *vtable;
const grpc_connector_vtable* vtable;
};
typedef struct {
/** set of pollsets interested in this connection */
grpc_pollset_set *interested_parties;
grpc_pollset_set* interested_parties;
/** deadline for connection */
grpc_millis deadline;
/** channel arguments (to be passed to transport) */
const grpc_channel_args *channel_args;
const grpc_channel_args* channel_args;
} grpc_connect_in_args;
typedef struct {
/** the connected transport */
grpc_transport *transport;
grpc_transport* transport;
/** channel arguments (to be passed to the filters) */
grpc_channel_args *channel_args;
grpc_channel_args* channel_args;
} grpc_connect_out_args;
struct grpc_connector_vtable {
void (*ref)(grpc_connector *connector);
void (*unref)(grpc_exec_ctx *exec_ctx, grpc_connector *connector);
void (*ref)(grpc_connector* connector);
void (*unref)(grpc_exec_ctx* exec_ctx, grpc_connector* connector);
/** Implementation of grpc_connector_shutdown */
void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_connector *connector,
grpc_error *why);
void (*shutdown)(grpc_exec_ctx* exec_ctx, grpc_connector* connector,
grpc_error* why);
/** Implementation of grpc_connector_connect */
void (*connect)(grpc_exec_ctx *exec_ctx, grpc_connector *connector,
const grpc_connect_in_args *in_args,
grpc_connect_out_args *out_args, grpc_closure *notify);
void (*connect)(grpc_exec_ctx* exec_ctx, grpc_connector* connector,
const grpc_connect_in_args* in_args,
grpc_connect_out_args* out_args, grpc_closure* notify);
};
grpc_connector *grpc_connector_ref(grpc_connector *connector);
void grpc_connector_unref(grpc_exec_ctx *exec_ctx, grpc_connector *connector);
grpc_connector* grpc_connector_ref(grpc_connector* connector);
void grpc_connector_unref(grpc_exec_ctx* exec_ctx, grpc_connector* connector);
/** Connect using the connector: max one outstanding call at a time */
void grpc_connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *connector,
const grpc_connect_in_args *in_args,
grpc_connect_out_args *out_args,
grpc_closure *notify);
void grpc_connector_connect(grpc_exec_ctx* exec_ctx, grpc_connector* connector,
const grpc_connect_in_args* in_args,
grpc_connect_out_args* out_args,
grpc_closure* notify);
/** Cancel any pending connection */
void grpc_connector_shutdown(grpc_exec_ctx *exec_ctx, grpc_connector *connector,
grpc_error *why);
void grpc_connector_shutdown(grpc_exec_ctx* exec_ctx, grpc_connector* connector,
grpc_error* why);
#ifdef __cplusplus
}
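The connector is another pure-vtable type; the wrappers at the bottom of this header are the only entry points the subchannel code uses. A hedged sketch of the typical call sequence (not part of this change; `exec_ctx`, `connector`, `in_args`, `out_args` and `on_connected` are assumed to be set up by the owning subchannel):

```c
/* Sketch only: hold a ref for the duration of the single outstanding connect,
   and use shutdown to cancel it if the owner is torn down first. */
grpc_connector_ref(connector);
grpc_connector_connect(exec_ctx, connector, &in_args, &out_args, on_connected);
/* ... on teardown before `on_connected` has run: */
grpc_connector_shutdown(
    exec_ctx, connector,
    GRPC_ERROR_CREATE_FROM_STATIC_STRING("owning subchannel destroyed"));
grpc_connector_unref(exec_ctx, connector);
```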

@ -26,9 +26,9 @@ grpc_tracer_flag grpc_trace_lb_policy_refcount =
GRPC_TRACER_INITIALIZER(false, "lb_policy_refcount");
#endif
void grpc_lb_policy_init(grpc_lb_policy *policy,
const grpc_lb_policy_vtable *vtable,
grpc_combiner *combiner) {
void grpc_lb_policy_init(grpc_lb_policy* policy,
const grpc_lb_policy_vtable* vtable,
grpc_combiner* combiner) {
policy->vtable = vtable;
gpr_atm_no_barrier_store(&policy->ref_pair, 1 << WEAK_REF_BITS);
policy->interested_parties = grpc_pollset_set_create();
@ -37,7 +37,7 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
#ifndef NDEBUG
#define REF_FUNC_EXTRA_ARGS , const char *file, int line, const char *reason
#define REF_MUTATE_EXTRA_ARGS REF_FUNC_EXTRA_ARGS, const char *purpose
#define REF_MUTATE_EXTRA_ARGS REF_FUNC_EXTRA_ARGS, const char* purpose
#define REF_FUNC_PASS_ARGS(new_reason) , file, line, new_reason
#define REF_MUTATE_PASS_ARGS(purpose) , file, line, reason, purpose
#else
@ -47,7 +47,7 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
#define REF_MUTATE_PASS_ARGS(x)
#endif
static gpr_atm ref_mutate(grpc_lb_policy *c, gpr_atm delta,
static gpr_atm ref_mutate(grpc_lb_policy* c, gpr_atm delta,
int barrier REF_MUTATE_EXTRA_ARGS) {
gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
: gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
@ -61,104 +61,105 @@ static gpr_atm ref_mutate(grpc_lb_policy *c, gpr_atm delta,
return old_val;
}
void grpc_lb_policy_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
void grpc_lb_policy_ref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
ref_mutate(policy, 1 << WEAK_REF_BITS, 0 REF_MUTATE_PASS_ARGS("STRONG_REF"));
}
static void shutdown_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_lb_policy *policy = (grpc_lb_policy *)arg;
static void shutdown_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
grpc_lb_policy* policy = (grpc_lb_policy*)arg;
policy->vtable->shutdown_locked(exec_ctx, policy);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, policy, "strong-unref");
}
void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
void grpc_lb_policy_unref(grpc_exec_ctx* exec_ctx,
grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
gpr_atm old_val =
ref_mutate(policy, (gpr_atm)1 - (gpr_atm)(1 << WEAK_REF_BITS),
1 REF_MUTATE_PASS_ARGS("STRONG_UNREF"));
gpr_atm mask = ~(gpr_atm)((1 << WEAK_REF_BITS) - 1);
gpr_atm check = 1 << WEAK_REF_BITS;
if ((old_val & mask) == check) {
GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(
shutdown_locked, policy,
grpc_combiner_scheduler(policy->combiner)),
GRPC_ERROR_NONE);
GRPC_CLOSURE_SCHED(
exec_ctx,
GRPC_CLOSURE_CREATE(shutdown_locked, policy,
grpc_combiner_scheduler(policy->combiner)),
GRPC_ERROR_NONE);
} else {
grpc_lb_policy_weak_unref(exec_ctx,
policy REF_FUNC_PASS_ARGS("strong-unref"));
}
}
void grpc_lb_policy_weak_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
void grpc_lb_policy_weak_ref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
ref_mutate(policy, 1, 0 REF_MUTATE_PASS_ARGS("WEAK_REF"));
}
void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
void grpc_lb_policy_weak_unref(grpc_exec_ctx* exec_ctx,
grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
gpr_atm old_val =
ref_mutate(policy, -(gpr_atm)1, 1 REF_MUTATE_PASS_ARGS("WEAK_UNREF"));
if (old_val == 1) {
grpc_pollset_set_destroy(exec_ctx, policy->interested_parties);
grpc_combiner *combiner = policy->combiner;
grpc_combiner* combiner = policy->combiner;
policy->vtable->destroy(exec_ctx, policy);
GRPC_COMBINER_UNREF(exec_ctx, combiner, "lb_policy");
}
}
int grpc_lb_policy_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target,
grpc_call_context_element *context,
void **user_data, grpc_closure *on_complete) {
int grpc_lb_policy_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
const grpc_lb_policy_pick_args* pick_args,
grpc_connected_subchannel** target,
grpc_call_context_element* context,
void** user_data, grpc_closure* on_complete) {
return policy->vtable->pick_locked(exec_ctx, policy, pick_args, target,
context, user_data, on_complete);
}
void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
grpc_connected_subchannel **target,
grpc_error *error) {
void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx* exec_ctx,
grpc_lb_policy* policy,
grpc_connected_subchannel** target,
grpc_error* error) {
policy->vtable->cancel_pick_locked(exec_ctx, policy, target, error);
}
void grpc_lb_policy_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
void grpc_lb_policy_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
grpc_lb_policy* policy,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error *error) {
grpc_error* error) {
policy->vtable->cancel_picks_locked(exec_ctx, policy,
initial_metadata_flags_mask,
initial_metadata_flags_eq, error);
}
void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy) {
void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx* exec_ctx,
grpc_lb_policy* policy) {
policy->vtable->exit_idle_locked(exec_ctx, policy);
}
void grpc_lb_policy_ping_one_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
grpc_closure *closure) {
void grpc_lb_policy_ping_one_locked(grpc_exec_ctx* exec_ctx,
grpc_lb_policy* policy,
grpc_closure* closure) {
policy->vtable->ping_one_locked(exec_ctx, policy, closure);
}
void grpc_lb_policy_notify_on_state_change_locked(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_connectivity_state *state, grpc_closure *closure) {
grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
grpc_connectivity_state* state, grpc_closure* closure) {
policy->vtable->notify_on_state_change_locked(exec_ctx, policy, state,
closure);
}
grpc_connectivity_state grpc_lb_policy_check_connectivity_locked(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_error **connectivity_error) {
grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
grpc_error** connectivity_error) {
return policy->vtable->check_connectivity_locked(exec_ctx, policy,
connectivity_error);
}
void grpc_lb_policy_update_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
const grpc_lb_policy_args *lb_policy_args) {
void grpc_lb_policy_update_locked(grpc_exec_ctx* exec_ctx,
grpc_lb_policy* policy,
const grpc_lb_policy_args* lb_policy_args) {
policy->vtable->update_locked(exec_ctx, policy, lb_policy_args);
}

@ -38,70 +38,70 @@ extern grpc_tracer_flag grpc_trace_lb_policy_refcount;
#endif
struct grpc_lb_policy {
const grpc_lb_policy_vtable *vtable;
const grpc_lb_policy_vtable* vtable;
gpr_atm ref_pair;
/* owned pointer to interested parties in load balancing decisions */
grpc_pollset_set *interested_parties;
grpc_pollset_set* interested_parties;
/* combiner under which lb_policy actions take place */
grpc_combiner *combiner;
grpc_combiner* combiner;
};
/** Extra arguments for an LB pick */
typedef struct grpc_lb_policy_pick_args {
/** Initial metadata associated with the picking call. */
grpc_metadata_batch *initial_metadata;
grpc_metadata_batch* initial_metadata;
/** Bitmask used for selective cancelling. See \a
* grpc_lb_policy_cancel_picks() and \a GRPC_INITIAL_METADATA_* in
* grpc_types.h */
uint32_t initial_metadata_flags;
/** Storage for LB token in \a initial_metadata, or NULL if not used */
grpc_linked_mdelem *lb_token_mdelem_storage;
grpc_linked_mdelem* lb_token_mdelem_storage;
} grpc_lb_policy_pick_args;
struct grpc_lb_policy_vtable {
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
void (*shutdown_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
void (*shutdown_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
/** \see grpc_lb_policy_pick */
int (*pick_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target,
grpc_call_context_element *context, void **user_data,
grpc_closure *on_complete);
int (*pick_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
const grpc_lb_policy_pick_args* pick_args,
grpc_connected_subchannel** target,
grpc_call_context_element* context, void** user_data,
grpc_closure* on_complete);
/** \see grpc_lb_policy_cancel_pick */
void (*cancel_pick_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_connected_subchannel **target,
grpc_error *error);
void (*cancel_pick_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
grpc_connected_subchannel** target,
grpc_error* error);
/** \see grpc_lb_policy_cancel_picks */
void (*cancel_picks_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
void (*cancel_picks_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error *error);
grpc_error* error);
/** \see grpc_lb_policy_ping_one */
void (*ping_one_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_closure *closure);
void (*ping_one_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
grpc_closure* closure);
/** Try to enter a READY connectivity state */
void (*exit_idle_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
void (*exit_idle_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
/** check the current connectivity of the lb_policy */
grpc_connectivity_state (*check_connectivity_locked)(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_error **connectivity_error);
grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
grpc_error** connectivity_error);
/** call notify when the connectivity state of a channel changes from *state.
Updates *state with the new state of the policy. Calling with a NULL \a
state cancels the subscription. */
void (*notify_on_state_change_locked)(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
grpc_connectivity_state *state,
grpc_closure *closure);
void (*notify_on_state_change_locked)(grpc_exec_ctx* exec_ctx,
grpc_lb_policy* policy,
grpc_connectivity_state* state,
grpc_closure* closure);
void (*update_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_args *args);
void (*update_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
const grpc_lb_policy_args* args);
};
#ifndef NDEBUG
@ -119,29 +119,29 @@ struct grpc_lb_policy_vtable {
grpc_lb_policy_weak_ref((p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, p, r) \
grpc_lb_policy_weak_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
void grpc_lb_policy_ref(grpc_lb_policy *policy, const char *file, int line,
const char *reason);
void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const char *file, int line, const char *reason);
void grpc_lb_policy_weak_ref(grpc_lb_policy *policy, const char *file, int line,
const char *reason);
void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const char *file, int line, const char *reason);
void grpc_lb_policy_ref(grpc_lb_policy* policy, const char* file, int line,
const char* reason);
void grpc_lb_policy_unref(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
const char* file, int line, const char* reason);
void grpc_lb_policy_weak_ref(grpc_lb_policy* policy, const char* file, int line,
const char* reason);
void grpc_lb_policy_weak_unref(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
const char* file, int line, const char* reason);
#else
#define GRPC_LB_POLICY_REF(p, r) grpc_lb_policy_ref((p))
#define GRPC_LB_POLICY_UNREF(cl, p, r) grpc_lb_policy_unref((cl), (p))
#define GRPC_LB_POLICY_WEAK_REF(p, r) grpc_lb_policy_weak_ref((p))
#define GRPC_LB_POLICY_WEAK_UNREF(cl, p, r) grpc_lb_policy_weak_unref((cl), (p))
void grpc_lb_policy_ref(grpc_lb_policy *policy);
void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
void grpc_lb_policy_weak_ref(grpc_lb_policy *policy);
void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
void grpc_lb_policy_ref(grpc_lb_policy* policy);
void grpc_lb_policy_unref(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
void grpc_lb_policy_weak_ref(grpc_lb_policy* policy);
void grpc_lb_policy_weak_unref(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
#endif
/** called by concrete implementations to initialize the base struct */
void grpc_lb_policy_init(grpc_lb_policy *policy,
const grpc_lb_policy_vtable *vtable,
grpc_combiner *combiner);
void grpc_lb_policy_init(grpc_lb_policy* policy,
const grpc_lb_policy_vtable* vtable,
grpc_combiner* combiner);
/** Finds an appropriate subchannel for a call, based on \a pick_args.
@ -160,53 +160,53 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
Any IO should be done under the \a interested_parties \a grpc_pollset_set
in the \a grpc_lb_policy struct. */
int grpc_lb_policy_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target,
grpc_call_context_element *context,
void **user_data, grpc_closure *on_complete);
int grpc_lb_policy_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
const grpc_lb_policy_pick_args* pick_args,
grpc_connected_subchannel** target,
grpc_call_context_element* context,
void** user_data, grpc_closure* on_complete);
/** Perform a connected subchannel ping (see \a grpc_connected_subchannel_ping)
against one of the connected subchannels managed by \a policy. */
void grpc_lb_policy_ping_one_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
grpc_closure *closure);
void grpc_lb_policy_ping_one_locked(grpc_exec_ctx* exec_ctx,
grpc_lb_policy* policy,
grpc_closure* closure);
/** Cancel picks for \a target.
The \a on_complete callback of the pending picks will be invoked with \a
*target set to NULL. */
void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
grpc_connected_subchannel **target,
grpc_error *error);
void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx* exec_ctx,
grpc_lb_policy* policy,
grpc_connected_subchannel** target,
grpc_error* error);
/** Cancel all pending picks for which their \a initial_metadata_flags (as given
in the call to \a grpc_lb_policy_pick) matches \a initial_metadata_flags_eq
when AND'd with \a initial_metadata_flags_mask */
void grpc_lb_policy_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
void grpc_lb_policy_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
grpc_lb_policy* policy,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error *error);
grpc_error* error);
/** Try to enter a READY connectivity state */
void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy);
void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx* exec_ctx,
grpc_lb_policy* policy);
/* Call notify when the connectivity state of a channel changes from \a *state.
* Updates \a *state with the new state of the policy */
void grpc_lb_policy_notify_on_state_change_locked(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_connectivity_state *state, grpc_closure *closure);
grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
grpc_connectivity_state* state, grpc_closure* closure);
grpc_connectivity_state grpc_lb_policy_check_connectivity_locked(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_error **connectivity_error);
grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
grpc_error** connectivity_error);
/** Update \a policy with \a lb_policy_args. */
void grpc_lb_policy_update_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
const grpc_lb_policy_args *lb_policy_args);
void grpc_lb_policy_update_locked(grpc_exec_ctx* exec_ctx,
grpc_lb_policy* policy,
const grpc_lb_policy_args* lb_policy_args);
#ifdef __cplusplus
}
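For orientation while reviewing the reflowed signatures above, here is a hedged sketch of the pick/cancel flow those wrappers expose (not part of this change; `exec_ctx`, `policy`, `pick_args`, `context` and `on_pick_done` are assumed to be provided by the client channel that owns the policy):

```c
/* Sketch only: a pick either completes synchronously (non-zero return,
   `target` already holds a referenced connected subchannel, the callback is
   not scheduled) or stays pending until the policy finishes or the caller
   cancels it. */
grpc_connected_subchannel* target = NULL;
void* lb_token = NULL;
if (grpc_lb_policy_pick_locked(exec_ctx, policy, pick_args, &target, context,
                               &lb_token, on_pick_done)) {
  /* Synchronous completion: use `target` directly. */
} else {
  /* Pending: can later be abandoned with
     grpc_lb_policy_cancel_pick_locked(exec_ctx, policy, &target,
                                       GRPC_ERROR_CANCELLED); */
}
```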

@ -25,31 +25,31 @@
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/profiling/timers.h"
static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
grpc_channel_element* elem,
grpc_channel_element_args* args) {
return GRPC_ERROR_NONE;
}
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {}
static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
grpc_channel_element* elem) {}
typedef struct {
// Stats object to update.
grpc_grpclb_client_stats *client_stats;
grpc_grpclb_client_stats* client_stats;
// State for intercepting send_initial_metadata.
grpc_closure on_complete_for_send;
grpc_closure *original_on_complete_for_send;
grpc_closure* original_on_complete_for_send;
bool send_initial_metadata_succeeded;
// State for intercepting recv_initial_metadata.
grpc_closure recv_initial_metadata_ready;
grpc_closure *original_recv_initial_metadata_ready;
grpc_closure* original_recv_initial_metadata_ready;
bool recv_initial_metadata_succeeded;
} call_data;
static void on_complete_for_send(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
call_data *calld = (call_data *)arg;
static void on_complete_for_send(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
call_data* calld = (call_data*)arg;
if (error == GRPC_ERROR_NONE) {
calld->send_initial_metadata_succeeded = true;
}
@ -57,9 +57,9 @@ static void on_complete_for_send(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_REF(error));
}
static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
call_data *calld = (call_data *)arg;
static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
call_data* calld = (call_data*)arg;
if (error == GRPC_ERROR_NONE) {
calld->recv_initial_metadata_succeeded = true;
}
@ -67,25 +67,24 @@ static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_REF(error));
}
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
call_data *calld = (call_data *)elem->call_data;
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
const grpc_call_element_args* args) {
call_data* calld = (call_data*)elem->call_data;
// Get stats object from context and take a ref.
GPR_ASSERT(args->context != NULL);
GPR_ASSERT(args->context[GRPC_GRPCLB_CLIENT_STATS].value != NULL);
calld->client_stats = grpc_grpclb_client_stats_ref(
(grpc_grpclb_client_stats *)args->context[GRPC_GRPCLB_CLIENT_STATS]
.value);
(grpc_grpclb_client_stats*)args->context[GRPC_GRPCLB_CLIENT_STATS].value);
// Record call started.
grpc_grpclb_client_stats_add_call_started(calld->client_stats);
return GRPC_ERROR_NONE;
}
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
call_data *calld = (call_data *)elem->call_data;
static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
call_data* calld = (call_data*)elem->call_data;
// Record call finished, optionally setting client_failed_to_send and
// received.
grpc_grpclb_client_stats_add_call_finished(
@ -97,9 +96,9 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
}
static void start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
call_data *calld = (call_data *)elem->call_data;
grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_transport_stream_op_batch* batch) {
call_data* calld = (call_data*)elem->call_data;
GPR_TIMER_BEGIN("clr_start_transport_stream_op_batch", 0);
// Intercept send_initial_metadata.
if (batch->send_initial_metadata) {

@ -32,4 +32,4 @@ extern const grpc_channel_filter grpc_client_load_reporting_filter;
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_CLIENT_LOAD_REPORTING_FILTER_H \
*/
*/

@ -28,7 +28,7 @@ extern "C" {
/** Returns a load balancing factory for the glb policy, which tries to connect
* to a load balancing server to decide the next successfully connected
* subchannel to pick. */
grpc_lb_policy_factory *grpc_glb_lb_factory_create();
grpc_lb_policy_factory* grpc_glb_lb_factory_create();
#ifdef __cplusplus
}

@ -25,20 +25,20 @@
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/support/string.h"
grpc_channel *grpc_lb_policy_grpclb_create_lb_channel(
grpc_exec_ctx *exec_ctx, const char *lb_service_target_addresses,
grpc_client_channel_factory *client_channel_factory,
grpc_channel_args *args) {
grpc_channel *lb_channel = grpc_client_channel_factory_create_channel(
grpc_channel* grpc_lb_policy_grpclb_create_lb_channel(
grpc_exec_ctx* exec_ctx, const char* lb_service_target_addresses,
grpc_client_channel_factory* client_channel_factory,
grpc_channel_args* args) {
grpc_channel* lb_channel = grpc_client_channel_factory_create_channel(
exec_ctx, client_channel_factory, lb_service_target_addresses,
GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, args);
return lb_channel;
}
grpc_channel_args *grpc_lb_policy_grpclb_build_lb_channel_args(
grpc_exec_ctx *exec_ctx, grpc_slice_hash_table *targets_info,
grpc_fake_resolver_response_generator *response_generator,
const grpc_channel_args *args) {
grpc_channel_args* grpc_lb_policy_grpclb_build_lb_channel_args(
grpc_exec_ctx* exec_ctx, grpc_slice_hash_table* targets_info,
grpc_fake_resolver_response_generator* response_generator,
const grpc_channel_args* args) {
const grpc_arg to_add[] = {
grpc_fake_resolver_response_generator_arg(response_generator)};
/* We remove:
@ -62,7 +62,7 @@ grpc_channel_args *grpc_lb_policy_grpclb_build_lb_channel_args(
*
* - The fake resolver generator, because we are replacing it with the one
* from the grpclb policy, used to propagate updates to the LB channel. */
static const char *keys_to_remove[] = {
static const char* keys_to_remove[] = {
GRPC_ARG_LB_POLICY_NAME, GRPC_ARG_LB_ADDRESSES, GRPC_ARG_SERVER_URI,
GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR};
return grpc_channel_args_copy_and_add_and_remove(

@ -34,19 +34,19 @@ extern "C" {
* from resolving the LB service's name (eg, ipv4:10.0.0.1:1234,10.2.3.4:9876).
* \a client_channel_factory will be used for the creation of the LB channel,
* alongside the channel args passed in \a args. */
grpc_channel *grpc_lb_policy_grpclb_create_lb_channel(
grpc_exec_ctx *exec_ctx, const char *lb_service_target_addresses,
grpc_client_channel_factory *client_channel_factory,
grpc_channel_args *args);
grpc_channel* grpc_lb_policy_grpclb_create_lb_channel(
grpc_exec_ctx* exec_ctx, const char* lb_service_target_addresses,
grpc_client_channel_factory* client_channel_factory,
grpc_channel_args* args);
grpc_channel_args *grpc_lb_policy_grpclb_build_lb_channel_args(
grpc_exec_ctx *exec_ctx, grpc_slice_hash_table *targets_info,
grpc_fake_resolver_response_generator *response_generator,
const grpc_channel_args *args);
grpc_channel_args* grpc_lb_policy_grpclb_build_lb_channel_args(
grpc_exec_ctx* exec_ctx, grpc_slice_hash_table* targets_info,
grpc_fake_resolver_response_generator* response_generator,
const grpc_channel_args* args);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CHANNEL_H \
*/
*/
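The helpers declared in this header have two implementations in the diff (the plain one above, the secure one just below), and both are driven the same way. A hedged sketch of the call order (not part of this change): `targets_info`, `response_generator`, `args` and `client_channel_factory` come from the grpclb policy's own state, the address string is a placeholder in the format mentioned above, and the exec_ctx-taking form of `grpc_channel_args_destroy` from this era is assumed.

```c
/* Sketch only: build the LB channel args, then create the LB channel the
   grpclb policy uses to talk to the balancer, and release our args copy. */
grpc_channel_args* lb_args = grpc_lb_policy_grpclb_build_lb_channel_args(
    exec_ctx, targets_info, response_generator, args);
grpc_channel* lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
    exec_ctx, "ipv4:10.0.0.1:1234", client_channel_factory, lb_args);
grpc_channel_args_destroy(exec_ctx, lb_args);
```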

@ -28,19 +28,19 @@
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/support/string.h"
grpc_channel *grpc_lb_policy_grpclb_create_lb_channel(
grpc_exec_ctx *exec_ctx, const char *lb_service_target_addresses,
grpc_client_channel_factory *client_channel_factory,
grpc_channel_args *args) {
grpc_channel_args *new_args = args;
grpc_channel_credentials *channel_credentials =
grpc_channel* grpc_lb_policy_grpclb_create_lb_channel(
grpc_exec_ctx* exec_ctx, const char* lb_service_target_addresses,
grpc_client_channel_factory* client_channel_factory,
grpc_channel_args* args) {
grpc_channel_args* new_args = args;
grpc_channel_credentials* channel_credentials =
grpc_channel_credentials_find_in_args(args);
if (channel_credentials != NULL) {
/* Substitute the channel credentials with a version without call
* credentials: the load balancer is not necessarily trusted to handle
* bearer token credentials */
static const char *keys_to_remove[] = {GRPC_ARG_CHANNEL_CREDENTIALS};
grpc_channel_credentials *creds_sans_call_creds =
static const char* keys_to_remove[] = {GRPC_ARG_CHANNEL_CREDENTIALS};
grpc_channel_credentials* creds_sans_call_creds =
grpc_channel_credentials_duplicate_without_call_credentials(
channel_credentials);
GPR_ASSERT(creds_sans_call_creds != NULL);
@ -52,7 +52,7 @@ grpc_channel *grpc_lb_policy_grpclb_create_lb_channel(
GPR_ARRAY_SIZE(args_to_add));
grpc_channel_credentials_unref(exec_ctx, creds_sans_call_creds);
}
grpc_channel *lb_channel = grpc_client_channel_factory_create_channel(
grpc_channel* lb_channel = grpc_client_channel_factory_create_channel(
exec_ctx, client_channel_factory, lb_service_target_addresses,
GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, new_args);
if (channel_credentials != NULL) {
@ -61,10 +61,10 @@ grpc_channel *grpc_lb_policy_grpclb_create_lb_channel(
return lb_channel;
}
grpc_channel_args *grpc_lb_policy_grpclb_build_lb_channel_args(
grpc_exec_ctx *exec_ctx, grpc_slice_hash_table *targets_info,
grpc_fake_resolver_response_generator *response_generator,
const grpc_channel_args *args) {
grpc_channel_args* grpc_lb_policy_grpclb_build_lb_channel_args(
grpc_exec_ctx* exec_ctx, grpc_slice_hash_table* targets_info,
grpc_fake_resolver_response_generator* response_generator,
const grpc_channel_args* args) {
const grpc_arg to_add[] = {
grpc_lb_targets_info_create_channel_arg(targets_info),
grpc_fake_resolver_response_generator_arg(response_generator)};
@ -89,7 +89,7 @@ grpc_channel_args *grpc_lb_policy_grpclb_build_lb_channel_args(
*
* - The fake resolver generator, because we are replacing it with the one
* from the grpclb policy, used to propagate updates to the LB channel. */
static const char *keys_to_remove[] = {
static const char* keys_to_remove[] = {
GRPC_ARG_LB_POLICY_NAME, GRPC_ARG_LB_ADDRESSES, GRPC_ARG_SERVER_URI,
GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR};
/* Add the targets info table to be used for secure naming */

@ -70,4 +70,4 @@ void grpc_grpclb_dropped_call_counts_destroy(
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CLIENT_STATS_H \
*/
*/

@ -23,9 +23,9 @@
#include <grpc/support/alloc.h>
/* invoked once for every Server in ServerList */
static bool count_serverlist(pb_istream_t *stream, const pb_field_t *field,
void **arg) {
grpc_grpclb_serverlist *sl = (grpc_grpclb_serverlist *)*arg;
static bool count_serverlist(pb_istream_t* stream, const pb_field_t* field,
void** arg) {
grpc_grpclb_serverlist* sl = (grpc_grpclb_serverlist*)*arg;
grpc_grpclb_server server;
if (!pb_decode(stream, grpc_lb_v1_Server_fields, &server)) {
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream));
@ -40,16 +40,16 @@ typedef struct decode_serverlist_arg {
* which index of the serverlist are we currently decoding */
size_t decoding_idx;
/* The decoded serverlist */
grpc_grpclb_serverlist *serverlist;
grpc_grpclb_serverlist* serverlist;
} decode_serverlist_arg;
/* invoked once for every Server in ServerList */
static bool decode_serverlist(pb_istream_t *stream, const pb_field_t *field,
void **arg) {
decode_serverlist_arg *dec_arg = (decode_serverlist_arg *)*arg;
static bool decode_serverlist(pb_istream_t* stream, const pb_field_t* field,
void** arg) {
decode_serverlist_arg* dec_arg = (decode_serverlist_arg*)*arg;
GPR_ASSERT(dec_arg->serverlist->num_servers >= dec_arg->decoding_idx);
grpc_grpclb_server *server =
(grpc_grpclb_server *)gpr_zalloc(sizeof(grpc_grpclb_server));
grpc_grpclb_server* server =
(grpc_grpclb_server*)gpr_zalloc(sizeof(grpc_grpclb_server));
if (!pb_decode(stream, grpc_lb_v1_Server_fields, server)) {
gpr_free(server);
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream));
@ -59,9 +59,9 @@ static bool decode_serverlist(pb_istream_t *stream, const pb_field_t *field,
return true;
}
grpc_grpclb_request *grpc_grpclb_request_create(const char *lb_service_name) {
grpc_grpclb_request *req =
(grpc_grpclb_request *)gpr_malloc(sizeof(grpc_grpclb_request));
grpc_grpclb_request* grpc_grpclb_request_create(const char* lb_service_name) {
grpc_grpclb_request* req =
(grpc_grpclb_request*)gpr_malloc(sizeof(grpc_grpclb_request));
req->has_client_stats = false;
req->has_initial_request = true;
req->initial_request.has_name = true;
@ -71,24 +71,24 @@ grpc_grpclb_request *grpc_grpclb_request_create(const char *lb_service_name) {
}
static void populate_timestamp(gpr_timespec timestamp,
struct _grpc_lb_v1_Timestamp *timestamp_pb) {
struct _grpc_lb_v1_Timestamp* timestamp_pb) {
timestamp_pb->has_seconds = true;
timestamp_pb->seconds = timestamp.tv_sec;
timestamp_pb->has_nanos = true;
timestamp_pb->nanos = timestamp.tv_nsec;
}
static bool encode_string(pb_ostream_t *stream, const pb_field_t *field,
void *const *arg) {
char *str = (char *)*arg;
static bool encode_string(pb_ostream_t* stream, const pb_field_t* field,
void* const* arg) {
char* str = (char*)*arg;
if (!pb_encode_tag_for_field(stream, field)) return false;
return pb_encode_string(stream, (uint8_t *)str, strlen(str));
return pb_encode_string(stream, (uint8_t*)str, strlen(str));
}
static bool encode_drops(pb_ostream_t *stream, const pb_field_t *field,
void *const *arg) {
grpc_grpclb_dropped_call_counts *drop_entries =
(grpc_grpclb_dropped_call_counts *)*arg;
static bool encode_drops(pb_ostream_t* stream, const pb_field_t* field,
void* const* arg) {
grpc_grpclb_dropped_call_counts* drop_entries =
(grpc_grpclb_dropped_call_counts*)*arg;
if (drop_entries == NULL) return true;
for (size_t i = 0; i < drop_entries->num_entries; ++i) {
if (!pb_encode_tag_for_field(stream, field)) return false;
@ -105,10 +105,10 @@ static bool encode_drops(pb_ostream_t *stream, const pb_field_t *field,
return true;
}
grpc_grpclb_request *grpc_grpclb_load_report_request_create_locked(
grpc_grpclb_client_stats *client_stats) {
grpc_grpclb_request *req =
(grpc_grpclb_request *)gpr_zalloc(sizeof(grpc_grpclb_request));
grpc_grpclb_request* grpc_grpclb_load_report_request_create_locked(
grpc_grpclb_client_stats* client_stats) {
grpc_grpclb_request* req =
(grpc_grpclb_request*)gpr_zalloc(sizeof(grpc_grpclb_request));
req->has_client_stats = true;
req->client_stats.has_timestamp = true;
populate_timestamp(gpr_now(GPR_CLOCK_REALTIME), &req->client_stats.timestamp);
@ -123,12 +123,12 @@ grpc_grpclb_request *grpc_grpclb_load_report_request_create_locked(
&req->client_stats.num_calls_finished,
&req->client_stats.num_calls_finished_with_client_failed_to_send,
&req->client_stats.num_calls_finished_known_received,
(grpc_grpclb_dropped_call_counts **)&req->client_stats
(grpc_grpclb_dropped_call_counts**)&req->client_stats
.calls_finished_with_drop.arg);
return req;
}
grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request) {
grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request* request) {
size_t encoded_length;
pb_ostream_t sizestream;
pb_ostream_t outputstream;
@ -145,10 +145,10 @@ grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request) {
return slice;
}
void grpc_grpclb_request_destroy(grpc_grpclb_request *request) {
void grpc_grpclb_request_destroy(grpc_grpclb_request* request) {
if (request->has_client_stats) {
grpc_grpclb_dropped_call_counts *drop_entries =
(grpc_grpclb_dropped_call_counts *)
grpc_grpclb_dropped_call_counts* drop_entries =
(grpc_grpclb_dropped_call_counts*)
request->client_stats.calls_finished_with_drop.arg;
grpc_grpclb_dropped_call_counts_destroy(drop_entries);
}
@ -156,7 +156,7 @@ void grpc_grpclb_request_destroy(grpc_grpclb_request *request) {
}
typedef grpc_lb_v1_LoadBalanceResponse grpc_grpclb_response;
grpc_grpclb_initial_response *grpc_grpclb_initial_response_parse(
grpc_grpclb_initial_response* grpc_grpclb_initial_response_parse(
grpc_slice encoded_grpc_grpclb_response) {
pb_istream_t stream =
pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response),
@ -170,8 +170,8 @@ grpc_grpclb_initial_response *grpc_grpclb_initial_response_parse(
if (!res.has_initial_response) return NULL;
grpc_grpclb_initial_response *initial_res =
(grpc_grpclb_initial_response *)gpr_malloc(
grpc_grpclb_initial_response* initial_res =
(grpc_grpclb_initial_response*)gpr_malloc(
sizeof(grpc_grpclb_initial_response));
memcpy(initial_res, &res.initial_response,
sizeof(grpc_grpclb_initial_response));
@ -179,14 +179,14 @@ grpc_grpclb_initial_response *grpc_grpclb_initial_response_parse(
return initial_res;
}
grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
grpc_grpclb_serverlist* grpc_grpclb_response_parse_serverlist(
grpc_slice encoded_grpc_grpclb_response) {
pb_istream_t stream =
pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response),
GRPC_SLICE_LENGTH(encoded_grpc_grpclb_response));
pb_istream_t stream_at_start = stream;
grpc_grpclb_serverlist *sl =
(grpc_grpclb_serverlist *)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
grpc_grpclb_serverlist* sl =
(grpc_grpclb_serverlist*)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
grpc_grpclb_response res;
memset(&res, 0, sizeof(grpc_grpclb_response));
// First pass: count number of servers.
@ -200,8 +200,8 @@ grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
}
// Second pass: populate servers.
if (sl->num_servers > 0) {
sl->servers = (grpc_grpclb_server **)gpr_zalloc(
sizeof(grpc_grpclb_server *) * sl->num_servers);
sl->servers = (grpc_grpclb_server**)gpr_zalloc(sizeof(grpc_grpclb_server*) *
sl->num_servers);
decode_serverlist_arg decode_arg;
memset(&decode_arg, 0, sizeof(decode_arg));
decode_arg.serverlist = sl;
@ -221,7 +221,7 @@ grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
return sl;
}
void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist) {
void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist* serverlist) {
if (serverlist == NULL) {
return;
}
@ -232,25 +232,25 @@ void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist) {
gpr_free(serverlist);
}
grpc_grpclb_serverlist *grpc_grpclb_serverlist_copy(
const grpc_grpclb_serverlist *sl) {
grpc_grpclb_serverlist *copy =
(grpc_grpclb_serverlist *)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
grpc_grpclb_serverlist* grpc_grpclb_serverlist_copy(
const grpc_grpclb_serverlist* sl) {
grpc_grpclb_serverlist* copy =
(grpc_grpclb_serverlist*)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
copy->num_servers = sl->num_servers;
memcpy(&copy->expiration_interval, &sl->expiration_interval,
sizeof(grpc_grpclb_duration));
copy->servers = (grpc_grpclb_server **)gpr_malloc(
sizeof(grpc_grpclb_server *) * sl->num_servers);
copy->servers = (grpc_grpclb_server**)gpr_malloc(sizeof(grpc_grpclb_server*) *
sl->num_servers);
for (size_t i = 0; i < sl->num_servers; i++) {
copy->servers[i] =
(grpc_grpclb_server *)gpr_malloc(sizeof(grpc_grpclb_server));
(grpc_grpclb_server*)gpr_malloc(sizeof(grpc_grpclb_server));
memcpy(copy->servers[i], sl->servers[i], sizeof(grpc_grpclb_server));
}
return copy;
}
bool grpc_grpclb_serverlist_equals(const grpc_grpclb_serverlist *lhs,
const grpc_grpclb_serverlist *rhs) {
bool grpc_grpclb_serverlist_equals(const grpc_grpclb_serverlist* lhs,
const grpc_grpclb_serverlist* rhs) {
if (lhs == NULL || rhs == NULL) {
return false;
}
@ -269,13 +269,13 @@ bool grpc_grpclb_serverlist_equals(const grpc_grpclb_serverlist *lhs,
return true;
}
bool grpc_grpclb_server_equals(const grpc_grpclb_server *lhs,
const grpc_grpclb_server *rhs) {
bool grpc_grpclb_server_equals(const grpc_grpclb_server* lhs,
const grpc_grpclb_server* rhs) {
return memcmp(lhs, rhs, sizeof(grpc_grpclb_server)) == 0;
}
int grpc_grpclb_duration_compare(const grpc_grpclb_duration *lhs,
const grpc_grpclb_duration *rhs) {
int grpc_grpclb_duration_compare(const grpc_grpclb_duration* lhs,
const grpc_grpclb_duration* rhs) {
GPR_ASSERT(lhs && rhs);
if (lhs->has_seconds && rhs->has_seconds) {
if (lhs->seconds < rhs->seconds) return -1;
@ -299,13 +299,13 @@ int grpc_grpclb_duration_compare(const grpc_grpclb_duration *lhs,
return 0;
}
grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration *duration_pb) {
grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration* duration_pb) {
return (grpc_millis)(
(duration_pb->has_seconds ? duration_pb->seconds : 0) * GPR_MS_PER_SEC +
(duration_pb->has_nanos ? duration_pb->nanos : 0) / GPR_NS_PER_MS);
}
void grpc_grpclb_initial_response_destroy(
grpc_grpclb_initial_response *response) {
grpc_grpclb_initial_response* response) {
gpr_free(response);
}

@ -37,59 +37,59 @@ typedef grpc_lb_v1_InitialLoadBalanceResponse grpc_grpclb_initial_response;
typedef grpc_lb_v1_Server grpc_grpclb_server;
typedef grpc_lb_v1_Duration grpc_grpclb_duration;
typedef struct {
grpc_grpclb_server **servers;
grpc_grpclb_server** servers;
size_t num_servers;
grpc_grpclb_duration expiration_interval;
} grpc_grpclb_serverlist;
/** Create a request for a gRPC LB service under \a lb_service_name */
grpc_grpclb_request *grpc_grpclb_request_create(const char *lb_service_name);
grpc_grpclb_request *grpc_grpclb_load_report_request_create_locked(
grpc_grpclb_client_stats *client_stats);
grpc_grpclb_request* grpc_grpclb_request_create(const char* lb_service_name);
grpc_grpclb_request* grpc_grpclb_load_report_request_create_locked(
grpc_grpclb_client_stats* client_stats);
/** Protocol Buffers v3-encode \a request */
grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request);
grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request* request);
/** Destroy \a request */
void grpc_grpclb_request_destroy(grpc_grpclb_request *request);
void grpc_grpclb_request_destroy(grpc_grpclb_request* request);
/** Parse (ie, decode) the bytes in \a encoded_grpc_grpclb_response as a \a
* grpc_grpclb_initial_response */
grpc_grpclb_initial_response *grpc_grpclb_initial_response_parse(
grpc_grpclb_initial_response* grpc_grpclb_initial_response_parse(
grpc_slice encoded_grpc_grpclb_response);
/** Parse the list of servers from an encoded \a grpc_grpclb_response */
grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
grpc_grpclb_serverlist* grpc_grpclb_response_parse_serverlist(
grpc_slice encoded_grpc_grpclb_response);
/** Return a copy of \a sl. The caller is responsible for calling \a
* grpc_grpclb_destroy_serverlist on the returned copy. */
grpc_grpclb_serverlist *grpc_grpclb_serverlist_copy(
const grpc_grpclb_serverlist *sl);
grpc_grpclb_serverlist* grpc_grpclb_serverlist_copy(
const grpc_grpclb_serverlist* sl);
bool grpc_grpclb_serverlist_equals(const grpc_grpclb_serverlist *lhs,
const grpc_grpclb_serverlist *rhs);
bool grpc_grpclb_serverlist_equals(const grpc_grpclb_serverlist* lhs,
const grpc_grpclb_serverlist* rhs);
bool grpc_grpclb_server_equals(const grpc_grpclb_server *lhs,
const grpc_grpclb_server *rhs);
bool grpc_grpclb_server_equals(const grpc_grpclb_server* lhs,
const grpc_grpclb_server* rhs);
/** Destroy \a serverlist */
void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist);
void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist* serverlist);
/** Compare \a lhs against \a rhs and return 0 if \a lhs and \a rhs are equal,
* < 0 if \a lhs represents a duration shorter than \a rhs and > 0 otherwise */
int grpc_grpclb_duration_compare(const grpc_grpclb_duration *lhs,
const grpc_grpclb_duration *rhs);
int grpc_grpclb_duration_compare(const grpc_grpclb_duration* lhs,
const grpc_grpclb_duration* rhs);
grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration *duration_pb);
grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration* duration_pb);
/** Destroy \a initial_response */
void grpc_grpclb_initial_response_destroy(
grpc_grpclb_initial_response *response);
grpc_grpclb_initial_response* response);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_LOAD_BALANCER_API_H \
*/
*/
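As a quick reference for the request helpers reflowed above, a hedged round-trip sketch (not part of this change; the balancer name is a placeholder and the public `grpc_slice_unref` is assumed to be available to the caller):

```c
/* Sketch only: create an initial LB request, serialize it via the nanopb
   encoder, hand the bytes to the balancer call, then release everything. */
grpc_grpclb_request* request = grpc_grpclb_request_create("lb.example.com");
grpc_slice encoded = grpc_grpclb_request_encode(request);
/* ... attach `encoded` as the payload of the balancer call's send-message op ... */
grpc_slice_unref(encoded);
grpc_grpclb_request_destroy(request);
```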

@ -33,33 +33,33 @@ grpc_tracer_flag grpc_lb_pick_first_trace =
GRPC_TRACER_INITIALIZER(false, "pick_first");
typedef struct pending_pick {
struct pending_pick *next;
struct pending_pick* next;
uint32_t initial_metadata_flags;
grpc_connected_subchannel **target;
grpc_closure *on_complete;
grpc_connected_subchannel** target;
grpc_closure* on_complete;
} pending_pick;
typedef struct {
/** base policy: must be first */
grpc_lb_policy base;
/** all our subchannels */
grpc_lb_subchannel_list *subchannel_list;
grpc_lb_subchannel_list* subchannel_list;
/** latest pending subchannel list */
grpc_lb_subchannel_list *latest_pending_subchannel_list;
grpc_lb_subchannel_list* latest_pending_subchannel_list;
/** selected subchannel in \a subchannel_list */
grpc_lb_subchannel_data *selected;
grpc_lb_subchannel_data* selected;
/** have we started picking? */
bool started_picking;
/** are we shut down? */
bool shutdown;
/** list of picks that are waiting on connectivity */
pending_pick *pending_picks;
pending_pick* pending_picks;
/** our connectivity state tracker */
grpc_connectivity_state_tracker state_tracker;
} pick_first_lb_policy;
static void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
static void pf_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
GPR_ASSERT(p->subchannel_list == NULL);
GPR_ASSERT(p->latest_pending_subchannel_list == NULL);
GPR_ASSERT(p->pending_picks == NULL);
@ -67,17 +67,17 @@ static void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_free(p);
grpc_subchannel_index_unref();
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG, "Pick First %p destroyed.", (void *)p);
gpr_log(GPR_DEBUG, "Pick First %p destroyed.", (void*)p);
}
}
static void shutdown_locked(grpc_exec_ctx *exec_ctx, pick_first_lb_policy *p,
grpc_error *error) {
static void shutdown_locked(grpc_exec_ctx* exec_ctx, pick_first_lb_policy* p,
grpc_error* error) {
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG, "Pick First %p Shutting down", p);
}
p->shutdown = true;
pending_pick *pp;
pending_pick* pp;
while ((pp = p->pending_picks) != NULL) {
p->pending_picks = pp->next;
*pp->target = NULL;
@ -100,19 +100,19 @@ static void shutdown_locked(grpc_exec_ctx *exec_ctx, pick_first_lb_policy *p,
GRPC_ERROR_UNREF(error);
}
static void pf_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
shutdown_locked(exec_ctx, (pick_first_lb_policy *)pol,
static void pf_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
shutdown_locked(exec_ctx, (pick_first_lb_policy*)pol,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"));
}
static void pf_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_connected_subchannel **target,
grpc_error *error) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp = p->pending_picks;
static void pf_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
grpc_connected_subchannel** target,
grpc_error* error) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
pending_pick* pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
pending_pick *next = pp->next;
pending_pick* next = pp->next;
if (pp->target == target) {
*target = NULL;
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
@@ -128,15 +128,15 @@ static void pf_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GRPC_ERROR_UNREF(error);
}
static void pf_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
static void pf_cancel_picks_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error *error) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp = p->pending_picks;
grpc_error* error) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
pending_pick* pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
pending_pick *next = pp->next;
pending_pick* next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
@@ -152,8 +152,8 @@ static void pf_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GRPC_ERROR_UNREF(error);
}
static void start_picking_locked(grpc_exec_ctx *exec_ctx,
pick_first_lb_policy *p) {
static void start_picking_locked(grpc_exec_ctx* exec_ctx,
pick_first_lb_policy* p) {
p->started_picking = true;
if (p->subchannel_list != NULL && p->subchannel_list->num_subchannels > 0) {
p->subchannel_list->checking_subchannel = 0;
@@ -164,19 +164,19 @@ static void start_picking_locked(grpc_exec_ctx *exec_ctx,
}
}
static void pf_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
static void pf_exit_idle_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
if (!p->started_picking) {
start_picking_locked(exec_ctx, p);
}
}
static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target,
grpc_call_context_element *context, void **user_data,
grpc_closure *on_complete) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
static int pf_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
const grpc_lb_policy_pick_args* pick_args,
grpc_connected_subchannel** target,
grpc_call_context_element* context, void** user_data,
grpc_closure* on_complete) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
// If we have a selected subchannel already, return synchronously.
if (p->selected != NULL) {
*target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected->connected_subchannel,
@@ -187,7 +187,7 @@ static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (!p->started_picking) {
start_picking_locked(exec_ctx, p);
}
pending_pick *pp = (pending_pick *)gpr_malloc(sizeof(*pp));
pending_pick* pp = (pending_pick*)gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->target = target;
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
@@ -196,10 +196,10 @@ static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
return 0;
}
static void destroy_unselected_subchannels_locked(grpc_exec_ctx *exec_ctx,
pick_first_lb_policy *p) {
static void destroy_unselected_subchannels_locked(grpc_exec_ctx* exec_ctx,
pick_first_lb_policy* p) {
for (size_t i = 0; i < p->subchannel_list->num_subchannels; ++i) {
grpc_lb_subchannel_data *sd = &p->subchannel_list->subchannels[i];
grpc_lb_subchannel_data* sd = &p->subchannel_list->subchannels[i];
if (p->selected != sd) {
grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
"selected_different_subchannel");
@@ -208,23 +208,23 @@ static void destroy_unselected_subchannels_locked(grpc_exec_ctx *exec_ctx,
}
static grpc_connectivity_state pf_check_connectivity_locked(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_error **error) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol, grpc_error** error) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
return grpc_connectivity_state_get(&p->state_tracker, error);
}
static void pf_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *pol,
grpc_connectivity_state *current,
grpc_closure *notify) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
static void pf_notify_on_state_change_locked(grpc_exec_ctx* exec_ctx,
grpc_lb_policy* pol,
grpc_connectivity_state* current,
grpc_closure* notify) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
grpc_connectivity_state_notify_on_state_change(exec_ctx, &p->state_tracker,
current, notify);
}
static void pf_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_closure *closure) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
static void pf_ping_one_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
grpc_closure* closure) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
if (p->selected) {
grpc_connected_subchannel_ping(exec_ctx, p->selected->connected_subchannel,
closure);
@@ -234,13 +234,13 @@ static void pf_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
}
static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
static void pf_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error);
static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_args *args) {
pick_first_lb_policy *p = (pick_first_lb_policy *)policy;
const grpc_arg *arg =
static void pf_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
const grpc_lb_policy_args* args) {
pick_first_lb_policy* p = (pick_first_lb_policy*)policy;
const grpc_arg* arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
if (p->subchannel_list == NULL) {
@@ -254,17 +254,17 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
gpr_log(GPR_ERROR,
"No valid LB addresses channel arg for Pick First %p update, "
"ignoring.",
(void *)p);
(void*)p);
}
return;
}
const grpc_lb_addresses *addresses =
(const grpc_lb_addresses *)arg->value.pointer.p;
const grpc_lb_addresses* addresses =
(const grpc_lb_addresses*)arg->value.pointer.p;
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO, "Pick First %p received update with %lu addresses",
(void *)p, (unsigned long)addresses->num_addresses);
(void*)p, (unsigned long)addresses->num_addresses);
}
grpc_lb_subchannel_list *subchannel_list = grpc_lb_subchannel_list_create(
grpc_lb_subchannel_list* subchannel_list = grpc_lb_subchannel_list_create(
exec_ctx, &p->base, &grpc_lb_pick_first_trace, addresses, args,
pf_connectivity_changed_locked);
if (subchannel_list->num_subchannels == 0) {
@@ -294,7 +294,7 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
// We do have a selected subchannel.
// Check if it's present in the new list. If so, we're done.
for (size_t i = 0; i < subchannel_list->num_subchannels; ++i) {
grpc_lb_subchannel_data *sd = &subchannel_list->subchannels[i];
grpc_lb_subchannel_data* sd = &subchannel_list->subchannels[i];
if (sd->subchannel == p->selected->subchannel) {
// The currently selected subchannel is in the update: we are done.
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
@@ -339,8 +339,8 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
gpr_log(GPR_DEBUG,
"Pick First %p Shutting down latest pending subchannel list "
"%p, about to be replaced by newer latest %p",
(void *)p, (void *)p->latest_pending_subchannel_list,
(void *)subchannel_list);
(void*)p, (void*)p->latest_pending_subchannel_list,
(void*)subchannel_list);
}
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->latest_pending_subchannel_list,
@@ -358,19 +358,19 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
}
}
static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_lb_subchannel_data *sd = (grpc_lb_subchannel_data *)arg;
pick_first_lb_policy *p = (pick_first_lb_policy *)sd->subchannel_list->policy;
static void pf_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
grpc_lb_subchannel_data* sd = (grpc_lb_subchannel_data*)arg;
pick_first_lb_policy* p = (pick_first_lb_policy*)sd->subchannel_list->policy;
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG,
"Pick First %p connectivity changed for subchannel %p (%" PRIuPTR
" of %" PRIuPTR
"), subchannel_list %p: state=%s p->shutdown=%d "
"sd->subchannel_list->shutting_down=%d error=%s",
(void *)p, (void *)sd->subchannel,
(void*)p, (void*)sd->subchannel,
sd->subchannel_list->checking_subchannel,
sd->subchannel_list->num_subchannels, (void *)sd->subchannel_list,
sd->subchannel_list->num_subchannels, (void*)sd->subchannel_list,
grpc_connectivity_state_name(sd->pending_connectivity_state_unsafe),
p->shutdown, sd->subchannel_list->shutting_down,
grpc_error_string(error));
@@ -465,13 +465,13 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
"connected");
p->selected = sd;
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", (void *)p,
(void *)sd->subchannel);
gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", (void*)p,
(void*)sd->subchannel);
}
// Drop all other subchannels, since we are now connected.
destroy_unselected_subchannels_locked(exec_ctx, p);
// Update any calls that were waiting for a pick.
pending_pick *pp;
pending_pick* pp;
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
@@ -479,7 +479,7 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO,
"Servicing pending pick with selected subchannel %p",
(void *)p->selected);
(void*)p->selected);
}
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
@@ -530,7 +530,7 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
"pf_candidate_shutdown");
// Advance to next subchannel and check its state.
grpc_lb_subchannel_data *original_sd = sd;
grpc_lb_subchannel_data* original_sd = sd;
do {
sd->subchannel_list->checking_subchannel =
(sd->subchannel_list->checking_subchannel + 1) %
@@ -578,17 +578,17 @@ static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
pf_notify_on_state_change_locked,
pf_update_locked};
static void pick_first_factory_ref(grpc_lb_policy_factory *factory) {}
static void pick_first_factory_ref(grpc_lb_policy_factory* factory) {}
static void pick_first_factory_unref(grpc_lb_policy_factory *factory) {}
static void pick_first_factory_unref(grpc_lb_policy_factory* factory) {}
static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args) {
static grpc_lb_policy* create_pick_first(grpc_exec_ctx* exec_ctx,
grpc_lb_policy_factory* factory,
grpc_lb_policy_args* args) {
GPR_ASSERT(args->client_channel_factory != NULL);
pick_first_lb_policy *p = (pick_first_lb_policy *)gpr_zalloc(sizeof(*p));
pick_first_lb_policy* p = (pick_first_lb_policy*)gpr_zalloc(sizeof(*p));
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG, "Pick First %p created.", (void *)p);
gpr_log(GPR_DEBUG, "Pick First %p created.", (void*)p);
}
pf_update_locked(exec_ctx, &p->base, args);
grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable, args->combiner);
@@ -603,7 +603,7 @@ static const grpc_lb_policy_factory_vtable pick_first_factory_vtable = {
static grpc_lb_policy_factory pick_first_lb_policy_factory = {
&pick_first_factory_vtable};
static grpc_lb_policy_factory *pick_first_lb_factory_create() {
static grpc_lb_policy_factory* pick_first_lb_factory_create() {
return &pick_first_lb_policy_factory;
}

@@ -46,12 +46,12 @@ grpc_tracer_flag grpc_lb_round_robin_trace =
*
* Once a pick is available, \a target is updated and \a on_complete called. */
typedef struct pending_pick {
struct pending_pick *next;
struct pending_pick* next;
/* output argument where to store the pick()ed user_data. It'll be NULL if no
* such data is present or there's an error (the definite test for errors is
* \a target being NULL). */
void **user_data;
void** user_data;
/* bitmask passed to pick() and used for selective cancelling. See
* grpc_lb_policy_cancel_picks() */
@@ -59,24 +59,24 @@ typedef struct pending_pick {
/* output argument where to store the pick()ed connected subchannel, or NULL
* upon error. */
grpc_connected_subchannel **target;
grpc_connected_subchannel** target;
/* to be invoked once the pick() has completed (regardless of success) */
grpc_closure *on_complete;
grpc_closure* on_complete;
} pending_pick;
typedef struct round_robin_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
grpc_lb_subchannel_list *subchannel_list;
grpc_lb_subchannel_list* subchannel_list;
/** have we started picking? */
bool started_picking;
/** are we shutting down? */
bool shutdown;
/** List of picks that are waiting on connectivity */
pending_pick *pending_picks;
pending_pick* pending_picks;
/** our connectivity state tracker */
grpc_connectivity_state_tracker state_tracker;
@@ -89,7 +89,7 @@ typedef struct round_robin_lb_policy {
* lists if they equal \a latest_pending_subchannel_list. In other words,
* racing callbacks that reference outdated subchannel lists won't perform any
* update. */
grpc_lb_subchannel_list *latest_pending_subchannel_list;
grpc_lb_subchannel_list* latest_pending_subchannel_list;
} round_robin_lb_policy;
/** Returns the index into p->subchannel_list->subchannels of the next
@@ -99,13 +99,13 @@ typedef struct round_robin_lb_policy {
* Note that this function does *not* update p->last_ready_subchannel_index.
* The caller must do that if it returns a pick. */
static size_t get_next_ready_subchannel_index_locked(
const round_robin_lb_policy *p) {
const round_robin_lb_policy* p) {
GPR_ASSERT(p->subchannel_list != NULL);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_INFO,
"[RR %p] getting next ready subchannel (out of %lu), "
"last_ready_subchannel_index=%lu",
(void *)p, (unsigned long)p->subchannel_list->num_subchannels,
(void*)p, (unsigned long)p->subchannel_list->num_subchannels,
(unsigned long)p->last_ready_subchannel_index);
}
for (size_t i = 0; i < p->subchannel_list->num_subchannels; ++i) {
@@ -116,8 +116,8 @@ static size_t get_next_ready_subchannel_index_locked(
GPR_DEBUG,
"[RR %p] checking subchannel %p, subchannel_list %p, index %lu: "
"state=%s",
(void *)p, (void *)p->subchannel_list->subchannels[index].subchannel,
(void *)p->subchannel_list, (unsigned long)index,
(void*)p, (void*)p->subchannel_list->subchannels[index].subchannel,
(void*)p->subchannel_list, (unsigned long)index,
grpc_connectivity_state_name(
p->subchannel_list->subchannels[index].curr_connectivity_state));
}
@@ -127,40 +127,39 @@ static size_t get_next_ready_subchannel_index_locked(
gpr_log(GPR_DEBUG,
"[RR %p] found next ready subchannel (%p) at index %lu of "
"subchannel_list %p",
(void *)p,
(void *)p->subchannel_list->subchannels[index].subchannel,
(unsigned long)index, (void *)p->subchannel_list);
(void*)p,
(void*)p->subchannel_list->subchannels[index].subchannel,
(unsigned long)index, (void*)p->subchannel_list);
}
return index;
}
}
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "[RR %p] no subchannels in ready state", (void *)p);
gpr_log(GPR_DEBUG, "[RR %p] no subchannels in ready state", (void*)p);
}
return p->subchannel_list->num_subchannels;
}
// Sets p->last_ready_subchannel_index to last_ready_index.
static void update_last_ready_subchannel_index_locked(round_robin_lb_policy *p,
static void update_last_ready_subchannel_index_locked(round_robin_lb_policy* p,
size_t last_ready_index) {
GPR_ASSERT(last_ready_index < p->subchannel_list->num_subchannels);
p->last_ready_subchannel_index = last_ready_index;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(
GPR_DEBUG,
"[RR %p] setting last_ready_subchannel_index=%lu (SC %p, CSC %p)",
(void *)p, (unsigned long)last_ready_index,
(void *)p->subchannel_list->subchannels[last_ready_index].subchannel,
(void *)p->subchannel_list->subchannels[last_ready_index]
.connected_subchannel);
gpr_log(GPR_DEBUG,
"[RR %p] setting last_ready_subchannel_index=%lu (SC %p, CSC %p)",
(void*)p, (unsigned long)last_ready_index,
(void*)p->subchannel_list->subchannels[last_ready_index].subchannel,
(void*)p->subchannel_list->subchannels[last_ready_index]
.connected_subchannel);
}
}
static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
static void rr_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "[RR %p] Destroying Round Robin policy at %p",
(void *)pol, (void *)pol);
(void*)pol, (void*)pol);
}
GPR_ASSERT(p->subchannel_list == NULL);
GPR_ASSERT(p->latest_pending_subchannel_list == NULL);
@@ -169,13 +168,13 @@ static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_free(p);
}
static void shutdown_locked(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p,
grpc_error *error) {
static void shutdown_locked(grpc_exec_ctx* exec_ctx, round_robin_lb_policy* p,
grpc_error* error) {
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "[RR %p] Shutting down", p);
}
p->shutdown = true;
pending_pick *pp;
pending_pick* pp;
while ((pp = p->pending_picks) != NULL) {
p->pending_picks = pp->next;
*pp->target = NULL;
@@ -199,20 +198,20 @@ static void shutdown_locked(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p,
GRPC_ERROR_UNREF(error);
}
static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
static void rr_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
shutdown_locked(exec_ctx, p,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
}
static void rr_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_connected_subchannel **target,
grpc_error *error) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp = p->pending_picks;
static void rr_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
grpc_connected_subchannel** target,
grpc_error* error) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
pending_pick* pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
pending_pick *next = pp->next;
pending_pick* next = pp->next;
if (pp->target == target) {
*target = NULL;
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
@@ -228,15 +227,15 @@ static void rr_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GRPC_ERROR_UNREF(error);
}
static void rr_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
static void rr_cancel_picks_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error *error) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp = p->pending_picks;
grpc_error* error) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
pending_pick* pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
pending_pick *next = pp->next;
pending_pick* next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
*pp->target = NULL;
@@ -253,8 +252,8 @@ static void rr_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GRPC_ERROR_UNREF(error);
}
static void start_picking_locked(grpc_exec_ctx *exec_ctx,
round_robin_lb_policy *p) {
static void start_picking_locked(grpc_exec_ctx* exec_ctx,
round_robin_lb_policy* p) {
p->started_picking = true;
for (size_t i = 0; i < p->subchannel_list->num_subchannels; i++) {
grpc_lb_subchannel_list_ref_for_connectivity_watch(p->subchannel_list,
@@ -264,28 +263,28 @@ static void start_picking_locked(grpc_exec_ctx *exec_ctx,
}
}
static void rr_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
static void rr_exit_idle_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
if (!p->started_picking) {
start_picking_locked(exec_ctx, p);
}
}
static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target,
grpc_call_context_element *context, void **user_data,
grpc_closure *on_complete) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
static int rr_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
const grpc_lb_policy_pick_args* pick_args,
grpc_connected_subchannel** target,
grpc_call_context_element* context, void** user_data,
grpc_closure* on_complete) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
GPR_ASSERT(!p->shutdown);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_INFO, "[RR %p] Trying to pick", (void *)pol);
gpr_log(GPR_INFO, "[RR %p] Trying to pick", (void*)pol);
}
if (p->subchannel_list != NULL) {
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
if (next_ready_index < p->subchannel_list->num_subchannels) {
/* readily available, report right away */
grpc_lb_subchannel_data *sd =
grpc_lb_subchannel_data* sd =
&p->subchannel_list->subchannels[next_ready_index];
*target =
GRPC_CONNECTED_SUBCHANNEL_REF(sd->connected_subchannel, "rr_picked");
@@ -297,8 +296,8 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GPR_DEBUG,
"[RR %p] Picked target <-- Subchannel %p (connected %p) (sl %p, "
"index %lu)",
(void *)p, (void *)sd->subchannel, (void *)*target,
(void *)sd->subchannel_list, (unsigned long)next_ready_index);
(void*)p, (void*)sd->subchannel, (void*)*target,
(void*)sd->subchannel_list, (unsigned long)next_ready_index);
}
/* only advance the last picked pointer if the selection was used */
update_last_ready_subchannel_index_locked(p, next_ready_index);
@@ -309,7 +308,7 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (!p->started_picking) {
start_picking_locked(exec_ctx, p);
}
pending_pick *pp = (pending_pick *)gpr_malloc(sizeof(*pp));
pending_pick* pp = (pending_pick*)gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->target = target;
pp->on_complete = on_complete;
@@ -319,8 +318,8 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
return 0;
}
static void update_state_counters_locked(grpc_lb_subchannel_data *sd) {
grpc_lb_subchannel_list *subchannel_list = sd->subchannel_list;
static void update_state_counters_locked(grpc_lb_subchannel_data* sd) {
grpc_lb_subchannel_list* subchannel_list = sd->subchannel_list;
if (sd->prev_connectivity_state == GRPC_CHANNEL_READY) {
GPR_ASSERT(subchannel_list->num_ready > 0);
--subchannel_list->num_ready;
@@ -352,7 +351,7 @@ static void update_state_counters_locked(grpc_lb_subchannel_data *sd) {
* used upon policy transition to TRANSIENT_FAILURE or SHUTDOWN. Returns the
* connectivity status set. */
static grpc_connectivity_state update_lb_connectivity_status_locked(
grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd, grpc_error *error) {
grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd, grpc_error* error) {
/* In priority order. The first rule to match terminates the search (ie, if we
* are on rule n, all previous rules were unfulfilled).
*
@@ -374,8 +373,8 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
* CHECK: p->num_idle == p->subchannel_list->num_subchannels.
*/
grpc_connectivity_state new_state = sd->curr_connectivity_state;
grpc_lb_subchannel_list *subchannel_list = sd->subchannel_list;
round_robin_lb_policy *p = (round_robin_lb_policy *)subchannel_list->policy;
grpc_lb_subchannel_list* subchannel_list = sd->subchannel_list;
round_robin_lb_policy* p = (round_robin_lb_policy*)subchannel_list->policy;
if (subchannel_list->num_ready > 0) { /* 1) READY */
grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_READY,
GRPC_ERROR_NONE, "rr_ready");
@@ -409,18 +408,18 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
return new_state;
}
static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_lb_subchannel_data *sd = (grpc_lb_subchannel_data *)arg;
round_robin_lb_policy *p =
(round_robin_lb_policy *)sd->subchannel_list->policy;
static void rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
grpc_lb_subchannel_data* sd = (grpc_lb_subchannel_data*)arg;
round_robin_lb_policy* p =
(round_robin_lb_policy*)sd->subchannel_list->policy;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(
GPR_DEBUG,
"[RR %p] connectivity changed for subchannel %p, subchannel_list %p: "
"prev_state=%s new_state=%s p->shutdown=%d "
"sd->subchannel_list->shutting_down=%d error=%s",
(void *)p, (void *)sd->subchannel, (void *)sd->subchannel_list,
(void*)p, (void*)sd->subchannel, (void*)sd->subchannel_list,
grpc_connectivity_state_name(sd->prev_connectivity_state),
grpc_connectivity_state_name(sd->pending_connectivity_state_unsafe),
p->shutdown, sd->subchannel_list->shutting_down,
@@ -487,8 +486,8 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
gpr_log(GPR_DEBUG,
"[RR %p] phasing out subchannel list %p (size %lu) in favor "
"of %p (size %lu)",
(void *)p, (void *)p->subchannel_list, num_subchannels,
(void *)sd->subchannel_list, num_subchannels);
(void*)p, (void*)p->subchannel_list, num_subchannels,
(void*)sd->subchannel_list, num_subchannels);
}
if (p->subchannel_list != NULL) {
// dispose of the current subchannel_list
@@ -503,14 +502,14 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
* p->pending_picks. This preemptively replicates rr_pick()'s actions. */
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
GPR_ASSERT(next_ready_index < p->subchannel_list->num_subchannels);
grpc_lb_subchannel_data *selected =
grpc_lb_subchannel_data* selected =
&p->subchannel_list->subchannels[next_ready_index];
if (p->pending_picks != NULL) {
// if the selected subchannel is going to be used for the pending
// picks, update the last picked pointer
update_last_ready_subchannel_index_locked(p, next_ready_index);
}
pending_pick *pp;
pending_pick* pp;
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
@@ -522,8 +521,8 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
gpr_log(GPR_DEBUG,
"[RR %p] Fulfilling pending pick. Target <-- subchannel %p "
"(subchannel_list %p, index %lu)",
(void *)p, (void *)selected->subchannel,
(void *)p->subchannel_list, (unsigned long)next_ready_index);
(void*)p, (void*)selected->subchannel,
(void*)p->subchannel_list, (unsigned long)next_ready_index);
}
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
@@ -535,41 +534,42 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
static grpc_connectivity_state rr_check_connectivity_locked(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_error **error) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol, grpc_error** error) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
return grpc_connectivity_state_get(&p->state_tracker, error);
}
static void rr_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *pol,
grpc_connectivity_state *current,
grpc_closure *notify) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
static void rr_notify_on_state_change_locked(grpc_exec_ctx* exec_ctx,
grpc_lb_policy* pol,
grpc_connectivity_state* current,
grpc_closure* notify) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
grpc_connectivity_state_notify_on_state_change(exec_ctx, &p->state_tracker,
current, notify);
}
static void rr_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_closure *closure) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
static void rr_ping_one_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
grpc_closure* closure) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
if (next_ready_index < p->subchannel_list->num_subchannels) {
grpc_lb_subchannel_data *selected =
grpc_lb_subchannel_data* selected =
&p->subchannel_list->subchannels[next_ready_index];
grpc_connected_subchannel *target = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_connected_subchannel* target = GRPC_CONNECTED_SUBCHANNEL_REF(
selected->connected_subchannel, "rr_ping");
grpc_connected_subchannel_ping(exec_ctx, target, closure);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_ping");
} else {
GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Round Robin not connected"));
GRPC_CLOSURE_SCHED(
exec_ctx, closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Round Robin not connected"));
}
}
static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_args *args) {
round_robin_lb_policy *p = (round_robin_lb_policy *)policy;
const grpc_arg *arg =
static void rr_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
const grpc_lb_policy_args* args) {
round_robin_lb_policy* p = (round_robin_lb_policy*)policy;
const grpc_arg* arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
gpr_log(GPR_ERROR, "[RR %p] update provided no addresses; ignoring", p);
@@ -583,12 +583,12 @@ static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
}
return;
}
grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p;
grpc_lb_addresses* addresses = (grpc_lb_addresses*)arg->value.pointer.p;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "[RR %p] received update with %" PRIuPTR " addresses", p,
addresses->num_addresses);
}
grpc_lb_subchannel_list *subchannel_list = grpc_lb_subchannel_list_create(
grpc_lb_subchannel_list* subchannel_list = grpc_lb_subchannel_list_create(
exec_ctx, &p->base, &grpc_lb_round_robin_trace, addresses, args,
rr_connectivity_changed_locked);
if (subchannel_list->num_subchannels == 0) {
@@ -609,8 +609,8 @@ static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
gpr_log(GPR_DEBUG,
"[RR %p] Shutting down latest pending subchannel list %p, "
"about to be replaced by newer latest %p",
(void *)p, (void *)p->latest_pending_subchannel_list,
(void *)subchannel_list);
(void*)p, (void*)p->latest_pending_subchannel_list,
(void*)subchannel_list);
}
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->latest_pending_subchannel_list, "sl_outdated");
@@ -649,22 +649,22 @@ static const grpc_lb_policy_vtable round_robin_lb_policy_vtable = {
rr_notify_on_state_change_locked,
rr_update_locked};
static void round_robin_factory_ref(grpc_lb_policy_factory *factory) {}
static void round_robin_factory_ref(grpc_lb_policy_factory* factory) {}
static void round_robin_factory_unref(grpc_lb_policy_factory *factory) {}
static void round_robin_factory_unref(grpc_lb_policy_factory* factory) {}
static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args) {
static grpc_lb_policy* round_robin_create(grpc_exec_ctx* exec_ctx,
grpc_lb_policy_factory* factory,
grpc_lb_policy_args* args) {
GPR_ASSERT(args->client_channel_factory != NULL);
round_robin_lb_policy *p = (round_robin_lb_policy *)gpr_zalloc(sizeof(*p));
round_robin_lb_policy* p = (round_robin_lb_policy*)gpr_zalloc(sizeof(*p));
grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable, args->combiner);
grpc_subchannel_index_ref();
grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
"round_robin");
rr_update_locked(exec_ctx, &p->base, args);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "[RR %p] Created with %lu subchannels", (void *)p,
gpr_log(GPR_DEBUG, "[RR %p] Created with %lu subchannels", (void*)p,
(unsigned long)p->subchannel_list->num_subchannels);
}
return &p->base;
@@ -677,7 +677,7 @@ static const grpc_lb_policy_factory_vtable round_robin_factory_vtable = {
static grpc_lb_policy_factory round_robin_lb_policy_factory = {
&round_robin_factory_vtable};
static grpc_lb_policy_factory *round_robin_lb_factory_create() {
static grpc_lb_policy_factory* round_robin_lb_factory_create() {
return &round_robin_lb_policy_factory;
}

@@ -28,17 +28,18 @@
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/transport/connectivity_state.h"
void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx *exec_ctx,
grpc_lb_subchannel_data *sd,
const char *reason) {
void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx* exec_ctx,
grpc_lb_subchannel_data* sd,
const char* reason) {
if (sd->subchannel != NULL) {
if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
gpr_log(
GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR
" of %" PRIuPTR " (subchannel %p): unreffing subchannel",
sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel);
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): unreffing subchannel",
sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
sd->subchannel_list,
(size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel);
}
GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, reason);
sd->subchannel = NULL;
@@ -56,7 +57,7 @@ void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx *exec_ctx,
}
void grpc_lb_subchannel_data_start_connectivity_watch(
grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd) {
grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd) {
if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
@@ -74,25 +75,26 @@ void grpc_lb_subchannel_data_start_connectivity_watch(
}
void grpc_lb_subchannel_data_stop_connectivity_watch(
grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd) {
grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd) {
if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
gpr_log(
GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): stopping connectivity watch",
sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel);
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): stopping connectivity watch",
sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
sd->subchannel_list,
(size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel);
}
GPR_ASSERT(sd->connectivity_notification_pending);
sd->connectivity_notification_pending = false;
}
grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *p, grpc_tracer_flag *tracer,
const grpc_lb_addresses *addresses, const grpc_lb_policy_args *args,
grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
grpc_exec_ctx* exec_ctx, grpc_lb_policy* p, grpc_tracer_flag* tracer,
const grpc_lb_addresses* addresses, const grpc_lb_policy_args* args,
grpc_iomgr_cb_func connectivity_changed_cb) {
grpc_lb_subchannel_list *subchannel_list =
(grpc_lb_subchannel_list *)gpr_zalloc(sizeof(*subchannel_list));
grpc_lb_subchannel_list* subchannel_list =
(grpc_lb_subchannel_list*)gpr_zalloc(sizeof(*subchannel_list));
if (GRPC_TRACER_ON(*tracer)) {
gpr_log(GPR_DEBUG,
"[%s %p] Creating subchannel list %p for %" PRIuPTR " subchannels",
@@ -101,11 +103,11 @@ grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
subchannel_list->policy = p;
subchannel_list->tracer = tracer;
gpr_ref_init(&subchannel_list->refcount, 1);
subchannel_list->subchannels = (grpc_lb_subchannel_data *)gpr_zalloc(
subchannel_list->subchannels = (grpc_lb_subchannel_data*)gpr_zalloc(
sizeof(grpc_lb_subchannel_data) * addresses->num_addresses);
// We need to remove the LB addresses in order to be able to compare the
// subchannel keys of subchannels from a different batch of addresses.
static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
static const char* keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
GRPC_ARG_LB_ADDRESSES};
// Create a subchannel for each address.
grpc_subchannel_args sc_args;
@@ -116,18 +118,18 @@ grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
memset(&sc_args, 0, sizeof(grpc_subchannel_args));
grpc_arg addr_arg =
grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
grpc_channel_args *new_args = grpc_channel_args_copy_and_add_and_remove(
grpc_channel_args* new_args = grpc_channel_args_copy_and_add_and_remove(
args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg,
1);
gpr_free(addr_arg.value.string);
sc_args.args = new_args;
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
grpc_subchannel* subchannel = grpc_client_channel_factory_create_subchannel(
exec_ctx, args->client_channel_factory, &sc_args);
grpc_channel_args_destroy(exec_ctx, new_args);
if (subchannel == NULL) {
// Subchannel could not be created.
if (GRPC_TRACER_ON(*tracer)) {
char *address_uri =
char* address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
gpr_log(GPR_DEBUG,
"[%s %p] could not create subchannel for address uri %s, "
@@ -138,15 +140,16 @@ grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
continue;
}
if (GRPC_TRACER_ON(*tracer)) {
char *address_uri =
char* address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
gpr_log(GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR
": Created subchannel %p for address uri %s",
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR
": Created subchannel %p for address uri %s",
tracer->name, p, subchannel_list, subchannel_index, subchannel,
address_uri);
gpr_free(address_uri);
}
grpc_lb_subchannel_data *sd =
grpc_lb_subchannel_data* sd =
&subchannel_list->subchannels[subchannel_index++];
sd->subchannel_list = subchannel_list;
sd->subchannel = subchannel;
@@ -169,15 +172,15 @@ grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
return subchannel_list;
}
static void subchannel_list_destroy(grpc_exec_ctx *exec_ctx,
grpc_lb_subchannel_list *subchannel_list) {
static void subchannel_list_destroy(grpc_exec_ctx* exec_ctx,
grpc_lb_subchannel_list* subchannel_list) {
if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
gpr_log(GPR_DEBUG, "[%s %p] Destroying subchannel_list %p",
subchannel_list->tracer->name, subchannel_list->policy,
subchannel_list);
}
for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
grpc_lb_subchannel_data *sd = &subchannel_list->subchannels[i];
grpc_lb_subchannel_data* sd = &subchannel_list->subchannels[i];
grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
"subchannel_list_destroy");
}
@@ -185,8 +188,8 @@ static void subchannel_list_destroy(grpc_exec_ctx *exec_ctx,
gpr_free(subchannel_list);
}
void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list *subchannel_list,
const char *reason) {
void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list* subchannel_list,
const char* reason) {
gpr_ref_non_zero(&subchannel_list->refcount);
if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
@@ -197,9 +200,9 @@ void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list *subchannel_list,
}
}
void grpc_lb_subchannel_list_unref(grpc_exec_ctx *exec_ctx,
grpc_lb_subchannel_list *subchannel_list,
const char *reason) {
void grpc_lb_subchannel_list_unref(grpc_exec_ctx* exec_ctx,
grpc_lb_subchannel_list* subchannel_list,
const char* reason) {
const bool done = gpr_unref(&subchannel_list->refcount);
if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
@@ -214,35 +217,36 @@ void grpc_lb_subchannel_list_unref(grpc_exec_ctx *exec_ctx,
}
void grpc_lb_subchannel_list_ref_for_connectivity_watch(
grpc_lb_subchannel_list *subchannel_list, const char *reason) {
grpc_lb_subchannel_list* subchannel_list, const char* reason) {
GRPC_LB_POLICY_WEAK_REF(subchannel_list->policy, reason);
grpc_lb_subchannel_list_ref(subchannel_list, reason);
}
void grpc_lb_subchannel_list_unref_for_connectivity_watch(
grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
const char *reason) {
grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_list* subchannel_list,
const char* reason) {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, subchannel_list->policy, reason);
grpc_lb_subchannel_list_unref(exec_ctx, subchannel_list, reason);
}
static void subchannel_data_cancel_connectivity_watch(
grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd, const char *reason) {
grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd, const char* reason) {
if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
gpr_log(
GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): canceling connectivity watch (%s)",
sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel, reason);
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): canceling connectivity watch (%s)",
sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
sd->subchannel_list,
(size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel, reason);
}
grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
&sd->connectivity_changed_closure);
}
void grpc_lb_subchannel_list_shutdown_and_unref(
grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
const char *reason) {
grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_list* subchannel_list,
const char* reason) {
if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
gpr_log(GPR_DEBUG, "[%s %p] Shutting down subchannel_list %p (%s)",
subchannel_list->tracer->name, subchannel_list->policy,
@@ -251,7 +255,7 @@ void grpc_lb_subchannel_list_shutdown_and_unref(
GPR_ASSERT(!subchannel_list->shutting_down);
subchannel_list->shutting_down = true;
for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
grpc_lb_subchannel_data *sd = &subchannel_list->subchannels[i];
grpc_lb_subchannel_data* sd = &subchannel_list->subchannels[i];
// If there's a pending notification for this subchannel, cancel it;
// the callback is responsible for unreffing the subchannel.
// Otherwise, unref the subchannel directly.

@@ -44,10 +44,10 @@ typedef struct grpc_lb_subchannel_list grpc_lb_subchannel_list;
typedef struct {
/** backpointer to owning subchannel list */
grpc_lb_subchannel_list *subchannel_list;
grpc_lb_subchannel_list* subchannel_list;
/** subchannel itself */
grpc_subchannel *subchannel;
grpc_connected_subchannel *connected_subchannel;
grpc_subchannel* subchannel;
grpc_connected_subchannel* connected_subchannel;
/** Is a connectivity notification pending? */
bool connectivity_notification_pending;
/** notification that connectivity has changed on subchannel */
@@ -63,36 +63,36 @@ typedef struct {
* \a connectivity_changed_closure. */
grpc_connectivity_state pending_connectivity_state_unsafe;
/** the subchannel's target user data */
void *user_data;
void* user_data;
/** vtable to operate over \a user_data */
const grpc_lb_user_data_vtable *user_data_vtable;
const grpc_lb_user_data_vtable* user_data_vtable;
} grpc_lb_subchannel_data;
/// Unrefs the subchannel contained in sd.
void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx *exec_ctx,
grpc_lb_subchannel_data *sd,
const char *reason);
void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx* exec_ctx,
grpc_lb_subchannel_data* sd,
const char* reason);
/// Starts watching the connectivity state of the subchannel.
/// The connectivity_changed_cb callback must invoke either
/// grpc_lb_subchannel_data_stop_connectivity_watch() or again call
/// grpc_lb_subchannel_data_start_connectivity_watch().
void grpc_lb_subchannel_data_start_connectivity_watch(
grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd);
grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd);
/// Stops watching the connectivity state of the subchannel.
void grpc_lb_subchannel_data_stop_connectivity_watch(
grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd);
grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd);
struct grpc_lb_subchannel_list {
/** backpointer to owning policy */
grpc_lb_policy *policy;
grpc_lb_policy* policy;
grpc_tracer_flag *tracer;
grpc_tracer_flag* tracer;
/** all our subchannels */
size_t num_subchannels;
grpc_lb_subchannel_data *subchannels;
grpc_lb_subchannel_data* subchannels;
/** Index into subchannels of the one we're currently checking.
* Used when connecting to subchannels serially instead of in parallel. */
@@ -120,31 +120,31 @@ struct grpc_lb_subchannel_list {
bool shutting_down;
};
grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *p, grpc_tracer_flag *tracer,
const grpc_lb_addresses *addresses, const grpc_lb_policy_args *args,
grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
grpc_exec_ctx* exec_ctx, grpc_lb_policy* p, grpc_tracer_flag* tracer,
const grpc_lb_addresses* addresses, const grpc_lb_policy_args* args,
grpc_iomgr_cb_func connectivity_changed_cb);
void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list *subchannel_list,
const char *reason);
void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list* subchannel_list,
const char* reason);
void grpc_lb_subchannel_list_unref(grpc_exec_ctx *exec_ctx,
grpc_lb_subchannel_list *subchannel_list,
const char *reason);
void grpc_lb_subchannel_list_unref(grpc_exec_ctx* exec_ctx,
grpc_lb_subchannel_list* subchannel_list,
const char* reason);
/// Takes and releases refs needed for a connectivity notification.
/// This includes a ref to subchannel_list and a weak ref to the LB policy.
void grpc_lb_subchannel_list_ref_for_connectivity_watch(
grpc_lb_subchannel_list *subchannel_list, const char *reason);
grpc_lb_subchannel_list* subchannel_list, const char* reason);
void grpc_lb_subchannel_list_unref_for_connectivity_watch(
grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
const char *reason);
grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_list* subchannel_list,
const char* reason);
/// Mark subchannel_list as discarded. Unsubscribes all its subchannels. The
/// connectivity state notification callback will ultimately unref it.
void grpc_lb_subchannel_list_shutdown_and_unref(
grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
const char *reason);
grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_list* subchannel_list,
const char* reason);
#ifdef __cplusplus
}

@@ -37,7 +37,7 @@ typedef struct grpc_lb_policy_factory grpc_lb_policy_factory;
typedef struct grpc_lb_policy_factory_vtable grpc_lb_policy_factory_vtable;
struct grpc_lb_policy_factory {
const grpc_lb_policy_factory_vtable *vtable;
const grpc_lb_policy_factory_vtable* vtable;
};
/** A resolved address alongside any LB related information associated with it.
@@ -48,91 +48,91 @@ struct grpc_lb_policy_factory {
typedef struct grpc_lb_address {
grpc_resolved_address address;
bool is_balancer;
char *balancer_name; /* For secure naming. */
void *user_data;
char* balancer_name; /* For secure naming. */
void* user_data;
} grpc_lb_address;
typedef struct grpc_lb_user_data_vtable {
void *(*copy)(void *);
void (*destroy)(grpc_exec_ctx *exec_ctx, void *);
int (*cmp)(void *, void *);
void* (*copy)(void*);
void (*destroy)(grpc_exec_ctx* exec_ctx, void*);
int (*cmp)(void*, void*);
} grpc_lb_user_data_vtable;
typedef struct grpc_lb_addresses {
size_t num_addresses;
grpc_lb_address *addresses;
const grpc_lb_user_data_vtable *user_data_vtable;
grpc_lb_address* addresses;
const grpc_lb_user_data_vtable* user_data_vtable;
} grpc_lb_addresses;
/** Returns a grpc_addresses struct with enough space for
\a num_addresses addresses. The \a user_data_vtable argument may be
NULL if no user data will be added. */
grpc_lb_addresses *grpc_lb_addresses_create(
size_t num_addresses, const grpc_lb_user_data_vtable *user_data_vtable);
grpc_lb_addresses* grpc_lb_addresses_create(
size_t num_addresses, const grpc_lb_user_data_vtable* user_data_vtable);
/** Creates a copy of \a addresses. */
grpc_lb_addresses *grpc_lb_addresses_copy(const grpc_lb_addresses *addresses);
grpc_lb_addresses* grpc_lb_addresses_copy(const grpc_lb_addresses* addresses);
/** Sets the value of the address at index \a index of \a addresses.
* \a address is a socket address of length \a address_len.
* Takes ownership of \a balancer_name. */
void grpc_lb_addresses_set_address(grpc_lb_addresses *addresses, size_t index,
const void *address, size_t address_len,
bool is_balancer, const char *balancer_name,
void *user_data);
void grpc_lb_addresses_set_address(grpc_lb_addresses* addresses, size_t index,
const void* address, size_t address_len,
bool is_balancer, const char* balancer_name,
void* user_data);
/** Sets the value of the address at index \a index of \a addresses from \a uri.
* Returns true upon success, false otherwise. Takes ownership of \a
* balancer_name. */
bool grpc_lb_addresses_set_address_from_uri(grpc_lb_addresses *addresses,
size_t index, const grpc_uri *uri,
bool grpc_lb_addresses_set_address_from_uri(grpc_lb_addresses* addresses,
size_t index, const grpc_uri* uri,
bool is_balancer,
const char *balancer_name,
void *user_data);
const char* balancer_name,
void* user_data);
/** Compares \a addresses1 and \a addresses2. */
int grpc_lb_addresses_cmp(const grpc_lb_addresses *addresses1,
const grpc_lb_addresses *addresses2);
int grpc_lb_addresses_cmp(const grpc_lb_addresses* addresses1,
const grpc_lb_addresses* addresses2);
/** Destroys \a addresses. */
void grpc_lb_addresses_destroy(grpc_exec_ctx *exec_ctx,
grpc_lb_addresses *addresses);
void grpc_lb_addresses_destroy(grpc_exec_ctx* exec_ctx,
grpc_lb_addresses* addresses);
/** Returns a channel arg containing \a addresses. */
grpc_arg grpc_lb_addresses_create_channel_arg(
const grpc_lb_addresses *addresses);
const grpc_lb_addresses* addresses);
/** Returns the \a grpc_lb_addresses instance in \a channel_args or NULL */
grpc_lb_addresses *grpc_lb_addresses_find_channel_arg(
const grpc_channel_args *channel_args);
grpc_lb_addresses* grpc_lb_addresses_find_channel_arg(
const grpc_channel_args* channel_args);
/** Arguments passed to LB policies. */
struct grpc_lb_policy_args {
grpc_client_channel_factory *client_channel_factory;
grpc_channel_args *args;
grpc_combiner *combiner;
grpc_client_channel_factory* client_channel_factory;
grpc_channel_args* args;
grpc_combiner* combiner;
};
struct grpc_lb_policy_factory_vtable {
void (*ref)(grpc_lb_policy_factory *factory);
void (*unref)(grpc_lb_policy_factory *factory);
void (*ref)(grpc_lb_policy_factory* factory);
void (*unref)(grpc_lb_policy_factory* factory);
/** Implementation of grpc_lb_policy_factory_create_lb_policy */
grpc_lb_policy *(*create_lb_policy)(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args);
grpc_lb_policy* (*create_lb_policy)(grpc_exec_ctx* exec_ctx,
grpc_lb_policy_factory* factory,
grpc_lb_policy_args* args);
/** Name for the LB policy this factory implements */
const char *name;
const char* name;
};
void grpc_lb_policy_factory_ref(grpc_lb_policy_factory *factory);
void grpc_lb_policy_factory_unref(grpc_lb_policy_factory *factory);
void grpc_lb_policy_factory_ref(grpc_lb_policy_factory* factory);
void grpc_lb_policy_factory_unref(grpc_lb_policy_factory* factory);
/** Create a lb_policy instance. */
grpc_lb_policy *grpc_lb_policy_factory_create_lb_policy(
grpc_exec_ctx *exec_ctx, grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args);
grpc_lb_policy* grpc_lb_policy_factory_create_lb_policy(
grpc_exec_ctx* exec_ctx, grpc_lb_policy_factory* factory,
grpc_lb_policy_args* args);
#ifdef __cplusplus
}

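The grpc_lb_addresses declarations above follow a create / set-per-index / destroy lifecycle, with grpc_lb_addresses_create_channel_arg exposing the finished list as a channel arg. A minimal usage sketch against those signatures (the include path, the helper name, and the ownership note are illustrative assumptions, not code from this commit):

/* Hypothetical helper: wrap one resolved address in a single-entry
 * grpc_lb_addresses list and hand it back as a channel arg. */
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"

static grpc_arg make_lb_addresses_arg(const grpc_resolved_address* resolved) {
  grpc_lb_addresses* addresses =
      grpc_lb_addresses_create(1 /* num_addresses */, NULL /* no user data */);
  grpc_lb_addresses_set_address(addresses, 0 /* index */, resolved->addr,
                                resolved->len, false /* is_balancer */,
                                NULL /* balancer_name */, NULL /* user_data */);
  /* The returned arg points at `addresses`; keep the list alive until the arg
   * has been copied into a grpc_channel_args, then release it with
   * grpc_lb_addresses_destroy(). */
  return grpc_lb_addresses_create_channel_arg(addresses);
}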
@ -24,7 +24,7 @@
#define MAX_POLICIES 10
static grpc_lb_policy_factory *g_all_of_the_lb_policies[MAX_POLICIES];
static grpc_lb_policy_factory* g_all_of_the_lb_policies[MAX_POLICIES];
static int g_number_of_lb_policies = 0;
void grpc_lb_policy_registry_init(void) { g_number_of_lb_policies = 0; }
@ -36,7 +36,7 @@ void grpc_lb_policy_registry_shutdown(void) {
}
}
void grpc_register_lb_policy(grpc_lb_policy_factory *factory) {
void grpc_register_lb_policy(grpc_lb_policy_factory* factory) {
int i;
for (i = 0; i < g_number_of_lb_policies; i++) {
GPR_ASSERT(0 != gpr_stricmp(factory->vtable->name,
@ -47,7 +47,7 @@ void grpc_register_lb_policy(grpc_lb_policy_factory *factory) {
g_all_of_the_lb_policies[g_number_of_lb_policies++] = factory;
}
static grpc_lb_policy_factory *lookup_factory(const char *name) {
static grpc_lb_policy_factory* lookup_factory(const char* name) {
int i;
if (name == NULL) return NULL;
@ -61,10 +61,10 @@ static grpc_lb_policy_factory *lookup_factory(const char *name) {
return NULL;
}
grpc_lb_policy *grpc_lb_policy_create(grpc_exec_ctx *exec_ctx, const char *name,
grpc_lb_policy_args *args) {
grpc_lb_policy_factory *factory = lookup_factory(name);
grpc_lb_policy *lb_policy =
grpc_lb_policy* grpc_lb_policy_create(grpc_exec_ctx* exec_ctx, const char* name,
grpc_lb_policy_args* args) {
grpc_lb_policy_factory* factory = lookup_factory(name);
grpc_lb_policy* lb_policy =
grpc_lb_policy_factory_create_lb_policy(exec_ctx, factory, args);
return lb_policy;
}

@ -32,14 +32,14 @@ void grpc_lb_policy_registry_init(void);
void grpc_lb_policy_registry_shutdown(void);
/** Register a LB policy factory. */
void grpc_register_lb_policy(grpc_lb_policy_factory *factory);
void grpc_register_lb_policy(grpc_lb_policy_factory* factory);
/** Create a \a grpc_lb_policy instance.
*
* If \a name is NULL, the default factory from \a grpc_lb_policy_registry_init
* will be returned. */
grpc_lb_policy *grpc_lb_policy_create(grpc_exec_ctx *exec_ctx, const char *name,
grpc_lb_policy_args *args);
grpc_lb_policy* grpc_lb_policy_create(grpc_exec_ctx* exec_ctx, const char* name,
grpc_lb_policy_args* args);
#ifdef __cplusplus
}

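The registry above keys factories by policy name: grpc_register_lb_policy adds a factory at init time, and grpc_lb_policy_create looks one up and delegates instantiation to its vtable. A hedged sketch of that flow (my_lb_factory_create and both wrapper functions are hypothetical stand-ins, not part of this change):

/* Hypothetical per-policy factory getter, mirroring the pick_first /
 * round_robin ones shown earlier in this diff. */
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"

grpc_lb_policy_factory* my_lb_factory_create(void);

/* Register the factory once, e.g. from a plugin init function. */
void my_lb_policy_plugin_init(void) {
  grpc_register_lb_policy(my_lb_factory_create());
}

/* Later, instantiate a policy by name; the registry finds the factory and
 * calls its create_lb_policy vtable entry. */
grpc_lb_policy* make_pick_first(grpc_exec_ctx* exec_ctx,
                                grpc_lb_policy_args* args) {
  return grpc_lb_policy_create(exec_ctx, "pick_first", args);
}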
@ -33,13 +33,13 @@
#ifdef GRPC_HAVE_UNIX_SOCKET
bool grpc_parse_unix(const grpc_uri *uri,
grpc_resolved_address *resolved_addr) {
bool grpc_parse_unix(const grpc_uri* uri,
grpc_resolved_address* resolved_addr) {
if (strcmp("unix", uri->scheme) != 0) {
gpr_log(GPR_ERROR, "Expected 'unix' scheme, got '%s'", uri->scheme);
return false;
}
struct sockaddr_un *un = (struct sockaddr_un *)resolved_addr->addr;
struct sockaddr_un* un = (struct sockaddr_un*)resolved_addr->addr;
const size_t maxlen = sizeof(un->sun_path);
const size_t path_len = strnlen(uri->path, maxlen);
if (path_len == maxlen) return false;
@@ -51,24 +51,24 @@ bool grpc_parse_unix(const grpc_uri *uri,
#else /* GRPC_HAVE_UNIX_SOCKET */
bool grpc_parse_unix(const grpc_uri *uri,
grpc_resolved_address *resolved_addr) {
bool grpc_parse_unix(const grpc_uri* uri,
grpc_resolved_address* resolved_addr) {
abort();
}
#endif /* GRPC_HAVE_UNIX_SOCKET */
bool grpc_parse_ipv4_hostport(const char *hostport, grpc_resolved_address *addr,
bool grpc_parse_ipv4_hostport(const char* hostport, grpc_resolved_address* addr,
bool log_errors) {
bool success = false;
// Split host and port.
char *host;
char *port;
char* host;
char* port;
if (!gpr_split_host_port(hostport, &host, &port)) return false;
// Parse IP address.
memset(addr, 0, sizeof(*addr));
addr->len = sizeof(struct sockaddr_in);
struct sockaddr_in *in = (struct sockaddr_in *)addr->addr;
struct sockaddr_in* in = (struct sockaddr_in*)addr->addr;
in->sin_family = AF_INET;
if (inet_pton(AF_INET, host, &in->sin_addr) == 0) {
if (log_errors) gpr_log(GPR_ERROR, "invalid ipv4 address: '%s'", host);
@@ -92,32 +92,32 @@ done:
return success;
}
bool grpc_parse_ipv4(const grpc_uri *uri,
grpc_resolved_address *resolved_addr) {
bool grpc_parse_ipv4(const grpc_uri* uri,
grpc_resolved_address* resolved_addr) {
if (strcmp("ipv4", uri->scheme) != 0) {
gpr_log(GPR_ERROR, "Expected 'ipv4' scheme, got '%s'", uri->scheme);
return false;
}
const char *host_port = uri->path;
const char* host_port = uri->path;
if (*host_port == '/') ++host_port;
return grpc_parse_ipv4_hostport(host_port, resolved_addr,
true /* log_errors */);
}
bool grpc_parse_ipv6_hostport(const char *hostport, grpc_resolved_address *addr,
bool grpc_parse_ipv6_hostport(const char* hostport, grpc_resolved_address* addr,
bool log_errors) {
bool success = false;
// Split host and port.
char *host;
char *port;
char* host;
char* port;
if (!gpr_split_host_port(hostport, &host, &port)) return false;
// Parse IP address.
memset(addr, 0, sizeof(*addr));
addr->len = sizeof(struct sockaddr_in6);
struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)addr->addr;
struct sockaddr_in6* in6 = (struct sockaddr_in6*)addr->addr;
in6->sin6_family = AF_INET6;
// Handle the RFC6874 syntax for IPv6 zone identifiers.
char *host_end = (char *)gpr_memrchr(host, '%', strlen(host));
char* host_end = (char*)gpr_memrchr(host, '%', strlen(host));
if (host_end != NULL) {
GPR_ASSERT(host_end >= host);
char host_without_scope[INET6_ADDRSTRLEN];
@ -161,19 +161,19 @@ done:
return success;
}
bool grpc_parse_ipv6(const grpc_uri *uri,
grpc_resolved_address *resolved_addr) {
bool grpc_parse_ipv6(const grpc_uri* uri,
grpc_resolved_address* resolved_addr) {
if (strcmp("ipv6", uri->scheme) != 0) {
gpr_log(GPR_ERROR, "Expected 'ipv6' scheme, got '%s'", uri->scheme);
return false;
}
const char *host_port = uri->path;
const char* host_port = uri->path;
if (*host_port == '/') ++host_port;
return grpc_parse_ipv6_hostport(host_port, resolved_addr,
true /* log_errors */);
}
bool grpc_parse_uri(const grpc_uri *uri, grpc_resolved_address *resolved_addr) {
bool grpc_parse_uri(const grpc_uri* uri, grpc_resolved_address* resolved_addr) {
if (strcmp("unix", uri->scheme) == 0) {
return grpc_parse_unix(uri, resolved_addr);
} else if (strcmp("ipv4", uri->scheme) == 0) {

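grpc_parse_ipv4_hostport and grpc_parse_ipv6_hostport above split a bare "IP:port" string with gpr_split_host_port and convert the host with inet_pton into the matching sockaddr. The standalone POSIX C sketch below shows the same split-then-inet_pton approach for IPv4 only; it is an illustration, not the gRPC helper, and it omits the RFC 6874 zone-identifier handling the IPv6 path performs.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse "a.b.c.d:port" into a sockaddr_in. Returns true on success. */
static bool parse_ipv4_hostport(const char* hostport, struct sockaddr_in* out) {
  const char* colon = strrchr(hostport, ':'); /* split host and port */
  if (colon == NULL) return false;
  size_t host_len = (size_t)(colon - hostport);
  char host[INET_ADDRSTRLEN];
  if (host_len == 0 || host_len >= sizeof(host)) return false;
  memcpy(host, hostport, host_len);
  host[host_len] = '\0';
  memset(out, 0, sizeof(*out));
  out->sin_family = AF_INET;
  if (inet_pton(AF_INET, host, &out->sin_addr) != 1) return false;
  int port = atoi(colon + 1);
  if (port <= 0 || port > 65535) return false;
  out->sin_port = htons((uint16_t)port);
  return true;
}

int main(void) {
  struct sockaddr_in addr;
  printf("%s\n", parse_ipv4_hostport("127.0.0.1:50051", &addr) ? "ok" : "bad");
  return 0;
}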
@ -30,23 +30,23 @@ extern "C" {
/** Populate \a resolved_addr from \a uri, whose path is expected to contain a
* unix socket path. Returns true upon success. */
bool grpc_parse_unix(const grpc_uri *uri, grpc_resolved_address *resolved_addr);
bool grpc_parse_unix(const grpc_uri* uri, grpc_resolved_address* resolved_addr);
/** Populate \a resolved_addr from \a uri, whose path is expected to contain an
* IPv4 host:port pair. Returns true upon success. */
bool grpc_parse_ipv4(const grpc_uri *uri, grpc_resolved_address *resolved_addr);
bool grpc_parse_ipv4(const grpc_uri* uri, grpc_resolved_address* resolved_addr);
/** Populate \a resolved_addr from \a uri, whose path is expected to contain an
* IPv6 host:port pair. Returns true upon success. */
bool grpc_parse_ipv6(const grpc_uri *uri, grpc_resolved_address *resolved_addr);
bool grpc_parse_ipv6(const grpc_uri* uri, grpc_resolved_address* resolved_addr);
/** Populate \a resolved_addr from \a uri. Returns true upon success. */
bool grpc_parse_uri(const grpc_uri *uri, grpc_resolved_address *resolved_addr);
bool grpc_parse_uri(const grpc_uri* uri, grpc_resolved_address* resolved_addr);
/** Parse bare IPv4 or IPv6 "IP:port" strings. */
bool grpc_parse_ipv4_hostport(const char *hostport, grpc_resolved_address *addr,
bool grpc_parse_ipv4_hostport(const char* hostport, grpc_resolved_address* addr,
bool log_errors);
bool grpc_parse_ipv6_hostport(const char *hostport, grpc_resolved_address *addr,
bool grpc_parse_ipv6_hostport(const char* hostport, grpc_resolved_address* addr,
bool log_errors);
#ifdef __cplusplus

@ -24,17 +24,17 @@ grpc_tracer_flag grpc_trace_resolver_refcount =
GRPC_TRACER_INITIALIZER(false, "resolver_refcount");
#endif
void grpc_resolver_init(grpc_resolver *resolver,
const grpc_resolver_vtable *vtable,
grpc_combiner *combiner) {
void grpc_resolver_init(grpc_resolver* resolver,
const grpc_resolver_vtable* vtable,
grpc_combiner* combiner) {
resolver->vtable = vtable;
resolver->combiner = GRPC_COMBINER_REF(combiner, "resolver");
gpr_ref_init(&resolver->refs, 1);
}
#ifndef NDEBUG
void grpc_resolver_ref(grpc_resolver *resolver, const char *file, int line,
const char *reason) {
void grpc_resolver_ref(grpc_resolver* resolver, const char* file, int line,
const char* reason) {
if (GRPC_TRACER_ON(grpc_trace_resolver_refcount)) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&resolver->refs.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
@ -42,14 +42,14 @@ void grpc_resolver_ref(grpc_resolver *resolver, const char *file, int line,
old_refs, old_refs + 1, reason);
}
#else
void grpc_resolver_ref(grpc_resolver *resolver) {
void grpc_resolver_ref(grpc_resolver* resolver) {
#endif
gpr_ref(&resolver->refs);
}
#ifndef NDEBUG
void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
const char *file, int line, const char *reason) {
void grpc_resolver_unref(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver,
const char* file, int line, const char* reason) {
if (GRPC_TRACER_ON(grpc_trace_resolver_refcount)) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&resolver->refs.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
@ -57,27 +57,27 @@ void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
old_refs, old_refs - 1, reason);
}
#else
void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
void grpc_resolver_unref(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver) {
#endif
if (gpr_unref(&resolver->refs)) {
grpc_combiner *combiner = resolver->combiner;
grpc_combiner* combiner = resolver->combiner;
resolver->vtable->destroy(exec_ctx, resolver);
GRPC_COMBINER_UNREF(exec_ctx, combiner, "resolver");
}
}
void grpc_resolver_shutdown_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver) {
void grpc_resolver_shutdown_locked(grpc_exec_ctx* exec_ctx,
grpc_resolver* resolver) {
resolver->vtable->shutdown_locked(exec_ctx, resolver);
}
void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver) {
void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
grpc_resolver* resolver) {
resolver->vtable->channel_saw_error_locked(exec_ctx, resolver);
}
void grpc_resolver_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_channel_args **result,
grpc_closure *on_complete) {
void grpc_resolver_next_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver,
grpc_channel_args** result,
grpc_closure* on_complete) {
resolver->vtable->next_locked(exec_ctx, resolver, result, on_complete);
}

@ -35,49 +35,49 @@ extern grpc_tracer_flag grpc_trace_resolver_refcount;
/** \a grpc_resolver provides \a grpc_channel_args objects to its caller */
struct grpc_resolver {
const grpc_resolver_vtable *vtable;
const grpc_resolver_vtable* vtable;
gpr_refcount refs;
grpc_combiner *combiner;
grpc_combiner* combiner;
};
struct grpc_resolver_vtable {
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
void (*shutdown_locked)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
void (*channel_saw_error_locked)(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver);
void (*next_locked)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_channel_args **result, grpc_closure *on_complete);
void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver);
void (*shutdown_locked)(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver);
void (*channel_saw_error_locked)(grpc_exec_ctx* exec_ctx,
grpc_resolver* resolver);
void (*next_locked)(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver,
grpc_channel_args** result, grpc_closure* on_complete);
};
#ifndef NDEBUG
#define GRPC_RESOLVER_REF(p, r) grpc_resolver_ref((p), __FILE__, __LINE__, (r))
#define GRPC_RESOLVER_UNREF(e, p, r) \
grpc_resolver_unref((e), (p), __FILE__, __LINE__, (r))
void grpc_resolver_ref(grpc_resolver *policy, const char *file, int line,
const char *reason);
void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *policy,
const char *file, int line, const char *reason);
void grpc_resolver_ref(grpc_resolver* policy, const char* file, int line,
const char* reason);
void grpc_resolver_unref(grpc_exec_ctx* exec_ctx, grpc_resolver* policy,
const char* file, int line, const char* reason);
#else
#define GRPC_RESOLVER_REF(p, r) grpc_resolver_ref((p))
#define GRPC_RESOLVER_UNREF(e, p, r) grpc_resolver_unref((e), (p))
void grpc_resolver_ref(grpc_resolver *policy);
void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *policy);
void grpc_resolver_ref(grpc_resolver* policy);
void grpc_resolver_unref(grpc_exec_ctx* exec_ctx, grpc_resolver* policy);
#endif
void grpc_resolver_init(grpc_resolver *resolver,
const grpc_resolver_vtable *vtable,
grpc_combiner *combiner);
void grpc_resolver_init(grpc_resolver* resolver,
const grpc_resolver_vtable* vtable,
grpc_combiner* combiner);
void grpc_resolver_shutdown_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver);
void grpc_resolver_shutdown_locked(grpc_exec_ctx* exec_ctx,
grpc_resolver* resolver);
/** Notification that the channel has seen an error on some address.
Can be used as a hint that re-resolution is desirable soon.
Must be called from the combiner passed as a resolver_arg at construction
time.*/
void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver);
void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
grpc_resolver* resolver);
/** Get the next result from the resolver. Expected to set \a *result with
new channel args and then schedule \a on_complete for execution.
@ -87,9 +87,9 @@ void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
Must be called from the combiner passed as a resolver_arg at construction
time.*/
void grpc_resolver_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_channel_args **result,
grpc_closure *on_complete);
void grpc_resolver_next_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver,
grpc_channel_args** result,
grpc_closure* on_complete);
#ifdef __cplusplus
}
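
grpc_resolver is a hand-rolled C base class: the struct carries a vtable pointer plus a refcount, and the REF/UNREF macros add file, line, and reason bookkeeping in debug builds. A minimal standalone sketch of that shape, with illustrative names rather than gRPC's types:

#include <stdio.h>
#include <stdlib.h>

typedef struct base base;
typedef struct {
  void (*destroy)(base* b);
  void (*describe)(base* b);
} resolver_vtable;

struct base {
  const resolver_vtable* vtable; /* dispatch table shared by all instances */
  int refs;                      /* gRPC uses gpr_refcount plus debug tracing */
};

typedef struct {
  base base; /* base class: must be first */
  const char* target;
} my_resolver;

static void my_destroy(base* b) { free(b); }
static void my_describe(base* b) {
  printf("resolver for %s\n", ((my_resolver*)b)->target);
}
static const resolver_vtable my_vtable = {my_destroy, my_describe};

static base* resolver_create(const char* target) {
  my_resolver* r = (my_resolver*)calloc(1, sizeof(*r));
  r->base.vtable = &my_vtable;
  r->base.refs = 1;
  r->target = target;
  return &r->base;
}
static void resolver_ref(base* b) { b->refs++; }
static void resolver_unref(base* b) {
  if (--b->refs == 0) b->vtable->destroy(b);
}

int main(void) {
  base* r = resolver_create("dns:///example.com:443");
  resolver_ref(r);
  r->vtable->describe(r);
  resolver_unref(r);
  resolver_unref(r); /* last unref dispatches destroy through the vtable */
  return 0;
}

The last unref dispatching destroy through the vtable mirrors grpc_resolver_unref above.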

@ -53,38 +53,38 @@ typedef struct {
/** base class: must be first */
grpc_resolver base;
/** DNS server to use (if not system default) */
char *dns_server;
char* dns_server;
/** name to resolve (usually the same as target_name) */
char *name_to_resolve;
char* name_to_resolve;
/** default port to use */
char *default_port;
char* default_port;
/** channel args. */
grpc_channel_args *channel_args;
grpc_channel_args* channel_args;
/** whether to request the service config */
bool request_service_config;
/** pollset_set to drive the name resolution process */
grpc_pollset_set *interested_parties;
grpc_pollset_set* interested_parties;
/** Closures used by the combiner */
grpc_closure dns_ares_on_retry_timer_locked;
grpc_closure dns_ares_on_resolved_locked;
/** Combiner guarding the rest of the state */
grpc_combiner *combiner;
grpc_combiner* combiner;
/** are we currently resolving? */
bool resolving;
/** the pending resolving request */
grpc_ares_request *pending_request;
grpc_ares_request* pending_request;
/** which version of the result have we published? */
int published_version;
/** which version of the result is current? */
int resolved_version;
/** pending next completion, or NULL */
grpc_closure *next_completion;
grpc_closure* next_completion;
/** target result address for next completion */
grpc_channel_args **target_result;
grpc_channel_args** target_result;
/** current (fully resolved) result */
grpc_channel_args *resolved_result;
grpc_channel_args* resolved_result;
/** retry timer */
bool have_retry_timer;
grpc_timer retry_timer;
@ -92,32 +92,32 @@ typedef struct {
grpc_backoff backoff_state;
/** currently resolving addresses */
grpc_lb_addresses *lb_addresses;
grpc_lb_addresses* lb_addresses;
/** currently resolving service config */
char *service_config_json;
char* service_config_json;
} ares_dns_resolver;
static void dns_ares_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void dns_ares_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* r);
static void dns_ares_start_resolving_locked(grpc_exec_ctx *exec_ctx,
ares_dns_resolver *r);
static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
ares_dns_resolver *r);
static void dns_ares_start_resolving_locked(grpc_exec_ctx* exec_ctx,
ares_dns_resolver* r);
static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
ares_dns_resolver* r);
static void dns_ares_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void dns_ares_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *r);
static void dns_ares_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
grpc_channel_args **target_result,
grpc_closure *on_complete);
static void dns_ares_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* r);
static void dns_ares_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
grpc_resolver* r);
static void dns_ares_next_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* r,
grpc_channel_args** target_result,
grpc_closure* on_complete);
static const grpc_resolver_vtable dns_ares_resolver_vtable = {
dns_ares_destroy, dns_ares_shutdown_locked,
dns_ares_channel_saw_error_locked, dns_ares_next_locked};
static void dns_ares_shutdown_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver) {
ares_dns_resolver *r = (ares_dns_resolver *)resolver;
static void dns_ares_shutdown_locked(grpc_exec_ctx* exec_ctx,
grpc_resolver* resolver) {
ares_dns_resolver* r = (ares_dns_resolver*)resolver;
if (r->have_retry_timer) {
grpc_timer_cancel(exec_ctx, &r->retry_timer);
}
@ -133,18 +133,18 @@ static void dns_ares_shutdown_locked(grpc_exec_ctx *exec_ctx,
}
}
static void dns_ares_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver) {
ares_dns_resolver *r = (ares_dns_resolver *)resolver;
static void dns_ares_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
grpc_resolver* resolver) {
ares_dns_resolver* r = (ares_dns_resolver*)resolver;
if (!r->resolving) {
grpc_backoff_reset(&r->backoff_state);
dns_ares_start_resolving_locked(exec_ctx, r);
}
}
static void dns_ares_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
ares_dns_resolver *r = (ares_dns_resolver *)arg;
static void dns_ares_on_retry_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
ares_dns_resolver* r = (ares_dns_resolver*)arg;
r->have_retry_timer = false;
if (error == GRPC_ERROR_NONE) {
if (!r->resolving) {
@ -154,8 +154,8 @@ static void dns_ares_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "retry-timer");
}
static bool value_in_json_array(grpc_json *array, const char *value) {
for (grpc_json *entry = array->child; entry != NULL; entry = entry->next) {
static bool value_in_json_array(grpc_json* array, const char* value) {
for (grpc_json* entry = array->child; entry != NULL; entry = entry->next) {
if (entry->type == GRPC_JSON_STRING && strcmp(entry->value, value) == 0) {
return true;
}
@ -163,21 +163,21 @@ static bool value_in_json_array(grpc_json *array, const char *value) {
return false;
}
static char *choose_service_config(char *service_config_choice_json) {
grpc_json *choices_json = grpc_json_parse_string(service_config_choice_json);
static char* choose_service_config(char* service_config_choice_json) {
grpc_json* choices_json = grpc_json_parse_string(service_config_choice_json);
if (choices_json == NULL || choices_json->type != GRPC_JSON_ARRAY) {
gpr_log(GPR_ERROR, "cannot parse service config JSON string");
return NULL;
}
char *service_config = NULL;
for (grpc_json *choice = choices_json->child; choice != NULL;
char* service_config = NULL;
for (grpc_json* choice = choices_json->child; choice != NULL;
choice = choice->next) {
if (choice->type != GRPC_JSON_OBJECT) {
gpr_log(GPR_ERROR, "cannot parse service config JSON string");
break;
}
grpc_json *service_config_json = NULL;
for (grpc_json *field = choice->child; field != NULL; field = field->next) {
grpc_json* service_config_json = NULL;
for (grpc_json* field = choice->child; field != NULL; field = field->next) {
// Check client language, if specified.
if (strcmp(field->key, "clientLanguage") == 0) {
if (field->type != GRPC_JSON_ARRAY ||
@ -188,7 +188,7 @@ static char *choose_service_config(char *service_config_choice_json) {
}
// Check client hostname, if specified.
if (strcmp(field->key, "clientHostname") == 0) {
char *hostname = grpc_gethostname();
char* hostname = grpc_gethostname();
if (hostname == NULL || field->type != GRPC_JSON_ARRAY ||
!value_in_json_array(field, hostname)) {
service_config_json = NULL;
@ -225,22 +225,22 @@ static char *choose_service_config(char *service_config_choice_json) {
return service_config;
}
static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
ares_dns_resolver *r = (ares_dns_resolver *)arg;
grpc_channel_args *result = NULL;
static void dns_ares_on_resolved_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
ares_dns_resolver* r = (ares_dns_resolver*)arg;
grpc_channel_args* result = NULL;
GPR_ASSERT(r->resolving);
r->resolving = false;
r->pending_request = NULL;
if (r->lb_addresses != NULL) {
static const char *args_to_remove[2];
static const char* args_to_remove[2];
size_t num_args_to_remove = 0;
grpc_arg new_args[3];
size_t num_args_to_add = 0;
new_args[num_args_to_add++] =
grpc_lb_addresses_create_channel_arg(r->lb_addresses);
grpc_service_config *service_config = NULL;
char *service_config_string = NULL;
grpc_service_config* service_config = NULL;
char* service_config_string = NULL;
if (r->service_config_json != NULL) {
service_config_string = choose_service_config(r->service_config_json);
gpr_free(r->service_config_json);
@ -249,15 +249,15 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
service_config_string);
args_to_remove[num_args_to_remove++] = GRPC_ARG_SERVICE_CONFIG;
new_args[num_args_to_add++] = grpc_channel_arg_string_create(
(char *)GRPC_ARG_SERVICE_CONFIG, service_config_string);
(char*)GRPC_ARG_SERVICE_CONFIG, service_config_string);
service_config = grpc_service_config_create(service_config_string);
if (service_config != NULL) {
const char *lb_policy_name =
const char* lb_policy_name =
grpc_service_config_get_lb_policy_name(service_config);
if (lb_policy_name != NULL) {
args_to_remove[num_args_to_remove++] = GRPC_ARG_LB_POLICY_NAME;
new_args[num_args_to_add++] = grpc_channel_arg_string_create(
(char *)GRPC_ARG_LB_POLICY_NAME, (char *)lb_policy_name);
(char*)GRPC_ARG_LB_POLICY_NAME, (char*)lb_policy_name);
}
}
}
@ -269,7 +269,7 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(service_config_string);
grpc_lb_addresses_destroy(exec_ctx, r->lb_addresses);
} else {
const char *msg = grpc_error_string(error);
const char* msg = grpc_error_string(error);
gpr_log(GPR_DEBUG, "dns resolution failed: %s", msg);
grpc_millis next_try =
grpc_backoff_step(exec_ctx, &r->backoff_state).next_attempt_start_time;
@ -296,12 +296,12 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "dns-resolving");
}
static void dns_ares_next_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver,
grpc_channel_args **target_result,
grpc_closure *on_complete) {
static void dns_ares_next_locked(grpc_exec_ctx* exec_ctx,
grpc_resolver* resolver,
grpc_channel_args** target_result,
grpc_closure* on_complete) {
gpr_log(GPR_DEBUG, "dns_ares_next is called.");
ares_dns_resolver *r = (ares_dns_resolver *)resolver;
ares_dns_resolver* r = (ares_dns_resolver*)resolver;
GPR_ASSERT(!r->next_completion);
r->next_completion = on_complete;
r->target_result = target_result;
@ -313,8 +313,8 @@ static void dns_ares_next_locked(grpc_exec_ctx *exec_ctx,
}
}
static void dns_ares_start_resolving_locked(grpc_exec_ctx *exec_ctx,
ares_dns_resolver *r) {
static void dns_ares_start_resolving_locked(grpc_exec_ctx* exec_ctx,
ares_dns_resolver* r) {
GRPC_RESOLVER_REF(&r->base, "dns-resolving");
GPR_ASSERT(!r->resolving);
r->resolving = true;
@ -327,8 +327,8 @@ static void dns_ares_start_resolving_locked(grpc_exec_ctx *exec_ctx,
r->request_service_config ? &r->service_config_json : NULL);
}
static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
ares_dns_resolver *r) {
static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
ares_dns_resolver* r) {
if (r->next_completion != NULL &&
r->resolved_version != r->published_version) {
*r->target_result = r->resolved_result == NULL
@ -341,9 +341,9 @@ static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
}
}
static void dns_ares_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
static void dns_ares_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* gr) {
gpr_log(GPR_DEBUG, "dns_ares_destroy");
ares_dns_resolver *r = (ares_dns_resolver *)gr;
ares_dns_resolver* r = (ares_dns_resolver*)gr;
if (r->resolved_result != NULL) {
grpc_channel_args_destroy(exec_ctx, r->resolved_result);
}
@ -355,15 +355,15 @@ static void dns_ares_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
gpr_free(r);
}
static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
grpc_resolver_args *args,
const char *default_port) {
static grpc_resolver* dns_ares_create(grpc_exec_ctx* exec_ctx,
grpc_resolver_args* args,
const char* default_port) {
/* Get name from args. */
const char *path = args->uri->path;
const char* path = args->uri->path;
if (path[0] == '/') ++path;
/* Create resolver. */
ares_dns_resolver *r =
(ares_dns_resolver *)gpr_zalloc(sizeof(ares_dns_resolver));
ares_dns_resolver* r =
(ares_dns_resolver*)gpr_zalloc(sizeof(ares_dns_resolver));
grpc_resolver_init(&r->base, &dns_ares_resolver_vtable, args->combiner);
if (0 != strcmp(args->uri->authority, "")) {
r->dns_server = gpr_strdup(args->uri->authority);
@ -371,7 +371,7 @@ static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
r->name_to_resolve = gpr_strdup(path);
r->default_port = gpr_strdup(default_port);
r->channel_args = grpc_channel_args_copy(args->args);
const grpc_arg *arg = grpc_channel_args_find(
const grpc_arg* arg = grpc_channel_args_find(
r->channel_args, GRPC_ARG_SERVICE_CONFIG_DISABLE_RESOLUTION);
r->request_service_config = !grpc_channel_arg_get_integer(
arg, (grpc_integer_options){false, false, true});
@ -398,19 +398,19 @@ static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
* FACTORY
*/
static void dns_ares_factory_ref(grpc_resolver_factory *factory) {}
static void dns_ares_factory_ref(grpc_resolver_factory* factory) {}
static void dns_ares_factory_unref(grpc_resolver_factory *factory) {}
static void dns_ares_factory_unref(grpc_resolver_factory* factory) {}
static grpc_resolver *dns_factory_create_resolver(
grpc_exec_ctx *exec_ctx, grpc_resolver_factory *factory,
grpc_resolver_args *args) {
static grpc_resolver* dns_factory_create_resolver(
grpc_exec_ctx* exec_ctx, grpc_resolver_factory* factory,
grpc_resolver_args* args) {
return dns_ares_create(exec_ctx, args, "https");
}
static char *dns_ares_factory_get_default_host_name(
grpc_resolver_factory *factory, grpc_uri *uri) {
const char *path = uri->path;
static char* dns_ares_factory_get_default_host_name(
grpc_resolver_factory* factory, grpc_uri* uri) {
const char* path = uri->path;
if (path[0] == '/') ++path;
return gpr_strdup(path);
}
@ -420,16 +420,16 @@ static const grpc_resolver_factory_vtable dns_ares_factory_vtable = {
dns_ares_factory_get_default_host_name, "dns"};
static grpc_resolver_factory dns_resolver_factory = {&dns_ares_factory_vtable};
static grpc_resolver_factory *dns_ares_resolver_factory_create() {
static grpc_resolver_factory* dns_ares_resolver_factory_create() {
return &dns_resolver_factory;
}
extern "C" void grpc_resolver_dns_ares_init(void) {
char *resolver = gpr_getenv("GRPC_DNS_RESOLVER");
char* resolver = gpr_getenv("GRPC_DNS_RESOLVER");
/* TODO(zyc): Turn on c-ares based resolver by default after the address
sorter and the CNAME support are added. */
if (resolver != NULL && gpr_stricmp(resolver, "ares") == 0) {
grpc_error *error = grpc_ares_init();
grpc_error* error = grpc_ares_init();
if (error != GRPC_ERROR_NONE) {
GRPC_LOG_IF_ERROR("ares_library_init() failed", error);
return;
@ -441,7 +441,7 @@ extern "C" void grpc_resolver_dns_ares_init(void) {
}
extern "C" void grpc_resolver_dns_ares_shutdown(void) {
char *resolver = gpr_getenv("GRPC_DNS_RESOLVER");
char* resolver = gpr_getenv("GRPC_DNS_RESOLVER");
if (resolver != NULL && gpr_stricmp(resolver, "ares") == 0) {
grpc_ares_cleanup();
}

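choose_service_config above walks a JSON array of service-config choices, discards any choice whose clientLanguage or clientHostname list does not match this client, and keeps the first choice that survives every filter. The standalone sketch below shows that first-match filtering over plain C structs; the config_choice type is illustrative, and the real code works on grpc_json nodes and includes handling elided from the truncated hunks above.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Illustrative choice record; the real code walks grpc_json nodes. */
typedef struct {
  const char* client_language; /* NULL means "any language" */
  const char* client_hostname; /* NULL means "any hostname" */
  const char* service_config;  /* payload returned when this choice wins */
} config_choice;

static const char* choose(const config_choice* choices, size_t n,
                          const char* language, const char* hostname) {
  for (size_t i = 0; i < n; i++) {
    const config_choice* c = &choices[i];
    if (c->client_language != NULL &&
        strcmp(c->client_language, language) != 0) {
      continue; /* restricted to a different client language */
    }
    if (c->client_hostname != NULL &&
        strcmp(c->client_hostname, hostname) != 0) {
      continue; /* restricted to a different client hostname */
    }
    return c->service_config; /* first choice passing every filter wins */
  }
  return NULL;
}

int main(void) {
  const config_choice choices[] = {
      {"java", NULL, "config for java clients"},
      {NULL, "canary-host", "config for the canary host"},
      {NULL, NULL, "default config"},
  };
  const char* cfg = choose(choices, 3, "c++", "prod-host");
  printf("%s\n", cfg != NULL ? cfg : "(none)");
  return 0;
}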
@ -31,31 +31,31 @@ typedef struct grpc_ares_ev_driver grpc_ares_ev_driver;
/* Start \a ev_driver. It will keep working until all IO on its ares_channel is
done, or grpc_ares_ev_driver_destroy() is called. It may notify the callbacks
bound to its ares_channel when necessary. */
void grpc_ares_ev_driver_start(grpc_exec_ctx *exec_ctx,
grpc_ares_ev_driver *ev_driver);
void grpc_ares_ev_driver_start(grpc_exec_ctx* exec_ctx,
grpc_ares_ev_driver* ev_driver);
/* Returns the ares_channel owned by \a ev_driver. To bind a c-ares query to
\a ev_driver, use the ares_channel owned by \a ev_driver as the arg of the
query. */
ares_channel *grpc_ares_ev_driver_get_channel(grpc_ares_ev_driver *ev_driver);
ares_channel* grpc_ares_ev_driver_get_channel(grpc_ares_ev_driver* ev_driver);
/* Creates a new grpc_ares_ev_driver. Returns GRPC_ERROR_NONE if \a ev_driver is
created successfully. */
grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver,
grpc_pollset_set *pollset_set);
grpc_error* grpc_ares_ev_driver_create(grpc_ares_ev_driver** ev_driver,
grpc_pollset_set* pollset_set);
/* Destroys \a ev_driver asynchronously. Pending lookups made on \a ev_driver
will be cancelled and their on_done callbacks will be invoked with a status
of ARES_ECANCELLED. */
void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver *ev_driver);
void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver* ev_driver);
/* Shutdown all the grpc_fds used by \a ev_driver */
void grpc_ares_ev_driver_shutdown(grpc_exec_ctx *exec_ctx,
grpc_ares_ev_driver *ev_driver);
void grpc_ares_ev_driver_shutdown(grpc_exec_ctx* exec_ctx,
grpc_ares_ev_driver* ev_driver);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_EV_DRIVER_H \
*/
*/

@ -37,7 +37,7 @@
typedef struct fd_node {
/** the owner of this fd node */
grpc_ares_ev_driver *ev_driver;
grpc_ares_ev_driver* ev_driver;
/** a closure wrapping on_readable_cb, which should be invoked when the
grpc_fd in this node becomes readable. */
grpc_closure read_closure;
@ -45,12 +45,12 @@ typedef struct fd_node {
grpc_fd in this node becomes writable. */
grpc_closure write_closure;
/** next fd node in the list */
struct fd_node *next;
struct fd_node* next;
/** mutex guarding the rest of the state */
gpr_mu mu;
/** the grpc_fd owned by this fd node */
grpc_fd *fd;
grpc_fd* fd;
/** if the readable closure has been registered */
bool readable_registered;
/** if the writable closure has been registered */
@ -63,31 +63,31 @@ struct grpc_ares_ev_driver {
/** the ares_channel owned by this event driver */
ares_channel channel;
/** pollset set for driving the IO events of the channel */
grpc_pollset_set *pollset_set;
grpc_pollset_set* pollset_set;
/** refcount of the event driver */
gpr_refcount refs;
/** mutex guarding the rest of the state */
gpr_mu mu;
/** a list of grpc_fd that this event driver is currently using. */
fd_node *fds;
fd_node* fds;
/** is this event driver currently working? */
bool working;
/** is this event driver being shut down */
bool shutting_down;
};
static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
grpc_ares_ev_driver *ev_driver);
static void grpc_ares_notify_on_event_locked(grpc_exec_ctx* exec_ctx,
grpc_ares_ev_driver* ev_driver);
static grpc_ares_ev_driver *grpc_ares_ev_driver_ref(
grpc_ares_ev_driver *ev_driver) {
static grpc_ares_ev_driver* grpc_ares_ev_driver_ref(
grpc_ares_ev_driver* ev_driver) {
gpr_log(GPR_DEBUG, "Ref ev_driver %" PRIuPTR, (uintptr_t)ev_driver);
gpr_ref(&ev_driver->refs);
return ev_driver;
}
static void grpc_ares_ev_driver_unref(grpc_ares_ev_driver *ev_driver) {
static void grpc_ares_ev_driver_unref(grpc_ares_ev_driver* ev_driver) {
gpr_log(GPR_DEBUG, "Unref ev_driver %" PRIuPTR, (uintptr_t)ev_driver);
if (gpr_unref(&ev_driver->refs)) {
gpr_log(GPR_DEBUG, "destroy ev_driver %" PRIuPTR, (uintptr_t)ev_driver);
@ -98,7 +98,7 @@ static void grpc_ares_ev_driver_unref(grpc_ares_ev_driver *ev_driver) {
}
}
static void fd_node_destroy(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
static void fd_node_destroy(grpc_exec_ctx* exec_ctx, fd_node* fdn) {
gpr_log(GPR_DEBUG, "delete fd: %d", grpc_fd_wrapped_fd(fdn->fd));
GPR_ASSERT(!fdn->readable_registered);
GPR_ASSERT(!fdn->writable_registered);
@ -111,29 +111,30 @@ static void fd_node_destroy(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
gpr_free(fdn);
}
static void fd_node_shutdown(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
static void fd_node_shutdown(grpc_exec_ctx* exec_ctx, fd_node* fdn) {
gpr_mu_lock(&fdn->mu);
fdn->shutting_down = true;
if (!fdn->readable_registered && !fdn->writable_registered) {
gpr_mu_unlock(&fdn->mu);
fd_node_destroy(exec_ctx, fdn);
} else {
grpc_fd_shutdown(exec_ctx, fdn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"c-ares fd shutdown"));
grpc_fd_shutdown(
exec_ctx, fdn->fd,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("c-ares fd shutdown"));
gpr_mu_unlock(&fdn->mu);
}
}
grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver,
grpc_pollset_set *pollset_set) {
*ev_driver = (grpc_ares_ev_driver *)gpr_malloc(sizeof(grpc_ares_ev_driver));
grpc_error* grpc_ares_ev_driver_create(grpc_ares_ev_driver** ev_driver,
grpc_pollset_set* pollset_set) {
*ev_driver = (grpc_ares_ev_driver*)gpr_malloc(sizeof(grpc_ares_ev_driver));
int status = ares_init(&(*ev_driver)->channel);
gpr_log(GPR_DEBUG, "grpc_ares_ev_driver_create");
if (status != ARES_SUCCESS) {
char *err_msg;
char* err_msg;
gpr_asprintf(&err_msg, "Failed to init ares channel. C-ares error: %s",
ares_strerror(status));
grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(err_msg);
grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(err_msg);
gpr_free(err_msg);
gpr_free(*ev_driver);
return err;
@ -147,7 +148,7 @@ grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver,
return GRPC_ERROR_NONE;
}
void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver *ev_driver) {
void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver* ev_driver) {
// It's not safe to shut down remaining fds here directly, because
// ares_host_callback does not provide an exec_ctx. We mark the event driver
// as being shut down. If the event driver is working,
@ -159,14 +160,15 @@ void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver *ev_driver) {
grpc_ares_ev_driver_unref(ev_driver);
}
void grpc_ares_ev_driver_shutdown(grpc_exec_ctx *exec_ctx,
grpc_ares_ev_driver *ev_driver) {
void grpc_ares_ev_driver_shutdown(grpc_exec_ctx* exec_ctx,
grpc_ares_ev_driver* ev_driver) {
gpr_mu_lock(&ev_driver->mu);
ev_driver->shutting_down = true;
fd_node *fn = ev_driver->fds;
fd_node* fn = ev_driver->fds;
while (fn != NULL) {
grpc_fd_shutdown(exec_ctx, fn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"grpc_ares_ev_driver_shutdown"));
grpc_fd_shutdown(
exec_ctx, fn->fd,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("grpc_ares_ev_driver_shutdown"));
fn = fn->next;
}
gpr_mu_unlock(&ev_driver->mu);
@ -174,13 +176,13 @@ void grpc_ares_ev_driver_shutdown(grpc_exec_ctx *exec_ctx,
// Search for fd in the fd_node list pointed to by head. This is an O(n) search; the max possible
// value of n is ARES_GETSOCK_MAXNUM (16). n is typically 1 - 2 in our tests.
static fd_node *pop_fd_node(fd_node **head, int fd) {
static fd_node* pop_fd_node(fd_node** head, int fd) {
fd_node dummy_head;
dummy_head.next = *head;
fd_node *node = &dummy_head;
fd_node* node = &dummy_head;
while (node->next != NULL) {
if (grpc_fd_wrapped_fd(node->next->fd) == fd) {
fd_node *ret = node->next;
fd_node* ret = node->next;
node->next = node->next->next;
*head = dummy_head.next;
return ret;
@ -191,16 +193,16 @@ static fd_node *pop_fd_node(fd_node **head, int fd) {
}
/* Check if \a fd is still readable */
static bool grpc_ares_is_fd_still_readable(grpc_ares_ev_driver *ev_driver,
static bool grpc_ares_is_fd_still_readable(grpc_ares_ev_driver* ev_driver,
int fd) {
size_t bytes_available = 0;
return ioctl(fd, FIONREAD, &bytes_available) == 0 && bytes_available > 0;
}
static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
fd_node *fdn = (fd_node *)arg;
grpc_ares_ev_driver *ev_driver = fdn->ev_driver;
static void on_readable_cb(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
fd_node* fdn = (fd_node*)arg;
grpc_ares_ev_driver* ev_driver = fdn->ev_driver;
gpr_mu_lock(&fdn->mu);
const int fd = grpc_fd_wrapped_fd(fdn->fd);
fdn->readable_registered = false;
@ -232,10 +234,10 @@ static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg,
grpc_ares_ev_driver_unref(ev_driver);
}
static void on_writable_cb(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
fd_node *fdn = (fd_node *)arg;
grpc_ares_ev_driver *ev_driver = fdn->ev_driver;
static void on_writable_cb(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
fd_node* fdn = (fd_node*)arg;
grpc_ares_ev_driver* ev_driver = fdn->ev_driver;
gpr_mu_lock(&fdn->mu);
const int fd = grpc_fd_wrapped_fd(fdn->fd);
fdn->writable_registered = false;
@ -265,15 +267,15 @@ static void on_writable_cb(grpc_exec_ctx *exec_ctx, void *arg,
grpc_ares_ev_driver_unref(ev_driver);
}
ares_channel *grpc_ares_ev_driver_get_channel(grpc_ares_ev_driver *ev_driver) {
ares_channel* grpc_ares_ev_driver_get_channel(grpc_ares_ev_driver* ev_driver) {
return &ev_driver->channel;
}
// Get the file descriptors used by the ev_driver's ares channel, register
// driver_closure with these file descriptors.
static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
grpc_ares_ev_driver *ev_driver) {
fd_node *new_list = NULL;
static void grpc_ares_notify_on_event_locked(grpc_exec_ctx* exec_ctx,
grpc_ares_ev_driver* ev_driver) {
fd_node* new_list = NULL;
if (!ev_driver->shutting_down) {
ares_socket_t socks[ARES_GETSOCK_MAXNUM];
int socks_bitmask =
@ -281,12 +283,12 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
for (size_t i = 0; i < ARES_GETSOCK_MAXNUM; i++) {
if (ARES_GETSOCK_READABLE(socks_bitmask, i) ||
ARES_GETSOCK_WRITABLE(socks_bitmask, i)) {
fd_node *fdn = pop_fd_node(&ev_driver->fds, socks[i]);
fd_node* fdn = pop_fd_node(&ev_driver->fds, socks[i]);
// Create a new fd_node if socks[i] is not in the fd_node list.
if (fdn == NULL) {
char *fd_name;
char* fd_name;
gpr_asprintf(&fd_name, "ares_ev_driver-%" PRIuPTR, i);
fdn = (fd_node *)gpr_malloc(sizeof(fd_node));
fdn = (fd_node*)gpr_malloc(sizeof(fd_node));
gpr_log(GPR_DEBUG, "new fd: %d", socks[i]);
fdn->fd = grpc_fd_create(socks[i], fd_name);
fdn->ev_driver = ev_driver;
@ -331,7 +333,7 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
// are therefore no longer in use, so they can be shut down and removed from
// the list.
while (ev_driver->fds != NULL) {
fd_node *cur = ev_driver->fds;
fd_node* cur = ev_driver->fds;
ev_driver->fds = ev_driver->fds->next;
fd_node_shutdown(exec_ctx, cur);
}
@ -343,8 +345,8 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
}
}
void grpc_ares_ev_driver_start(grpc_exec_ctx *exec_ctx,
grpc_ares_ev_driver *ev_driver) {
void grpc_ares_ev_driver_start(grpc_exec_ctx* exec_ctx,
grpc_ares_ev_driver* ev_driver) {
gpr_mu_lock(&ev_driver->mu);
if (!ev_driver->working) {
ev_driver->working = true;

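pop_fd_node above removes the node carrying a given fd from a singly linked list by walking from a dummy head, which avoids special-casing removal of the first element. The same pattern as a standalone C program, using plain int keys instead of grpc_fd:

#include <stdio.h>
#include <stdlib.h>

typedef struct node {
  int key;
  struct node* next;
} node;

/* Remove and return the first node whose key matches, or NULL.
   The dummy head lets the loop treat the real head like any other link. */
static node* pop_node(node** head, int key) {
  node dummy_head;
  dummy_head.next = *head;
  node* n = &dummy_head;
  while (n->next != NULL) {
    if (n->next->key == key) {
      node* ret = n->next;
      n->next = n->next->next;
      *head = dummy_head.next;
      return ret;
    }
    n = n->next;
  }
  return NULL;
}

static node* push(node* head, int key) {
  node* n = (node*)malloc(sizeof(*n));
  n->key = key;
  n->next = head;
  return n;
}

int main(void) {
  node* head = push(push(push(NULL, 3), 5), 7); /* list: 7 -> 5 -> 3 */
  node* removed = pop_node(&head, 7);           /* removes the head itself */
  printf("removed %d, new head %d\n", removed->key, head->key);
  free(removed);
  while (head != NULL) {
    node* next = head->next;
    free(head);
    head = next;
  }
  return 0;
}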
@ -51,13 +51,13 @@ struct grpc_ares_request {
struct ares_addr_port_node dns_server_addr;
/** following members are set in grpc_resolve_address_ares_impl */
/** closure to call when the request completes */
grpc_closure *on_done;
grpc_closure* on_done;
/** the pointer to receive the resolved addresses */
grpc_lb_addresses **lb_addrs_out;
grpc_lb_addresses** lb_addrs_out;
/** the pointer to receive the service config in JSON */
char **service_config_json_out;
char** service_config_json_out;
/** the event driver used by this request */
grpc_ares_ev_driver *ev_driver;
grpc_ares_ev_driver* ev_driver;
/** number of ongoing queries */
gpr_refcount pending_queries;
@ -66,15 +66,15 @@ struct grpc_ares_request {
/** is there at least one successful query, set in on_done_cb */
bool success;
/** the errors explaining the request failure, set in on_done_cb */
grpc_error *error;
grpc_error* error;
};
typedef struct grpc_ares_hostbyname_request {
/** following members are set in create_hostbyname_request */
/** the top-level request instance */
grpc_ares_request *parent_request;
grpc_ares_request* parent_request;
/** host to resolve, parsed from the name to resolve */
char *host;
char* host;
/** port to fill in sockaddr_in, parsed from the name to resolve */
uint16_t port;
/** is it a grpclb address */
@ -83,7 +83,7 @@ typedef struct grpc_ares_hostbyname_request {
static void do_basic_init(void) { gpr_mu_init(&g_init_mu); }
static uint16_t strhtons(const char *port) {
static uint16_t strhtons(const char* port) {
if (strcmp(port, "http") == 0) {
return htons(80);
} else if (strcmp(port, "https") == 0) {
@ -92,12 +92,12 @@ static uint16_t strhtons(const char *port) {
return htons((unsigned short)atoi(port));
}
static void grpc_ares_request_ref(grpc_ares_request *r) {
static void grpc_ares_request_ref(grpc_ares_request* r) {
gpr_ref(&r->pending_queries);
}
static void grpc_ares_request_unref(grpc_exec_ctx *exec_ctx,
grpc_ares_request *r) {
static void grpc_ares_request_unref(grpc_exec_ctx* exec_ctx,
grpc_ares_request* r) {
/* If there are no pending queries, invoke on_done callback and destroy the
request */
if (gpr_unref(&r->pending_queries)) {
@ -120,10 +120,10 @@ static void grpc_ares_request_unref(grpc_exec_ctx *exec_ctx,
}
}
static grpc_ares_hostbyname_request *create_hostbyname_request(
grpc_ares_request *parent_request, char *host, uint16_t port,
static grpc_ares_hostbyname_request* create_hostbyname_request(
grpc_ares_request* parent_request, char* host, uint16_t port,
bool is_balancer) {
grpc_ares_hostbyname_request *hr = (grpc_ares_hostbyname_request *)gpr_zalloc(
grpc_ares_hostbyname_request* hr = (grpc_ares_hostbyname_request*)gpr_zalloc(
sizeof(grpc_ares_hostbyname_request));
hr->parent_request = parent_request;
hr->host = gpr_strdup(host);
@ -133,23 +133,23 @@ static grpc_ares_hostbyname_request *create_hostbyname_request(
return hr;
}
static void destroy_hostbyname_request(grpc_exec_ctx *exec_ctx,
grpc_ares_hostbyname_request *hr) {
static void destroy_hostbyname_request(grpc_exec_ctx* exec_ctx,
grpc_ares_hostbyname_request* hr) {
grpc_ares_request_unref(exec_ctx, hr->parent_request);
gpr_free(hr->host);
gpr_free(hr);
}
static void on_hostbyname_done_cb(void *arg, int status, int timeouts,
struct hostent *hostent) {
grpc_ares_hostbyname_request *hr = (grpc_ares_hostbyname_request *)arg;
grpc_ares_request *r = hr->parent_request;
static void on_hostbyname_done_cb(void* arg, int status, int timeouts,
struct hostent* hostent) {
grpc_ares_hostbyname_request* hr = (grpc_ares_hostbyname_request*)arg;
grpc_ares_request* r = hr->parent_request;
gpr_mu_lock(&r->mu);
if (status == ARES_SUCCESS) {
GRPC_ERROR_UNREF(r->error);
r->error = GRPC_ERROR_NONE;
r->success = true;
grpc_lb_addresses **lb_addresses = r->lb_addrs_out;
grpc_lb_addresses** lb_addresses = r->lb_addrs_out;
if (*lb_addresses == NULL) {
*lb_addresses = grpc_lb_addresses_create(0, NULL);
}
@ -158,7 +158,7 @@ static void on_hostbyname_done_cb(void *arg, int status, int timeouts,
for (i = 0; hostent->h_addr_list[i] != NULL; i++) {
}
(*lb_addresses)->num_addresses += i;
(*lb_addresses)->addresses = (grpc_lb_address *)gpr_realloc(
(*lb_addresses)->addresses = (grpc_lb_address*)gpr_realloc(
(*lb_addresses)->addresses,
sizeof(grpc_lb_address) * (*lb_addresses)->num_addresses);
for (i = prev_naddr; i < (*lb_addresses)->num_addresses; i++) {
@ -208,10 +208,10 @@ static void on_hostbyname_done_cb(void *arg, int status, int timeouts,
}
}
} else if (!r->success) {
char *error_msg;
char* error_msg;
gpr_asprintf(&error_msg, "C-ares status is not ARES_SUCCESS: %s",
ares_strerror(status));
grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
gpr_free(error_msg);
if (r->error == GRPC_ERROR_NONE) {
r->error = error;
@ -223,26 +223,26 @@ static void on_hostbyname_done_cb(void *arg, int status, int timeouts,
destroy_hostbyname_request(NULL, hr);
}
static void on_srv_query_done_cb(void *arg, int status, int timeouts,
unsigned char *abuf, int alen) {
grpc_ares_request *r = (grpc_ares_request *)arg;
static void on_srv_query_done_cb(void* arg, int status, int timeouts,
unsigned char* abuf, int alen) {
grpc_ares_request* r = (grpc_ares_request*)arg;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_log(GPR_DEBUG, "on_query_srv_done_cb");
if (status == ARES_SUCCESS) {
gpr_log(GPR_DEBUG, "on_query_srv_done_cb ARES_SUCCESS");
struct ares_srv_reply *reply;
struct ares_srv_reply* reply;
const int parse_status = ares_parse_srv_reply(abuf, alen, &reply);
if (parse_status == ARES_SUCCESS) {
ares_channel *channel = grpc_ares_ev_driver_get_channel(r->ev_driver);
for (struct ares_srv_reply *srv_it = reply; srv_it != NULL;
ares_channel* channel = grpc_ares_ev_driver_get_channel(r->ev_driver);
for (struct ares_srv_reply* srv_it = reply; srv_it != NULL;
srv_it = srv_it->next) {
if (grpc_ipv6_loopback_available()) {
grpc_ares_hostbyname_request *hr = create_hostbyname_request(
grpc_ares_hostbyname_request* hr = create_hostbyname_request(
r, srv_it->host, htons(srv_it->port), true /* is_balancer */);
ares_gethostbyname(*channel, hr->host, AF_INET6,
on_hostbyname_done_cb, hr);
}
grpc_ares_hostbyname_request *hr = create_hostbyname_request(
grpc_ares_hostbyname_request* hr = create_hostbyname_request(
r, srv_it->host, htons(srv_it->port), true /* is_balancer */);
ares_gethostbyname(*channel, hr->host, AF_INET, on_hostbyname_done_cb,
hr);
@ -253,10 +253,10 @@ static void on_srv_query_done_cb(void *arg, int status, int timeouts,
ares_free_data(reply);
}
} else if (!r->success) {
char *error_msg;
char* error_msg;
gpr_asprintf(&error_msg, "C-ares status is not ARES_SUCCESS: %s",
ares_strerror(status));
grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
gpr_free(error_msg);
if (r->error == GRPC_ERROR_NONE) {
r->error = error;
@ -270,15 +270,15 @@ static void on_srv_query_done_cb(void *arg, int status, int timeouts,
static const char g_service_config_attribute_prefix[] = "grpc_config=";
static void on_txt_done_cb(void *arg, int status, int timeouts,
unsigned char *buf, int len) {
static void on_txt_done_cb(void* arg, int status, int timeouts,
unsigned char* buf, int len) {
gpr_log(GPR_DEBUG, "on_txt_done_cb");
char *error_msg;
grpc_ares_request *r = (grpc_ares_request *)arg;
char* error_msg;
grpc_ares_request* r = (grpc_ares_request*)arg;
const size_t prefix_len = sizeof(g_service_config_attribute_prefix) - 1;
struct ares_txt_ext *result = NULL;
struct ares_txt_ext *reply = NULL;
grpc_error *error = GRPC_ERROR_NONE;
struct ares_txt_ext* result = NULL;
struct ares_txt_ext* reply = NULL;
grpc_error* error = GRPC_ERROR_NONE;
gpr_mu_lock(&r->mu);
if (status != ARES_SUCCESS) goto fail;
status = ares_parse_txt_reply_ext(buf, len, &reply);
@ -294,12 +294,12 @@ static void on_txt_done_cb(void *arg, int status, int timeouts,
// Found a service config record.
if (result != NULL) {
size_t service_config_len = result->length - prefix_len;
*r->service_config_json_out = (char *)gpr_malloc(service_config_len + 1);
*r->service_config_json_out = (char*)gpr_malloc(service_config_len + 1);
memcpy(*r->service_config_json_out, result->txt + prefix_len,
service_config_len);
for (result = result->next; result != NULL && !result->record_start;
result = result->next) {
*r->service_config_json_out = (char *)gpr_realloc(
*r->service_config_json_out = (char*)gpr_realloc(
*r->service_config_json_out, service_config_len + result->length + 1);
memcpy(*r->service_config_json_out + service_config_len, result->txt,
result->length);
@ -326,15 +326,15 @@ done:
grpc_ares_request_unref(NULL, r);
}
static grpc_ares_request *grpc_dns_lookup_ares_impl(
grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name,
const char *default_port, grpc_pollset_set *interested_parties,
grpc_closure *on_done, grpc_lb_addresses **addrs, bool check_grpclb,
char **service_config_json) {
grpc_error *error = GRPC_ERROR_NONE;
grpc_ares_hostbyname_request *hr = NULL;
grpc_ares_request *r = NULL;
ares_channel *channel = NULL;
static grpc_ares_request* grpc_dns_lookup_ares_impl(
grpc_exec_ctx* exec_ctx, const char* dns_server, const char* name,
const char* default_port, grpc_pollset_set* interested_parties,
grpc_closure* on_done, grpc_lb_addresses** addrs, bool check_grpclb,
char** service_config_json) {
grpc_error* error = GRPC_ERROR_NONE;
grpc_ares_hostbyname_request* hr = NULL;
grpc_ares_request* r = NULL;
ares_channel* channel = NULL;
/* TODO(zyc): Enable tracing after #9603 is checked in */
/* if (grpc_dns_trace) {
gpr_log(GPR_DEBUG, "resolve_address (blocking): name=%s, default_port=%s",
@ -342,8 +342,8 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
} */
/* parse name, splitting it into host and port parts */
char *host;
char *port;
char* host;
char* port;
gpr_split_host_port(name, &host, &port);
if (host == NULL) {
error = grpc_error_set_str(
@ -360,11 +360,11 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
port = gpr_strdup(default_port);
}
grpc_ares_ev_driver *ev_driver;
grpc_ares_ev_driver* ev_driver;
error = grpc_ares_ev_driver_create(&ev_driver, interested_parties);
if (error != GRPC_ERROR_NONE) goto error_cleanup;
r = (grpc_ares_request *)gpr_zalloc(sizeof(grpc_ares_request));
r = (grpc_ares_request*)gpr_zalloc(sizeof(grpc_ares_request));
gpr_mu_init(&r->mu);
r->ev_driver = ev_driver;
r->on_done = on_done;
@ -380,7 +380,7 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
grpc_resolved_address addr;
if (grpc_parse_ipv4_hostport(dns_server, &addr, false /* log_errors */)) {
r->dns_server_addr.family = AF_INET;
struct sockaddr_in *in = (struct sockaddr_in *)addr.addr;
struct sockaddr_in* in = (struct sockaddr_in*)addr.addr;
memcpy(&r->dns_server_addr.addr.addr4, &in->sin_addr,
sizeof(struct in_addr));
r->dns_server_addr.tcp_port = grpc_sockaddr_get_port(&addr);
@ -388,7 +388,7 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
} else if (grpc_parse_ipv6_hostport(dns_server, &addr,
false /* log_errors */)) {
r->dns_server_addr.family = AF_INET6;
struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)addr.addr;
struct sockaddr_in6* in6 = (struct sockaddr_in6*)addr.addr;
memcpy(&r->dns_server_addr.addr.addr6, &in6->sin6_addr,
sizeof(struct in6_addr));
r->dns_server_addr.tcp_port = grpc_sockaddr_get_port(&addr);
@ -402,7 +402,7 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
}
int status = ares_set_servers_ports(*channel, &r->dns_server_addr);
if (status != ARES_SUCCESS) {
char *error_msg;
char* error_msg;
gpr_asprintf(&error_msg, "C-ares status is not ARES_SUCCESS: %s",
ares_strerror(status));
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
@ -423,7 +423,7 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
if (check_grpclb) {
/* Query the SRV record */
grpc_ares_request_ref(r);
char *service_name;
char* service_name;
gpr_asprintf(&service_name, "_grpclb._tcp.%s", host);
ares_query(*channel, service_name, ns_c_in, ns_t_srv, on_srv_query_done_cb,
r);
@ -447,29 +447,29 @@ error_cleanup:
return NULL;
}
grpc_ares_request *(*grpc_dns_lookup_ares)(
grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name,
const char *default_port, grpc_pollset_set *interested_parties,
grpc_closure *on_done, grpc_lb_addresses **addrs, bool check_grpclb,
char **service_config_json) = grpc_dns_lookup_ares_impl;
grpc_ares_request* (*grpc_dns_lookup_ares)(
grpc_exec_ctx* exec_ctx, const char* dns_server, const char* name,
const char* default_port, grpc_pollset_set* interested_parties,
grpc_closure* on_done, grpc_lb_addresses** addrs, bool check_grpclb,
char** service_config_json) = grpc_dns_lookup_ares_impl;
void grpc_cancel_ares_request(grpc_exec_ctx *exec_ctx, grpc_ares_request *r) {
void grpc_cancel_ares_request(grpc_exec_ctx* exec_ctx, grpc_ares_request* r) {
if (grpc_dns_lookup_ares == grpc_dns_lookup_ares_impl) {
grpc_ares_ev_driver_shutdown(exec_ctx, r->ev_driver);
}
}
grpc_error *grpc_ares_init(void) {
grpc_error* grpc_ares_init(void) {
gpr_once_init(&g_basic_init, do_basic_init);
gpr_mu_lock(&g_init_mu);
int status = ares_library_init(ARES_LIB_INIT_ALL);
gpr_mu_unlock(&g_init_mu);
if (status != ARES_SUCCESS) {
char *error_msg;
char* error_msg;
gpr_asprintf(&error_msg, "ares_library_init failed: %s",
ares_strerror(status));
grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
gpr_free(error_msg);
return error;
}
@ -488,28 +488,28 @@ void grpc_ares_cleanup(void) {
typedef struct grpc_resolve_address_ares_request {
/** the pointer to receive the resolved addresses */
grpc_resolved_addresses **addrs_out;
grpc_resolved_addresses** addrs_out;
/** currently resolving lb addresses */
grpc_lb_addresses *lb_addrs;
grpc_lb_addresses* lb_addrs;
/** closure to call when the resolve_address_ares request completes */
grpc_closure *on_resolve_address_done;
grpc_closure* on_resolve_address_done;
/** a closure wrapping on_dns_lookup_done_cb, which should be invoked when the
grpc_dns_lookup_ares operation is done. */
grpc_closure on_dns_lookup_done;
} grpc_resolve_address_ares_request;
static void on_dns_lookup_done_cb(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_resolve_address_ares_request *r =
(grpc_resolve_address_ares_request *)arg;
grpc_resolved_addresses **resolved_addresses = r->addrs_out;
static void on_dns_lookup_done_cb(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
grpc_resolve_address_ares_request* r =
(grpc_resolve_address_ares_request*)arg;
grpc_resolved_addresses** resolved_addresses = r->addrs_out;
if (r->lb_addrs == NULL || r->lb_addrs->num_addresses == 0) {
*resolved_addresses = NULL;
} else {
*resolved_addresses =
(grpc_resolved_addresses *)gpr_zalloc(sizeof(grpc_resolved_addresses));
(grpc_resolved_addresses*)gpr_zalloc(sizeof(grpc_resolved_addresses));
(*resolved_addresses)->naddrs = r->lb_addrs->num_addresses;
(*resolved_addresses)->addrs = (grpc_resolved_address *)gpr_zalloc(
(*resolved_addresses)->addrs = (grpc_resolved_address*)gpr_zalloc(
sizeof(grpc_resolved_address) * (*resolved_addresses)->naddrs);
for (size_t i = 0; i < (*resolved_addresses)->naddrs; i++) {
GPR_ASSERT(!r->lb_addrs->addresses[i].is_balancer);
@ -523,14 +523,14 @@ static void on_dns_lookup_done_cb(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(r);
}
static void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx,
const char *name,
const char *default_port,
grpc_pollset_set *interested_parties,
grpc_closure *on_done,
grpc_resolved_addresses **addrs) {
grpc_resolve_address_ares_request *r =
(grpc_resolve_address_ares_request *)gpr_zalloc(
static void grpc_resolve_address_ares_impl(grpc_exec_ctx* exec_ctx,
const char* name,
const char* default_port,
grpc_pollset_set* interested_parties,
grpc_closure* on_done,
grpc_resolved_addresses** addrs) {
grpc_resolve_address_ares_request* r =
(grpc_resolve_address_ares_request*)gpr_zalloc(
sizeof(grpc_resolve_address_ares_request));
r->addrs_out = addrs;
r->on_resolve_address_done = on_done;
@ -543,8 +543,8 @@ static void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx,
}
void (*grpc_resolve_address_ares)(
grpc_exec_ctx *exec_ctx, const char *name, const char *default_port,
grpc_pollset_set *interested_parties, grpc_closure *on_done,
grpc_resolved_addresses **addrs) = grpc_resolve_address_ares_impl;
grpc_exec_ctx* exec_ctx, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
grpc_resolved_addresses** addrs) = grpc_resolve_address_ares_impl;
#endif /* GRPC_ARES == 1 && !defined(GRPC_UV) */
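
grpc_dns_lookup_ares_impl above wires c-ares queries into gRPC's event driver: A/AAAA lookups via ares_gethostbyname, an optional _grpclb._tcp SRV query, and a TXT query for the grpc_config= service-config record, with pending_queries refcounting the outstanding callbacks. For orientation, here is a minimal standalone c-ares program (not gRPC code; build with -lcares) that performs a single A lookup and drives the channel with a plain select() loop in place of the ev_driver:

#include <ares.h>
#include <arpa/inet.h>
#include <netdb.h>
#include <stdio.h>
#include <string.h>
#include <sys/select.h>

/* Print each address returned for the queried name. */
static void on_done(void* arg, int status, int timeouts, struct hostent* host) {
  (void)arg;
  (void)timeouts;
  if (status != ARES_SUCCESS || host == NULL) {
    fprintf(stderr, "lookup failed: %s\n", ares_strerror(status));
    return;
  }
  for (int i = 0; host->h_addr_list[i] != NULL; i++) {
    char buf[INET6_ADDRSTRLEN];
    inet_ntop(host->h_addrtype, host->h_addr_list[i], buf, sizeof(buf));
    printf("%s -> %s\n", host->h_name, buf);
  }
}

int main(void) {
  if (ares_library_init(ARES_LIB_INIT_ALL) != ARES_SUCCESS) return 1;
  ares_channel channel;
  if (ares_init(&channel) != ARES_SUCCESS) return 1;
  ares_gethostbyname(channel, "example.com", AF_INET, on_done, NULL);
  /* Drive the channel until it has no pending sockets; this is the job the
     grpc_ares_ev_driver does with grpc_fd/pollset_set above. */
  for (;;) {
    fd_set readers, writers;
    FD_ZERO(&readers);
    FD_ZERO(&writers);
    int nfds = ares_fds(channel, &readers, &writers);
    if (nfds == 0) break;
    struct timeval tv;
    struct timeval* tvp = ares_timeout(channel, NULL, &tv);
    select(nfds, &readers, &writers, NULL, tvp);
    ares_process(channel, &readers, &writers);
  }
  ares_destroy(channel);
  ares_library_cleanup();
  return 0;
}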

@ -36,12 +36,12 @@ typedef struct grpc_ares_request grpc_ares_request;
must be called at least once before this function. \a on_done may be
called directly in this function without being scheduled with \a exec_ctx,
so it must not try to acquire locks that are being held by the caller. */
extern void (*grpc_resolve_address_ares)(grpc_exec_ctx *exec_ctx,
const char *name,
const char *default_port,
grpc_pollset_set *interested_parties,
grpc_closure *on_done,
grpc_resolved_addresses **addresses);
extern void (*grpc_resolve_address_ares)(grpc_exec_ctx* exec_ctx,
const char* name,
const char* default_port,
grpc_pollset_set* interested_parties,
grpc_closure* on_done,
grpc_resolved_addresses** addresses);
/* Asynchronously resolve \a name. It will try to resolve grpclb SRV records in
addition to the normal address records. For normal address records, it uses
@ -50,19 +50,19 @@ extern void (*grpc_resolve_address_ares)(grpc_exec_ctx *exec_ctx,
function. \a on_done may be called directly in this function without being
scheduled with \a exec_ctx, so it must not try to acquire locks that are
being held by the caller. */
extern grpc_ares_request *(*grpc_dns_lookup_ares)(
grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name,
const char *default_port, grpc_pollset_set *interested_parties,
grpc_closure *on_done, grpc_lb_addresses **addresses, bool check_grpclb,
char **service_config_json);
extern grpc_ares_request* (*grpc_dns_lookup_ares)(
grpc_exec_ctx* exec_ctx, const char* dns_server, const char* name,
const char* default_port, grpc_pollset_set* interested_parties,
grpc_closure* on_done, grpc_lb_addresses** addresses, bool check_grpclb,
char** service_config_json);
/* Cancel the pending grpc_ares_request \a request */
void grpc_cancel_ares_request(grpc_exec_ctx *exec_ctx,
grpc_ares_request *request);
void grpc_cancel_ares_request(grpc_exec_ctx* exec_ctx,
grpc_ares_request* request);
/* Initialize gRPC ares wrapper. Must be called at least once before
grpc_resolve_address_ares(). */
grpc_error *grpc_ares_init(void);
grpc_error* grpc_ares_init(void);
/* Uninitialize the gRPC ares wrapper. If there was more than one previous call to
grpc_ares_init(), this function uninitializes the gRPC ares wrapper only if
@ -74,4 +74,4 @@ void grpc_ares_cleanup(void);
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_WRAPPER_H \
*/
*/
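
Note that grpc_dns_lookup_ares and grpc_resolve_address_ares are exported as function pointers initialized to the *_impl functions, which lets other code replace the resolver implementation at runtime (for example in tests). A tiny standalone illustration of that seam; the names here are hypothetical:

#include <stdio.h>

/* Default implementation. */
static int real_lookup(const char* name) {
  printf("really resolving %s\n", name);
  return 0;
}

/* The exported seam: callers go through the pointer, not the function. */
int (*lookup_fn)(const char* name) = real_lookup;

/* A stand-in that records the request instead of doing network IO. */
static int fake_lookup(const char* name) {
  printf("fake resolver saw %s\n", name);
  return 0;
}

int main(void) {
  lookup_fn("example.com"); /* uses the real implementation */
  lookup_fn = fake_lookup;  /* a test would install its double here */
  lookup_fn("example.com");
  return 0;
}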

@ -25,36 +25,36 @@ struct grpc_ares_request {
char val;
};
static grpc_ares_request *grpc_dns_lookup_ares_impl(
grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name,
const char *default_port, grpc_pollset_set *interested_parties,
grpc_closure *on_done, grpc_lb_addresses **addrs, bool check_grpclb,
char **service_config_json) {
static grpc_ares_request* grpc_dns_lookup_ares_impl(
grpc_exec_ctx* exec_ctx, const char* dns_server, const char* name,
const char* default_port, grpc_pollset_set* interested_parties,
grpc_closure* on_done, grpc_lb_addresses** addrs, bool check_grpclb,
char** service_config_json) {
return NULL;
}
grpc_ares_request *(*grpc_dns_lookup_ares)(
grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name,
const char *default_port, grpc_pollset_set *interested_parties,
grpc_closure *on_done, grpc_lb_addresses **addrs, bool check_grpclb,
char **service_config_json) = grpc_dns_lookup_ares_impl;
grpc_ares_request* (*grpc_dns_lookup_ares)(
grpc_exec_ctx* exec_ctx, const char* dns_server, const char* name,
const char* default_port, grpc_pollset_set* interested_parties,
grpc_closure* on_done, grpc_lb_addresses** addrs, bool check_grpclb,
char** service_config_json) = grpc_dns_lookup_ares_impl;
void grpc_cancel_ares_request(grpc_exec_ctx *exec_ctx, grpc_ares_request *r) {}
void grpc_cancel_ares_request(grpc_exec_ctx* exec_ctx, grpc_ares_request* r) {}
grpc_error *grpc_ares_init(void) { return GRPC_ERROR_NONE; }
grpc_error* grpc_ares_init(void) { return GRPC_ERROR_NONE; }
void grpc_ares_cleanup(void) {}
static void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx,
const char *name,
const char *default_port,
grpc_pollset_set *interested_parties,
grpc_closure *on_done,
grpc_resolved_addresses **addrs) {}
static void grpc_resolve_address_ares_impl(grpc_exec_ctx* exec_ctx,
const char* name,
const char* default_port,
grpc_pollset_set* interested_parties,
grpc_closure* on_done,
grpc_resolved_addresses** addrs) {}
void (*grpc_resolve_address_ares)(
grpc_exec_ctx *exec_ctx, const char *name, const char *default_port,
grpc_pollset_set *interested_parties, grpc_closure *on_done,
grpc_resolved_addresses **addrs) = grpc_resolve_address_ares_impl;
grpc_exec_ctx* exec_ctx, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
grpc_resolved_addresses** addrs) = grpc_resolve_address_ares_impl;
#endif /* GRPC_ARES != 1 || defined(GRPC_UV) */
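
This fallback translation unit compiles no-op stubs when c-ares support is compiled out (GRPC_ARES != 1 or GRPC_UV), so the rest of the tree links against the same symbols either way. A minimal standalone sketch of that compile-time selection; HAVE_FANCY_RESOLVER is a made-up macro for illustration only:

#include <stdio.h>

/* Build with -DHAVE_FANCY_RESOLVER=1 for the real implementation; otherwise
   the stub below is compiled and the API still links. */
#ifndef HAVE_FANCY_RESOLVER
#define HAVE_FANCY_RESOLVER 0
#endif

#if HAVE_FANCY_RESOLVER
static void resolve(const char* name) { printf("resolving %s\n", name); }
#else
static void resolve(const char* name) {
  (void)name;
  printf("resolver disabled at compile time\n");
}
#endif

int main(void) {
  resolve("example.com");
  return 0;
}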
