Merge branch 'master' into multiprocessing-example

pull/18312/head
Richard Belleville 6 years ago
commit b2a75fdc65
  1. CMakeLists.txt (2)
  2. Makefile (2)
  3. doc/service_config.md (39)
  4. gRPC-Core.podspec (1)
  5. grpc.gyp (2)
  6. include/grpc/grpc.h (4)
  7. src/core/ext/filters/client_channel/README.md (14)
  8. src/core/ext/filters/client_channel/client_channel.cc (57)
  9. src/core/ext/filters/client_channel/client_channel_channelz.h (4)
  10. src/core/ext/filters/client_channel/client_channel_factory.cc (56)
  11. src/core/ext/filters/client_channel/client_channel_factory.h (57)
  12. src/core/ext/filters/client_channel/lb_policy.h (11)
  13. src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc (61)
  14. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (133)
  15. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h (6)
  16. src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc (16)
  17. src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h (4)
  18. src/core/ext/filters/client_channel/lb_policy/xds/xds.cc (25)
  19. src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc (16)
  20. src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h (4)
  21. src/core/ext/filters/client_channel/resolving_lb_policy.cc (4)
  22. src/core/ext/transport/chttp2/client/insecure/channel_create.cc (90)
  23. src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc (267)
  24. src/core/ext/transport/chttp2/transport/bin_decoder.cc (8)
  25. src/core/ext/transport/chttp2/transport/bin_decoder.h (8)
  26. src/core/ext/transport/chttp2/transport/bin_encoder.cc (13)
  27. src/core/ext/transport/chttp2/transport/bin_encoder.h (7)
  28. src/core/ext/transport/chttp2/transport/chttp2_transport.cc (4)
  29. src/core/ext/transport/chttp2/transport/frame_data.cc (3)
  30. src/core/ext/transport/chttp2/transport/frame_data.h (2)
  31. src/core/ext/transport/chttp2/transport/frame_goaway.cc (11)
  32. src/core/ext/transport/chttp2/transport/frame_goaway.h (5)
  33. src/core/ext/transport/chttp2/transport/frame_ping.cc (9)
  34. src/core/ext/transport/chttp2/transport/frame_ping.h (2)
  35. src/core/ext/transport/chttp2/transport/frame_rst_stream.cc (9)
  36. src/core/ext/transport/chttp2/transport/frame_rst_stream.h (3)
  37. src/core/ext/transport/chttp2/transport/frame_settings.cc (3)
  38. src/core/ext/transport/chttp2/transport/frame_settings.h (3)
  39. src/core/ext/transport/chttp2/transport/frame_window_update.cc (8)
  40. src/core/ext/transport/chttp2/transport/frame_window_update.h (2)
  41. src/core/ext/transport/chttp2/transport/hpack_parser.cc (11)
  42. src/core/ext/transport/chttp2/transport/hpack_parser.h (5)
  43. src/core/ext/transport/chttp2/transport/internal.h (9)
  44. src/core/ext/transport/chttp2/transport/parsing.cc (18)
  45. src/core/lib/channel/channel_trace.cc (8)
  46. src/core/lib/channel/channel_trace.h (8)
  47. src/core/lib/channel/channelz.h (8)
  48. src/core/lib/channel/context.h (3)
  49. src/core/lib/compression/algorithm_metadata.h (6)
  50. src/core/lib/compression/compression.cc (2)
  51. src/core/lib/compression/compression_internal.cc (4)
  52. src/core/lib/http/httpcli.cc (3)
  53. src/core/lib/http/parser.cc (3)
  54. src/core/lib/http/parser.h (3)
  55. src/core/lib/iomgr/error.cc (18)
  56. src/core/lib/iomgr/error.h (7)
  57. src/core/lib/iomgr/tcp_posix.cc (4)
  58. src/core/lib/security/credentials/jwt/jwt_verifier.cc (11)
  59. src/core/lib/security/credentials/jwt/jwt_verifier.h (3)
  60. src/core/lib/security/transport/auth_filters.h (4)
  61. src/core/lib/security/transport/client_auth_filter.cc (74)
  62. src/core/lib/slice/percent_encoding.cc (6)
  63. src/core/lib/slice/percent_encoding.h (6)
  64. src/core/lib/slice/slice.cc (13)
  65. src/core/lib/slice/slice_hash_table.h (4)
  66. src/core/lib/slice/slice_intern.cc (2)
  67. src/core/lib/slice/slice_internal.h (17)
  68. src/core/lib/slice/slice_traits.h (6)
  69. src/core/lib/slice/slice_weak_hash_table.h (8)
  70. src/core/lib/transport/metadata.cc (99)
  71. src/core/lib/transport/metadata_batch.cc (2)
  72. src/core/lib/transport/metadata_batch.h (2)
  73. src/core/lib/transport/service_config.h (4)
  74. src/core/lib/transport/timeout_encoding.cc (2)
  75. src/core/lib/transport/timeout_encoding.h (2)
  76. src/core/tsi/alts/handshaker/alts_handshaker_client.cc (6)
  77. src/core/tsi/alts/handshaker/alts_handshaker_client.h (2)
  78. src/core/tsi/alts/handshaker/transport_security_common_api.cc (7)
  79. src/core/tsi/alts/handshaker/transport_security_common_api.h (2)
  80. src/python/grpcio/grpc/_cython/_cygrpc/tag.pyx.pxi (13)
  81. test/core/end2end/end2end_nosec_tests.cc (8)
  82. test/core/end2end/end2end_tests.cc (8)
  83. test/core/end2end/gen_build_yaml.py (1)
  84. test/core/end2end/generate_tests.bzl (1)
  85. test/core/end2end/tests/filter_context.cc (318)
  86. test/core/transport/metadata_test.cc (23)
  87. test/core/util/test_lb_policies.cc (4)
  88. test/cpp/end2end/flaky_network_test.cc (8)
  89. test/cpp/end2end/grpclb_end2end_test.cc (3)
  90. test/cpp/microbenchmarks/bm_call_create.cc (32)
  91. test/distrib/python/test_packages.sh (2)
  92. tools/internal_ci/linux/grpc_flaky_network_in_docker.sh (2)
  93. tools/run_tests/artifacts/build_package_python.sh (18)
  94. tools/run_tests/generated/sources_and_headers.json (2)
  95. tools/run_tests/generated/tests.json (883)

@ -5616,6 +5616,7 @@ add_library(end2end_tests
test/core/end2end/tests/empty_batch.cc
test/core/end2end/tests/filter_call_init_fails.cc
test/core/end2end/tests/filter_causes_close.cc
test/core/end2end/tests/filter_context.cc
test/core/end2end/tests/filter_latency.cc
test/core/end2end/tests/filter_status_code.cc
test/core/end2end/tests/graceful_server_shutdown.cc
@ -5739,6 +5740,7 @@ add_library(end2end_nosec_tests
test/core/end2end/tests/empty_batch.cc
test/core/end2end/tests/filter_call_init_fails.cc
test/core/end2end/tests/filter_causes_close.cc
test/core/end2end/tests/filter_context.cc
test/core/end2end/tests/filter_latency.cc
test/core/end2end/tests/filter_status_code.cc
test/core/end2end/tests/graceful_server_shutdown.cc

@ -10410,6 +10410,7 @@ LIBEND2END_TESTS_SRC = \
test/core/end2end/tests/empty_batch.cc \
test/core/end2end/tests/filter_call_init_fails.cc \
test/core/end2end/tests/filter_causes_close.cc \
test/core/end2end/tests/filter_context.cc \
test/core/end2end/tests/filter_latency.cc \
test/core/end2end/tests/filter_status_code.cc \
test/core/end2end/tests/graceful_server_shutdown.cc \
@ -10526,6 +10527,7 @@ LIBEND2END_NOSEC_TESTS_SRC = \
test/core/end2end/tests/empty_batch.cc \
test/core/end2end/tests/filter_call_init_fails.cc \
test/core/end2end/tests/filter_causes_close.cc \
test/core/end2end/tests/filter_context.cc \
test/core/end2end/tests/filter_latency.cc \
test/core/end2end/tests/filter_status_code.cc \
test/core/end2end/tests/graceful_server_shutdown.cc \

@ -12,11 +12,13 @@ The service config is a JSON string of the following form:
```
{
// Load balancing policy name (case insensitive).
// [deprecated] Load balancing policy name (case insensitive).
// Currently, the only selectable client-side policy provided with gRPC
// is 'round_robin', but third parties may add their own policies.
// This field is optional; if unset, the default behavior is to pick
// the first available backend.
// the first available backend. If set, the load balancing policy should be
// supported by the client, otherwise the service config is considered
// invalid.
// If the policy name is set via the client API, that value overrides
// the value specified here.
//
@ -61,10 +63,11 @@ The service config is a JSON string of the following form:
}
],
// Whether RPCs sent to this method should wait until the connection is
// ready by default. If false, the RPC will abort immediately if there
// is a transient failure connecting to the server. Otherwise, gRPC will
// attempt to connect until the deadline is exceeded.
// Optional. Whether RPCs sent to this method should wait until the
// connection is ready by default. If false, the RPC will abort
// immediately if there is a transient failure connecting to the server.
// Otherwise, gRPC will attempt to connect until the deadline is
// exceeded.
//
// The value specified via the gRPC client API will override the value
// set here. However, note that setting the value in the client API will
@ -73,10 +76,10 @@ The service config is a JSON string of the following form:
// is obtained by the gRPC client via name resolution.
'waitForReady': bool,
// The default timeout in seconds for RPCs sent to this method. This can
// be overridden in code. If no reply is received in the specified amount
// of time, the request is aborted and a deadline-exceeded error status
// is returned to the caller.
// Optional. The default timeout in seconds for RPCs sent to this method.
// This can be overridden in code. If no reply is received in the
// specified amount of time, the request is aborted and a
// deadline-exceeded error status is returned to the caller.
//
// The actual deadline used will be the minimum of the value specified
// here and the value set by the application via the gRPC client API.
@ -87,10 +90,10 @@ The service config is a JSON string of the following form:
// https://developers.google.com/protocol-buffers/docs/proto3#json
'timeout': string,
// The maximum allowed payload size for an individual request or object
// in a stream (client->server) in bytes. The size which is measured is
// the serialized, uncompressed payload in bytes. This applies both
// to streaming and non-streaming requests.
// Optional. The maximum allowed payload size for an individual request
// or object in a stream (client->server) in bytes. The size which is
// measured is the serialized, uncompressed payload in bytes. This
// applies both to streaming and non-streaming requests.
//
// The actual value used is the minimum of the value specified here and
// the value set by the application via the gRPC client API.
@ -103,10 +106,10 @@ The service config is a JSON string of the following form:
// be empty.
'maxRequestMessageBytes': number,
// The maximum allowed payload size for an individual response or object
// in a stream (server->client) in bytes. The size which is measured is
// the serialized, uncompressed payload in bytes. This applies both
// to streaming and non-streaming requests.
// Optional. The maximum allowed payload size for an individual response
// or object in a stream (server->client) in bytes. The size which is
// measured is the serialized, uncompressed payload in bytes. This
// applies both to streaming and non-streaming requests.
//
// The actual value used is the minimum of the value specified here and
// the value set by the application via the gRPC client API.
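
To make the fields above concrete, here is a hedged sketch of a client supplying such a config locally through the GRPC_ARG_SERVICE_CONFIG channel arg (the resolver is the usual source of the service config); the service name, timeout value, and target are placeholders.
```
#include <grpc/grpc.h>

// Hypothetical helper: build a channel whose default method config sets
// waitForReady and a 1.5s timeout for all methods of example.Echo.
grpc_channel* create_channel_with_service_config(const char* target) {
  grpc_arg arg;
  arg.type = GRPC_ARG_STRING;
  arg.key = const_cast<char*>(GRPC_ARG_SERVICE_CONFIG);
  arg.value.string = const_cast<char*>(
      "{ \"methodConfig\": [ {"
      "    \"name\": [ { \"service\": \"example.Echo\" } ],"
      "    \"waitForReady\": true,"
      "    \"timeout\": \"1.5s\""
      "} ] }");
  grpc_channel_args args = {1, &arg};
  return grpc_insecure_channel_create(target, &args, nullptr);
}
```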

@ -1293,6 +1293,7 @@ Pod::Spec.new do |s|
'test/core/end2end/tests/empty_batch.cc',
'test/core/end2end/tests/filter_call_init_fails.cc',
'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_context.cc',
'test/core/end2end/tests/filter_latency.cc',
'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc',

@ -2710,6 +2710,7 @@
'test/core/end2end/tests/empty_batch.cc',
'test/core/end2end/tests/filter_call_init_fails.cc',
'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_context.cc',
'test/core/end2end/tests/filter_latency.cc',
'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc',
@ -2799,6 +2800,7 @@
'test/core/end2end/tests/empty_batch.cc',
'test/core/end2end/tests/filter_call_init_fails.cc',
'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_context.cc',
'test/core/end2end/tests/filter_latency.cc',
'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc',

@ -323,14 +323,14 @@ GRPCAPI void grpc_channel_destroy(grpc_channel* channel);
If a grpc_call fails, it's guaranteed that no change to the call state
has been made. */
/** Called by clients to cancel an RPC on the server.
/** Cancel an RPC.
Can be called multiple times, from any thread.
THREAD-SAFETY grpc_call_cancel and grpc_call_cancel_with_status
are thread-safe, and can be called at any point before grpc_call_unref
is called.*/
GRPCAPI grpc_call_error grpc_call_cancel(grpc_call* call, void* reserved);
/** Called by clients to cancel an RPC on the server.
/** Cancel an RPC.
Can be called multiple times, from any thread.
If a status has not been received for the call, set it to the status code
and description passed in.
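
A small usage sketch of the two cancellation calls documented above; the description string is illustrative.
```
#include <grpc/grpc.h>

// Cancel an in-flight call. Both forms are thread-safe and may be issued
// more than once, any time before grpc_call_unref().
void cancel_call_example(grpc_call* call) {
  grpc_call_cancel(call, /*reserved=*/nullptr);
  // Or, if no status has been received yet, attach an explicit one:
  grpc_call_cancel_with_status(call, GRPC_STATUS_CANCELLED,
                               "caller no longer needs this result",
                               /*reserved=*/nullptr);
}
```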

@ -4,7 +4,7 @@ Client Configuration Support for GRPC
This library provides high level configuration machinery to construct client
channels and load balance between them.
Each grpc_channel is created with a grpc_resolver. It is the resolver's duty
Each `grpc_channel` is created with a `Resolver`. It is the resolver's duty
to resolve a name into a set of arguments for the channel. Such arguments
might include:
@ -12,7 +12,7 @@ might include:
- a load balancing policy to decide which server to send a request to
- a set of filters to mutate outgoing requests (say, by adding metadata)
The resolver provides this data as a stream of grpc_channel_args objects to
The resolver provides this data as a stream of `grpc_channel_args` objects to
the channel. We represent arguments as a stream so that they can be changed
by the resolver during execution, by reacting to external events (such as
new service configuration data being pushed to some store).
@ -21,11 +21,11 @@ new service configuration data being pushed to some store).
Load Balancing
--------------
Load balancing configuration is provided by a grpc_lb_policy object.
Load balancing configuration is provided by a `LoadBalancingPolicy` object.
The primary job of the load balancing policies is to pick a target server
given only the initial metadata for a request. It does this by providing
a grpc_subchannel object to the owning channel.
a `ConnectedSubchannel` object to the owning channel.
Sub-Channels
@ -38,9 +38,9 @@ decisions (for example, by avoiding disconnected backends).
Configured sub-channels are fully setup to participate in the grpc data plane.
Their behavior is specified by a set of grpc channel filters defined at their
construction. To customize this behavior, resolvers build
grpc_client_channel_factory objects, which use the decorator pattern to customize
construction arguments for concrete grpc_subchannel instances.
construction. To customize this behavior, transports build
`ClientChannelFactory` objects, which customize construction arguments for
concrete subchannel instances.
Naming for GRPC

@ -107,8 +107,8 @@ typedef struct client_channel_channel_data {
grpc_channel_stack* owning_stack;
/** interested parties (owned) */
grpc_pollset_set* interested_parties;
// Client channel factory. Holds a ref.
grpc_client_channel_factory* client_channel_factory;
// Client channel factory.
grpc_core::ClientChannelFactory* client_channel_factory;
// Subchannel pool.
grpc_core::RefCountedPtr<grpc_core::SubchannelPoolInterface> subchannel_pool;
@ -205,16 +205,15 @@ class ClientChannelControlHelper
chand_->subchannel_pool.get());
grpc_channel_args* new_args =
grpc_channel_args_copy_and_add(&args, &arg, 1);
Subchannel* subchannel = grpc_client_channel_factory_create_subchannel(
chand_->client_channel_factory, new_args);
Subchannel* subchannel =
chand_->client_channel_factory->CreateSubchannel(new_args);
grpc_channel_args_destroy(new_args);
return subchannel;
}
grpc_channel* CreateChannel(const char* target, grpc_client_channel_type type,
grpc_channel* CreateChannel(const char* target,
const grpc_channel_args& args) override {
return grpc_client_channel_factory_create_channel(
chand_->client_channel_factory, target, type, &args);
return chand_->client_channel_factory->CreateChannel(target, &args);
}
void UpdateState(
@ -420,19 +419,12 @@ static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_ENABLE_RETRIES);
chand->enable_retries = grpc_channel_arg_get_bool(arg, true);
// Record client channel factory.
arg = grpc_channel_args_find(args->channel_args,
GRPC_ARG_CLIENT_CHANNEL_FACTORY);
if (arg == nullptr) {
chand->client_channel_factory =
grpc_core::ClientChannelFactory::GetFromChannelArgs(args->channel_args);
if (chand->client_channel_factory == nullptr) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Missing client channel factory in args for client channel filter");
}
if (arg->type != GRPC_ARG_POINTER) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"client channel factory arg must be a pointer");
}
chand->client_channel_factory =
static_cast<grpc_client_channel_factory*>(arg->value.pointer.p);
grpc_client_channel_factory_ref(chand->client_channel_factory);
// Get server name to resolve, using proxy mapper if needed.
arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI);
if (arg == nullptr) {
@ -509,9 +501,6 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
// longer be any need to explicitly reset these smart pointer data members.
chand->picker.reset();
chand->subchannel_pool.reset();
if (chand->client_channel_factory != nullptr) {
grpc_client_channel_factory_unref(chand->client_channel_factory);
}
chand->info_lb_policy_name.reset();
chand->info_service_config_json.reset();
chand->retry_throttle_data.reset();
@ -705,6 +694,7 @@ struct call_data {
arena(args.arena),
owning_call(args.call_stack),
call_combiner(args.call_combiner),
call_context(args.context),
pending_send_initial_metadata(false),
pending_send_message(false),
pending_send_trailing_metadata(false),
@ -718,12 +708,6 @@ struct call_data {
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches); ++i) {
GPR_ASSERT(pending_batches[i].batch == nullptr);
}
for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
if (pick.pick.subchannel_call_context[i].destroy != nullptr) {
pick.pick.subchannel_call_context[i].destroy(
pick.pick.subchannel_call_context[i].value);
}
}
}
// State for handling deadlines.
@ -740,6 +724,7 @@ struct call_data {
gpr_arena* arena;
grpc_call_stack* owning_call;
grpc_call_combiner* call_combiner;
grpc_call_context_element* call_context;
grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
grpc_core::RefCountedPtr<ClientChannelMethodParams> method_params;
@ -2440,14 +2425,16 @@ static void create_subchannel_call(grpc_call_element* elem) {
const size_t parent_data_size =
calld->enable_retries ? sizeof(subchannel_call_retry_state) : 0;
const grpc_core::ConnectedSubchannel::CallArgs call_args = {
calld->pollent, // pollent
calld->path, // path
calld->call_start_time, // start_time
calld->deadline, // deadline
calld->arena, // arena
calld->pick.pick.subchannel_call_context, // context
calld->call_combiner, // call_combiner
parent_data_size // parent_data_size
calld->pollent, // pollent
calld->path, // path
calld->call_start_time, // start_time
calld->deadline, // deadline
calld->arena, // arena
// TODO(roth): When we implement hedging support, we will probably
// need to use a separate call context for each subchannel call.
calld->call_context, // context
calld->call_combiner, // call_combiner
parent_data_size // parent_data_size
};
grpc_error* error = GRPC_ERROR_NONE;
calld->subchannel_call =
@ -2462,7 +2449,7 @@ static void create_subchannel_call(grpc_call_element* elem) {
} else {
if (parent_data_size > 0) {
new (calld->subchannel_call->GetParentData())
subchannel_call_retry_state(calld->pick.pick.subchannel_call_context);
subchannel_call_retry_state(calld->call_context);
}
pending_batches_resume(elem);
}

@ -71,11 +71,11 @@ class SubchannelNode : public BaseNode {
grpc_json* RenderJson() override;
// proxy methods to composed classes.
void AddTraceEvent(ChannelTrace::Severity severity, grpc_slice data) {
void AddTraceEvent(ChannelTrace::Severity severity, const grpc_slice& data) {
trace_.AddTraceEvent(severity, data);
}
void AddTraceEventWithReference(ChannelTrace::Severity severity,
grpc_slice data,
const grpc_slice& data,
RefCountedPtr<BaseNode> referenced_channel) {
trace_.AddTraceEventWithReference(severity, data,
std::move(referenced_channel));

@ -21,47 +21,35 @@
#include "src/core/ext/filters/client_channel/client_channel_factory.h"
#include "src/core/lib/channel/channel_args.h"
void grpc_client_channel_factory_ref(grpc_client_channel_factory* factory) {
factory->vtable->ref(factory);
}
// Channel arg key for client channel factory.
#define GRPC_ARG_CLIENT_CHANNEL_FACTORY "grpc.client_channel_factory"
void grpc_client_channel_factory_unref(grpc_client_channel_factory* factory) {
factory->vtable->unref(factory);
}
namespace grpc_core {
grpc_core::Subchannel* grpc_client_channel_factory_create_subchannel(
grpc_client_channel_factory* factory, const grpc_channel_args* args) {
return factory->vtable->create_subchannel(factory, args);
}
namespace {
grpc_channel* grpc_client_channel_factory_create_channel(
grpc_client_channel_factory* factory, const char* target,
grpc_client_channel_type type, const grpc_channel_args* args) {
return factory->vtable->create_client_channel(factory, target, type, args);
void* factory_arg_copy(void* f) { return f; }
void factory_arg_destroy(void* f) {}
int factory_arg_cmp(void* factory1, void* factory2) {
return GPR_ICMP(factory1, factory2);
}
const grpc_arg_pointer_vtable factory_arg_vtable = {
factory_arg_copy, factory_arg_destroy, factory_arg_cmp};
static void* factory_arg_copy(void* factory) {
grpc_client_channel_factory_ref(
static_cast<grpc_client_channel_factory*>(factory));
return factory;
}
} // namespace
static void factory_arg_destroy(void* factory) {
grpc_client_channel_factory_unref(
static_cast<grpc_client_channel_factory*>(factory));
grpc_arg ClientChannelFactory::CreateChannelArg(ClientChannelFactory* factory) {
return grpc_channel_arg_pointer_create(
const_cast<char*>(GRPC_ARG_CLIENT_CHANNEL_FACTORY), factory,
&factory_arg_vtable);
}
static int factory_arg_cmp(void* factory1, void* factory2) {
if (factory1 < factory2) return -1;
if (factory1 > factory2) return 1;
return 0;
ClientChannelFactory* ClientChannelFactory::GetFromChannelArgs(
const grpc_channel_args* args) {
const grpc_arg* arg =
grpc_channel_args_find(args, GRPC_ARG_CLIENT_CHANNEL_FACTORY);
if (arg == nullptr || arg->type != GRPC_ARG_POINTER) return nullptr;
return static_cast<ClientChannelFactory*>(arg->value.pointer.p);
}
static const grpc_arg_pointer_vtable factory_arg_vtable = {
factory_arg_copy, factory_arg_destroy, factory_arg_cmp};
grpc_arg grpc_client_channel_factory_create_channel_arg(
grpc_client_channel_factory* factory) {
return grpc_channel_arg_pointer_create((char*)GRPC_ARG_CLIENT_CHANNEL_FACTORY,
factory, &factory_arg_vtable);
}
} // namespace grpc_core
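
A hedged sketch of how these two helpers are meant to be paired: the channel constructor stashes its factory in the channel args, and the client channel filter recovers the same pointer later (the new arg vtable above does no ref counting). The factory instance and helper names here are hypothetical.
```
#include <grpc/support/log.h>

#include "src/core/ext/filters/client_channel/client_channel_factory.h"
#include "src/core/lib/channel/channel_args.h"

// Stash a factory in a fresh set of channel args (sketch only).
grpc_channel_args* stash_factory(grpc_core::ClientChannelFactory* factory) {
  grpc_arg arg = grpc_core::ClientChannelFactory::CreateChannelArg(factory);
  return grpc_channel_args_copy_and_add(nullptr, &arg, 1);
}

// Recover it elsewhere, e.g. the way the client channel filter does.
void recover_factory(const grpc_channel_args* args,
                     grpc_core::ClientChannelFactory* expected) {
  grpc_core::ClientChannelFactory* found =
      grpc_core::ClientChannelFactory::GetFromChannelArgs(args);
  GPR_ASSERT(found == expected);  // Same pointer; no ref counting involved.
}
```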

@ -24,51 +24,32 @@
#include <grpc/impl/codegen/grpc_types.h>
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/gprpp/abstract.h"
// Channel arg key for client channel factory.
#define GRPC_ARG_CLIENT_CHANNEL_FACTORY "grpc.client_channel_factory"
namespace grpc_core {
typedef struct grpc_client_channel_factory grpc_client_channel_factory;
typedef struct grpc_client_channel_factory_vtable
grpc_client_channel_factory_vtable;
class ClientChannelFactory {
public:
virtual ~ClientChannelFactory() = default;
typedef enum {
GRPC_CLIENT_CHANNEL_TYPE_REGULAR, /** for the user-level regular calls */
GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, /** for communication with a load
balancing service */
} grpc_client_channel_type;
// Creates a subchannel with the specified args.
virtual Subchannel* CreateSubchannel(const grpc_channel_args* args)
GRPC_ABSTRACT;
/** Constructor for new configured channels.
Creating decorators around this type is encouraged to adapt behavior. */
struct grpc_client_channel_factory {
const grpc_client_channel_factory_vtable* vtable;
};
struct grpc_client_channel_factory_vtable {
void (*ref)(grpc_client_channel_factory* factory);
void (*unref)(grpc_client_channel_factory* factory);
grpc_core::Subchannel* (*create_subchannel)(
grpc_client_channel_factory* factory, const grpc_channel_args* args);
grpc_channel* (*create_client_channel)(grpc_client_channel_factory* factory,
const char* target,
grpc_client_channel_type type,
const grpc_channel_args* args);
};
// Creates a channel for the specified target with the specified args.
virtual grpc_channel* CreateChannel(
const char* target, const grpc_channel_args* args) GRPC_ABSTRACT;
void grpc_client_channel_factory_ref(grpc_client_channel_factory* factory);
void grpc_client_channel_factory_unref(grpc_client_channel_factory* factory);
// Returns a channel arg containing the specified factory.
static grpc_arg CreateChannelArg(ClientChannelFactory* factory);
/** Create a new grpc_subchannel */
grpc_core::Subchannel* grpc_client_channel_factory_create_subchannel(
grpc_client_channel_factory* factory, const grpc_channel_args* args);
// Returns the factory from args, or null if not found.
static ClientChannelFactory* GetFromChannelArgs(
const grpc_channel_args* args);
/** Create a new grpc_channel */
grpc_channel* grpc_client_channel_factory_create_channel(
grpc_client_channel_factory* factory, const char* target,
grpc_client_channel_type type, const grpc_channel_args* args);
GRPC_ABSTRACT_BASE_CLASS
};
grpc_arg grpc_client_channel_factory_create_channel_arg(
grpc_client_channel_factory* factory);
} // namespace grpc_core
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_FACTORY_H */
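
For illustration only, a hypothetical factory built against this interface, in the spirit of the README note above about customizing construction arguments: it injects one extra channel arg before delegating subchannel creation to a wrapped factory. The class name and arg key are made up and not part of gRPC.
```
#include "src/core/ext/filters/client_channel/client_channel_factory.h"
#include "src/core/lib/channel/channel_args.h"

namespace grpc_core {

class ArgInjectingClientChannelFactory : public ClientChannelFactory {
 public:
  explicit ArgInjectingClientChannelFactory(ClientChannelFactory* wrapped)
      : wrapped_(wrapped) {}

  Subchannel* CreateSubchannel(const grpc_channel_args* args) override {
    // Add a (made-up) arg, then delegate to the wrapped factory.
    grpc_arg extra = grpc_channel_arg_integer_create(
        const_cast<char*>("grpc.example.injected_arg"), 1);
    grpc_channel_args* new_args =
        grpc_channel_args_copy_and_add(args, &extra, 1);
    Subchannel* subchannel = wrapped_->CreateSubchannel(new_args);
    grpc_channel_args_destroy(new_args);
    return subchannel;
  }

  grpc_channel* CreateChannel(const char* target,
                              const grpc_channel_args* args) override {
    return wrapped_->CreateChannel(target, args);
  }

 private:
  ClientChannelFactory* wrapped_;  // Not owned.
};

}  // namespace grpc_core
```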

@ -22,7 +22,6 @@
#include <grpc/support/port_platform.h>
#include "src/core/ext/filters/client_channel/client_channel_channelz.h"
#include "src/core/ext/filters/client_channel/client_channel_factory.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/lib/gprpp/abstract.h"
#include "src/core/lib/gprpp/orphanable.h"
@ -74,11 +73,6 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
/// Will be set to the selected subchannel, or nullptr on failure or when
/// the LB policy decides to drop the call.
RefCountedPtr<ConnectedSubchannel> connected_subchannel;
/// Will be populated with context to pass to the subchannel call, if
/// needed.
// TODO(roth): Remove this from the API, especially since it's not
// working properly anyway (see https://github.com/grpc/grpc/issues/15927).
grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT] = {};
};
/// A picker is the object used to actual perform picks.
@ -193,10 +187,9 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
virtual Subchannel* CreateSubchannel(const grpc_channel_args& args)
GRPC_ABSTRACT;
/// Creates a channel with the specified target, type, and channel args.
/// Creates a channel with the specified target and channel args.
virtual grpc_channel* CreateChannel(
const char* target, grpc_client_channel_type type,
const grpc_channel_args& args) GRPC_ABSTRACT;
const char* target, const grpc_channel_args& args) GRPC_ABSTRACT;
/// Sets the connectivity state and returns a new picker to be used
/// by the client channel.

@ -37,17 +37,6 @@ static void destroy_channel_elem(grpc_channel_element* elem) {}
namespace {
struct call_data {
call_data(const grpc_call_element_args& args) {
if (args.context[GRPC_GRPCLB_CLIENT_STATS].value != nullptr) {
// Get stats object from context and take a ref.
client_stats = static_cast<grpc_core::GrpcLbClientStats*>(
args.context[GRPC_GRPCLB_CLIENT_STATS].value)
->Ref();
// Record call started.
client_stats->AddCallStarted();
}
}
// Stats object to update.
grpc_core::RefCountedPtr<grpc_core::GrpcLbClientStats> client_stats;
// State for intercepting send_initial_metadata.
@ -82,7 +71,7 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
GPR_ASSERT(args->context != nullptr);
new (elem->call_data) call_data(*args);
new (elem->call_data) call_data();
return GRPC_ERROR_NONE;
}
@ -96,9 +85,6 @@ static void destroy_call_elem(grpc_call_element* elem,
calld->client_stats->AddCallFinished(
!calld->send_initial_metadata_succeeded /* client_failed_to_send */,
calld->recv_initial_metadata_succeeded /* known_received */);
// All done, so unref the stats object.
// TODO(roth): Eliminate this once filter stack is converted to C++.
calld->client_stats.reset();
}
calld->~call_data();
}
@ -107,25 +93,36 @@ static void start_transport_stream_op_batch(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
call_data* calld = static_cast<call_data*>(elem->call_data);
GPR_TIMER_SCOPE("clr_start_transport_stream_op_batch", 0);
if (calld->client_stats != nullptr) {
// Intercept send_initial_metadata.
if (batch->send_initial_metadata) {
calld->original_on_complete_for_send = batch->on_complete;
GRPC_CLOSURE_INIT(&calld->on_complete_for_send, on_complete_for_send,
calld, grpc_schedule_on_exec_ctx);
batch->on_complete = &calld->on_complete_for_send;
}
// Intercept recv_initial_metadata.
if (batch->recv_initial_metadata) {
calld->original_recv_initial_metadata_ready =
batch->payload->recv_initial_metadata.recv_initial_metadata_ready;
GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
recv_initial_metadata_ready, calld,
grpc_schedule_on_exec_ctx);
batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
&calld->recv_initial_metadata_ready;
// Handle send_initial_metadata.
if (batch->send_initial_metadata) {
// Grab client stats object from user_data for LB token metadata.
grpc_linked_mdelem* lb_token =
batch->payload->send_initial_metadata.send_initial_metadata->idx.named
.lb_token;
if (lb_token != nullptr) {
grpc_core::GrpcLbClientStats* client_stats =
static_cast<grpc_core::GrpcLbClientStats*>(grpc_mdelem_get_user_data(
lb_token->md, grpc_core::GrpcLbClientStats::Destroy));
if (client_stats != nullptr) {
calld->client_stats = client_stats->Ref();
// Intercept completion.
calld->original_on_complete_for_send = batch->on_complete;
GRPC_CLOSURE_INIT(&calld->on_complete_for_send, on_complete_for_send,
calld, grpc_schedule_on_exec_ctx);
batch->on_complete = &calld->on_complete_for_send;
}
}
}
// Intercept completion of recv_initial_metadata.
if (batch->recv_initial_metadata) {
calld->original_recv_initial_metadata_ready =
batch->payload->recv_initial_metadata.recv_initial_metadata_ready;
GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
recv_initial_metadata_ready, calld,
grpc_schedule_on_exec_ctx);
batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
&calld->recv_initial_metadata_ready;
}
// Chain to next filter.
grpc_call_next_op(elem, batch);
}
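
Shown in isolation, the user_data hand-off this filter now depends on: the stats object rides on the LB-token mdelem, keyed by its destroy function, instead of per-call context. This is a hedged standalone sketch; the token value and function name are placeholders.
```
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/static_metadata.h"

// Attach a ref to 'stats' on an lb_token mdelem, then read it back the way
// the filter above does (sketch only).
void attach_and_read_back(grpc_core::GrpcLbClientStats* stats) {
  grpc_mdelem lb_token = grpc_mdelem_from_slices(
      GRPC_MDSTR_LB_TOKEN, grpc_slice_from_static_string("example-token"));
  if (stats != nullptr) {
    grpc_mdelem_set_user_data(lb_token, grpc_core::GrpcLbClientStats::Destroy,
                              stats->Ref().release());
  }
  // Elsewhere (e.g. in the client load reporting filter), the same key
  // function recovers the attached object:
  grpc_core::GrpcLbClientStats* attached =
      static_cast<grpc_core::GrpcLbClientStats*>(grpc_mdelem_get_user_data(
          lb_token, grpc_core::GrpcLbClientStats::Destroy));
  if (attached != nullptr) attached->AddCallStarted();
  GRPC_MDELEM_UNREF(lb_token);
}
```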

@ -225,7 +225,8 @@ class GrpcLb : public LoadBalancingPolicy {
UniquePtr<char> AsText() const;
// Extracts all non-drop entries into a ServerAddressList.
ServerAddressList GetServerAddressList() const;
ServerAddressList GetServerAddressList(
GrpcLbClientStats* client_stats) const;
// Returns true if the serverlist contains at least one drop entry and
// no backend address entries.
@ -273,7 +274,6 @@ class GrpcLb : public LoadBalancingPolicy {
Subchannel* CreateSubchannel(const grpc_channel_args& args) override;
grpc_channel* CreateChannel(const char* target,
grpc_client_channel_type type,
const grpc_channel_args& args) override;
void UpdateState(grpc_connectivity_state state, grpc_error* state_error,
UniquePtr<SubchannelPicker> picker) override;
@ -295,8 +295,6 @@ class GrpcLb : public LoadBalancingPolicy {
static void OnFallbackTimerLocked(void* arg, grpc_error* error);
void StartBalancerCallRetryTimerLocked();
static void OnBalancerCallRetryTimerLocked(void* arg, grpc_error* error);
static void OnBalancerChannelConnectivityChangedLocked(void* arg,
grpc_error* error);
// Methods for dealing with the RR policy.
grpc_channel_args* CreateRoundRobinPolicyArgsLocked();
@ -316,10 +314,6 @@ class GrpcLb : public LoadBalancingPolicy {
grpc_channel* lb_channel_ = nullptr;
// Uuid of the lb channel. Used for channelz.
gpr_atm lb_channel_uuid_ = 0;
grpc_connectivity_state lb_channel_connectivity_;
grpc_closure lb_channel_on_connectivity_changed_;
// Are we already watching the LB channel's connectivity?
bool watching_lb_channel_ = false;
// Response generator to inject address updates into lb_channel_.
RefCountedPtr<FakeResolverResponseGenerator> response_generator_;
@ -453,7 +447,8 @@ bool IsServerValid(const grpc_grpclb_server* server, size_t idx, bool log) {
}
// Returns addresses extracted from the serverlist.
ServerAddressList GrpcLb::Serverlist::GetServerAddressList() const {
ServerAddressList GrpcLb::Serverlist::GetServerAddressList(
GrpcLbClientStats* client_stats) const {
ServerAddressList addresses;
for (size_t i = 0; i < serverlist_->num_servers; ++i) {
const grpc_grpclb_server* server = serverlist_->servers[i];
@ -471,6 +466,11 @@ ServerAddressList GrpcLb::Serverlist::GetServerAddressList() const {
grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer(
server->load_balance_token, lb_token_length);
lb_token = grpc_mdelem_from_slices(GRPC_MDSTR_LB_TOKEN, lb_token_mdstr);
if (client_stats != nullptr) {
GPR_ASSERT(grpc_mdelem_set_user_data(
lb_token, GrpcLbClientStats::Destroy,
client_stats->Ref().release()) == client_stats);
}
} else {
char* uri = grpc_sockaddr_to_uri(&addr);
gpr_log(GPR_INFO,
@ -511,22 +511,6 @@ const char* GrpcLb::Serverlist::ShouldDrop() {
// GrpcLb::Picker
//
// Adds lb_token of selected subchannel (address) to the call's initial
// metadata.
grpc_error* AddLbTokenToInitialMetadata(
grpc_mdelem lb_token, grpc_linked_mdelem* lb_token_mdelem_storage,
grpc_metadata_batch* initial_metadata) {
GPR_ASSERT(lb_token_mdelem_storage != nullptr);
GPR_ASSERT(!GRPC_MDISNULL(lb_token));
return grpc_metadata_batch_add_tail(initial_metadata, lb_token_mdelem_storage,
lb_token);
}
// Destroy function used when embedding client stats in call context.
void DestroyClientStats(void* arg) {
static_cast<GrpcLbClientStats*>(arg)->Unref();
}
GrpcLb::Picker::PickResult GrpcLb::Picker::Pick(PickState* pick,
grpc_error** error) {
// Check if we should drop the call.
@ -557,15 +541,14 @@ GrpcLb::Picker::PickResult GrpcLb::Picker::Pick(PickState* pick,
abort();
}
grpc_mdelem lb_token = {reinterpret_cast<uintptr_t>(arg->value.pointer.p)};
AddLbTokenToInitialMetadata(GRPC_MDELEM_REF(lb_token),
&pick->lb_token_mdelem_storage,
pick->initial_metadata);
// Pass on client stats via context. Passes ownership of the reference.
if (client_stats_ != nullptr) {
pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].value =
client_stats_->Ref().release();
pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].destroy =
DestroyClientStats;
GPR_ASSERT(!GRPC_MDISNULL(lb_token));
GPR_ASSERT(grpc_metadata_batch_add_tail(
pick->initial_metadata, &pick->lb_token_mdelem_storage,
GRPC_MDELEM_REF(lb_token)) == GRPC_ERROR_NONE);
GrpcLbClientStats* client_stats = static_cast<GrpcLbClientStats*>(
grpc_mdelem_get_user_data(lb_token, GrpcLbClientStats::Destroy));
if (client_stats != nullptr) {
client_stats->AddCallStarted();
}
}
return result;
@ -581,10 +564,9 @@ Subchannel* GrpcLb::Helper::CreateSubchannel(const grpc_channel_args& args) {
}
grpc_channel* GrpcLb::Helper::CreateChannel(const char* target,
grpc_client_channel_type type,
const grpc_channel_args& args) {
if (parent_->shutting_down_) return nullptr;
return parent_->channel_control_helper()->CreateChannel(target, type, args);
return parent_->channel_control_helper()->CreateChannel(target, args);
}
void GrpcLb::Helper::UpdateState(grpc_connectivity_state state,
@ -1182,10 +1164,6 @@ GrpcLb::GrpcLb(Args args)
.set_jitter(GRPC_GRPCLB_RECONNECT_JITTER)
.set_max_backoff(GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS *
1000)) {
// Initialization.
GRPC_CLOSURE_INIT(&lb_channel_on_connectivity_changed_,
&GrpcLb::OnBalancerChannelConnectivityChangedLocked, this,
grpc_combiner_scheduler(args.combiner));
// Record server name.
const grpc_arg* arg = grpc_channel_args_find(args.args, GRPC_ARG_SERVER_URI);
const char* server_uri = grpc_channel_arg_get_string(arg);
@ -1305,8 +1283,8 @@ void GrpcLb::ProcessChannelArgsLocked(const grpc_channel_args& args) {
if (lb_channel_ == nullptr) {
char* uri_str;
gpr_asprintf(&uri_str, "fake:///%s", server_name_);
lb_channel_ = channel_control_helper()->CreateChannel(
uri_str, GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, *lb_channel_args);
lb_channel_ =
channel_control_helper()->CreateChannel(uri_str, *lb_channel_args);
GPR_ASSERT(lb_channel_ != nullptr);
grpc_core::channelz::ChannelNode* channel_node =
grpc_channel_get_channelz_node(lb_channel_);
@ -1327,7 +1305,8 @@ void GrpcLb::UpdateLocked(const grpc_channel_args& args,
ProcessChannelArgsLocked(args);
// Update the existing RR policy.
if (rr_policy_ != nullptr) CreateOrUpdateRoundRobinPolicyLocked();
// If this is the initial update, start the fallback timer.
// If this is the initial update, start the fallback timer and the
// balancer call.
if (is_initial_update) {
if (lb_fallback_timeout_ms_ > 0 && serverlist_ == nullptr &&
!fallback_timer_callback_pending_) {
@ -1339,26 +1318,6 @@ void GrpcLb::UpdateLocked(const grpc_channel_args& args,
grpc_timer_init(&lb_fallback_timer_, deadline, &lb_on_fallback_);
}
StartBalancerCallLocked();
} else if (!watching_lb_channel_) {
// If this is not the initial update and we're not already watching
// the LB channel's connectivity state, start a watch now. This
// ensures that we'll know when to switch to a new balancer call.
lb_channel_connectivity_ = grpc_channel_check_connectivity_state(
lb_channel_, true /* try to connect */);
grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
grpc_channel_get_channel_stack(lb_channel_));
GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
watching_lb_channel_ = true;
// TODO(roth): We currently track this ref manually. Once the
// ClosureRef API is ready, we should pass the RefCountedPtr<> along
// with the callback.
auto self = Ref(DEBUG_LOCATION, "watch_lb_channel_connectivity");
self.release();
grpc_client_channel_watch_connectivity_state(
client_channel_elem,
grpc_polling_entity_create_from_pollset_set(interested_parties()),
&lb_channel_connectivity_, &lb_channel_on_connectivity_changed_,
nullptr);
}
}
@ -1436,51 +1395,6 @@ void GrpcLb::OnBalancerCallRetryTimerLocked(void* arg, grpc_error* error) {
grpclb_policy->Unref(DEBUG_LOCATION, "on_balancer_call_retry_timer");
}
// Invoked as part of the update process. It continues watching the LB channel
// until it shuts down or becomes READY. It's invoked even if the LB channel
// stayed READY throughout the update (for example if the update is identical).
void GrpcLb::OnBalancerChannelConnectivityChangedLocked(void* arg,
grpc_error* error) {
GrpcLb* grpclb_policy = static_cast<GrpcLb*>(arg);
if (grpclb_policy->shutting_down_) goto done;
// Re-initialize the lb_call. This should also take care of updating the
// embedded RR policy. Note that the current RR policy, if any, will stay in
// effect until an update from the new lb_call is received.
switch (grpclb_policy->lb_channel_connectivity_) {
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_TRANSIENT_FAILURE: {
// Keep watching the LB channel.
grpc_channel_element* client_channel_elem =
grpc_channel_stack_last_element(
grpc_channel_get_channel_stack(grpclb_policy->lb_channel_));
GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
grpc_client_channel_watch_connectivity_state(
client_channel_elem,
grpc_polling_entity_create_from_pollset_set(
grpclb_policy->interested_parties()),
&grpclb_policy->lb_channel_connectivity_,
&grpclb_policy->lb_channel_on_connectivity_changed_, nullptr);
break;
}
// The LB channel may be IDLE because it's shut down before the update.
// Restart the LB call to kick the LB channel into gear.
case GRPC_CHANNEL_IDLE:
case GRPC_CHANNEL_READY:
grpclb_policy->lb_calld_.reset();
if (grpclb_policy->retry_timer_callback_pending_) {
grpc_timer_cancel(&grpclb_policy->lb_call_retry_timer_);
}
grpclb_policy->lb_call_backoff_.Reset();
grpclb_policy->StartBalancerCallLocked();
// fallthrough
case GRPC_CHANNEL_SHUTDOWN:
done:
grpclb_policy->watching_lb_channel_ = false;
grpclb_policy->Unref(DEBUG_LOCATION,
"watch_lb_channel_connectivity_cb_shutdown");
}
}
//
// code for interacting with the RR policy
//
@ -1490,7 +1404,8 @@ grpc_channel_args* GrpcLb::CreateRoundRobinPolicyArgsLocked() {
ServerAddressList* addresses = &tmp_addresses;
bool is_backend_from_grpclb_load_balancer = false;
if (serverlist_ != nullptr) {
tmp_addresses = serverlist_->GetServerAddressList();
tmp_addresses = serverlist_->GetServerAddressList(
lb_calld_ == nullptr ? nullptr : lb_calld_->client_stats());
is_backend_from_grpclb_load_balancer = true;
} else {
// If CreateOrUpdateRoundRobinPolicyLocked() is invoked when we haven't

@ -56,6 +56,12 @@ class GrpcLbClientStats : public RefCounted<GrpcLbClientStats> {
int64_t* num_calls_finished_known_received,
UniquePtr<DroppedCallCounts>* drop_token_counts);
// A destruction function to use as the user_data key when attaching
// client stats to a grpc_mdelem.
static void Destroy(void* arg) {
static_cast<GrpcLbClientStats*>(arg)->Unref();
}
private:
// This field must only be accessed via *_locked() methods.
UniquePtr<DroppedCallCounts> drop_token_counts_;

@ -161,10 +161,10 @@ void grpc_grpclb_request_destroy(grpc_grpclb_request* request) {
typedef grpc_lb_v1_LoadBalanceResponse grpc_grpclb_response;
grpc_grpclb_initial_response* grpc_grpclb_initial_response_parse(
grpc_slice encoded_grpc_grpclb_response) {
pb_istream_t stream =
pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response),
GRPC_SLICE_LENGTH(encoded_grpc_grpclb_response));
const grpc_slice& encoded_grpc_grpclb_response) {
pb_istream_t stream = pb_istream_from_buffer(
const_cast<uint8_t*>(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response)),
GRPC_SLICE_LENGTH(encoded_grpc_grpclb_response));
grpc_grpclb_response res;
memset(&res, 0, sizeof(grpc_grpclb_response));
if (GPR_UNLIKELY(
@ -185,10 +185,10 @@ grpc_grpclb_initial_response* grpc_grpclb_initial_response_parse(
}
grpc_grpclb_serverlist* grpc_grpclb_response_parse_serverlist(
grpc_slice encoded_grpc_grpclb_response) {
pb_istream_t stream =
pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response),
GRPC_SLICE_LENGTH(encoded_grpc_grpclb_response));
const grpc_slice& encoded_grpc_grpclb_response) {
pb_istream_t stream = pb_istream_from_buffer(
const_cast<uint8_t*>(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response)),
GRPC_SLICE_LENGTH(encoded_grpc_grpclb_response));
pb_istream_t stream_at_start = stream;
grpc_grpclb_serverlist* sl = static_cast<grpc_grpclb_serverlist*>(
gpr_zalloc(sizeof(grpc_grpclb_serverlist)));

@ -55,11 +55,11 @@ void grpc_grpclb_request_destroy(grpc_grpclb_request* request);
/** Parse (ie, decode) the bytes in \a encoded_grpc_grpclb_response as a \a
* grpc_grpclb_initial_response */
grpc_grpclb_initial_response* grpc_grpclb_initial_response_parse(
grpc_slice encoded_grpc_grpclb_response);
const grpc_slice& encoded_grpc_grpclb_response);
/** Parse the list of servers from an encoded \a grpc_grpclb_response */
grpc_grpclb_serverlist* grpc_grpclb_response_parse_serverlist(
grpc_slice encoded_grpc_grpclb_response);
const grpc_slice& encoded_grpc_grpclb_response);
/** Return a copy of \a sl. The caller is responsible for calling \a
* grpc_grpclb_destroy_serverlist on the returned copy. */

@ -223,7 +223,6 @@ class XdsLb : public LoadBalancingPolicy {
Subchannel* CreateSubchannel(const grpc_channel_args& args) override;
grpc_channel* CreateChannel(const char* target,
grpc_client_channel_type type,
const grpc_channel_args& args) override;
void UpdateState(grpc_connectivity_state state, grpc_error* state_error,
UniquePtr<SubchannelPicker> picker) override;
@ -323,11 +322,6 @@ class XdsLb : public LoadBalancingPolicy {
// XdsLb::Picker
//
// Destroy function used when embedding client stats in call context.
void DestroyClientStats(void* arg) {
static_cast<XdsLbClientStats*>(arg)->Unref();
}
XdsLb::Picker::PickResult XdsLb::Picker::Pick(PickState* pick,
grpc_error** error) {
// TODO(roth): Add support for drop handling.
@ -336,10 +330,7 @@ XdsLb::Picker::PickResult XdsLb::Picker::Pick(PickState* pick,
// If pick succeeded, add client stats.
if (result == PickResult::PICK_COMPLETE &&
pick->connected_subchannel != nullptr && client_stats_ != nullptr) {
pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].value =
client_stats_->Ref().release();
pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].destroy =
DestroyClientStats;
// TODO(roth): Add support for client stats.
}
return result;
}
@ -354,10 +345,9 @@ Subchannel* XdsLb::Helper::CreateSubchannel(const grpc_channel_args& args) {
}
grpc_channel* XdsLb::Helper::CreateChannel(const char* target,
grpc_client_channel_type type,
const grpc_channel_args& args) {
if (parent_->shutting_down_) return nullptr;
return parent_->channel_control_helper()->CreateChannel(target, type, args);
return parent_->channel_control_helper()->CreateChannel(target, args);
}
void XdsLb::Helper::UpdateState(grpc_connectivity_state state,
@ -1076,8 +1066,8 @@ void XdsLb::ProcessChannelArgsLocked(const grpc_channel_args& args) {
char* uri_str;
gpr_asprintf(&uri_str, "fake:///%s", server_name_);
gpr_mu_lock(&lb_channel_mu_);
lb_channel_ = channel_control_helper()->CreateChannel(
uri_str, GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, *lb_channel_args);
lb_channel_ =
channel_control_helper()->CreateChannel(uri_str, *lb_channel_args);
gpr_mu_unlock(&lb_channel_mu_);
GPR_ASSERT(lb_channel_ != nullptr);
gpr_free(uri_str);
@ -1307,11 +1297,14 @@ grpc_channel_args* XdsLb::CreateChildPolicyArgsLocked() {
grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_ADDRESS_IS_BACKEND_FROM_XDS_LOAD_BALANCER),
1),
// Inhibit client-side health checking, since the balancer does
// this for us.
grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_INHIBIT_HEALTH_CHECKING), 1),
};
grpc_channel_args* args = grpc_channel_args_copy_and_add_and_remove(
return grpc_channel_args_copy_and_add_and_remove(
args_, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), args_to_add,
GPR_ARRAY_SIZE(args_to_add));
return args;
}
void XdsLb::CreateChildPolicyLocked(const char* name, Args args) {

@ -161,10 +161,10 @@ void xds_grpclb_request_destroy(xds_grpclb_request* request) {
typedef grpc_lb_v1_LoadBalanceResponse xds_grpclb_response;
xds_grpclb_initial_response* xds_grpclb_initial_response_parse(
grpc_slice encoded_xds_grpclb_response) {
pb_istream_t stream =
pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_xds_grpclb_response),
GRPC_SLICE_LENGTH(encoded_xds_grpclb_response));
const grpc_slice& encoded_xds_grpclb_response) {
pb_istream_t stream = pb_istream_from_buffer(
const_cast<uint8_t*>(GRPC_SLICE_START_PTR(encoded_xds_grpclb_response)),
GRPC_SLICE_LENGTH(encoded_xds_grpclb_response));
xds_grpclb_response res;
memset(&res, 0, sizeof(xds_grpclb_response));
if (GPR_UNLIKELY(
@ -185,10 +185,10 @@ xds_grpclb_initial_response* xds_grpclb_initial_response_parse(
}
xds_grpclb_serverlist* xds_grpclb_response_parse_serverlist(
grpc_slice encoded_xds_grpclb_response) {
pb_istream_t stream =
pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_xds_grpclb_response),
GRPC_SLICE_LENGTH(encoded_xds_grpclb_response));
const grpc_slice& encoded_xds_grpclb_response) {
pb_istream_t stream = pb_istream_from_buffer(
const_cast<uint8_t*>(GRPC_SLICE_START_PTR(encoded_xds_grpclb_response)),
GRPC_SLICE_LENGTH(encoded_xds_grpclb_response));
pb_istream_t stream_at_start = stream;
xds_grpclb_serverlist* sl = static_cast<xds_grpclb_serverlist*>(
gpr_zalloc(sizeof(xds_grpclb_serverlist)));

@ -55,11 +55,11 @@ void xds_grpclb_request_destroy(xds_grpclb_request* request);
/** Parse (ie, decode) the bytes in \a encoded_xds_grpclb_response as a \a
* xds_grpclb_initial_response */
xds_grpclb_initial_response* xds_grpclb_initial_response_parse(
grpc_slice encoded_xds_grpclb_response);
const grpc_slice& encoded_xds_grpclb_response);
/** Parse the list of servers from an encoded \a xds_grpclb_response */
xds_grpclb_serverlist* xds_grpclb_response_parse_serverlist(
grpc_slice encoded_xds_grpclb_response);
const grpc_slice& encoded_xds_grpclb_response);
/** Return a copy of \a sl. The caller is responsible for calling \a
* xds_grpclb_destroy_serverlist on the returned copy. */

@ -80,10 +80,10 @@ class ResolvingLoadBalancingPolicy::ResolvingControlHelper
return parent_->channel_control_helper()->CreateSubchannel(args);
}
grpc_channel* CreateChannel(const char* target, grpc_client_channel_type type,
grpc_channel* CreateChannel(const char* target,
const grpc_channel_args& args) override {
if (parent_->resolver_ == nullptr) return nullptr; // Shutting down.
return parent_->channel_control_helper()->CreateChannel(target, type, args);
return parent_->channel_control_helper()->CreateChannel(target, args);
}
void UpdateState(grpc_connectivity_state state, grpc_error* state_error,

@ -33,50 +33,53 @@
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/channel.h"
static void client_channel_factory_ref(
grpc_client_channel_factory* cc_factory) {}
namespace grpc_core {
static void client_channel_factory_unref(
grpc_client_channel_factory* cc_factory) {}
static grpc_core::Subchannel* client_channel_factory_create_subchannel(
grpc_client_channel_factory* cc_factory, const grpc_channel_args* args) {
grpc_channel_args* new_args = grpc_default_authority_add_if_not_present(args);
grpc_connector* connector = grpc_chttp2_connector_create();
grpc_core::Subchannel* s = grpc_core::Subchannel::Create(connector, new_args);
grpc_connector_unref(connector);
grpc_channel_args_destroy(new_args);
return s;
}
class Chttp2InsecureClientChannelFactory : public ClientChannelFactory {
public:
Subchannel* CreateSubchannel(const grpc_channel_args* args) override {
grpc_channel_args* new_args =
grpc_default_authority_add_if_not_present(args);
grpc_connector* connector = grpc_chttp2_connector_create();
Subchannel* s = Subchannel::Create(connector, new_args);
grpc_connector_unref(connector);
grpc_channel_args_destroy(new_args);
return s;
}
static grpc_channel* client_channel_factory_create_channel(
grpc_client_channel_factory* cc_factory, const char* target,
grpc_client_channel_type type, const grpc_channel_args* args) {
if (target == nullptr) {
gpr_log(GPR_ERROR, "cannot create channel with NULL target name");
return nullptr;
grpc_channel* CreateChannel(const char* target,
const grpc_channel_args* args) override {
if (target == nullptr) {
gpr_log(GPR_ERROR, "cannot create channel with NULL target name");
return nullptr;
}
// Add channel arg containing the server URI.
UniquePtr<char> canonical_target =
ResolverRegistry::AddDefaultPrefixIfNeeded(target);
grpc_arg arg = grpc_channel_arg_string_create(
const_cast<char*>(GRPC_ARG_SERVER_URI), canonical_target.get());
const char* to_remove[] = {GRPC_ARG_SERVER_URI};
grpc_channel_args* new_args =
grpc_channel_args_copy_and_add_and_remove(args, to_remove, 1, &arg, 1);
grpc_channel* channel =
grpc_channel_create(target, new_args, GRPC_CLIENT_CHANNEL, nullptr);
grpc_channel_args_destroy(new_args);
return channel;
}
// Add channel arg containing the server URI.
grpc_core::UniquePtr<char> canonical_target =
grpc_core::ResolverRegistry::AddDefaultPrefixIfNeeded(target);
grpc_arg arg = grpc_channel_arg_string_create(
const_cast<char*>(GRPC_ARG_SERVER_URI), canonical_target.get());
const char* to_remove[] = {GRPC_ARG_SERVER_URI};
grpc_channel_args* new_args =
grpc_channel_args_copy_and_add_and_remove(args, to_remove, 1, &arg, 1);
grpc_channel* channel =
grpc_channel_create(target, new_args, GRPC_CLIENT_CHANNEL, nullptr);
grpc_channel_args_destroy(new_args);
return channel;
}
};
static const grpc_client_channel_factory_vtable client_channel_factory_vtable =
{client_channel_factory_ref, client_channel_factory_unref,
client_channel_factory_create_subchannel,
client_channel_factory_create_channel};
} // namespace grpc_core
static grpc_client_channel_factory client_channel_factory = {
&client_channel_factory_vtable};
namespace {
grpc_core::Chttp2InsecureClientChannelFactory* g_factory;
gpr_once g_factory_once = GPR_ONCE_INIT;
void FactoryInit() {
g_factory = grpc_core::New<grpc_core::Chttp2InsecureClientChannelFactory>();
}
} // namespace
/* Create a client channel:
Asynchronously: - resolve target
@ -91,16 +94,13 @@ grpc_channel* grpc_insecure_channel_create(const char* target,
(target, args, reserved));
GPR_ASSERT(reserved == nullptr);
// Add channel arg containing the client channel factory.
grpc_arg arg =
grpc_client_channel_factory_create_channel_arg(&client_channel_factory);
gpr_once_init(&g_factory_once, FactoryInit);
grpc_arg arg = grpc_core::ClientChannelFactory::CreateChannelArg(g_factory);
grpc_channel_args* new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
// Create channel.
grpc_channel* channel = client_channel_factory_create_channel(
&client_channel_factory, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR,
new_args);
grpc_channel* channel = g_factory->CreateChannel(target, new_args);
// Clean up.
grpc_channel_args_destroy(new_args);
return channel != nullptr ? channel
: grpc_lame_client_channel_create(
target, GRPC_STATUS_INTERNAL,
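
A minimal caller-side sketch of the public entry point wired up here; the target is a placeholder, and the lame-channel fallback above means the return value is always non-null.
```
#include <grpc/grpc.h>

int main() {
  grpc_init();
  grpc_channel* channel =
      grpc_insecure_channel_create("dns:///localhost:50051", nullptr, nullptr);
  // Optionally kick off a connection attempt and observe the initial state.
  grpc_connectivity_state state =
      grpc_channel_check_connectivity_state(channel, /*try_to_connect=*/1);
  (void)state;
  grpc_channel_destroy(channel);
  grpc_shutdown();
  return 0;
}
```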

@ -40,148 +40,148 @@
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/uri/uri_parser.h"
static void client_channel_factory_ref(
grpc_client_channel_factory* cc_factory) {}
namespace grpc_core {
static void client_channel_factory_unref(
grpc_client_channel_factory* cc_factory) {}
static grpc_channel_args* get_secure_naming_channel_args(
const grpc_channel_args* args) {
grpc_channel_credentials* channel_credentials =
grpc_channel_credentials_find_in_args(args);
if (channel_credentials == nullptr) {
gpr_log(GPR_ERROR,
"Can't create subchannel: channel credentials missing for secure "
"channel.");
return nullptr;
}
// Make sure security connector does not already exist in args.
if (grpc_security_connector_find_in_args(args) != nullptr) {
gpr_log(GPR_ERROR,
"Can't create subchannel: security connector already present in "
"channel args.");
return nullptr;
}
// To which address are we connecting? By default, use the server URI.
const grpc_arg* server_uri_arg =
grpc_channel_args_find(args, GRPC_ARG_SERVER_URI);
const char* server_uri_str = grpc_channel_arg_get_string(server_uri_arg);
GPR_ASSERT(server_uri_str != nullptr);
grpc_uri* server_uri =
grpc_uri_parse(server_uri_str, true /* supress errors */);
GPR_ASSERT(server_uri != nullptr);
const grpc_core::TargetAuthorityTable* target_authority_table =
grpc_core::FindTargetAuthorityTableInArgs(args);
grpc_core::UniquePtr<char> authority;
if (target_authority_table != nullptr) {
// Find the authority for the target.
const char* target_uri_str =
grpc_core::Subchannel::GetUriFromSubchannelAddressArg(args);
grpc_uri* target_uri =
grpc_uri_parse(target_uri_str, false /* suppress errors */);
GPR_ASSERT(target_uri != nullptr);
if (target_uri->path[0] != '\0') { // "path" may be empty
const grpc_slice key = grpc_slice_from_static_string(
target_uri->path[0] == '/' ? target_uri->path + 1 : target_uri->path);
const grpc_core::UniquePtr<char>* value =
target_authority_table->Get(key);
if (value != nullptr) authority.reset(gpr_strdup(value->get()));
grpc_slice_unref_internal(key);
class Chttp2SecureClientChannelFactory : public ClientChannelFactory {
public:
Subchannel* CreateSubchannel(const grpc_channel_args* args) override {
grpc_channel_args* new_args = GetSecureNamingChannelArgs(args);
if (new_args == nullptr) {
gpr_log(GPR_ERROR,
"Failed to create channel args during subchannel creation.");
return nullptr;
}
grpc_uri_destroy(target_uri);
}
// If the authority hasn't already been set (either because no target
// authority table was present or because the target was not present
// in the table), fall back to using the original server URI.
if (authority == nullptr) {
authority =
grpc_core::ResolverRegistry::GetDefaultAuthority(server_uri_str);
grpc_connector* connector = grpc_chttp2_connector_create();
Subchannel* s = Subchannel::Create(connector, new_args);
grpc_connector_unref(connector);
grpc_channel_args_destroy(new_args);
return s;
}
grpc_arg args_to_add[2];
size_t num_args_to_add = 0;
if (grpc_channel_args_find(args, GRPC_ARG_DEFAULT_AUTHORITY) == nullptr) {
// If the channel args don't already contain GRPC_ARG_DEFAULT_AUTHORITY, add
// the arg, setting it to the value just obtained.
args_to_add[num_args_to_add++] = grpc_channel_arg_string_create(
const_cast<char*>(GRPC_ARG_DEFAULT_AUTHORITY), authority.get());
grpc_channel* CreateChannel(const char* target,
const grpc_channel_args* args) override {
if (target == nullptr) {
gpr_log(GPR_ERROR, "cannot create channel with NULL target name");
return nullptr;
}
// Add channel arg containing the server URI.
UniquePtr<char> canonical_target =
ResolverRegistry::AddDefaultPrefixIfNeeded(target);
grpc_arg arg = grpc_channel_arg_string_create(
const_cast<char*>(GRPC_ARG_SERVER_URI), canonical_target.get());
const char* to_remove[] = {GRPC_ARG_SERVER_URI};
grpc_channel_args* new_args =
grpc_channel_args_copy_and_add_and_remove(args, to_remove, 1, &arg, 1);
grpc_channel* channel =
grpc_channel_create(target, new_args, GRPC_CLIENT_CHANNEL, nullptr);
grpc_channel_args_destroy(new_args);
return channel;
}
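  // Editorial sketch, not part of this change: a caller reaches CreateChannel()
  // above via the public grpc_secure_channel_create() entry point; 'creds' is
  // assumed to be a grpc_channel_credentials* the caller already owns and the
  // target string is an example value:
  //   grpc_channel* channel = grpc_secure_channel_create(
  //       creds, "dns:///example.com:443", /*args=*/nullptr,
  //       /*reserved=*/nullptr);
  //   ...
  //   grpc_channel_destroy(channel);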
grpc_channel_args* args_with_authority =
grpc_channel_args_copy_and_add(args, args_to_add, num_args_to_add);
grpc_uri_destroy(server_uri);
// Create the security connector using the credentials and target name.
grpc_channel_args* new_args_from_connector = nullptr;
grpc_core::RefCountedPtr<grpc_channel_security_connector>
subchannel_security_connector =
channel_credentials->create_security_connector(
/*call_creds=*/nullptr, authority.get(), args_with_authority,
&new_args_from_connector);
if (subchannel_security_connector == nullptr) {
gpr_log(GPR_ERROR,
"Failed to create secure subchannel for secure name '%s'",
authority.get());
private:
static grpc_channel_args* GetSecureNamingChannelArgs(
const grpc_channel_args* args) {
grpc_channel_credentials* channel_credentials =
grpc_channel_credentials_find_in_args(args);
if (channel_credentials == nullptr) {
gpr_log(GPR_ERROR,
"Can't create subchannel: channel credentials missing for secure "
"channel.");
return nullptr;
}
// Make sure security connector does not already exist in args.
if (grpc_security_connector_find_in_args(args) != nullptr) {
gpr_log(GPR_ERROR,
"Can't create subchannel: security connector already present in "
"channel args.");
return nullptr;
}
// To which address are we connecting? By default, use the server URI.
const grpc_arg* server_uri_arg =
grpc_channel_args_find(args, GRPC_ARG_SERVER_URI);
const char* server_uri_str = grpc_channel_arg_get_string(server_uri_arg);
GPR_ASSERT(server_uri_str != nullptr);
grpc_uri* server_uri =
grpc_uri_parse(server_uri_str, true /* suppress errors */);
GPR_ASSERT(server_uri != nullptr);
const TargetAuthorityTable* target_authority_table =
FindTargetAuthorityTableInArgs(args);
UniquePtr<char> authority;
if (target_authority_table != nullptr) {
// Find the authority for the target.
const char* target_uri_str =
Subchannel::GetUriFromSubchannelAddressArg(args);
grpc_uri* target_uri =
grpc_uri_parse(target_uri_str, false /* suppress errors */);
GPR_ASSERT(target_uri != nullptr);
if (target_uri->path[0] != '\0') { // "path" may be empty
const grpc_slice key = grpc_slice_from_static_string(
target_uri->path[0] == '/' ? target_uri->path + 1
: target_uri->path);
const UniquePtr<char>* value = target_authority_table->Get(key);
if (value != nullptr) authority.reset(gpr_strdup(value->get()));
grpc_slice_unref_internal(key);
}
grpc_uri_destroy(target_uri);
}
// If the authority hasn't already been set (either because no target
// authority table was present or because the target was not present
// in the table), fall back to using the original server URI.
if (authority == nullptr) {
authority = ResolverRegistry::GetDefaultAuthority(server_uri_str);
}
grpc_arg args_to_add[2];
size_t num_args_to_add = 0;
if (grpc_channel_args_find(args, GRPC_ARG_DEFAULT_AUTHORITY) == nullptr) {
// If the channel args don't already contain GRPC_ARG_DEFAULT_AUTHORITY,
// add the arg, setting it to the value just obtained.
args_to_add[num_args_to_add++] = grpc_channel_arg_string_create(
const_cast<char*>(GRPC_ARG_DEFAULT_AUTHORITY), authority.get());
}
grpc_channel_args* args_with_authority =
grpc_channel_args_copy_and_add(args, args_to_add, num_args_to_add);
grpc_uri_destroy(server_uri);
// Create the security connector using the credentials and target name.
grpc_channel_args* new_args_from_connector = nullptr;
RefCountedPtr<grpc_channel_security_connector>
subchannel_security_connector =
channel_credentials->create_security_connector(
/*call_creds=*/nullptr, authority.get(), args_with_authority,
&new_args_from_connector);
if (subchannel_security_connector == nullptr) {
gpr_log(GPR_ERROR,
"Failed to create secure subchannel for secure name '%s'",
authority.get());
grpc_channel_args_destroy(args_with_authority);
return nullptr;
}
grpc_arg new_security_connector_arg =
grpc_security_connector_to_arg(subchannel_security_connector.get());
grpc_channel_args* new_args = grpc_channel_args_copy_and_add(
new_args_from_connector != nullptr ? new_args_from_connector
: args_with_authority,
&new_security_connector_arg, 1);
subchannel_security_connector.reset(DEBUG_LOCATION, "lb_channel_create");
if (new_args_from_connector != nullptr) {
grpc_channel_args_destroy(new_args_from_connector);
}
grpc_channel_args_destroy(args_with_authority);
return nullptr;
return new_args;
}
grpc_arg new_security_connector_arg =
grpc_security_connector_to_arg(subchannel_security_connector.get());
};
grpc_channel_args* new_args = grpc_channel_args_copy_and_add(
new_args_from_connector != nullptr ? new_args_from_connector
: args_with_authority,
&new_security_connector_arg, 1);
} // namespace grpc_core
subchannel_security_connector.reset(DEBUG_LOCATION, "lb_channel_create");
if (new_args_from_connector != nullptr) {
grpc_channel_args_destroy(new_args_from_connector);
}
grpc_channel_args_destroy(args_with_authority);
return new_args;
}
namespace {
static grpc_core::Subchannel* client_channel_factory_create_subchannel(
grpc_client_channel_factory* cc_factory, const grpc_channel_args* args) {
grpc_channel_args* new_args = get_secure_naming_channel_args(args);
if (new_args == nullptr) {
gpr_log(GPR_ERROR,
"Failed to create channel args during subchannel creation.");
return nullptr;
}
grpc_connector* connector = grpc_chttp2_connector_create();
grpc_core::Subchannel* s = grpc_core::Subchannel::Create(connector, new_args);
grpc_connector_unref(connector);
grpc_channel_args_destroy(new_args);
return s;
}
grpc_core::Chttp2SecureClientChannelFactory* g_factory;
gpr_once g_factory_once = GPR_ONCE_INIT;
static grpc_channel* client_channel_factory_create_channel(
grpc_client_channel_factory* cc_factory, const char* target,
grpc_client_channel_type type, const grpc_channel_args* args) {
if (target == nullptr) {
gpr_log(GPR_ERROR, "cannot create channel with NULL target name");
return nullptr;
}
// Add channel arg containing the server URI.
grpc_core::UniquePtr<char> canonical_target =
grpc_core::ResolverRegistry::AddDefaultPrefixIfNeeded(target);
grpc_arg arg = grpc_channel_arg_string_create((char*)GRPC_ARG_SERVER_URI,
canonical_target.get());
const char* to_remove[] = {GRPC_ARG_SERVER_URI};
grpc_channel_args* new_args =
grpc_channel_args_copy_and_add_and_remove(args, to_remove, 1, &arg, 1);
grpc_channel* channel =
grpc_channel_create(target, new_args, GRPC_CLIENT_CHANNEL, nullptr);
grpc_channel_args_destroy(new_args);
return channel;
void FactoryInit() {
g_factory = grpc_core::New<grpc_core::Chttp2SecureClientChannelFactory>();
}
static const grpc_client_channel_factory_vtable client_channel_factory_vtable =
{client_channel_factory_ref, client_channel_factory_unref,
client_channel_factory_create_subchannel,
client_channel_factory_create_channel};
static grpc_client_channel_factory client_channel_factory = {
&client_channel_factory_vtable};
} // namespace
// Create a secure client channel:
// Asynchronously: - resolve target
@ -201,16 +201,15 @@ grpc_channel* grpc_secure_channel_create(grpc_channel_credentials* creds,
if (creds != nullptr) {
// Add channel args containing the client channel factory and channel
// credentials.
gpr_once_init(&g_factory_once, FactoryInit);
grpc_arg args_to_add[] = {
grpc_client_channel_factory_create_channel_arg(&client_channel_factory),
grpc_core::ClientChannelFactory::CreateChannelArg(g_factory),
grpc_channel_credentials_to_arg(creds)};
grpc_channel_args* new_args = grpc_channel_args_copy_and_add(
args, args_to_add, GPR_ARRAY_SIZE(args_to_add));
new_args = creds->update_arguments(new_args);
// Create channel.
channel = client_channel_factory_create_channel(
&client_channel_factory, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR,
new_args);
channel = g_factory->CreateChannel(target, new_args);
// Clean up.
grpc_channel_args_destroy(new_args);
}

@ -51,7 +51,7 @@ static uint8_t decode_table[] = {
static const uint8_t tail_xtra[4] = {0, 0, 1, 2};
static bool input_is_valid(uint8_t* input_ptr, size_t length) {
static bool input_is_valid(const uint8_t* input_ptr, size_t length) {
size_t i;
for (i = 0; i < length; ++i) {
@ -158,7 +158,7 @@ bool grpc_base64_decode_partial(struct grpc_base64_decode_context* ctx) {
return true;
}
grpc_slice grpc_chttp2_base64_decode(grpc_slice input) {
grpc_slice grpc_chttp2_base64_decode(const grpc_slice& input) {
size_t input_length = GRPC_SLICE_LENGTH(input);
size_t output_length = input_length / 4 * 3;
struct grpc_base64_decode_context ctx;
@ -174,7 +174,7 @@ grpc_slice grpc_chttp2_base64_decode(grpc_slice input) {
}
if (input_length > 0) {
uint8_t* input_end = GRPC_SLICE_END_PTR(input);
const uint8_t* input_end = GRPC_SLICE_END_PTR(input);
if (*(--input_end) == '=') {
output_length--;
if (*(--input_end) == '=') {
@ -202,7 +202,7 @@ grpc_slice grpc_chttp2_base64_decode(grpc_slice input) {
return output;
}
grpc_slice grpc_chttp2_base64_decode_with_length(grpc_slice input,
grpc_slice grpc_chttp2_base64_decode_with_length(const grpc_slice& input,
size_t output_length) {
size_t input_length = GRPC_SLICE_LENGTH(input);
grpc_slice output = GRPC_SLICE_MALLOC(output_length);

@ -26,8 +26,8 @@
struct grpc_base64_decode_context {
/* input/output: */
uint8_t* input_cur;
uint8_t* input_end;
const uint8_t* input_cur;
const uint8_t* input_end;
uint8_t* output_cur;
uint8_t* output_end;
/* Indicate if the decoder should handle the tail of input data*/
@ -42,12 +42,12 @@ bool grpc_base64_decode_partial(struct grpc_base64_decode_context* ctx);
/* base64 decode a slice with pad chars. Returns a new slice, does not take
ownership of the input. Returns an empty slice if decoding fails. */
grpc_slice grpc_chttp2_base64_decode(grpc_slice input);
grpc_slice grpc_chttp2_base64_decode(const grpc_slice& input);
/* base64 decode a slice without pad chars, data length is needed. Returns a new
slice, does not take ownership of the input. Returns an empty slice if
decoding fails. */
grpc_slice grpc_chttp2_base64_decode_with_length(grpc_slice input,
grpc_slice grpc_chttp2_base64_decode_with_length(const grpc_slice& input,
size_t output_length);
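/* Editorial sketch, not part of this change: a hypothetical helper showing how
   the padded decoder above is used. The literal is an example value
   ("Zm9vYmFy" is base64 for "foobar"); neither call takes ownership of its
   input. */
static void ExampleBase64DecodeUsage() {
  grpc_slice in = grpc_slice_from_static_string("Zm9vYmFy");
  grpc_slice out = grpc_chttp2_base64_decode(in);
  /* An empty 'out' indicates the input was not valid base64. */
  grpc_slice_unref_internal(out);
  grpc_slice_unref_internal(in);
}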
/* Infer the length of decoded data from encoded data. */

@ -48,13 +48,13 @@ static const b64_huff_sym huff_alphabet[64] = {
static const uint8_t tail_xtra[3] = {0, 2, 3};
grpc_slice grpc_chttp2_base64_encode(grpc_slice input) {
grpc_slice grpc_chttp2_base64_encode(const grpc_slice& input) {
size_t input_length = GRPC_SLICE_LENGTH(input);
size_t input_triplets = input_length / 3;
size_t tail_case = input_length % 3;
size_t output_length = input_triplets * 4 + tail_xtra[tail_case];
grpc_slice output = GRPC_SLICE_MALLOC(output_length);
uint8_t* in = GRPC_SLICE_START_PTR(input);
const uint8_t* in = GRPC_SLICE_START_PTR(input);
char* out = reinterpret_cast<char*> GRPC_SLICE_START_PTR(output);
size_t i;
@ -92,9 +92,9 @@ grpc_slice grpc_chttp2_base64_encode(grpc_slice input) {
return output;
}
grpc_slice grpc_chttp2_huffman_compress(grpc_slice input) {
grpc_slice grpc_chttp2_huffman_compress(const grpc_slice& input) {
size_t nbits;
uint8_t* in;
const uint8_t* in;
uint8_t* out;
grpc_slice output;
uint32_t temp = 0;
@ -166,7 +166,8 @@ static void enc_add1(huff_out* out, uint8_t a) {
enc_flush_some(out);
}
grpc_slice grpc_chttp2_base64_encode_and_huffman_compress(grpc_slice input) {
grpc_slice grpc_chttp2_base64_encode_and_huffman_compress(
const grpc_slice& input) {
size_t input_length = GRPC_SLICE_LENGTH(input);
size_t input_triplets = input_length / 3;
size_t tail_case = input_length % 3;
@ -174,7 +175,7 @@ grpc_slice grpc_chttp2_base64_encode_and_huffman_compress(grpc_slice input) {
size_t max_output_bits = 11 * output_syms;
size_t max_output_length = max_output_bits / 8 + (max_output_bits % 8 != 0);
grpc_slice output = GRPC_SLICE_MALLOC(max_output_length);
uint8_t* in = GRPC_SLICE_START_PTR(input);
const uint8_t* in = GRPC_SLICE_START_PTR(input);
uint8_t* start_out = GRPC_SLICE_START_PTR(output);
huff_out out;
size_t i;

@ -25,17 +25,18 @@
/* base64 encode a slice. Returns a new slice, does not take ownership of the
input */
grpc_slice grpc_chttp2_base64_encode(grpc_slice input);
grpc_slice grpc_chttp2_base64_encode(const grpc_slice& input);
/* Compress a slice with the static huffman encoder detailed in the hpack
standard. Returns a new slice, does not take ownership of the input */
grpc_slice grpc_chttp2_huffman_compress(grpc_slice input);
grpc_slice grpc_chttp2_huffman_compress(const grpc_slice& input);
/* equivalent to:
grpc_slice x = grpc_chttp2_base64_encode(input);
grpc_slice y = grpc_chttp2_huffman_compress(x);
grpc_slice_unref_internal(x);
return y; */
grpc_slice grpc_chttp2_base64_encode_and_huffman_compress(grpc_slice input);
grpc_slice grpc_chttp2_base64_encode_and_huffman_compress(
const grpc_slice& input);
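/* Editorial sketch, not part of this change: a hypothetical helper showing a
   caller of the combined helper above; equivalent to the two-step sequence
   described in the comment. The literal is an example value. */
static void ExampleBinaryMetadataEncodeUsage() {
  grpc_slice raw = grpc_slice_from_static_string("binary-metadata-value");
  grpc_slice wire = grpc_chttp2_base64_encode_and_huffman_compress(raw);
  /* 'raw' is not consumed; both slices remain owned by the caller. */
  grpc_slice_unref_internal(wire);
  grpc_slice_unref_internal(raw);
}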
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_ENCODER_H */

@ -1129,7 +1129,7 @@ static void queue_setting_update(grpc_chttp2_transport* t,
void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t,
uint32_t goaway_error,
grpc_slice goaway_text) {
const grpc_slice& goaway_text) {
// Discard the error from a previous goaway frame (if any)
if (t->goaway_error != GRPC_ERROR_NONE) {
GRPC_ERROR_UNREF(t->goaway_error);
@ -2996,7 +2996,7 @@ void Chttp2IncomingByteStream::PublishError(grpc_error* error) {
grpc_chttp2_cancel_stream(transport_, stream_, GRPC_ERROR_REF(error));
}
grpc_error* Chttp2IncomingByteStream::Push(grpc_slice slice,
grpc_error* Chttp2IncomingByteStream::Push(const grpc_slice& slice,
grpc_slice* slice_out) {
if (remaining_bytes_ < GRPC_SLICE_LENGTH(slice)) {
grpc_error* error =

@ -287,7 +287,8 @@ grpc_error* grpc_deframe_unprocessed_incoming_frames(
grpc_error* grpc_chttp2_data_parser_parse(void* parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last) {
const grpc_slice& slice,
int is_last) {
if (!s->pending_byte_stream) {
grpc_slice_ref_internal(slice);
grpc_slice_buffer_add(&s->frame_storage, slice);

@ -67,7 +67,7 @@ grpc_error* grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser* parser,
grpc_error* grpc_chttp2_data_parser_parse(void* parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last);
const grpc_slice& slice, int is_last);
void grpc_chttp2_encode_data(uint32_t id, grpc_slice_buffer* inbuf,
uint32_t write_bytes, int is_eof,

@ -57,10 +57,11 @@ grpc_error* grpc_chttp2_goaway_parser_begin_frame(grpc_chttp2_goaway_parser* p,
grpc_error* grpc_chttp2_goaway_parser_parse(void* parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last) {
uint8_t* const beg = GRPC_SLICE_START_PTR(slice);
uint8_t* const end = GRPC_SLICE_END_PTR(slice);
uint8_t* cur = beg;
const grpc_slice& slice,
int is_last) {
const uint8_t* const beg = GRPC_SLICE_START_PTR(slice);
const uint8_t* const end = GRPC_SLICE_END_PTR(slice);
const uint8_t* cur = beg;
grpc_chttp2_goaway_parser* p =
static_cast<grpc_chttp2_goaway_parser*>(parser);
@ -149,7 +150,7 @@ grpc_error* grpc_chttp2_goaway_parser_parse(void* parser,
}
void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
grpc_slice debug_data,
const grpc_slice& debug_data,
grpc_slice_buffer* slice_buffer) {
grpc_slice header = GRPC_SLICE_MALLOC(9 + 4 + 4);
uint8_t* p = GRPC_SLICE_START_PTR(header);

@ -53,10 +53,11 @@ grpc_error* grpc_chttp2_goaway_parser_begin_frame(
grpc_error* grpc_chttp2_goaway_parser_parse(void* parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last);
const grpc_slice& slice,
int is_last);
void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
grpc_slice debug_data,
const grpc_slice& debug_data,
grpc_slice_buffer* slice_buffer);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_GOAWAY_H */

@ -73,10 +73,11 @@ grpc_error* grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser* parser,
grpc_error* grpc_chttp2_ping_parser_parse(void* parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last) {
uint8_t* const beg = GRPC_SLICE_START_PTR(slice);
uint8_t* const end = GRPC_SLICE_END_PTR(slice);
uint8_t* cur = beg;
const grpc_slice& slice,
int is_last) {
const uint8_t* const beg = GRPC_SLICE_START_PTR(slice);
const uint8_t* const end = GRPC_SLICE_END_PTR(slice);
const uint8_t* cur = beg;
grpc_chttp2_ping_parser* p = static_cast<grpc_chttp2_ping_parser*>(parser);
while (p->byte != 8 && cur != end) {

@ -37,7 +37,7 @@ grpc_error* grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser* parser,
grpc_error* grpc_chttp2_ping_parser_parse(void* parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last);
const grpc_slice& slice, int is_last);
/* Test-only function for disabling ping ack */
void grpc_set_disable_ping_ack(bool disable_ping_ack);

@ -74,10 +74,11 @@ grpc_error* grpc_chttp2_rst_stream_parser_begin_frame(
grpc_error* grpc_chttp2_rst_stream_parser_parse(void* parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last) {
uint8_t* const beg = GRPC_SLICE_START_PTR(slice);
uint8_t* const end = GRPC_SLICE_END_PTR(slice);
uint8_t* cur = beg;
const grpc_slice& slice,
int is_last) {
const uint8_t* const beg = GRPC_SLICE_START_PTR(slice);
const uint8_t* const end = GRPC_SLICE_END_PTR(slice);
const uint8_t* cur = beg;
grpc_chttp2_rst_stream_parser* p =
static_cast<grpc_chttp2_rst_stream_parser*>(parser);

@ -38,6 +38,7 @@ grpc_error* grpc_chttp2_rst_stream_parser_begin_frame(
grpc_error* grpc_chttp2_rst_stream_parser_parse(void* parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last);
const grpc_slice& slice,
int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_RST_STREAM_H */

@ -111,7 +111,8 @@ grpc_error* grpc_chttp2_settings_parser_begin_frame(
grpc_error* grpc_chttp2_settings_parser_parse(void* p, grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last) {
const grpc_slice& slice,
int is_last) {
grpc_chttp2_settings_parser* parser =
static_cast<grpc_chttp2_settings_parser*>(p);
const uint8_t* cur = GRPC_SLICE_START_PTR(slice);

@ -55,6 +55,7 @@ grpc_error* grpc_chttp2_settings_parser_begin_frame(
grpc_error* grpc_chttp2_settings_parser_parse(void* parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last);
const grpc_slice& slice,
int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_SETTINGS_H */

@ -69,11 +69,11 @@ grpc_error* grpc_chttp2_window_update_parser_begin_frame(
grpc_error* grpc_chttp2_window_update_parser_parse(void* parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice,
const grpc_slice& slice,
int is_last) {
uint8_t* const beg = GRPC_SLICE_START_PTR(slice);
uint8_t* const end = GRPC_SLICE_END_PTR(slice);
uint8_t* cur = beg;
const uint8_t* const beg = GRPC_SLICE_START_PTR(slice);
const uint8_t* const end = GRPC_SLICE_END_PTR(slice);
const uint8_t* cur = beg;
grpc_chttp2_window_update_parser* p =
static_cast<grpc_chttp2_window_update_parser*>(parser);

@ -39,7 +39,7 @@ grpc_error* grpc_chttp2_window_update_parser_begin_frame(
grpc_error* grpc_chttp2_window_update_parser_parse(void* parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice,
const grpc_slice& slice,
int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_WINDOW_UPDATE_H */

@ -1570,16 +1570,16 @@ void grpc_chttp2_hpack_parser_destroy(grpc_chttp2_hpack_parser* p) {
}
grpc_error* grpc_chttp2_hpack_parser_parse(grpc_chttp2_hpack_parser* p,
grpc_slice slice) {
const grpc_slice& slice) {
/* max number of bytes to parse at a time... limits call stack depth on
* compilers without TCO */
#define MAX_PARSE_LENGTH 1024
p->current_slice_refcount = slice.refcount;
uint8_t* start = GRPC_SLICE_START_PTR(slice);
uint8_t* end = GRPC_SLICE_END_PTR(slice);
const uint8_t* start = GRPC_SLICE_START_PTR(slice);
const uint8_t* end = GRPC_SLICE_END_PTR(slice);
grpc_error* error = GRPC_ERROR_NONE;
while (start != end && error == GRPC_ERROR_NONE) {
uint8_t* target = start + GPR_MIN(MAX_PARSE_LENGTH, end - start);
const uint8_t* target = start + GPR_MIN(MAX_PARSE_LENGTH, end - start);
error = p->state(p, start, target);
start = target;
}
@ -1621,7 +1621,8 @@ static void parse_stream_compression_md(grpc_chttp2_transport* t,
grpc_error* grpc_chttp2_header_parser_parse(void* hpack_parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last) {
const grpc_slice& slice,
int is_last) {
GPR_TIMER_SCOPE("grpc_chttp2_header_parser_parse", 0);
grpc_chttp2_hpack_parser* parser =
static_cast<grpc_chttp2_hpack_parser*>(hpack_parser);

@ -97,13 +97,14 @@ void grpc_chttp2_hpack_parser_destroy(grpc_chttp2_hpack_parser* p);
void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser* p);
grpc_error* grpc_chttp2_hpack_parser_parse(grpc_chttp2_hpack_parser* p,
grpc_slice slice);
const grpc_slice& slice);
/* wraps grpc_chttp2_hpack_parser_parse to provide a frame level parser for
the transport */
grpc_error* grpc_chttp2_header_parser_parse(void* hpack_parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last);
const grpc_slice& slice,
int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_PARSER_H */

@ -245,7 +245,7 @@ class Chttp2IncomingByteStream : public ByteStream {
void PublishError(grpc_error* error);
grpc_error* Push(grpc_slice slice, grpc_slice* slice_out);
grpc_error* Push(const grpc_slice& slice, grpc_slice* slice_out);
grpc_error* Finished(grpc_error* error, bool reset_on_error);
@ -438,7 +438,8 @@ struct grpc_chttp2_transport {
void* parser_data = nullptr;
grpc_chttp2_stream* incoming_stream = nullptr;
grpc_error* (*parser)(void* parser_user_data, grpc_chttp2_transport* t,
grpc_chttp2_stream* s, grpc_slice slice, int is_last);
grpc_chttp2_stream* s, const grpc_slice& slice,
int is_last);
grpc_chttp2_write_cb* write_cb_pool = nullptr;
@ -681,7 +682,7 @@ void grpc_chttp2_end_write(grpc_chttp2_transport* t, grpc_error* error);
/** Process one slice of incoming data; returns GRPC_ERROR_NONE if the
connection is still viable after reading, or an error if it should be torn
down */
grpc_error* grpc_chttp2_perform_read(grpc_chttp2_transport* t,
grpc_slice slice);
const grpc_slice& slice);
bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport* t,
grpc_chttp2_stream* s);
@ -740,7 +741,7 @@ grpc_chttp2_stream* grpc_chttp2_parsing_accept_stream(grpc_chttp2_transport* t,
void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t,
uint32_t goaway_error,
grpc_slice goaway_text);
const grpc_slice& goaway_text);
void grpc_chttp2_parsing_become_skip_parser(grpc_chttp2_transport* t);

@ -45,14 +45,14 @@ static grpc_error* init_goaway_parser(grpc_chttp2_transport* t);
static grpc_error* init_skip_frame_parser(grpc_chttp2_transport* t,
int is_header);
static grpc_error* parse_frame_slice(grpc_chttp2_transport* t, grpc_slice slice,
int is_last);
static grpc_error* parse_frame_slice(grpc_chttp2_transport* t,
const grpc_slice& slice, int is_last);
grpc_error* grpc_chttp2_perform_read(grpc_chttp2_transport* t,
grpc_slice slice) {
uint8_t* beg = GRPC_SLICE_START_PTR(slice);
uint8_t* end = GRPC_SLICE_END_PTR(slice);
uint8_t* cur = beg;
const grpc_slice& slice) {
const uint8_t* beg = GRPC_SLICE_START_PTR(slice);
const uint8_t* end = GRPC_SLICE_END_PTR(slice);
const uint8_t* cur = beg;
grpc_error* err;
if (cur == end) return GRPC_ERROR_NONE;
@ -312,7 +312,7 @@ static grpc_error* init_frame_parser(grpc_chttp2_transport* t) {
}
static grpc_error* skip_parser(void* parser, grpc_chttp2_transport* t,
grpc_chttp2_stream* s, grpc_slice slice,
grpc_chttp2_stream* s, const grpc_slice& slice,
int is_last) {
return GRPC_ERROR_NONE;
}
@ -753,8 +753,8 @@ static grpc_error* init_settings_frame_parser(grpc_chttp2_transport* t) {
return GRPC_ERROR_NONE;
}
static grpc_error* parse_frame_slice(grpc_chttp2_transport* t, grpc_slice slice,
int is_last) {
static grpc_error* parse_frame_slice(grpc_chttp2_transport* t,
const grpc_slice& slice, int is_last) {
grpc_chttp2_stream* s = t->incoming_stream;
grpc_error* err = t->parser(t->parser_data, t, s, slice, is_last);
intptr_t unused;

@ -41,7 +41,7 @@
namespace grpc_core {
namespace channelz {
ChannelTrace::TraceEvent::TraceEvent(Severity severity, grpc_slice data,
ChannelTrace::TraceEvent::TraceEvent(Severity severity, const grpc_slice& data,
RefCountedPtr<BaseNode> referenced_entity)
: severity_(severity),
data_(data),
@ -51,7 +51,7 @@ ChannelTrace::TraceEvent::TraceEvent(Severity severity, grpc_slice data,
referenced_entity_(std::move(referenced_entity)),
memory_usage_(sizeof(TraceEvent) + grpc_slice_memory_usage(data)) {}
ChannelTrace::TraceEvent::TraceEvent(Severity severity, grpc_slice data)
ChannelTrace::TraceEvent::TraceEvent(Severity severity, const grpc_slice& data)
: severity_(severity),
data_(data),
timestamp_(grpc_millis_to_timespec(grpc_core::ExecCtx::Get()->Now(),
@ -107,7 +107,7 @@ void ChannelTrace::AddTraceEventHelper(TraceEvent* new_trace_event) {
}
}
void ChannelTrace::AddTraceEvent(Severity severity, grpc_slice data) {
void ChannelTrace::AddTraceEvent(Severity severity, const grpc_slice& data) {
if (max_event_memory_ == 0) {
grpc_slice_unref_internal(data);
return; // tracing is disabled if max_event_memory_ == 0
@ -116,7 +116,7 @@ void ChannelTrace::AddTraceEvent(Severity severity, grpc_slice data) {
}
void ChannelTrace::AddTraceEventWithReference(
Severity severity, grpc_slice data,
Severity severity, const grpc_slice& data,
RefCountedPtr<BaseNode> referenced_entity) {
if (max_event_memory_ == 0) {
grpc_slice_unref_internal(data);

@ -62,7 +62,7 @@ class ChannelTrace {
// TODO(ncteisen): as this call is used more and more throughout the gRPC
// stack, determine if it makes more sense to accept a char* instead of a
// slice.
void AddTraceEvent(Severity severity, grpc_slice data);
void AddTraceEvent(Severity severity, const grpc_slice& data);
// Adds a new trace event to the tracing object. This trace event refers to
// an event that concerns a different channelz entity. For example, if this
@ -72,7 +72,7 @@ class ChannelTrace {
// NOTE: see the note in the method above.
//
// TODO(ncteisen): see the todo in the method above.
void AddTraceEventWithReference(Severity severity, grpc_slice data,
void AddTraceEventWithReference(Severity severity, const grpc_slice& data,
RefCountedPtr<BaseNode> referenced_entity);
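// Editorial sketch, not part of this change: a typical call to the simpler
// AddTraceEvent() above, with 'trace' standing in for some ChannelTrace
// instance and the message being an example value; the call takes ownership
// of the slice:
//   trace.AddTraceEvent(ChannelTrace::Severity::Info,
//                       grpc_slice_from_static_string("subchannel created"));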
// Creates and returns the raw grpc_json object, so a parent channelz
@ -87,12 +87,12 @@ class ChannelTrace {
class TraceEvent {
public:
// Constructor for a TraceEvent that references a channel.
TraceEvent(Severity severity, grpc_slice data,
TraceEvent(Severity severity, const grpc_slice& data,
RefCountedPtr<BaseNode> referenced_entity_);
// Constructor for a TraceEvent that does not reference a different
// channel.
TraceEvent(Severity severity, grpc_slice data);
TraceEvent(Severity severity, const grpc_slice& data);
~TraceEvent();

@ -180,11 +180,11 @@ class ChannelNode : public BaseNode {
bool ChannelIsDestroyed() { return channel_ == nullptr; }
// proxy methods to composed classes.
void AddTraceEvent(ChannelTrace::Severity severity, grpc_slice data) {
void AddTraceEvent(ChannelTrace::Severity severity, const grpc_slice& data) {
trace_.AddTraceEvent(severity, data);
}
void AddTraceEventWithReference(ChannelTrace::Severity severity,
grpc_slice data,
const grpc_slice& data,
RefCountedPtr<BaseNode> referenced_channel) {
trace_.AddTraceEventWithReference(severity, data,
std::move(referenced_channel));
@ -214,11 +214,11 @@ class ServerNode : public BaseNode {
intptr_t pagination_limit);
// proxy methods to composed classes.
void AddTraceEvent(ChannelTrace::Severity severity, grpc_slice data) {
void AddTraceEvent(ChannelTrace::Severity severity, const grpc_slice& data) {
trace_.AddTraceEvent(severity, data);
}
void AddTraceEventWithReference(ChannelTrace::Severity severity,
grpc_slice data,
const grpc_slice& data,
RefCountedPtr<BaseNode> referenced_channel) {
trace_.AddTraceEventWithReference(severity, data,
std::move(referenced_channel));

@ -35,9 +35,6 @@ typedef enum {
/// Reserved for traffic_class_context.
GRPC_CONTEXT_TRAFFIC,
/// Value is a \a grpc_grpclb_client_stats.
GRPC_GRPCLB_CLIENT_STATS,
GRPC_CONTEXT_COUNT
} grpc_context_index;

@ -32,7 +32,7 @@ grpc_slice grpc_compression_algorithm_slice(
/** Find compression algorithm based on passed in mdstr - returns
* GRPC_COMPRESS_ALGORITHM_COUNT on failure */
grpc_compression_algorithm grpc_compression_algorithm_from_slice(
grpc_slice str);
const grpc_slice& str);
/** Return compression algorithm based metadata element */
grpc_mdelem grpc_compression_encoding_mdelem(
@ -51,11 +51,11 @@ grpc_mdelem grpc_stream_compression_encoding_mdelem(
/** Find compression algorithm based on passed in mdstr - returns
* GRPC_COMPRESS_ALGORITHM_COUNT on failure */
grpc_message_compression_algorithm
grpc_message_compression_algorithm_from_slice(grpc_slice str);
grpc_message_compression_algorithm_from_slice(const grpc_slice& str);
/** Find stream compression algorithm based on passed in mdstr - returns
* GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT on failure */
grpc_stream_compression_algorithm grpc_stream_compression_algorithm_from_slice(
grpc_slice str);
const grpc_slice& str);
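/* Editorial sketch, not part of this change: a hypothetical helper showing a
   lookup through the compression-algorithm helper declared earlier in this
   header; the literal is an example value, and the *_COUNT sentinel signals
   an unrecognized name. */
static void ExampleCompressionLookupUsage() {
  grpc_slice val = grpc_slice_from_static_string("gzip");
  grpc_compression_algorithm algorithm =
      grpc_compression_algorithm_from_slice(val);
  (void)algorithm; /* GRPC_COMPRESS_GZIP for this input */
  grpc_slice_unref_internal(val);
}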
#endif /* GRPC_CORE_LIB_COMPRESSION_ALGORITHM_METADATA_H */

@ -147,7 +147,7 @@ grpc_slice grpc_compression_algorithm_slice(
}
grpc_compression_algorithm grpc_compression_algorithm_from_slice(
grpc_slice str) {
const grpc_slice& str) {
if (grpc_slice_eq(str, GRPC_MDSTR_IDENTITY)) return GRPC_COMPRESS_NONE;
if (grpc_slice_eq(str, GRPC_MDSTR_DEFLATE)) return GRPC_COMPRESS_DEFLATE;
if (grpc_slice_eq(str, GRPC_MDSTR_GZIP)) return GRPC_COMPRESS_GZIP;

@ -32,7 +32,7 @@
/* Interfaces related to MD */
grpc_message_compression_algorithm
grpc_message_compression_algorithm_from_slice(grpc_slice str) {
grpc_message_compression_algorithm_from_slice(const grpc_slice& str) {
if (grpc_slice_eq(str, GRPC_MDSTR_IDENTITY))
return GRPC_MESSAGE_COMPRESS_NONE;
if (grpc_slice_eq(str, GRPC_MDSTR_DEFLATE))
@ -42,7 +42,7 @@ grpc_message_compression_algorithm_from_slice(grpc_slice str) {
}
grpc_stream_compression_algorithm grpc_stream_compression_algorithm_from_slice(
grpc_slice str) {
const grpc_slice& str) {
if (grpc_slice_eq(str, GRPC_MDSTR_IDENTITY)) return GRPC_STREAM_COMPRESS_NONE;
if (grpc_slice_eq(str, GRPC_MDSTR_GZIP)) return GRPC_STREAM_COMPRESS_GZIP;
return GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT;

@ -229,7 +229,8 @@ static void internal_request_begin(grpc_httpcli_context* context,
const grpc_httpcli_request* request,
grpc_millis deadline, grpc_closure* on_done,
grpc_httpcli_response* response,
const char* name, grpc_slice request_text) {
const char* name,
const grpc_slice& request_text) {
internal_request* req =
static_cast<internal_request*>(gpr_malloc(sizeof(internal_request)));
memset(req, 0, sizeof(*req));

@ -351,7 +351,8 @@ void grpc_http_response_destroy(grpc_http_response* response) {
gpr_free(response->hdrs);
}
grpc_error* grpc_http_parser_parse(grpc_http_parser* parser, grpc_slice slice,
grpc_error* grpc_http_parser_parse(grpc_http_parser* parser,
const grpc_slice& slice,
size_t* start_of_body) {
for (size_t i = 0; i < GRPC_SLICE_LENGTH(slice); i++) {
bool found_body_start = false;

@ -101,7 +101,8 @@ void grpc_http_parser_init(grpc_http_parser* parser, grpc_http_type type,
void grpc_http_parser_destroy(grpc_http_parser* parser);
/* Sets \a start_of_body to the offset in \a slice of the start of the body. */
grpc_error* grpc_http_parser_parse(grpc_http_parser* parser, grpc_slice slice,
grpc_error* grpc_http_parser_parse(grpc_http_parser* parser,
const grpc_slice& slice,
size_t* start_of_body);
grpc_error* grpc_http_parser_eof(grpc_http_parser* parser);
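/* Editorial sketch, not part of this change: a hypothetical helper feeding a
   minimal example response through the parser declared above. */
static void ExampleHttpParseUsage() {
  grpc_http_parser parser;
  grpc_http_response response = {};
  grpc_http_parser_init(&parser, GRPC_HTTP_RESPONSE, &response);
  grpc_slice chunk = grpc_slice_from_static_string("HTTP/1.1 200 OK\r\n\r\n");
  size_t body_start = 0;
  grpc_error* err = grpc_http_parser_parse(&parser, chunk, &body_start);
  if (err == GRPC_ERROR_NONE) err = grpc_http_parser_eof(&parser);
  GRPC_ERROR_UNREF(err);
  grpc_slice_unref_internal(chunk);
  grpc_http_parser_destroy(&parser);
  grpc_http_response_destroy(&response);
}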

@ -150,13 +150,12 @@ static void unref_errs(grpc_error* err) {
}
}
static void unref_slice(grpc_slice slice) { grpc_slice_unref_internal(slice); }
static void unref_strs(grpc_error* err) {
for (size_t which = 0; which < GRPC_ERROR_STR_MAX; ++which) {
uint8_t slot = err->strs[which];
if (slot != UINT8_MAX) {
unref_slice(*reinterpret_cast<grpc_slice*>(err->arena + slot));
grpc_slice_unref_internal(
*reinterpret_cast<grpc_slice*>(err->arena + slot));
}
}
}
@ -231,7 +230,7 @@ static void internal_set_int(grpc_error** err, grpc_error_ints which,
}
static void internal_set_str(grpc_error** err, grpc_error_strs which,
grpc_slice value) {
const grpc_slice& value) {
uint8_t slot = (*err)->strs[which];
if (slot == UINT8_MAX) {
slot = get_placement(err, sizeof(value));
@ -243,7 +242,8 @@ static void internal_set_str(grpc_error** err, grpc_error_strs which,
return;
}
} else {
unref_slice(*reinterpret_cast<grpc_slice*>((*err)->arena + slot));
grpc_slice_unref_internal(
*reinterpret_cast<grpc_slice*>((*err)->arena + slot));
}
(*err)->strs[which] = slot;
memcpy((*err)->arena + slot, &value, sizeof(value));
@ -313,8 +313,8 @@ void grpc_enable_error_creation() {
gpr_atm_no_barrier_store(&g_error_creation_allowed, true);
}
grpc_error* grpc_error_create(const char* file, int line, grpc_slice desc,
grpc_error** referencing,
grpc_error* grpc_error_create(const char* file, int line,
const grpc_slice& desc, grpc_error** referencing,
size_t num_referencing) {
GPR_TIMER_SCOPE("grpc_error_create", 0);
uint8_t initial_arena_capacity = static_cast<uint8_t>(
@ -472,7 +472,7 @@ bool grpc_error_get_int(grpc_error* err, grpc_error_ints which, intptr_t* p) {
}
grpc_error* grpc_error_set_str(grpc_error* src, grpc_error_strs which,
grpc_slice str) {
const grpc_slice& str) {
GPR_TIMER_SCOPE("grpc_error_set_str", 0);
grpc_error* new_err = copy_error_and_unref(src);
internal_set_str(&new_err, which, str);
@ -620,7 +620,7 @@ static char* key_str(grpc_error_strs which) {
return gpr_strdup(error_str_name(which));
}
static char* fmt_str(grpc_slice slice) {
static char* fmt_str(const grpc_slice& slice) {
char* s = nullptr;
size_t sz = 0;
size_t cap = 0;

@ -138,8 +138,9 @@ void grpc_enable_error_creation();
const char* grpc_error_string(grpc_error* error);
/// Create an error - but use GRPC_ERROR_CREATE instead
grpc_error* grpc_error_create(const char* file, int line, grpc_slice desc,
grpc_error** referencing, size_t num_referencing);
grpc_error* grpc_error_create(const char* file, int line,
const grpc_slice& desc, grpc_error** referencing,
size_t num_referencing);
/// Create an error (this is the preferred way of generating an error that is
/// not due to a system call - for system calls, use GRPC_OS_ERROR or
/// GRPC_WSA_ERROR as appropriate)
@ -200,7 +201,7 @@ bool grpc_error_get_int(grpc_error* error, grpc_error_ints which, intptr_t* p);
/// This call takes ownership of the slice; the error is responsible for
/// eventually unref-ing it.
grpc_error* grpc_error_set_str(grpc_error* src, grpc_error_strs which,
grpc_slice str) GRPC_MUST_USE_RESULT;
const grpc_slice& str) GRPC_MUST_USE_RESULT;
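// Editorial sketch, not part of this change: a hypothetical helper showing the
// typical create-then-annotate pattern with the setter above; the strings are
// example values.
static void ExampleErrorAnnotateUsage() {
  grpc_error* err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("handshake failed");
  err = grpc_error_set_str(
      err, GRPC_ERROR_STR_TARGET_ADDRESS,
      grpc_slice_from_static_string("ipv4:127.0.0.1:443"));
  // ... log or propagate ...
  GRPC_ERROR_UNREF(err);
}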
/// Returns false if the specified string is not set.
/// Caller does NOT own the slice.
bool grpc_error_get_str(grpc_error* error, grpc_error_strs which,

@ -250,8 +250,6 @@ static void notify_on_read(grpc_tcp* tcp) {
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_INFO, "TCP:%p notify_on_read", tcp);
}
GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
grpc_schedule_on_exec_ctx);
grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_done_closure);
}
@ -1157,6 +1155,8 @@ grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
grpc_resource_quota_unref_internal(resource_quota);
gpr_mu_init(&tcp->tb_mu);
tcp->tb_head = nullptr;
GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
grpc_schedule_on_exec_ctx);
/* Start being notified on errors if event engine can track errors. */
if (grpc_event_engine_can_track_errors()) {
/* Grab a ref to tcp so that we can safely access the tcp struct when

@ -134,7 +134,8 @@ static void jose_header_destroy(jose_header* h) {
}
/* Takes ownership of json and buffer. */
static jose_header* jose_header_from_json(grpc_json* json, grpc_slice buffer) {
static jose_header* jose_header_from_json(grpc_json* json,
const grpc_slice& buffer) {
grpc_json* cur;
jose_header* h = static_cast<jose_header*>(gpr_zalloc(sizeof(jose_header)));
h->buffer = buffer;
@ -235,7 +236,8 @@ gpr_timespec grpc_jwt_claims_not_before(const grpc_jwt_claims* claims) {
}
/* Takes ownership of json and buffer even in case of failure. */
grpc_jwt_claims* grpc_jwt_claims_from_json(grpc_json* json, grpc_slice buffer) {
grpc_jwt_claims* grpc_jwt_claims_from_json(grpc_json* json,
const grpc_slice& buffer) {
grpc_json* cur;
grpc_jwt_claims* claims =
static_cast<grpc_jwt_claims*>(gpr_malloc(sizeof(grpc_jwt_claims)));
@ -350,7 +352,7 @@ typedef struct {
/* Takes ownership of the header, claims and signature. */
static verifier_cb_ctx* verifier_cb_ctx_create(
grpc_jwt_verifier* verifier, grpc_pollset* pollset, jose_header* header,
grpc_jwt_claims* claims, const char* audience, grpc_slice signature,
grpc_jwt_claims* claims, const char* audience, const grpc_slice& signature,
const char* signed_jwt, size_t signed_jwt_len, void* user_data,
grpc_jwt_verification_done_cb cb) {
grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
@ -602,7 +604,8 @@ static EVP_PKEY* find_verification_key(const grpc_json* json,
}
static int verify_jwt_signature(EVP_PKEY* key, const char* alg,
grpc_slice signature, grpc_slice signed_data) {
const grpc_slice& signature,
const grpc_slice& signed_data) {
EVP_MD_CTX* md_ctx = EVP_MD_CTX_create();
const EVP_MD* md = evp_md_from_alg(alg);
int result = 0;

@ -115,7 +115,8 @@ void grpc_jwt_verifier_verify(grpc_jwt_verifier* verifier,
/* --- TESTING ONLY exposed functions. --- */
grpc_jwt_claims* grpc_jwt_claims_from_json(grpc_json* json, grpc_slice buffer);
grpc_jwt_claims* grpc_jwt_claims_from_json(grpc_json* json,
const grpc_slice& buffer);
grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims* claims,
const char* audience);
const char* grpc_jwt_issuer_email_domain(const char* issuer);

@ -28,8 +28,8 @@ extern const grpc_channel_filter grpc_client_auth_filter;
extern const grpc_channel_filter grpc_server_auth_filter;
void grpc_auth_metadata_context_build(
const char* url_scheme, grpc_slice call_host, grpc_slice call_method,
grpc_auth_context* auth_context,
const char* url_scheme, const grpc_slice& call_host,
const grpc_slice& call_method, grpc_auth_context* auth_context,
grpc_auth_metadata_context* auth_md_context);
void grpc_auth_metadata_context_reset(grpc_auth_metadata_context* context);

@ -41,12 +41,42 @@
#define MAX_CREDENTIALS_METADATA_COUNT 4
namespace {
/* We can have per-channel credentials. */
struct channel_data {
channel_data(grpc_channel_security_connector* security_connector,
grpc_auth_context* auth_context)
: security_connector(
security_connector->Ref(DEBUG_LOCATION, "client_auth_filter")),
auth_context(auth_context->Ref(DEBUG_LOCATION, "client_auth_filter")) {}
~channel_data() {
security_connector.reset(DEBUG_LOCATION, "client_auth_filter");
auth_context.reset(DEBUG_LOCATION, "client_auth_filter");
}
grpc_core::RefCountedPtr<grpc_channel_security_connector> security_connector;
grpc_core::RefCountedPtr<grpc_auth_context> auth_context;
};
/* We can have per-call credentials. */
struct call_data {
call_data(grpc_call_element* elem, const grpc_call_element_args& args)
: arena(args.arena),
owning_call(args.call_stack),
call_combiner(args.call_combiner) {}
: owning_call(args.call_stack), call_combiner(args.call_combiner) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
GPR_ASSERT(args.context != nullptr);
if (args.context[GRPC_CONTEXT_SECURITY].value == nullptr) {
args.context[GRPC_CONTEXT_SECURITY].value =
grpc_client_security_context_create(args.arena, /*creds=*/nullptr);
args.context[GRPC_CONTEXT_SECURITY].destroy =
grpc_client_security_context_destroy;
}
grpc_client_security_context* sec_ctx =
static_cast<grpc_client_security_context*>(
args.context[GRPC_CONTEXT_SECURITY].value);
sec_ctx->auth_context.reset(DEBUG_LOCATION, "client_auth_filter");
sec_ctx->auth_context =
chand->auth_context->Ref(DEBUG_LOCATION, "client_auth_filter");
}
// This method is technically the dtor of this class. However, since
// `get_request_metadata_cancel_closure` can run in parallel to
@ -61,7 +91,6 @@ struct call_data {
grpc_auth_metadata_context_reset(&auth_md_context);
}
gpr_arena* arena;
grpc_call_stack* owning_call;
grpc_call_combiner* call_combiner;
grpc_core::RefCountedPtr<grpc_call_credentials> creds;
@ -81,21 +110,6 @@ struct call_data {
grpc_closure get_request_metadata_cancel_closure;
};
/* We can have per-channel credentials. */
struct channel_data {
channel_data(grpc_channel_security_connector* security_connector,
grpc_auth_context* auth_context)
: security_connector(
security_connector->Ref(DEBUG_LOCATION, "client_auth_filter")),
auth_context(auth_context->Ref(DEBUG_LOCATION, "client_auth_filter")) {}
~channel_data() {
security_connector.reset(DEBUG_LOCATION, "client_auth_filter");
auth_context.reset(DEBUG_LOCATION, "client_auth_filter");
}
grpc_core::RefCountedPtr<grpc_channel_security_connector> security_connector;
grpc_core::RefCountedPtr<grpc_auth_context> auth_context;
};
} // namespace
void grpc_auth_metadata_context_reset(
@ -155,8 +169,8 @@ static void on_credentials_metadata(void* arg, grpc_error* input_error) {
}
void grpc_auth_metadata_context_build(
const char* url_scheme, grpc_slice call_host, grpc_slice call_method,
grpc_auth_context* auth_context,
const char* url_scheme, const grpc_slice& call_host,
const grpc_slice& call_method, grpc_auth_context* auth_context,
grpc_auth_metadata_context* auth_md_context) {
char* service = grpc_slice_to_c_string(call_method);
char* last_slash = strrchr(service, '/');
@ -307,24 +321,6 @@ static void auth_start_transport_stream_op_batch(
call_data* calld = static_cast<call_data*>(elem->call_data);
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
if (!batch->cancel_stream) {
// TODO(hcaseyal): move this to init_call_elem once issue #15927 is
// resolved.
GPR_ASSERT(batch->payload->context != nullptr);
if (batch->payload->context[GRPC_CONTEXT_SECURITY].value == nullptr) {
batch->payload->context[GRPC_CONTEXT_SECURITY].value =
grpc_client_security_context_create(calld->arena, /*creds=*/nullptr);
batch->payload->context[GRPC_CONTEXT_SECURITY].destroy =
grpc_client_security_context_destroy;
}
grpc_client_security_context* sec_ctx =
static_cast<grpc_client_security_context*>(
batch->payload->context[GRPC_CONTEXT_SECURITY].value);
sec_ctx->auth_context.reset(DEBUG_LOCATION, "client_auth_filter");
sec_ctx->auth_context =
chand->auth_context->Ref(DEBUG_LOCATION, "client_auth_filter");
}
if (batch->send_initial_metadata) {
grpc_metadata_batch* metadata =
batch->payload->send_initial_metadata.send_initial_metadata;

@ -38,7 +38,7 @@ static bool is_unreserved_character(uint8_t c,
return ((unreserved_bytes[c / 8] >> (c % 8)) & 1) != 0;
}
grpc_slice grpc_percent_encode_slice(grpc_slice slice,
grpc_slice grpc_percent_encode_slice(const grpc_slice& slice,
const uint8_t* unreserved_bytes) {
static const uint8_t hex[] = "0123456789ABCDEF";
@ -86,7 +86,7 @@ static uint8_t dehex(uint8_t c) {
GPR_UNREACHABLE_CODE(return 255);
}
bool grpc_strict_percent_decode_slice(grpc_slice slice_in,
bool grpc_strict_percent_decode_slice(const grpc_slice& slice_in,
const uint8_t* unreserved_bytes,
grpc_slice* slice_out) {
const uint8_t* p = GRPC_SLICE_START_PTR(slice_in);
@ -126,7 +126,7 @@ bool grpc_strict_percent_decode_slice(grpc_slice slice_in,
return true;
}
grpc_slice grpc_permissive_percent_decode_slice(grpc_slice slice_in) {
grpc_slice grpc_permissive_percent_decode_slice(const grpc_slice& slice_in) {
const uint8_t* p = GRPC_SLICE_START_PTR(slice_in);
const uint8_t* in_end = GRPC_SLICE_END_PTR(slice_in);
size_t out_length = 0;

@ -46,7 +46,7 @@ extern const uint8_t grpc_compatible_percent_encoding_unreserved_bytes[256 / 8];
/* Percent-encode a slice, returning the new slice (this cannot fail):
unreserved_bytes is a bitfield indicating which bytes are considered
unreserved and thus do not need percent encoding */
grpc_slice grpc_percent_encode_slice(grpc_slice slice,
grpc_slice grpc_percent_encode_slice(const grpc_slice& slice,
const uint8_t* unreserved_bytes);
/* Percent-decode a slice, strictly.
If the input is legal (contains no unreserved bytes, and legal % encodings),
@ -54,12 +54,12 @@ grpc_slice grpc_percent_encode_slice(grpc_slice slice,
If the input is not legal, returns false and leaves *slice_out untouched.
unreserved_bytes is a bitfield indicating which bytes are considered
unreserved and thus do not need percent encoding */
bool grpc_strict_percent_decode_slice(grpc_slice slice_in,
bool grpc_strict_percent_decode_slice(const grpc_slice& slice_in,
const uint8_t* unreserved_bytes,
grpc_slice* slice_out);
/* Percent-decode a slice, permissively.
If a % triplet can not be decoded, pass it through verbatim.
This cannot fail. */
grpc_slice grpc_permissive_percent_decode_slice(grpc_slice slice_in);
grpc_slice grpc_permissive_percent_decode_slice(const grpc_slice& slice_in);
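/* Editorial sketch, not part of this change: a hypothetical helper
   round-tripping an example string through the strict encoder/decoder declared
   above, using the "compatible" unreserved set from this header. */
static void ExamplePercentRoundTripUsage() {
  grpc_slice raw = grpc_slice_from_static_string("hello world");
  grpc_slice encoded = grpc_percent_encode_slice(
      raw, grpc_compatible_percent_encoding_unreserved_bytes);
  grpc_slice decoded;
  if (grpc_strict_percent_decode_slice(
          encoded, grpc_compatible_percent_encoding_unreserved_bytes,
          &decoded)) {
    /* 'decoded' holds the original bytes of 'raw'. */
    grpc_slice_unref_internal(decoded);
  }
  grpc_slice_unref_internal(encoded);
  grpc_slice_unref_internal(raw);
}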
#endif /* GRPC_CORE_LIB_SLICE_PERCENT_ENCODING_H */

@ -50,19 +50,6 @@ grpc_slice grpc_slice_copy(grpc_slice s) {
return out;
}
grpc_slice grpc_slice_ref_internal(grpc_slice slice) {
if (slice.refcount) {
slice.refcount->vtable->ref(slice.refcount);
}
return slice;
}
void grpc_slice_unref_internal(grpc_slice slice) {
if (slice.refcount) {
slice.refcount->vtable->unref(slice.refcount);
}
}
/* Public API */
grpc_slice grpc_slice_ref(grpc_slice slice) {
return grpc_slice_ref_internal(slice);

@ -88,7 +88,7 @@ class SliceHashTable : public RefCounted<SliceHashTable<T>> {
SliceHashTable(size_t num_entries, Entry* entries, ValueCmp value_cmp);
virtual ~SliceHashTable();
void Add(grpc_slice key, T& value);
void Add(const grpc_slice& key, T& value);
// Default value comparison function, if none specified by caller.
static int DefaultValueCmp(const T& a, const T& b) { return GPR_ICMP(a, b); }
@ -137,7 +137,7 @@ SliceHashTable<T>::~SliceHashTable() {
}
template <typename T>
void SliceHashTable<T>::Add(grpc_slice key, T& value) {
void SliceHashTable<T>::Add(const grpc_slice& key, T& value) {
const size_t hash = grpc_slice_hash(key);
for (size_t offset = 0; offset < size_; ++offset) {
const size_t idx = (hash + offset) % size_;

@ -196,7 +196,7 @@ grpc_slice grpc_slice_maybe_static_intern(grpc_slice slice,
return slice;
}
bool grpc_slice_is_interned(grpc_slice slice) {
bool grpc_slice_is_interned(const grpc_slice& slice) {
return (slice.refcount && slice.refcount->vtable == &interned_slice_vtable) ||
GRPC_IS_STATIC_METADATA_STRING(slice);
}

@ -24,15 +24,26 @@
#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
grpc_slice grpc_slice_ref_internal(grpc_slice slice);
void grpc_slice_unref_internal(grpc_slice slice);
inline const grpc_slice& grpc_slice_ref_internal(const grpc_slice& slice) {
if (slice.refcount) {
slice.refcount->vtable->ref(slice.refcount);
}
return slice;
}
inline void grpc_slice_unref_internal(const grpc_slice& slice) {
if (slice.refcount) {
slice.refcount->vtable->unref(slice.refcount);
}
}
void grpc_slice_buffer_reset_and_unref_internal(grpc_slice_buffer* sb);
void grpc_slice_buffer_partial_unref_internal(grpc_slice_buffer* sb,
size_t idx);
void grpc_slice_buffer_destroy_internal(grpc_slice_buffer* sb);
/* Check if a slice is interned */
bool grpc_slice_is_interned(grpc_slice slice);
bool grpc_slice_is_interned(const grpc_slice& slice);
void grpc_slice_intern_init(void);
void grpc_slice_intern_shutdown(void);

@ -24,8 +24,8 @@
#include <grpc/slice.h>
#include <stdbool.h>
bool grpc_slice_is_legal_header(grpc_slice s);
bool grpc_slice_is_legal_nonbin_header(grpc_slice s);
bool grpc_slice_is_bin_suffixed(grpc_slice s);
bool grpc_slice_is_legal_header(const grpc_slice& s);
bool grpc_slice_is_legal_nonbin_header(const grpc_slice& s);
bool grpc_slice_is_bin_suffixed(const grpc_slice& s);
#endif /* GRPC_CORE_LIB_SLICE_SLICE_TRAITS_H */

@ -46,7 +46,7 @@ class SliceWeakHashTable : public RefCounted<SliceWeakHashTable<T, Size>> {
/// Add a mapping from \a key to \a value, taking ownership of \a key. This
/// operation will always succeed. It may discard older entries.
void Add(grpc_slice key, T value) {
void Add(const grpc_slice& key, T value) {
const size_t idx = grpc_slice_hash(key) % Size;
entries_[idx].Set(key, std::move(value));
return;
@ -54,7 +54,7 @@ class SliceWeakHashTable : public RefCounted<SliceWeakHashTable<T, Size>> {
/// Returns the value from the table associated with \a key or null if not
/// found.
const T* Get(const grpc_slice key) const {
const T* Get(const grpc_slice& key) const {
const size_t idx = grpc_slice_hash(key) % Size;
const auto& entry = entries_[idx];
return grpc_slice_eq(entry.key(), key) ? entry.value() : nullptr;
@ -79,7 +79,7 @@ class SliceWeakHashTable : public RefCounted<SliceWeakHashTable<T, Size>> {
~Entry() {
if (is_set_) grpc_slice_unref_internal(key_);
}
grpc_slice key() const { return key_; }
const grpc_slice& key() const { return key_; }
/// Return the entry's value, or null if unset.
const T* value() const {
@ -88,7 +88,7 @@ class SliceWeakHashTable : public RefCounted<SliceWeakHashTable<T, Size>> {
}
/// Set the \a key and \a value (which is moved) for the entry.
void Set(grpc_slice key, T&& value) {
void Set(const grpc_slice& key, T&& value) {
if (is_set_) grpc_slice_unref_internal(key_);
key_ = key;
value_ = std::move(value);

@ -71,6 +71,12 @@ grpc_core::DebugOnlyTraceFlag grpc_trace_metadata(false, "metadata");
typedef void (*destroy_user_data_func)(void* user_data);
struct UserData {
gpr_mu mu_user_data;
gpr_atm destroy_user_data;
gpr_atm user_data;
};
/* Shadow structure for grpc_mdelem_data for interned elements */
typedef struct interned_metadata {
/* must be byte compatible with grpc_mdelem_data */
@ -80,9 +86,7 @@ typedef struct interned_metadata {
/* private only data */
gpr_atm refcnt;
gpr_mu mu_user_data;
gpr_atm destroy_user_data;
gpr_atm user_data;
UserData user_data;
struct interned_metadata* bucket_next;
} interned_metadata;
@ -95,6 +99,8 @@ typedef struct allocated_metadata {
/* private only data */
gpr_atm refcnt;
UserData user_data;
} allocated_metadata;
typedef struct mdtab_shard {
@ -178,16 +184,17 @@ static void gc_mdtab(mdtab_shard* shard) {
for (i = 0; i < shard->capacity; i++) {
prev_next = &shard->elems[i];
for (md = shard->elems[i]; md; md = next) {
void* user_data = (void*)gpr_atm_no_barrier_load(&md->user_data);
void* user_data =
(void*)gpr_atm_no_barrier_load(&md->user_data.user_data);
next = md->bucket_next;
if (gpr_atm_acq_load(&md->refcnt) == 0) {
grpc_slice_unref_internal(md->key);
grpc_slice_unref_internal(md->value);
if (md->user_data) {
if (md->user_data.user_data) {
((destroy_user_data_func)gpr_atm_no_barrier_load(
&md->destroy_user_data))(user_data);
&md->user_data.destroy_user_data))(user_data);
}
gpr_mu_destroy(&md->mu_user_data);
gpr_mu_destroy(&md->user_data.mu_user_data);
gpr_free(md);
*prev_next = next;
num_freed++;
@ -251,6 +258,9 @@ grpc_mdelem grpc_mdelem_create(
allocated->key = grpc_slice_ref_internal(key);
allocated->value = grpc_slice_ref_internal(value);
gpr_atm_rel_store(&allocated->refcnt, 1);
allocated->user_data.user_data = 0;
allocated->user_data.destroy_user_data = 0;
gpr_mu_init(&allocated->user_data.mu_user_data);
#ifndef NDEBUG
if (grpc_trace_metadata.enabled()) {
char* key_str = grpc_slice_to_c_string(allocated->key);
@ -299,11 +309,11 @@ grpc_mdelem grpc_mdelem_create(
gpr_atm_rel_store(&md->refcnt, 1);
md->key = grpc_slice_ref_internal(key);
md->value = grpc_slice_ref_internal(value);
md->user_data = 0;
md->destroy_user_data = 0;
md->user_data.user_data = 0;
md->user_data.destroy_user_data = 0;
md->bucket_next = shard->elems[idx];
shard->elems[idx] = md;
gpr_mu_init(&md->mu_user_data);
gpr_mu_init(&md->user_data.mu_user_data);
#ifndef NDEBUG
if (grpc_trace_metadata.enabled()) {
char* key_str = grpc_slice_to_c_string(md->key);
@ -450,6 +460,13 @@ void grpc_mdelem_unref(grpc_mdelem gmd DEBUG_ARGS) {
if (1 == prev_refcount) {
grpc_slice_unref_internal(md->key);
grpc_slice_unref_internal(md->value);
if (md->user_data.user_data) {
destroy_user_data_func destroy_user_data =
(destroy_user_data_func)gpr_atm_no_barrier_load(
&md->user_data.destroy_user_data);
destroy_user_data((void*)md->user_data.user_data);
}
gpr_mu_destroy(&md->user_data.mu_user_data);
gpr_free(md);
}
break;
@ -457,58 +474,74 @@ void grpc_mdelem_unref(grpc_mdelem gmd DEBUG_ARGS) {
}
}
static void* get_user_data(UserData* user_data, void (*destroy_func)(void*)) {
if (gpr_atm_acq_load(&user_data->destroy_user_data) ==
(gpr_atm)destroy_func) {
return (void*)gpr_atm_no_barrier_load(&user_data->user_data);
} else {
return nullptr;
}
}
void* grpc_mdelem_get_user_data(grpc_mdelem md, void (*destroy_func)(void*)) {
switch (GRPC_MDELEM_STORAGE(md)) {
case GRPC_MDELEM_STORAGE_EXTERNAL:
case GRPC_MDELEM_STORAGE_ALLOCATED:
return nullptr;
case GRPC_MDELEM_STORAGE_STATIC:
return (void*)grpc_static_mdelem_user_data[GRPC_MDELEM_DATA(md) -
grpc_static_mdelem_table];
case GRPC_MDELEM_STORAGE_ALLOCATED: {
allocated_metadata* am =
reinterpret_cast<allocated_metadata*>(GRPC_MDELEM_DATA(md));
return get_user_data(&am->user_data, destroy_func);
}
case GRPC_MDELEM_STORAGE_INTERNED: {
interned_metadata* im =
reinterpret_cast<interned_metadata*> GRPC_MDELEM_DATA(md);
void* result;
if (gpr_atm_acq_load(&im->destroy_user_data) == (gpr_atm)destroy_func) {
return (void*)gpr_atm_no_barrier_load(&im->user_data);
} else {
return nullptr;
}
return result;
return get_user_data(&im->user_data, destroy_func);
}
}
GPR_UNREACHABLE_CODE(return nullptr);
}
static void* set_user_data(UserData* ud, void (*destroy_func)(void*),
void* user_data) {
GPR_ASSERT((user_data == nullptr) == (destroy_func == nullptr));
gpr_mu_lock(&ud->mu_user_data);
if (gpr_atm_no_barrier_load(&ud->destroy_user_data)) {
/* user data can only be set once */
gpr_mu_unlock(&ud->mu_user_data);
if (destroy_func != nullptr) {
destroy_func(user_data);
}
return (void*)gpr_atm_no_barrier_load(&ud->user_data);
}
gpr_atm_no_barrier_store(&ud->user_data, (gpr_atm)user_data);
gpr_atm_rel_store(&ud->destroy_user_data, (gpr_atm)destroy_func);
gpr_mu_unlock(&ud->mu_user_data);
return user_data;
}
void* grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void*),
void* user_data) {
switch (GRPC_MDELEM_STORAGE(md)) {
case GRPC_MDELEM_STORAGE_EXTERNAL:
case GRPC_MDELEM_STORAGE_ALLOCATED:
destroy_func(user_data);
return nullptr;
case GRPC_MDELEM_STORAGE_STATIC:
destroy_func(user_data);
return (void*)grpc_static_mdelem_user_data[GRPC_MDELEM_DATA(md) -
grpc_static_mdelem_table];
case GRPC_MDELEM_STORAGE_ALLOCATED: {
allocated_metadata* am =
reinterpret_cast<allocated_metadata*>(GRPC_MDELEM_DATA(md));
return set_user_data(&am->user_data, destroy_func, user_data);
}
case GRPC_MDELEM_STORAGE_INTERNED: {
interned_metadata* im =
reinterpret_cast<interned_metadata*> GRPC_MDELEM_DATA(md);
GPR_ASSERT(!is_mdelem_static(md));
GPR_ASSERT((user_data == nullptr) == (destroy_func == nullptr));
gpr_mu_lock(&im->mu_user_data);
if (gpr_atm_no_barrier_load(&im->destroy_user_data)) {
/* user data can only be set once */
gpr_mu_unlock(&im->mu_user_data);
if (destroy_func != nullptr) {
destroy_func(user_data);
}
return (void*)gpr_atm_no_barrier_load(&im->user_data);
}
gpr_atm_no_barrier_store(&im->user_data, (gpr_atm)user_data);
gpr_atm_rel_store(&im->destroy_user_data, (gpr_atm)destroy_func);
gpr_mu_unlock(&im->mu_user_data);
return user_data;
return set_user_data(&im->user_data, destroy_func, user_data);
}
}
GPR_UNREACHABLE_CODE(return nullptr);
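// Editorial sketch, not part of this change: a hypothetical helper showing how
// callers use the user-data accessors above. The destroy function doubles as
// the lookup key, and only the first successful set is kept for the lifetime
// of the element.
namespace {
struct ExampleCachedValue {
  int parsed_value;
};
void ExampleDestroyCachedValue(void* p) { gpr_free(p); }
void* ExampleGetOrCreateCachedValue(grpc_mdelem md) {
  void* cached = grpc_mdelem_get_user_data(md, ExampleDestroyCachedValue);
  if (cached == nullptr) {
    cached = grpc_mdelem_set_user_data(md, ExampleDestroyCachedValue,
                                       gpr_zalloc(sizeof(ExampleCachedValue)));
  }
  return cached;
}
}  // namespace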

@ -227,7 +227,7 @@ void grpc_metadata_batch_remove(grpc_metadata_batch* batch,
}
void grpc_metadata_batch_set_value(grpc_linked_mdelem* storage,
grpc_slice value) {
const grpc_slice& value) {
grpc_mdelem old_mdelem = storage->md;
grpc_mdelem new_mdelem = grpc_mdelem_from_slices(
grpc_slice_ref_internal(GRPC_MDKEY(old_mdelem)), value);

@ -74,7 +74,7 @@ grpc_error* grpc_metadata_batch_substitute(grpc_metadata_batch* batch,
grpc_mdelem new_value);
void grpc_metadata_batch_set_value(grpc_linked_mdelem* storage,
grpc_slice value);
const grpc_slice& value);
/** Add \a storage to the beginning of \a batch. storage->md is
assumed to be valid.

@ -92,7 +92,7 @@ class ServiceConfig : public RefCounted<ServiceConfig> {
/// Caller does NOT own a reference to the result.
template <typename T>
static RefCountedPtr<T> MethodConfigTableLookup(
const SliceHashTable<RefCountedPtr<T>>& table, grpc_slice path);
const SliceHashTable<RefCountedPtr<T>>& table, const grpc_slice& path);
private:
// So New() can call our private ctor.
@ -223,7 +223,7 @@ ServiceConfig::CreateMethodConfigTable(CreateValue<T> create_value) {
template <typename T>
RefCountedPtr<T> ServiceConfig::MethodConfigTableLookup(
const SliceHashTable<RefCountedPtr<T>>& table, grpc_slice path) {
const SliceHashTable<RefCountedPtr<T>>& table, const grpc_slice& path) {
const RefCountedPtr<T>* value = table.Get(path);
// If we didn't find a match for the path, try looking for a wildcard
// entry (i.e., change "/service/method" to "/service/*").
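A rough illustration of that wildcard fallback (a sketch using plain std::string and a hypothetical helper name; the real lookup operates on the slice-keyed hash table above):
  // Hypothetical helper: "/service/method" -> "/service/*".
  std::string WildcardPathFor(const std::string& path) {
    const size_t last_slash = path.rfind('/');
    if (last_slash == std::string::npos) return path;  // no method component
    return path.substr(0, last_slash + 1) + "*";
  }
  // WildcardPathFor("/grpc.testing.EchoService/Echo") == "/grpc.testing.EchoService/*"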

@ -89,7 +89,7 @@ static int is_all_whitespace(const char* p, const char* end) {
return p == end;
}
int grpc_http2_decode_timeout(grpc_slice text, grpc_millis* timeout) {
int grpc_http2_decode_timeout(const grpc_slice& text, grpc_millis* timeout) {
grpc_millis x = 0;
const uint8_t* p = GRPC_SLICE_START_PTR(text);
const uint8_t* end = GRPC_SLICE_END_PTR(text);

@ -32,6 +32,6 @@
/* Encode/decode timeouts to the GRPC over HTTP/2 format;
encoding may round up arbitrarily */
void grpc_http2_encode_timeout(grpc_millis timeout, char* buffer);
int grpc_http2_decode_timeout(grpc_slice text, grpc_millis* timeout);
int grpc_http2_decode_timeout(const grpc_slice& text, grpc_millis* timeout);
#endif /* GRPC_CORE_LIB_TRANSPORT_TIMEOUT_ENCODING_H */
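For orientation (not part of the diff), a sketch of how these two functions pair up; the buffer size and the encoded form shown are assumptions, since encoding may round up:
  char buf[32];                          // assumed large enough for the encoding
  grpc_http2_encode_timeout(2500, buf);  // e.g. writes "2500m" in grpc-timeout format
  grpc_millis decoded = 0;
  grpc_slice text = grpc_slice_from_static_string("5S");
  GPR_ASSERT(grpc_http2_decode_timeout(text, &decoded) != 0);  // decoded == 5000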

@ -363,7 +363,7 @@ static tsi_result handshaker_client_next(alts_handshaker_client* c,
alts_grpc_handshaker_client* client =
reinterpret_cast<alts_grpc_handshaker_client*>(c);
grpc_slice_unref_internal(client->recv_bytes);
client->recv_bytes = grpc_slice_ref(*bytes_received);
client->recv_bytes = grpc_slice_ref_internal(*bytes_received);
grpc_byte_buffer* buffer = get_serialized_next(bytes_received);
if (buffer == nullptr) {
gpr_log(GPR_ERROR, "get_serialized_next() failed");
@ -406,7 +406,7 @@ static const alts_handshaker_client_vtable vtable = {
alts_handshaker_client* alts_grpc_handshaker_client_create(
alts_tsi_handshaker* handshaker, grpc_channel* channel,
const char* handshaker_service_url, grpc_pollset_set* interested_parties,
grpc_alts_credentials_options* options, grpc_slice target_name,
grpc_alts_credentials_options* options, const grpc_slice& target_name,
grpc_iomgr_cb_func grpc_cb, tsi_handshaker_on_next_done_cb cb,
void* user_data, alts_handshaker_client_vtable* vtable_for_testing,
bool is_client) {
@ -487,7 +487,7 @@ void alts_handshaker_client_set_recv_bytes_for_testing(
GPR_ASSERT(c != nullptr);
alts_grpc_handshaker_client* client =
reinterpret_cast<alts_grpc_handshaker_client*>(c);
client->recv_bytes = grpc_slice_ref(*recv_bytes);
client->recv_bytes = grpc_slice_ref_internal(*recv_bytes);
}
void alts_handshaker_client_set_fields_for_testing(

@ -138,7 +138,7 @@ void alts_handshaker_client_destroy(alts_handshaker_client* client);
alts_handshaker_client* alts_grpc_handshaker_client_create(
alts_tsi_handshaker* handshaker, grpc_channel* channel,
const char* handshaker_service_url, grpc_pollset_set* interested_parties,
grpc_alts_credentials_options* options, grpc_slice target_name,
grpc_alts_credentials_options* options, const grpc_slice& target_name,
grpc_iomgr_cb_func grpc_cb, tsi_handshaker_on_next_done_cb cb,
void* user_data, alts_handshaker_client_vtable* vtable_for_testing,
bool is_client);

@ -106,15 +106,16 @@ bool grpc_gcp_rpc_protocol_versions_encode(
}
bool grpc_gcp_rpc_protocol_versions_decode(
grpc_slice slice, grpc_gcp_rpc_protocol_versions* versions) {
const grpc_slice& slice, grpc_gcp_rpc_protocol_versions* versions) {
if (versions == nullptr) {
gpr_log(GPR_ERROR,
"version is nullptr in "
"grpc_gcp_rpc_protocol_versions_decode().");
return false;
}
pb_istream_t stream = pb_istream_from_buffer(GRPC_SLICE_START_PTR(slice),
GRPC_SLICE_LENGTH(slice));
pb_istream_t stream =
pb_istream_from_buffer(const_cast<uint8_t*>(GRPC_SLICE_START_PTR(slice)),
GRPC_SLICE_LENGTH(slice));
if (!pb_decode(&stream, grpc_gcp_RpcProtocolVersions_fields, versions)) {
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(&stream));
return false;

@ -112,7 +112,7 @@ bool grpc_gcp_rpc_protocol_versions_encode(
* The method returns true on success and false otherwise.
*/
bool grpc_gcp_rpc_protocol_versions_decode(
grpc_slice slice, grpc_gcp_rpc_protocol_versions* versions);
const grpc_slice& slice, grpc_gcp_rpc_protocol_versions* versions);
/**
* This method performs a deep copy operation on rpc protocol versions

@ -56,18 +56,19 @@ cdef class _BatchOperationTag:
self._retained_call = call
cdef void prepare(self) except *:
cdef Operation operation
self.c_nops = 0 if self._operations is None else len(self._operations)
if 0 < self.c_nops:
self.c_ops = <grpc_op *>gpr_malloc(sizeof(grpc_op) * self.c_nops)
for index, operation in enumerate(self._operations):
(<Operation>operation).c()
self.c_ops[index] = (<Operation>operation).c_op
operation.c()
self.c_ops[index] = operation.c_op
cdef BatchOperationEvent event(self, grpc_event c_event):
cdef Operation operation
if 0 < self.c_nops:
for index, operation in enumerate(self._operations):
(<Operation>operation).c_op = self.c_ops[index]
(<Operation>operation).un_c()
for operation in self._operations:
operation.un_c()
gpr_free(self.c_ops)
return BatchOperationEvent(
c_event.type, c_event.success, self._user_tag, self._operations)
@ -84,4 +85,4 @@ cdef class _ServerShutdownTag(_Tag):
cdef ServerShutdownEvent event(self, grpc_event c_event):
self._shutting_down_server.notify_shutdown_complete()
return ServerShutdownEvent(c_event.type, c_event.success, self._user_tag)
return ServerShutdownEvent(c_event.type, c_event.success, self._user_tag)

@ -70,6 +70,8 @@ extern void filter_call_init_fails(grpc_end2end_test_config config);
extern void filter_call_init_fails_pre_init(void);
extern void filter_causes_close(grpc_end2end_test_config config);
extern void filter_causes_close_pre_init(void);
extern void filter_context(grpc_end2end_test_config config);
extern void filter_context_pre_init(void);
extern void filter_latency(grpc_end2end_test_config config);
extern void filter_latency_pre_init(void);
extern void filter_status_code(grpc_end2end_test_config config);
@ -207,6 +209,7 @@ void grpc_end2end_tests_pre_init(void) {
empty_batch_pre_init();
filter_call_init_fails_pre_init();
filter_causes_close_pre_init();
filter_context_pre_init();
filter_latency_pre_init();
filter_status_code_pre_init();
graceful_server_shutdown_pre_init();
@ -292,6 +295,7 @@ void grpc_end2end_tests(int argc, char **argv,
empty_batch(config);
filter_call_init_fails(config);
filter_causes_close(config);
filter_context(config);
filter_latency(config);
filter_status_code(config);
graceful_server_shutdown(config);
@ -432,6 +436,10 @@ void grpc_end2end_tests(int argc, char **argv,
filter_causes_close(config);
continue;
}
if (0 == strcmp("filter_context", argv[i])) {
filter_context(config);
continue;
}
if (0 == strcmp("filter_latency", argv[i])) {
filter_latency(config);
continue;

@ -72,6 +72,8 @@ extern void filter_call_init_fails(grpc_end2end_test_config config);
extern void filter_call_init_fails_pre_init(void);
extern void filter_causes_close(grpc_end2end_test_config config);
extern void filter_causes_close_pre_init(void);
extern void filter_context(grpc_end2end_test_config config);
extern void filter_context_pre_init(void);
extern void filter_latency(grpc_end2end_test_config config);
extern void filter_latency_pre_init(void);
extern void filter_status_code(grpc_end2end_test_config config);
@ -210,6 +212,7 @@ void grpc_end2end_tests_pre_init(void) {
empty_batch_pre_init();
filter_call_init_fails_pre_init();
filter_causes_close_pre_init();
filter_context_pre_init();
filter_latency_pre_init();
filter_status_code_pre_init();
graceful_server_shutdown_pre_init();
@ -296,6 +299,7 @@ void grpc_end2end_tests(int argc, char **argv,
empty_batch(config);
filter_call_init_fails(config);
filter_causes_close(config);
filter_context(config);
filter_latency(config);
filter_status_code(config);
graceful_server_shutdown(config);
@ -440,6 +444,10 @@ void grpc_end2end_tests(int argc, char **argv,
filter_causes_close(config);
continue;
}
if (0 == strcmp("filter_context", argv[i])) {
filter_context(config);
continue;
}
if (0 == strcmp("filter_latency", argv[i])) {
filter_latency(config);
continue;

@ -124,6 +124,7 @@ END2END_TESTS = {
'empty_batch': default_test_options._replace(cpu_cost=LOWCPU),
'filter_causes_close': default_test_options._replace(cpu_cost=LOWCPU),
'filter_call_init_fails': default_test_options,
'filter_context': default_test_options,
'filter_latency': default_test_options._replace(cpu_cost=LOWCPU),
'filter_status_code': default_test_options._replace(cpu_cost=LOWCPU),
'graceful_server_shutdown': default_test_options._replace(

@ -215,6 +215,7 @@ END2END_TESTS = {
"empty_batch": _test_options(),
"filter_causes_close": _test_options(),
"filter_call_init_fails": _test_options(),
"filter_context": _test_options(),
"graceful_server_shutdown": _test_options(exclude_inproc = True),
"hpack_size": _test_options(
proxyable = False,

@ -0,0 +1,318 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "test/core/end2end/end2end_tests.h"
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <grpc/byte_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include "src/core/lib/channel/channel_stack_builder.h"
#include "src/core/lib/surface/channel_init.h"
#include "test/core/end2end/cq_verifier.h"
enum { TIMEOUT = 200000 };
static bool g_enable_filter = false;
static void* tag(intptr_t t) { return (void*)t; }
static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config,
const char* test_name,
grpc_channel_args* client_args,
grpc_channel_args* server_args) {
grpc_end2end_test_fixture f;
gpr_log(GPR_INFO, "Running test: %s/%s", test_name, config.name);
f = config.create_fixture(client_args, server_args);
config.init_server(&f, server_args);
config.init_client(&f, client_args);
return f;
}
static gpr_timespec n_seconds_from_now(int n) {
return grpc_timeout_seconds_to_deadline(n);
}
static gpr_timespec five_seconds_from_now(void) {
return n_seconds_from_now(5);
}
static void drain_cq(grpc_completion_queue* cq) {
grpc_event ev;
do {
ev = grpc_completion_queue_next(cq, five_seconds_from_now(), nullptr);
} while (ev.type != GRPC_QUEUE_SHUTDOWN);
}
static void shutdown_server(grpc_end2end_test_fixture* f) {
if (!f->server) return;
grpc_server_shutdown_and_notify(f->server, f->shutdown_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->shutdown_cq, tag(1000),
grpc_timeout_seconds_to_deadline(5),
nullptr)
.type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = nullptr;
}
static void shutdown_client(grpc_end2end_test_fixture* f) {
if (!f->client) return;
grpc_channel_destroy(f->client);
f->client = nullptr;
}
static void end_test(grpc_end2end_test_fixture* f) {
shutdown_server(f);
shutdown_client(f);
grpc_completion_queue_shutdown(f->cq);
drain_cq(f->cq);
grpc_completion_queue_destroy(f->cq);
grpc_completion_queue_destroy(f->shutdown_cq);
}
// Simple request to test that filters see a consistent view of the
// call context.
static void test_request(grpc_end2end_test_config config) {
grpc_call* c;
grpc_call* s;
grpc_slice request_payload_slice =
grpc_slice_from_copied_string("hello world");
grpc_byte_buffer* request_payload =
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
grpc_end2end_test_fixture f =
begin_test(config, "filter_context", nullptr, nullptr);
cq_verifier* cqv = cq_verifier_create(f.cq);
grpc_op ops[6];
grpc_op* op;
grpc_metadata_array initial_metadata_recv;
grpc_metadata_array trailing_metadata_recv;
grpc_metadata_array request_metadata_recv;
grpc_byte_buffer* request_payload_recv = nullptr;
grpc_call_details call_details;
grpc_status_code status;
grpc_call_error error;
grpc_slice details;
int was_cancelled = 2;
gpr_timespec deadline = five_seconds_from_now();
c = grpc_channel_create_call(f.client, nullptr, GRPC_PROPAGATE_DEFAULTS, f.cq,
grpc_slice_from_static_string("/foo"), nullptr,
deadline, nullptr);
GPR_ASSERT(c);
grpc_metadata_array_init(&initial_metadata_recv);
grpc_metadata_array_init(&trailing_metadata_recv);
grpc_metadata_array_init(&request_metadata_recv);
grpc_call_details_init(&call_details);
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->data.send_initial_metadata.metadata = nullptr;
op->flags = 0;
op->reserved = nullptr;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message.send_message = request_payload;
op->flags = 0;
op->reserved = nullptr;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op->reserved = nullptr;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op->reserved = nullptr;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->flags = 0;
op->reserved = nullptr;
op++;
error = grpc_call_start_batch(c, ops, static_cast<size_t>(op - ops), tag(1),
nullptr);
GPR_ASSERT(GRPC_CALL_OK == error);
error =
grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op->reserved = nullptr;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
grpc_slice status_string = grpc_slice_from_static_string("xyz");
op->data.send_status_from_server.status_details = &status_string;
op->flags = 0;
op->reserved = nullptr;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op->reserved = nullptr;
op++;
error = grpc_call_start_batch(s, ops, static_cast<size_t>(op - ops), tag(102),
nullptr);
GPR_ASSERT(GRPC_CALL_OK == error);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED);
GPR_ASSERT(0 == grpc_slice_str_cmp(details, "xyz"));
grpc_slice_unref(details);
grpc_metadata_array_destroy(&initial_metadata_recv);
grpc_metadata_array_destroy(&trailing_metadata_recv);
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_unref(s);
grpc_call_unref(c);
cq_verifier_destroy(cqv);
grpc_byte_buffer_destroy(request_payload);
grpc_byte_buffer_destroy(request_payload_recv);
end_test(&f);
config.tear_down_data(&f);
}
/*******************************************************************************
* Test context filter
*/
struct call_data {
grpc_call_context_element* context;
};
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
call_data* calld = static_cast<call_data*>(elem->call_data);
calld->context = args->context;
gpr_log(GPR_INFO, "init_call_elem(): context=%p", args->context);
return GRPC_ERROR_NONE;
}
static void start_transport_stream_op_batch(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
call_data* calld = static_cast<call_data*>(elem->call_data);
// If batch payload context is not null (which will happen in some
// cancellation cases), make sure we get the same context here that we
// saw in init_call_elem().
gpr_log(GPR_INFO, "start_transport_stream_op_batch(): context=%p",
batch->payload->context);
if (batch->payload->context != nullptr) {
GPR_ASSERT(calld->context == batch->payload->context);
}
grpc_call_next_op(elem, batch);
}
static void destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {}
static grpc_error* init_channel_elem(grpc_channel_element* elem,
grpc_channel_element_args* args) {
return GRPC_ERROR_NONE;
}
static void destroy_channel_elem(grpc_channel_element* elem) {}
static const grpc_channel_filter test_filter = {
start_transport_stream_op_batch,
grpc_channel_next_op,
sizeof(call_data),
init_call_elem,
grpc_call_stack_ignore_set_pollset_or_pollset_set,
destroy_call_elem,
0,
init_channel_elem,
destroy_channel_elem,
grpc_channel_next_get_info,
"filter_context"};
/*******************************************************************************
* Registration
*/
static bool maybe_add_filter(grpc_channel_stack_builder* builder, void* arg) {
grpc_channel_filter* filter = static_cast<grpc_channel_filter*>(arg);
if (g_enable_filter) {
// Want to add the filter as close to the end as possible, to make
// sure that all of the filters work well together. However, we
// can't add it at the very end, because the connected channel filter
// must be the last one. So we add it right before the last one.
grpc_channel_stack_builder_iterator* it =
grpc_channel_stack_builder_create_iterator_at_last(builder);
GPR_ASSERT(grpc_channel_stack_builder_move_prev(it));
const bool retval = grpc_channel_stack_builder_add_filter_before(
it, filter, nullptr, nullptr);
grpc_channel_stack_builder_iterator_destroy(it);
return retval;
} else {
return true;
}
}
static void init_plugin(void) {
grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MAX,
maybe_add_filter, (void*)&test_filter);
grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL, INT_MAX,
maybe_add_filter, (void*)&test_filter);
grpc_channel_init_register_stage(GRPC_CLIENT_DIRECT_CHANNEL, INT_MAX,
maybe_add_filter, (void*)&test_filter);
grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX,
maybe_add_filter, (void*)&test_filter);
}
static void destroy_plugin(void) {}
void filter_context(grpc_end2end_test_config config) {
g_enable_filter = true;
test_request(config);
g_enable_filter = false;
}
void filter_context_pre_init(void) {
grpc_register_plugin(init_plugin, destroy_plugin);
}

@ -289,6 +289,28 @@ static void test_user_data_works(void) {
grpc_shutdown();
}
static void test_user_data_works_for_allocated_md(void) {
int* ud1;
int* ud2;
grpc_mdelem md;
gpr_log(GPR_INFO, "test_user_data_works_for_allocated_md");
grpc_init();
grpc_core::ExecCtx exec_ctx;
ud1 = static_cast<int*>(gpr_malloc(sizeof(int)));
*ud1 = 1;
ud2 = static_cast<int*>(gpr_malloc(sizeof(int)));
*ud2 = 2;
md = grpc_mdelem_from_slices(grpc_slice_from_static_string("abc"),
grpc_slice_from_static_string("123"));
grpc_mdelem_set_user_data(md, gpr_free, ud1);
grpc_mdelem_set_user_data(md, gpr_free, ud2);
GPR_ASSERT(grpc_mdelem_get_user_data(md, gpr_free) == ud1);
GRPC_MDELEM_UNREF(md);
grpc_shutdown();
}
static void verify_ascii_header_size(const char* key, const char* value,
bool intern_key, bool intern_value) {
grpc_mdelem elem = grpc_mdelem_from_slices(
@ -386,6 +408,7 @@ int main(int argc, char** argv) {
test_create_many_persistant_metadata();
test_things_stick_around();
test_user_data_works();
test_user_data_works_for_allocated_md();
grpc_shutdown();
return 0;
}

@ -147,10 +147,8 @@ class InterceptRecvTrailingMetadataLoadBalancingPolicy
}
grpc_channel* CreateChannel(const char* target,
grpc_client_channel_type type,
const grpc_channel_args& args) override {
return parent_->channel_control_helper()->CreateChannel(target, type,
args);
return parent_->channel_control_helper()->CreateChannel(target, args);
}
void UpdateState(grpc_connectivity_state state, grpc_error* state_error,

@ -345,9 +345,12 @@ TEST_F(FlakyNetworkTest, ServerUnreachableWithKeepalive) {
args.SetInt(GRPC_ARG_KEEPALIVE_TIMEOUT_MS, kKeepAliveTimeoutMs);
args.SetInt(GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS, 1);
args.SetInt(GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA, 0);
args.SetInt(GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS, kReconnectBackoffMs);
// max time for a connection attempt
args.SetInt(GRPC_ARG_MIN_RECONNECT_BACKOFF_MS, kReconnectBackoffMs);
// max time between reconnect attempts
args.SetInt(GRPC_ARG_MAX_RECONNECT_BACKOFF_MS, kReconnectBackoffMs);
gpr_log(GPR_DEBUG, "FlakyNetworkTest.ServerUnreachableWithKeepalive start");
auto channel = BuildChannel("pick_first", args);
auto stub = BuildStub(channel);
// Channel should be in READY state after we send an RPC
@ -366,15 +369,18 @@ TEST_F(FlakyNetworkTest, ServerUnreachableWithKeepalive) {
});
// break network connectivity
gpr_log(GPR_DEBUG, "Adding iptables rule to drop packets");
DropPackets();
std::this_thread::sleep_for(std::chrono::milliseconds(10000));
EXPECT_TRUE(WaitForChannelNotReady(channel.get()));
// bring network interface back up
RestoreNetwork();
gpr_log(GPR_DEBUG, "Removed iptables rule to drop packets");
EXPECT_TRUE(WaitForChannelReady(channel.get()));
EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY);
shutdown.store(true);
sender.join();
gpr_log(GPR_DEBUG, "FlakyNetworkTest.ServerUnreachableWithKeepalive end");
}
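Outside the test harness, the same keepalive and reconnect-backoff knobs would be set on a channel roughly like this (a sketch with illustrative values and target; the test's BuildChannel helper does the equivalent internally):
  grpc::ChannelArguments args;
  args.SetInt(GRPC_ARG_KEEPALIVE_TIME_MS, 1000);
  args.SetInt(GRPC_ARG_KEEPALIVE_TIMEOUT_MS, 1000);
  args.SetInt(GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS, 1);
  args.SetInt(GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA, 0);
  args.SetInt(GRPC_ARG_MIN_RECONNECT_BACKOFF_MS, 2000);  // max time per connection attempt
  args.SetInt(GRPC_ARG_MAX_RECONNECT_BACKOFF_MS, 2000);  // max time between attempts
  auto channel = grpc::CreateCustomChannel(
      "localhost:50051", grpc::InsecureChannelCredentials(), args);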
//

@ -1483,6 +1483,9 @@ class SingleBalancerWithClientLoadReportingTest : public GrpclbEnd2endTest {
SingleBalancerWithClientLoadReportingTest() : GrpclbEnd2endTest(4, 1, 3) {}
};
// TODO(roth): Add test that when switching balancers, we don't include
// any calls that were sent prior to connecting to the new balancer.
TEST_F(SingleBalancerWithClientLoadReportingTest, Vanilla) {
SetNextResolutionAllBalancers();
const size_t kNumRpcsPerAddress = 100;

@ -318,30 +318,18 @@ static void FilterDestroy(void* arg, grpc_error* error) { gpr_free(arg); }
static void DoNothing(void* arg, grpc_error* error) {}
class FakeClientChannelFactory : public grpc_client_channel_factory {
class FakeClientChannelFactory : public grpc_core::ClientChannelFactory {
public:
FakeClientChannelFactory() { vtable = &vtable_; }
private:
static void NoRef(grpc_client_channel_factory* factory) {}
static void NoUnref(grpc_client_channel_factory* factory) {}
static grpc_core::Subchannel* CreateSubchannel(
grpc_client_channel_factory* factory, const grpc_channel_args* args) {
grpc_core::Subchannel* CreateSubchannel(
const grpc_channel_args* args) override {
return nullptr;
}
static grpc_channel* CreateClientChannel(grpc_client_channel_factory* factory,
const char* target,
grpc_client_channel_type type,
const grpc_channel_args* args) {
grpc_channel* CreateChannel(const char* target,
const grpc_channel_args* args) override {
return nullptr;
}
static const grpc_client_channel_factory_vtable vtable_;
};
const grpc_client_channel_factory_vtable FakeClientChannelFactory::vtable_ = {
NoRef, NoUnref, CreateSubchannel, CreateClientChannel};
static grpc_arg StringArg(const char* key, const char* value) {
grpc_arg a;
a.type = GRPC_ARG_STRING;
@ -506,13 +494,13 @@ static void BM_IsolatedFilter(benchmark::State& state) {
TrackCounters track_counters;
Fixture fixture;
std::ostringstream label;
std::vector<grpc_arg> args;
FakeClientChannelFactory fake_client_channel_factory;
args.push_back(grpc_client_channel_factory_create_channel_arg(
&fake_client_channel_factory));
args.push_back(StringArg(GRPC_ARG_SERVER_URI, "localhost"));
std::vector<grpc_arg> args = {
grpc_core::ClientChannelFactory::CreateChannelArg(
&fake_client_channel_factory),
StringArg(GRPC_ARG_SERVER_URI, "localhost"),
};
grpc_channel_args channel_args = {args.size(), &args[0]};
std::vector<const grpc_channel_filter*> filters;

@ -41,7 +41,7 @@ PYTHON=$VIRTUAL_ENV/bin/python
function at_least_one_installs() {
for file in "$@"; do
if "$PYTHON" -m pip install "$file"; then
if "$PYTHON" -m pip install --require-hashes "$file"; then
return 0
fi
done

@ -28,4 +28,4 @@ cd /var/local/git/grpc/test/cpp/end2end
# iptables is used to drop traffic between client and server
apt-get install -y iptables
bazel test --spawn_strategy=standalone --genrule_strategy=standalone --test_output=all :flaky_network_test --test_env=GRPC_VERBOSITY=debug --test_env=GRPC_TRACE=channel,client_channel,call_error,connectivity_state
bazel test --spawn_strategy=standalone --genrule_strategy=standalone --test_output=all :flaky_network_test --test_env=GRPC_VERBOSITY=debug --test_env=GRPC_TRACE=channel,client_channel,call_error,connectivity_state,tcp

@ -23,6 +23,24 @@ mkdir -p artifacts/
# and we only collect them here to deliver them to the distribtest phase.
cp -r "${EXTERNAL_GIT_ROOT}"/input_artifacts/python_*/* artifacts/ || true
strip_binary_wheel() {
WHEEL_PATH="$1"
TEMP_WHEEL_DIR=$(mktemp -d)
wheel unpack "$WHEEL_PATH" -d "$TEMP_WHEEL_DIR"
find "$TEMP_WHEEL_DIR" -name "_protoc_compiler*.so" -exec strip --strip-debug {} ";"
find "$TEMP_WHEEL_DIR" -name "cygrpc*.so" -exec strip --strip-debug {} ";"
WHEEL_FILE=$(basename "$WHEEL_PATH")
DISTRIBUTION_NAME=$(basename "$WHEEL_PATH" | cut -d '-' -f 1)
VERSION=$(basename "$WHEEL_PATH" | cut -d '-' -f 2)
wheel pack "$TEMP_WHEEL_DIR/$DISTRIBUTION_NAME-$VERSION" -d "$TEMP_WHEEL_DIR"
mv "$TEMP_WHEEL_DIR/$WHEEL_FILE" "$WHEEL_PATH"
}
for wheel in artifacts/*.whl; do
strip_binary_wheel "$wheel"
done
# TODO: all the artifact builder configurations generate a grpcio-VERSION.tar.gz
# source distribution package, and only one of them will end up
# in the artifacts/ directory. They should be all equivalent though.

@ -8869,6 +8869,7 @@
"test/core/end2end/tests/empty_batch.cc",
"test/core/end2end/tests/filter_call_init_fails.cc",
"test/core/end2end/tests/filter_causes_close.cc",
"test/core/end2end/tests/filter_context.cc",
"test/core/end2end/tests/filter_latency.cc",
"test/core/end2end/tests/filter_status_code.cc",
"test/core/end2end/tests/graceful_server_shutdown.cc",
@ -8967,6 +8968,7 @@
"test/core/end2end/tests/empty_batch.cc",
"test/core/end2end/tests/filter_call_init_fails.cc",
"test/core/end2end/tests/filter_causes_close.cc",
"test/core/end2end/tests/filter_context.cc",
"test/core/end2end/tests/filter_latency.cc",
"test/core/end2end/tests/filter_status_code.cc",
"test/core/end2end/tests/graceful_server_shutdown.cc",

File diff suppressed because it is too large