Merge github.com:grpc/grpc into chaotic-party

pull/37015/head
Craig Tiller 5 months ago
commit 7f44b37b5d
  1. 265
      doc/grpc_xds_bootstrap_format.md
  2. 143
      src/core/BUILD
  3. 2
      src/core/ext/transport/chaotic_good/client/chaotic_good_connector.cc
  4. 2
      src/core/ext/transport/chaotic_good/client/chaotic_good_connector.h
  5. 5
      src/core/handshaker/handshaker.cc
  6. 58
      src/core/lib/iomgr/tcp_posix.cc
  7. 66
      src/core/lib/iomgr/tcp_server_posix.cc
  8. 57
      src/core/lib/transport/call_filters.cc
  9. 152
      src/core/lib/transport/call_filters.h
  10. 10
      src/core/lib/transport/call_spine.h
  11. 15
      src/core/lib/transport/interception_chain.cc
  12. 60
      src/core/lib/transport/interception_chain.h
  13. 4
      src/core/load_balancing/subchannel_interface.h
  14. 2
      test/core/call/bm_client_call.cc
  15. 2
      test/core/call/client_call_test.cc
  16. 2
      test/core/call/server_call_test.cc
  17. 2
      test/core/client_channel/client_channel_test.cc
  18. 2
      test/core/client_channel/load_balanced_call_destination_test.cc
  19. 10
      test/core/transport/bm_call_spine.cc
  20. 99
      test/core/transport/call_filters_test.cc
  21. 7
      test/core/transport/call_spine_benchmarks.h
  22. 10
      test/core/transport/call_spine_test.cc
  23. 12
      test/core/transport/chaotic_good/client_transport_error_test.cc
  24. 4
      test/core/transport/chaotic_good/client_transport_test.cc
  25. 2
      test/core/transport/chaotic_good/server_transport_test.cc
  26. 37
      test/core/transport/interception_chain_test.cc
  27. 4
      test/core/transport/test_suite/transport_test.cc
  28. 1
      tools/doxygen/Doxyfile.c++
  29. 1
      tools/doxygen/Doxyfile.c++.internal
  30. 1
      tools/doxygen/Doxyfile.core
  31. 1
      tools/doxygen/Doxyfile.core.internal
  32. 1
      tools/doxygen/Doxyfile.objc
  33. 1
      tools/doxygen/Doxyfile.objc.internal
  34. 1
      tools/doxygen/Doxyfile.php

@ -0,0 +1,265 @@
# xDS Bootstrap File Format in gRPC
This document specifies the xDS bootstrap file format supported by gRPC.
## Background
gRPC expects the xDS bootstrap configuration to be specified as a JSON string.
The xDS bootstrap file location may be specified using the environment variable
`GRPC_XDS_BOOTSTRAP`. Alternatively, the bootstrap file contents may be
specified using the environment variable `GRPC_XDS_BOOTSTRAP_CONFIG`. If both
are specified, the former takes precedence.
The xDS client inside of gRPC parses the bootstrap configuration specified by
one of the above means when it is created to configure itself.
The following sections describe the bootstrap file format, including links to
gRFCs where support for appropriate fields was added.
## File Format
```
{
// The xDS server to talk to. The value is an ordered array of server
// configurations, to support failing over to a secondary xDS server if the
// primary is down.
//
// Prior to gRFC A71, all but the first entry was ignored.
"xds_servers": [
{
// A target URI string suitable for creating a gRPC channel.
"server_uri": <string containing the target URI of xds server>,
// List of channel creds; client will stop at the first type it
// supports. This field is required and must contain at least one
// channel creds type that the client supports.
//
// See section titled "Supported Channel Credentials".
"channel_creds": [
{
"type": <string containing channel cred type>,
// The "config" field is optional; it may be missing if the
// credential type does not require config parameters.
"config": <JSON object containing config for the type>
}
],
// A list of features supported by the server. New values will
// be added over time. For forward compatibility reasons, the
// client will ignore any entry in the list that it does not
// understand, regardless of type.
//
// See section titled "Supported Server Features".
"server_features": [ ... ]
}
],
// Identifies a specific gRPC instance.
"node": {
// Opaque identifier for the gRPC instance.
"id": <string>,
// Identifier for the local service cluster where the gRPC instance is
// running.
"cluster": <string>,
// Specifies where the gRPC instance is running.
"locality": {
"region": <string>,
"zone": <string>,
"sub_zone": <string>,
},
// Opaque metadata extending the node identifier.
"metadata": <JSON Object>,
},
// Map of supported certificate providers, keyed by the provider instance
// name.
// See section titled "Supported certificate providers".
"certificate_providers": {
// Certificate provider instance name, specified by the
// control plane, to fetch certificates from.
"<instance_name>": {
// Name of the plugin implementation.
"plugin_name": <string containing plugin type>,
// A JSON object containing the configuration for the plugin, whose schema
// is defined by the plugin. The "config" field is optional; it may be
// missing if the credential type does not require config parameters.
"config": <JSON object containing config for the type>
}
},
// A template for the name of the Listener resource to subscribe to for a gRPC
// server. If the token `%s` is present in the string, all instances of the
// token will be replaced with the server's listening "IP:port" (e.g.,
// "0.0.0.0:8080", "[::]:8080").
"server_listener_resource_name_template": "example/resource/%s",
// A template for the name of the Listener resource to subscribe to for a gRPC
// client channel. Used only when the channel is created with an "xds:" URI
// with no authority.
//
// If starts with "xdstp:", will be interpreted as a new-style name, in which
// case the authority of the URI will be used to select the relevant
// configuration in the "authorities" map.
//
// The token "%s", if present in this string, will be replaced with the
// service authority (i.e., the path part of the target URI used to create the
// gRPC channel). If the template starts with "xdstp:", the replaced string
// will be percent-encoded. In that case, the replacement string must include
// only characters allowed in a URI path as per RFC-3986 section 3.3 (which
// includes '/'), and all other characters must be percent-encoded.
//
// Defaults to "%s".
"client_default_listener_resource_name_template": <string>,
// A map of authority name to corresponding configuration.
//
// This is used in the following cases:
// - A gRPC client channel is created using an "xds:" URI that includes
// an authority.
// - A gRPC client channel is created using an "xds:" URI with no
// authority, but the "client_default_listener_resource_name_template"
// field turns it into an "xdstp:" URI.
// - A gRPC server is created and the
// "server_listener_resource_name_template" field is an "xdstp:" URI.
//
// In any of those cases, it is an error if the specified authority is
// not present in this map.
"authorities": {
// Entries are keyed by authority name.
// Note: If a new-style resource name has no authority, we will use
// the empty string here as the key.
"<authority_name>": {
// A template for the name of the Listener resource to subscribe
// to for a gRPC client channel. Used only when the channel is
// created using an "xds:" URI with this authority name.
//
// The token "%s", if present in this string, will be replaced
// with percent-encoded service authority (i.e., the path part of the
// target URI used to create the gRPC channel). The replacement string
// must include only characters allowed in a URI path as per RFC-3986
// section 3.3 (which includes '/'), and all other characters must be
// percent-encoded.
//
// Must start with "xdstp://<authority_name>/". If it does not,
// that is considered a bootstrap file parsing error.
//
// If not present in the bootstrap file, defaults to
// "xdstp://<authority_name>/envoy.config.listener.v3.Listener/%s".
"client_listener_resource_name_template": <string>,
// Ordered list of xDS servers to contact for this authority.
// Format is exactly the same as the top level "xds_servers" field.
//
// If the same server is listed in multiple authorities, the
// entries will be de-duped (i.e., resources for both authorities
// will be fetched on the same ADS stream).
//
// If not specified, the top-level server list is used.
"xds_servers": [ ... ]
}
}
}
```
### Supported Channel Credentials
gRPC supports the following channel credentials as part of the `channel_creds`
field of `xds_servers`.
#### Insecure credentials
- **Type Name**: `insecure`
- **Config**: Accepts no configuration
#### Google Default credentials
- **Type Name**: `google_default`
- **Config**: Accepts no configuration
#### mTLS credentials
- **Type Name**: `tls`
- **Config**: As described in [gRFC A65][a65]:
```
{
// Path to CA certificate file.
// If unset, system-wide root certs are used.
"ca_certificate_file": <string>,
// Paths to identity certificate file and private key file.
// If either of these fields are set, both must be set.
// If set, mTLS will be used; if unset, normal TLS will be used.
"certificate_file": <string>,
"private_key_file": <string>,
// How often to re-read the certificate files.
// Value is the JSON format described for a google.protobuf.Duration
// message in https://protobuf.dev/programming-guides/proto3/#json.
// If unset, defaults to "600s".
"refresh_interval": <string>
}
```
### Supported Certificate Provider Instances
gRPC supports the following Certificate Provider instances as part of the
`certificate_providers` field:
#### PEM file watcher
- **Plugin Name**: `file_watcher`
- **Config**: As described in [gRFC A29][a29]:
```
{
"certificate_file": "<path to the certificate file in PEM format>",
"private_key_file": "<path to private key file in PEM format>",
"ca_certificate_file": "<path to CA certificate file in PEM format>",
"refresh_interval": "<JSON form of google.protobuf.Duration>"
}
```
### Supported Server Features
gRPC supports the following server features in the `server_features` field
inside `xds_servers`:
- `xds_v3`: Added in [gRFC A30][a30]. Supported in older versions of gRPC. See
[here](grpc_xds_features.md) for when gRPC added support for xDS transport
protocol v3, and when support for xDS transport protocol v2 was dropped.
- `ignore_resource_deletion`: Added in [gRFC A53][a53].
### When were fields added?
| Bootstrap Field | Relevant gRFCs |
|-----------------|----------------|
| `xds_servers` | [A27][a27], [A71][a71] |
| `google_default` channel credentials | [A27][a27] |
| `insecure` channel credentials | [A27][a27] |
| `node` | [A27][a27] |
| `certificate_providers` | [A29][a29] |
| `file_watcher` certificate provider | [A29][a29] |
| `xds_servers.server_features` | [A30][a30] |
| `server_listener_resource_name_template` | [A36][a36], [A47][a47] |
| `client_default_listener_resource_name_template` | [A47][a47] |
| `authorities` | [A47][a47] |
| `tls` channel credentials | [A65][a65] |
[a27]: https://github.com/grpc/proposal/blob/master/A27-xds-global-load-balancing.md
[a29]: https://github.com/grpc/proposal/blob/master/A29-xds-tls-security.md#file_watcher-certificate-provider
[a30]: https://github.com/grpc/proposal/blob/master/A30-xds-v3.md
[a36]: https://github.com/grpc/proposal/blob/master/A36-xds-for-servers.md
[a47]: https://github.com/grpc/proposal/blob/master/A47-xds-federation.md
[a53]: https://github.com/grpc/proposal/blob/master/A53-xds-ignore-resource-deletion.md
[a65]: https://github.com/grpc/proposal/blob/master/A65-xds-mtls-creds-in-bootstrap.md#proposal
[a71]: https://github.com/grpc/proposal/blob/master/A71-xds-fallback.md

@ -5074,14 +5074,139 @@ grpc_cc_library(
)
grpc_cc_library(
name = "grpc_xds_client",
name = "xds_certificate_provider",
srcs = [
"xds/grpc/xds_certificate_provider.cc",
],
hdrs = [
"xds/grpc/xds_certificate_provider.h",
],
external_deps = [
"absl/base:core_headers",
"absl/functional:bind_front",
"absl/log:check",
"absl/log:log",
"absl/strings",
"absl/types:optional",
],
language = "c++",
tags = ["nofixdeps"],
deps = [
"channel_args",
"error",
"grpc_matchers",
"grpc_tls_credentials",
"unique_type_name",
"useful",
"//:gpr",
"//:grpc_base",
"//:ref_counted_ptr",
"//:tsi_ssl_credentials",
],
)
grpc_cc_library(
name = "xds_certificate_provider_store",
srcs = [
"lib/security/credentials/xds/xds_credentials.cc",
"xds/grpc/certificate_provider_store.cc",
],
hdrs = [
"xds/grpc/certificate_provider_store.h",
],
external_deps = [
"absl/base:core_headers",
"absl/log:log",
"absl/strings",
],
language = "c++",
tags = ["nofixdeps"],
deps = [
"certificate_provider_factory",
"certificate_provider_registry",
"grpc_tls_credentials",
"json",
"json_args",
"json_object_loader",
"unique_type_name",
"useful",
"validation_errors",
"//:config",
"//:gpr",
"//:grpc_base",
"//:orphanable",
"//:ref_counted_ptr",
],
)
grpc_cc_library(
name = "xds_credentials",
srcs = [
"lib/security/credentials/xds/xds_credentials.cc",
],
hdrs = [
"lib/security/credentials/xds/xds_credentials.h",
],
external_deps = [
"absl/status",
"absl/log:check",
"absl/log:log",
"absl/types:optional",
],
language = "c++",
tags = ["nofixdeps"],
deps = [
"channel_args",
"grpc_lb_xds_channel_args",
"grpc_matchers",
"grpc_tls_credentials",
"unique_type_name",
"useful",
"xds_certificate_provider",
"//:channel_arg_names",
"//:gpr",
"//:grpc_base",
"//:grpc_core_credentials_header",
"//:grpc_credentials_util",
"//:grpc_security_base",
"//:ref_counted_ptr",
],
)
grpc_cc_library(
name = "xds_file_watcher_certificate_provider_factory",
srcs = [
"xds/grpc/file_watcher_certificate_provider_factory.cc",
],
hdrs = [
"xds/grpc/file_watcher_certificate_provider_factory.h",
],
external_deps = [
"absl/log:log",
"absl/strings",
"absl/strings:str_format",
],
language = "c++",
tags = ["nofixdeps"],
deps = [
"certificate_provider_factory",
"grpc_tls_credentials",
"json",
"json_args",
"json_object_loader",
"time",
"validation_errors",
"//:config",
"//:gpr",
"//:grpc_base",
"//:ref_counted_ptr",
],
)
grpc_cc_library(
name = "grpc_xds_client",
srcs = [
"xds/grpc/xds_audit_logger_registry.cc",
"xds/grpc/xds_bootstrap_grpc.cc",
"xds/grpc/xds_certificate_provider.cc",
"xds/grpc/xds_client_grpc.cc",
"xds/grpc/xds_cluster.cc",
"xds/grpc/xds_cluster_specifier_plugin.cc",
@ -5099,12 +5224,8 @@ grpc_cc_library(
"xds/grpc/xds_transport_grpc.cc",
],
hdrs = [
"lib/security/credentials/xds/xds_credentials.h",
"xds/grpc/certificate_provider_store.h",
"xds/grpc/file_watcher_certificate_provider_factory.h",
"xds/grpc/xds_audit_logger_registry.h",
"xds/grpc/xds_bootstrap_grpc.h",
"xds/grpc/xds_certificate_provider.h",
"xds/grpc/xds_client_grpc.h",
"xds/grpc/xds_cluster.h",
"xds/grpc/xds_cluster_specifier_plugin.h",
@ -5249,6 +5370,10 @@ grpc_cc_library(
"upb_utils",
"useful",
"validation_errors",
"xds_certificate_provider",
"xds_certificate_provider_store",
"xds_credentials",
"xds_file_watcher_certificate_provider_factory",
"xds_type_upb",
"xds_type_upbdefs",
"//:channel",
@ -5337,6 +5462,9 @@ grpc_cc_library(
"resolved_address",
"slice_refcount",
"unique_type_name",
"xds_certificate_provider",
"xds_certificate_provider_store",
"xds_credentials",
"//:api_trace",
"//:config",
"//:debug_location",
@ -5481,6 +5609,7 @@ grpc_cc_library(
"resolved_address",
"subchannel_interface",
"validation_errors",
"xds_credentials",
"xds_dependency_manager",
"//:call_tracer",
"//:config",

@ -77,7 +77,7 @@ const int32_t kTimeoutSecs = 120;
ChaoticGoodConnector::ChaoticGoodConnector(
std::shared_ptr<grpc_event_engine::experimental::EventEngine> event_engine)
: event_engine_(std::move(event_engine)),
handshake_mgr_(std::make_shared<HandshakeManager>()) {
handshake_mgr_(MakeRefCounted<HandshakeManager>()) {
arena_->SetContext<grpc_event_engine::experimental::EventEngine>(
event_engine_.get());
}

@ -93,7 +93,7 @@ class ChaoticGoodConnector : public SubchannelConnector {
ActivityPtr connect_activity_ ABSL_GUARDED_BY(mu_);
const std::shared_ptr<grpc_event_engine::experimental::EventEngine>
event_engine_;
std::shared_ptr<HandshakeManager> handshake_mgr_;
RefCountedPtr<HandshakeManager> handshake_mgr_;
HPackCompressor hpack_compressor_;
HPackParser hpack_parser_;
absl::BitGen bitgen_;

@ -96,6 +96,11 @@ void HandshakeManager::DoHandshake(
Timestamp deadline, grpc_tcp_server_acceptor* acceptor,
absl::AnyInvocable<void(absl::StatusOr<HandshakerArgs*>)>
on_handshake_done) {
// We hold a ref until after the mutex is released, because we might
// wind up invoking on_handshake_done in another thread before we
// return from this function, and on_handshake_done might release the
// last ref to this object.
auto self = Ref();
MutexLock lock(&mu_);
CHECK_EQ(index_, 0u);
on_handshake_done_ = std::move(on_handshake_done);

@ -50,7 +50,6 @@
#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
@ -619,7 +618,7 @@ static void tcp_drop_uncovered_then_handle_write(void* arg /* grpc_tcp */,
static void done_poller(void* bp, grpc_error_handle /*error_ignored*/) {
backup_poller* p = static_cast<backup_poller*>(bp);
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "BACKUP_POLLER:%p destroy", p);
LOG(INFO) << "BACKUP_POLLER:" << p << " destroy";
}
grpc_pollset_destroy(BACKUP_POLLER_POLLSET(p));
gpr_free(p);
@ -628,7 +627,7 @@ static void done_poller(void* bp, grpc_error_handle /*error_ignored*/) {
static void run_poller(void* bp, grpc_error_handle /*error_ignored*/) {
backup_poller* p = static_cast<backup_poller*>(bp);
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "BACKUP_POLLER:%p run", p);
LOG(INFO) << "BACKUP_POLLER:" << p << " run";
}
gpr_mu_lock(p->pollset_mu);
grpc_core::Timestamp deadline =
@ -645,7 +644,7 @@ static void run_poller(void* bp, grpc_error_handle /*error_ignored*/) {
g_uncovered_notifications_pending = 0;
g_backup_poller_mu->Unlock();
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "BACKUP_POLLER:%p shutdown", p);
LOG(INFO) << "BACKUP_POLLER:" << p << " shutdown";
}
grpc_pollset_shutdown(BACKUP_POLLER_POLLSET(p),
GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p,
@ -653,7 +652,7 @@ static void run_poller(void* bp, grpc_error_handle /*error_ignored*/) {
} else {
g_backup_poller_mu->Unlock();
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "BACKUP_POLLER:%p reschedule", p);
LOG(INFO) << "BACKUP_POLLER:" << p << " reschedule";
}
grpc_core::Executor::Run(&p->run_poller, absl::OkStatus(),
grpc_core::ExecutorType::DEFAULT,
@ -670,8 +669,8 @@ static void drop_uncovered(grpc_tcp* /*tcp*/) {
g_backup_poller_mu->Unlock();
CHECK_GT(old_count, 1);
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "BACKUP_POLLER:%p uncover cnt %d->%d", p, old_count,
old_count - 1);
LOG(INFO) << "BACKUP_POLLER:" << p << " uncover cnt " << old_count << "->"
<< old_count - 1;
}
}
@ -694,7 +693,7 @@ static void cover_self(grpc_tcp* tcp) {
grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
g_backup_poller_mu->Unlock();
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "BACKUP_POLLER:%p create", p);
LOG(INFO) << "BACKUP_POLLER:" << p << " create";
}
grpc_core::Executor::Run(
GRPC_CLOSURE_INIT(&p->run_poller, run_poller, p, nullptr),
@ -706,22 +705,22 @@ static void cover_self(grpc_tcp* tcp) {
g_backup_poller_mu->Unlock();
}
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "BACKUP_POLLER:%p add %p cnt %d->%d", p, tcp,
old_count - 1, old_count);
LOG(INFO) << "BACKUP_POLLER:" << p << " add " << tcp << " cnt "
<< old_count - 1 << "->" << old_count;
}
grpc_pollset_add_fd(BACKUP_POLLER_POLLSET(p), tcp->em_fd);
}
static void notify_on_read(grpc_tcp* tcp) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "TCP:%p notify_on_read", tcp);
LOG(INFO) << "TCP:" << tcp << " notify_on_read";
}
grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_done_closure);
}
static void notify_on_write(grpc_tcp* tcp) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "TCP:%p notify_on_write", tcp);
LOG(INFO) << "TCP:" << tcp << " notify_on_write";
}
if (!grpc_event_engine_run_in_background()) {
cover_self(tcp);
@ -732,8 +731,8 @@ static void notify_on_write(grpc_tcp* tcp) {
static void tcp_drop_uncovered_then_handle_write(void* arg,
grpc_error_handle error) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "TCP:%p got_write: %s", arg,
grpc_core::StatusToString(error).c_str());
LOG(INFO) << "TCP:" << arg
<< " got_write: " << grpc_core::StatusToString(error);
}
drop_uncovered(static_cast<grpc_tcp*>(arg));
tcp_handle_write(arg, error);
@ -852,10 +851,11 @@ static void tcp_trace_read(grpc_tcp* tcp, grpc_error_handle error)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(tcp->read_mu) {
grpc_closure* cb = tcp->read_cb;
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
LOG(INFO) << "TCP:" << tcp << " call_cb " << cb << " " << cb->cb << ":"
<< cb->cb_arg;
size_t i;
gpr_log(GPR_INFO, "READ %p (peer=%s) error=%s", tcp,
tcp->peer_string.c_str(), grpc_core::StatusToString(error).c_str());
LOG(INFO) << "READ " << tcp << " (peer=" << tcp->peer_string
<< ") error=" << grpc_core::StatusToString(error);
if (ABSL_VLOG_IS_ON(2)) {
for (i = 0; i < tcp->incoming_buffer->count; i++) {
char* dump = grpc_dump_slice(tcp->incoming_buffer->slices[i],
@ -903,10 +903,8 @@ static void update_rcvlowat(grpc_tcp* tcp)
}
if (setsockopt(tcp->fd, SOL_SOCKET, SO_RCVLOWAT, &remaining,
sizeof(remaining)) != 0) {
gpr_log(GPR_ERROR, "%s",
absl::StrCat("Cannot set SO_RCVLOWAT on fd=", tcp->fd,
" err=", grpc_core::StrError(errno).c_str())
.c_str());
LOG(ERROR) << "Cannot set SO_RCVLOWAT on fd=" << tcp->fd
<< " err=" << grpc_core::StrError(errno);
return;
}
tcp->set_rcvlowat = remaining;
@ -917,7 +915,7 @@ static void update_rcvlowat(grpc_tcp* tcp)
static bool tcp_do_read(grpc_tcp* tcp, grpc_error_handle* error)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(tcp->read_mu) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "TCP:%p do_read", tcp);
LOG(INFO) << "TCP:" << tcp << " do_read";
}
struct msghdr msg;
struct iovec iov[MAX_READ_IOVEC];
@ -1130,8 +1128,8 @@ static void maybe_make_read_slices(grpc_tcp* tcp)
static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error_handle error) {
grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "TCP:%p got_read: %s", tcp,
grpc_core::StatusToString(error).c_str());
LOG(INFO) << "TCP:" << tcp
<< " got_read: " << grpc_core::StatusToString(error);
}
tcp->read_mu.Lock();
grpc_error_handle tcp_read_error;
@ -1471,9 +1469,8 @@ static bool process_errors(grpc_tcp* tcp) {
// Got a control message that is not a timestamp or zerocopy. Don't know
// how to handle this.
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO,
"unknown control message cmsg_level:%d cmsg_type:%d",
cmsg->cmsg_level, cmsg->cmsg_type);
LOG(INFO) << "unknown control message cmsg_level:" << cmsg->cmsg_level
<< " cmsg_type:" << cmsg->cmsg_type;
}
return processed_err;
}
@ -1488,8 +1485,7 @@ static void tcp_handle_error(void* arg /* grpc_tcp */,
grpc_error_handle error) {
grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "TCP:%p got_error: %s", tcp,
grpc_core::StatusToString(error).c_str());
LOG(INFO) << "TCP:" << tcp << " got_error: " << error;
}
if (!error.ok() ||
@ -1847,7 +1843,7 @@ static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
size_t i;
for (i = 0; i < buf->count; i++) {
gpr_log(GPR_INFO, "WRITE %p (peer=%s)", tcp, tcp->peer_string.c_str());
LOG(INFO) << "WRITE " << tcp << " (peer=" << tcp->peer_string << ")";
if (ABSL_VLOG_IS_ON(2)) {
char* data =
grpc_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
@ -2030,7 +2026,7 @@ grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
if (setsockopt(tcp->fd, SOL_TCP, TCP_INQ, &one, sizeof(one)) == 0) {
tcp->inq_capable = true;
} else {
gpr_log(GPR_DEBUG, "cannot set inq fd=%d errno=%d", tcp->fd, errno);
VLOG(2) << "cannot set inq fd=" << tcp->fd << " errno=" << errno;
tcp->inq_capable = false;
}
#else

@ -44,6 +44,7 @@
#include <string>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
@ -51,7 +52,6 @@
#include <grpc/event_engine/endpoint_config.h>
#include <grpc/event_engine/event_engine.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
@ -165,22 +165,21 @@ static grpc_error_handle CreateEventEngineListener(
->GetWrappedFd();
if (getpeername(fd, reinterpret_cast<struct sockaddr*>(addr.addr),
&(addr.len)) < 0) {
gpr_log(GPR_ERROR, "Failed getpeername: %s",
grpc_core::StrError(errno).c_str());
LOG(ERROR) << "Failed getpeername: "
<< grpc_core::StrError(errno);
close(fd);
return;
}
(void)grpc_set_socket_no_sigpipe_if_possible(fd);
auto addr_uri = grpc_sockaddr_to_uri(&addr);
if (!addr_uri.ok()) {
gpr_log(GPR_ERROR, "Invalid address: %s",
addr_uri.status().ToString().c_str());
LOG(ERROR) << "Invalid address: "
<< addr_uri.status().ToString();
return;
}
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO,
"SERVER_CONNECT: incoming external connection: %s",
addr_uri->c_str());
LOG(INFO) << "SERVER_CONNECT: incoming external connection: "
<< addr_uri->c_str();
}
}
read_notifier_pollset =
@ -410,8 +409,7 @@ static void on_read(void* arg, grpc_error_handle err) {
}
gpr_mu_lock(&sp->server->mu);
if (!sp->server->shutdown_listeners) {
gpr_log(GPR_ERROR, "Failed accept4: %s",
grpc_core::StrError(errno).c_str());
LOG(ERROR) << "Failed accept4: " << grpc_core::StrError(errno);
} else {
// if we have shutdown listeners, accept4 could fail, and we
// needn't notify users
@ -424,10 +422,8 @@ static void on_read(void* arg, grpc_error_handle err) {
int64_t dropped_connections_count =
num_dropped_connections.fetch_add(1, std::memory_order_relaxed) + 1;
if (dropped_connections_count % 1000 == 1) {
gpr_log(GPR_INFO,
"Dropped >= %" PRId64
" new connection attempts due to high memory pressure",
dropped_connections_count);
LOG(INFO) << "Dropped >= " << dropped_connections_count
<< " new connection attempts due to high memory pressure";
}
close(fd);
continue;
@ -441,13 +437,11 @@ static void on_read(void* arg, grpc_error_handle err) {
if (getpeername(fd, reinterpret_cast<struct sockaddr*>(addr.addr),
&(addr.len)) < 0) {
auto listener_addr_uri = grpc_sockaddr_to_uri(&sp->addr);
gpr_log(
GPR_ERROR,
"Failed getpeername: %s. Dropping the connection, and continuing "
"to listen on %s:%d.",
grpc_core::StrError(errno).c_str(),
listener_addr_uri.ok() ? listener_addr_uri->c_str() : "<unknown>",
sp->port);
LOG(ERROR) << "Failed getpeername: " << grpc_core::StrError(errno)
<< ". Dropping the connection, and continuing to listen on "
<< (listener_addr_uri.ok() ? *listener_addr_uri
: "<unknown>")
<< ":" << sp->port;
close(fd);
continue;
}
@ -463,13 +457,11 @@ static void on_read(void* arg, grpc_error_handle err) {
auto addr_uri = grpc_sockaddr_to_uri(&addr);
if (!addr_uri.ok()) {
gpr_log(GPR_ERROR, "Invalid address: %s",
addr_uri.status().ToString().c_str());
LOG(ERROR) << "Invalid address: " << addr_uri.status();
goto error;
}
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "SERVER_CONNECT: incoming connection: %s",
addr_uri->c_str());
LOG(INFO) << "SERVER_CONNECT: incoming connection: " << *addr_uri;
}
std::string name = absl::StrCat("tcp-server-connection:", addr_uri.value());
@ -549,16 +541,14 @@ static grpc_error_handle add_wildcard_addrs_to_server(grpc_tcp_server* s,
}
if (*out_port > 0) {
if (!v6_err.ok()) {
gpr_log(GPR_INFO,
"Failed to add :: listener, "
"the environment may not support IPv6: %s",
grpc_core::StatusToString(v6_err).c_str());
LOG(INFO) << "Failed to add :: listener, "
<< "the environment may not support IPv6: "
<< grpc_core::StatusToString(v6_err);
}
if (!v4_err.ok()) {
gpr_log(GPR_INFO,
"Failed to add 0.0.0.0 listener, "
"the environment may not support IPv4: %s",
grpc_core::StatusToString(v4_err).c_str());
LOG(INFO) << "Failed to add 0.0.0.0 listener, "
<< "the environment may not support IPv4: "
<< grpc_core::StatusToString(v4_err);
}
return absl::OkStatus();
} else {
@ -916,21 +906,19 @@ class ExternalConnectionHandler : public grpc_core::TcpServerFdHandler {
if (getpeername(fd, reinterpret_cast<struct sockaddr*>(addr.addr),
&(addr.len)) < 0) {
gpr_log(GPR_ERROR, "Failed getpeername: %s",
grpc_core::StrError(errno).c_str());
LOG(ERROR) << "Failed getpeername: " << grpc_core::StrError(errno);
close(fd);
return;
}
(void)grpc_set_socket_no_sigpipe_if_possible(fd);
auto addr_uri = grpc_sockaddr_to_uri(&addr);
if (!addr_uri.ok()) {
gpr_log(GPR_ERROR, "Invalid address: %s",
addr_uri.status().ToString().c_str());
LOG(ERROR) << "Invalid address: " << addr_uri.status();
return;
}
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_INFO, "SERVER_CONNECT: incoming external connection: %s",
addr_uri->c_str());
LOG(INFO) << "SERVER_CONNECT: incoming external connection: "
<< *addr_uri;
}
std::string name = absl::StrCat("tcp-server-connection:", addr_uri.value());
grpc_fd* fdobj = grpc_fd_create(fd, name.c_str(), true);

@ -23,10 +23,6 @@
namespace grpc_core {
namespace {
void* Offset(void* base, size_t amt) { return static_cast<char*>(base) + amt; }
} // namespace
namespace filters_detail {
void RunHalfClose(absl::Span<const HalfCloseOperator> ops, void* call_data) {
@ -129,39 +125,62 @@ char g_empty_call_data;
// CallFilters
CallFilters::CallFilters(ClientMetadataHandle client_initial_metadata)
: stack_(nullptr),
call_data_(nullptr),
: call_data_(nullptr),
push_client_initial_metadata_(std::move(client_initial_metadata)) {}
CallFilters::~CallFilters() {
if (call_data_ != nullptr && call_data_ != &g_empty_call_data) {
for (const auto& destructor : stack_->data_.filter_destructor) {
destructor.call_destroy(Offset(call_data_, destructor.call_offset));
for (const auto& stack : stacks_) {
for (const auto& destructor : stack.stack->data_.filter_destructor) {
destructor.call_destroy(filters_detail::Offset(
call_data_, stack.call_data_offset + destructor.call_offset));
}
}
gpr_free_aligned(call_data_);
}
}
void CallFilters::SetStack(RefCountedPtr<Stack> stack) {
void CallFilters::Start() {
CHECK_EQ(call_data_, nullptr);
stack_ = std::move(stack);
if (stack_->data_.call_data_size != 0) {
call_data_ = gpr_malloc_aligned(stack_->data_.call_data_size,
stack_->data_.call_data_alignment);
size_t call_data_alignment = 1;
for (const auto& stack : stacks_) {
call_data_alignment =
std::max(call_data_alignment, stack.stack->data_.call_data_alignment);
}
size_t call_data_size = 0;
for (auto& stack : stacks_) {
stack.call_data_offset = call_data_size;
size_t stack_call_data_size = stack.stack->data_.call_data_size;
if (stack_call_data_size % call_data_alignment != 0) {
stack_call_data_size +=
call_data_alignment - stack_call_data_size % call_data_alignment;
}
call_data_size += stack_call_data_size;
}
if (call_data_size != 0) {
call_data_ = gpr_malloc_aligned(call_data_size, call_data_alignment);
} else {
call_data_ = &g_empty_call_data;
}
for (const auto& constructor : stack_->data_.filter_constructor) {
constructor.call_init(Offset(call_data_, constructor.call_offset),
constructor.channel_data);
for (const auto& stack : stacks_) {
for (const auto& constructor : stack.stack->data_.filter_constructor) {
constructor.call_init(
filters_detail::Offset(
call_data_, stack.call_data_offset + constructor.call_offset),
constructor.channel_data);
}
}
call_state_.Start();
}
void CallFilters::Finalize(const grpc_call_final_info* final_info) {
for (auto& finalizer : stack_->data_.finalizers) {
finalizer.final(Offset(call_data_, finalizer.call_offset),
finalizer.channel_data, final_info);
for (auto& stack : stacks_) {
for (auto& finalizer : stack.stack->data_.finalizers) {
finalizer.final(
filters_detail::Offset(
call_data_, stack.call_data_offset + finalizer.call_offset),
finalizer.channel_data, final_info);
}
}
}

@ -16,6 +16,7 @@
#define GRPC_SRC_CORE_LIB_TRANSPORT_CALL_FILTERS_H
#include <cstdint>
#include <limits>
#include <memory>
#include <ostream>
#include <type_traits>
@ -121,6 +122,10 @@ struct NoInterceptor {};
namespace filters_detail {
// Advances a type-erased pointer by `amt` bytes.
// void* has no arithmetic, so we hop through char* (byte-sized units).
inline void* Offset(void* base, size_t amt) {
  char* bytes = static_cast<char*>(base);
  return bytes + amt;
}
// One call filter constructor
// Contains enough information to allocate and initialize the
// call data for one filter.
@ -871,6 +876,17 @@ struct StackData {
// (to capture ownership of channel data)
std::vector<ChannelDataDestructor> channel_data_destructors;
// True iff no filter registered any hook on this stack: no call data
// constructors/destructors, no per-op interceptors, no finalizers, and no
// channel data to destroy. Empty stacks can be skipped entirely.
bool empty() const {
  if (!filter_constructor.empty()) return false;
  if (!filter_destructor.empty()) return false;
  if (!client_initial_metadata.ops.empty()) return false;
  if (!server_initial_metadata.ops.empty()) return false;
  if (!client_to_server_messages.ops.empty()) return false;
  if (!client_to_server_half_close.empty()) return false;
  if (!server_to_client_messages.ops.empty()) return false;
  if (!server_trailing_metadata.empty()) return false;
  if (!finalizers.empty()) return false;
  return channel_data_destructors.empty();
}
// Add one filter to the list of filters, and update alignment.
// Returns the offset of the call data for this filter.
// Specifically does not update any of the layouts or finalizers.
@ -1268,7 +1284,11 @@ class CallFilters {
CallFilters(CallFilters&&) = delete;
CallFilters& operator=(CallFilters&&) = delete;
void SetStack(RefCountedPtr<Stack> stack);
// Appends one filter stack to this call. Stacks that declare no hooks at
// all are dropped here so they add no per-call cost later.
void AddStack(RefCountedPtr<Stack> stack) {
  if (!stack->data_.empty()) stacks_.emplace_back(std::move(stack));
}
void Start();
// Access client initial metadata before it's processed
ClientMetadata* unprocessed_client_initial_metadata() {
@ -1276,45 +1296,72 @@ class CallFilters {
}
private:
template <typename Output, void (CallState::*on_done)(), typename Input>
Poll<ValueOrFailure<Output>> FinishStep(
Poll<filters_detail::ResultOr<Input>> p) {
auto* r = p.value_if_ready();
if (r == nullptr) return Pending{};
(call_state_.*on_done)();
if (r->ok != nullptr) {
return ValueOrFailure<Output>{std::move(r->ok)};
}
PushServerTrailingMetadata(std::move(r->error));
return Failure{};
}
template <typename Output, typename Input,
Input(CallFilters::*input_location),
filters_detail::Layout<Input>(filters_detail::StackData::*layout),
void (CallState::*on_done)()>
auto RunExecutor() {
DCHECK_NE((this->*input_location).get(), nullptr);
filters_detail::OperationExecutor<Input> executor;
return [this, executor = std::move(executor)]() mutable {
if ((this->*input_location) != nullptr) {
return FinishStep<Output, on_done>(
executor.Start(&(stack_->data_.*layout),
std::move(this->*input_location), call_data_));
void (CallState::*on_done)(), typename StackIterator>
class Executor {
public:
Executor(CallFilters* filters, StackIterator stack_begin,
StackIterator stack_end)
: stack_current_(stack_begin),
stack_end_(stack_end),
filters_(filters) {
DCHECK_NE((filters_->*input_location).get(), nullptr);
}
Poll<ValueOrFailure<Output>> operator()() {
if ((filters_->*input_location) != nullptr) {
if (stack_current_ == stack_end_) {
DCHECK_NE((filters_->*input_location).get(), nullptr);
(filters_->call_state_.*on_done)();
return Output(std::move(filters_->*input_location));
}
return FinishStep(executor_.Start(
&(stack_current_->stack->data_.*layout),
std::move(filters_->*input_location), filters_->call_data_));
} else {
return FinishStep(executor_.Step(filters_->call_data_));
}
return FinishStep<Output, on_done>(executor.Step(call_data_));
};
}
}
private:
Poll<ValueOrFailure<Output>> FinishStep(
Poll<filters_detail::ResultOr<Input>> p) {
auto* r = p.value_if_ready();
if (r == nullptr) return Pending{};
if (r->ok != nullptr) {
++stack_current_;
if (stack_current_ == stack_end_) {
(filters_->call_state_.*on_done)();
return ValueOrFailure<Output>{std::move(r->ok)};
}
return FinishStep(
executor_.Start(&(stack_current_->stack->data_.*layout),
std::move(r->ok), filters_->call_data_));
}
(filters_->call_state_.*on_done)();
filters_->PushServerTrailingMetadata(std::move(r->error));
return Failure{};
}
StackIterator stack_current_;
StackIterator stack_end_;
CallFilters* filters_;
filters_detail::OperationExecutor<Input> executor_;
};
public:
// Client: Fetch client initial metadata
// Returns a promise that resolves to ValueOrFailure<ClientMetadataHandle>
GRPC_MUST_USE_RESULT auto PullClientInitialMetadata() {
call_state_.BeginPullClientInitialMetadata();
return RunExecutor<ClientMetadataHandle, ClientMetadataHandle,
&CallFilters::push_client_initial_metadata_,
&filters_detail::StackData::client_initial_metadata,
&CallState::FinishPullClientInitialMetadata>();
return Executor<ClientMetadataHandle, ClientMetadataHandle,
&CallFilters::push_client_initial_metadata_,
&filters_detail::StackData::client_initial_metadata,
&CallState::FinishPullClientInitialMetadata,
StacksVector::const_iterator>(this, stacks_.cbegin(),
stacks_.cend());
}
// Server: Push server initial metadata
// Returns a promise that resolves to a StatusFlag indicating success
@ -1334,12 +1381,14 @@ class CallFilters {
has_server_initial_metadata,
[this]() {
return Map(
RunExecutor<
Executor<
absl::optional<ServerMetadataHandle>,
ServerMetadataHandle,
&CallFilters::push_server_initial_metadata_,
&filters_detail::StackData::server_initial_metadata,
&CallState::FinishPullServerInitialMetadata>(),
&CallState::FinishPullServerInitialMetadata,
StacksVector::const_reverse_iterator>(
this, stacks_.crbegin(), stacks_.crend()),
[](ValueOrFailure<absl::optional<ServerMetadataHandle>> r) {
if (r.ok()) return std::move(*r);
return absl::optional<ServerMetadataHandle>{};
@ -1372,11 +1421,13 @@ class CallFilters {
return If(
message_available,
[this]() {
return RunExecutor<
return Executor<
absl::optional<MessageHandle>, MessageHandle,
&CallFilters::push_client_to_server_message_,
&filters_detail::StackData::client_to_server_messages,
&CallState::FinishPullClientToServerMessage>();
&CallState::FinishPullClientToServerMessage,
StacksVector::const_iterator>(this, stacks_.cbegin(),
stacks_.cend());
},
[]() -> ValueOrFailure<absl::optional<MessageHandle>> {
return absl::optional<MessageHandle>();
@ -1401,11 +1452,13 @@ class CallFilters {
return If(
message_available,
[this]() {
return RunExecutor<
return Executor<
absl::optional<MessageHandle>, MessageHandle,
&CallFilters::push_server_to_client_message_,
&filters_detail::StackData::server_to_client_messages,
&CallState::FinishPullServerToClientMessage>();
&CallState::FinishPullServerToClientMessage,
StacksVector::const_reverse_iterator>(
this, stacks_.crbegin(), stacks_.crend());
},
[]() -> ValueOrFailure<absl::optional<MessageHandle>> {
return absl::optional<MessageHandle>();
@ -1423,12 +1476,17 @@ class CallFilters {
return Map(
[this]() { return call_state_.PollServerTrailingMetadataAvailable(); },
[this](Empty) {
auto result = std::move(push_server_trailing_metadata_);
auto value = std::move(push_server_trailing_metadata_);
if (call_data_ != nullptr) {
for (auto it = stacks_.crbegin(); it != stacks_.crend(); ++it) {
value = filters_detail::RunServerTrailingMetadata(
it->stack->data_.server_trailing_metadata,
filters_detail::Offset(call_data_, it->call_data_offset),
std::move(value));
}
}
call_state_.FinishPullServerTrailingMetadata();
if (call_data_ == nullptr) return result;
return filters_detail::RunServerTrailingMetadata(
stack_->data_.server_trailing_metadata, call_data_,
std::move(result));
return value;
});
}
// Server: Wait for server trailing metadata to have been sent
@ -1447,7 +1505,17 @@ class CallFilters {
private:
void CancelDueToFailedPipeOperation(SourceLocation but_where = {});
RefCountedPtr<Stack> stack_;
// One filter stack added to this call, together with the byte offset of its
// per-call data within the combined call_data_ allocation (assigned during
// Start(); the sentinel max() means "not laid out yet").
struct AddedStack {
  explicit AddedStack(RefCountedPtr<Stack> stack) : stack(std::move(stack)) {}
  size_t call_data_offset = std::numeric_limits<size_t>::max();
  RefCountedPtr<Stack> stack;
};
using StacksVector = absl::InlinedVector<AddedStack, 2>;
StacksVector stacks_;
CallState call_state_;

@ -360,14 +360,12 @@ class UnstartedCallHandler {
return spine_->UnprocessedClientInitialMetadata();
}
// Helper for the very common situation in tests where we want to start a call
// with an empty filter stack.
CallHandler StartWithEmptyFilterStack() {
return StartCall(CallFilters::StackBuilder().Build());
// Queues an additional filter stack on the underlying call; stacks take
// effect once the call is started.
void AddCallStack(RefCountedPtr<CallFilters::Stack> call_filters) {
  auto& filters = spine_->call_filters();
  filters.AddStack(std::move(call_filters));
}
CallHandler StartCall(RefCountedPtr<CallFilters::Stack> call_filters) {
spine_->call_filters().SetStack(std::move(call_filters));
// Starts the call with whatever filter stacks were previously added,
// consuming this unstarted handler and returning the started CallHandler.
CallHandler StartCall() {
  auto& filters = spine_->call_filters();
  filters.Start();
  return CallHandler(std::move(spine_));
}

@ -57,7 +57,8 @@ class CallStarter final : public UnstartedCallDestination {
}
void StartCall(UnstartedCallHandler unstarted_call_handler) override {
destination_->HandleCall(unstarted_call_handler.StartCall(stack_));
unstarted_call_handler.AddCallStack(stack_);
destination_->HandleCall(unstarted_call_handler.StartCall());
}
private:
@ -78,16 +79,8 @@ class TerminalInterceptor final : public UnstartedCallDestination {
}
void StartCall(UnstartedCallHandler unstarted_call_handler) override {
unstarted_call_handler.SpawnGuarded(
"start_call",
Map(interception_chain_detail::HijackCall(unstarted_call_handler,
destination_, stack_),
[](ValueOrFailure<HijackedCall> hijacked_call) -> StatusFlag {
if (!hijacked_call.ok()) return Failure{};
ForwardCall(hijacked_call.value().original_call_handler(),
hijacked_call.value().MakeLastCall());
return Success{};
}));
unstarted_call_handler.AddCallStack(stack_);
destination_->StartCall(unstarted_call_handler);
}
private:

@ -64,56 +64,64 @@ class HijackedCall final {
CallHandler call_handler_;
};
namespace interception_chain_detail {
inline auto HijackCall(UnstartedCallHandler unstarted_call_handler,
RefCountedPtr<UnstartedCallDestination> destination,
RefCountedPtr<CallFilters::Stack> stack) {
auto call_handler = unstarted_call_handler.StartCall(stack);
return Map(
call_handler.PullClientInitialMetadata(),
[call_handler,
destination](ValueOrFailure<ClientMetadataHandle> metadata) mutable
-> ValueOrFailure<HijackedCall> {
if (!metadata.ok()) return Failure{};
return HijackedCall(std::move(metadata.value()), std::move(destination),
std::move(call_handler));
});
}
} // namespace interception_chain_detail
// A delegating UnstartedCallDestination for use as a hijacking filter.
//
// This class provides the final StartCall method, and delegates to the
// InterceptCall() method for the actual interception. It has the same semantics
// as StartCall, but affords the implementation the ability to prepare the
// UnstartedCallHandler appropriately.
//
// Implementations may look at the unprocessed initial metadata
// and decide to do one of two things:
// and decide to do one of three things:
//
// 1. It can hijack the call. Returns a HijackedCall object that can
// be used to start new calls with the same metadata.
//
// 2. It can consume the call by calling `Consume`.
//
// 3. It can pass the call through to the next interceptor by calling
// `PassThrough`.
//
// Upon the StartCall call the UnstartedCallHandler will be from the last
// *Interceptor* in the call chain (without having been processed by any
// intervening filters) -- note that this is commonly not useful (not enough
// guarantees), and so it's usually better to Hijack and examine the metadata.
class Interceptor : public UnstartedCallDestination {
public:
void StartCall(UnstartedCallHandler unstarted_call_handler) final {
unstarted_call_handler.AddCallStack(filter_stack_);
InterceptCall(std::move(unstarted_call_handler));
}
protected:
virtual void InterceptCall(UnstartedCallHandler unstarted_call_handler) = 0;
// Returns a promise that resolves to a HijackedCall instance.
// Hijacking is the process of taking over a call and starting one or more new
// ones.
auto Hijack(UnstartedCallHandler unstarted_call_handler) {
return interception_chain_detail::HijackCall(
std::move(unstarted_call_handler), wrapped_destination_, filter_stack_);
auto call_handler = unstarted_call_handler.StartCall();
return Map(call_handler.PullClientInitialMetadata(),
[call_handler, destination = wrapped_destination_](
ValueOrFailure<ClientMetadataHandle> metadata) mutable
-> ValueOrFailure<HijackedCall> {
if (!metadata.ok()) return Failure{};
return HijackedCall(std::move(metadata.value()),
std::move(destination),
std::move(call_handler));
});
}
// Consume this call - it will not be passed on to any further filters.
CallHandler Consume(UnstartedCallHandler unstarted_call_handler) {
return unstarted_call_handler.StartCall(filter_stack_);
return unstarted_call_handler.StartCall();
}
// TODO(ctiller): Consider a Passthrough() method that allows the call to be
// passed on to the next filter in the chain without any interception by the
// current filter.
// Pass through this call to the next filter.
void PassThrough(UnstartedCallHandler unstarted_call_handler) {
wrapped_destination_->StartCall(std::move(unstarted_call_handler));
}
private:
friend class InterceptionChainBuilder;

@ -76,6 +76,8 @@ class SubchannelInterface : public DualRefCounted<SubchannelInterface> {
// Cancels a connectivity state watch.
// If the watcher has already been destroyed, this is a no-op.
// TODO(roth): This interface has an ABA issue. Fix this before we
// make this API public.
virtual void CancelConnectivityStateWatch(
ConnectivityStateWatcherInterface* watcher) = 0;
@ -96,6 +98,8 @@ class SubchannelInterface : public DualRefCounted<SubchannelInterface> {
std::unique_ptr<DataWatcherInterface> watcher) = 0;
// Cancels a data watch.
// TODO(roth): This interface has an ABA issue. Fix this before we
// make this API public.
virtual void CancelDataWatcher(DataWatcherInterface* watcher) = 0;
protected:

@ -139,7 +139,7 @@ void BM_Unary(benchmark::State& state) {
// back a response.
auto unstarted_handler = helper.TakeHandler();
unstarted_handler.SpawnInfallible("run_handler", [&]() mutable {
auto handler = unstarted_handler.StartWithEmptyFilterStack();
auto handler = unstarted_handler.StartCall();
handler.PushServerInitialMetadata(Arena::MakePooled<ServerMetadata>());
auto response =
Arena::MakePooled<Message>(SliceBuffer(response_payload.Copy()), 0);

@ -119,7 +119,7 @@ class ClientCallTest : public YodelTest {
void Orphaned() override {}
void StartCall(UnstartedCallHandler handler) override {
CHECK(!test_->handler_.has_value());
test_->handler_.emplace(handler.StartWithEmptyFilterStack());
test_->handler_.emplace(handler.StartCall());
}
private:

@ -51,7 +51,7 @@ class ServerCallTest : public YodelTest {
MakeCallPair(std::move(client_initial_metadata), std::move(arena));
call.initiator.SpawnGuarded(
"initial_metadata",
[this, handler = call.handler.StartWithEmptyFilterStack()]() mutable {
[this, handler = call.handler.StartCall()]() mutable {
return TrySeq(
handler.PullClientInitialMetadata(),
[this,

@ -127,7 +127,7 @@ class ClientChannelTest : public YodelTest {
class TestCallDestination final : public UnstartedCallDestination {
public:
void StartCall(UnstartedCallHandler unstarted_call_handler) override {
handlers_.push(unstarted_call_handler.StartWithEmptyFilterStack());
handlers_.push(unstarted_call_handler.StartCall());
}
absl::optional<CallHandler> PopHandler() {

@ -75,7 +75,7 @@ class LoadBalancedCallDestinationTest : public YodelTest {
class TestCallDestination final : public UnstartedCallDestination {
public:
void StartCall(UnstartedCallHandler unstarted_call_handler) override {
handlers_.push(unstarted_call_handler.StartWithEmptyFilterStack());
handlers_.push(unstarted_call_handler.StartCall());
}
absl::optional<CallHandler> PopHandler() {

@ -30,7 +30,7 @@ class CallSpineFixture {
event_engine_.get());
auto p =
MakeCallPair(Arena::MakePooled<ClientMetadata>(), std::move(arena));
return {std::move(p.initiator), p.handler.StartCall(stack_)};
return {std::move(p.initiator), p.handler.StartCall()};
}
ServerMetadataHandle MakeServerInitialMetadata() {
@ -51,8 +51,6 @@ class CallSpineFixture {
ResourceQuota::Default()->memory_quota()->CreateMemoryAllocator(
"test-allocator"),
1024);
RefCountedPtr<CallFilters::Stack> stack_ =
CallFilters::StackBuilder().Build();
};
GRPC_CALL_SPINE_BENCHMARK(CallSpineFixture);
@ -70,7 +68,7 @@ class ForwardCallFixture {
auto p2 =
MakeCallPair(Arena::MakePooled<ClientMetadata>(), std::move(arena2));
p1.handler.SpawnInfallible("initial_metadata", [&]() {
auto p1_handler = p1.handler.StartCall(stack_);
auto p1_handler = p1.handler.StartCall();
return Map(
p1_handler.PullClientInitialMetadata(),
[p1_handler, &p2](ValueOrFailure<ClientMetadataHandle> md) mutable {
@ -81,7 +79,7 @@ class ForwardCallFixture {
});
absl::optional<CallHandler> p2_handler;
p2.handler.SpawnInfallible("start", [&]() {
p2_handler = p2.handler.StartCall(stack_);
p2_handler = p2.handler.StartCall();
return Empty{};
});
return {std::move(p1.initiator), std::move(*p2_handler)};
@ -105,8 +103,6 @@ class ForwardCallFixture {
ResourceQuota::Default()->memory_quota()->CreateMemoryAllocator(
"test-allocator"),
1024);
RefCountedPtr<CallFilters::Stack> stack_ =
CallFilters::StackBuilder().Build();
};
GRPC_CALL_SPINE_BENCHMARK(ForwardCallFixture);

@ -27,9 +27,6 @@ using testing::StrictMock;
namespace grpc_core {
namespace {
// Offset a void pointer by a given amount
// Test helper: step a void pointer forward by `amt` bytes
// (arithmetic is done on char* since void* supports none).
void* Offset(void* base, size_t amt) {
  return static_cast<char*>(base) + amt;
}
// A mock activity that can be activated and deactivated.
class MockActivity : public Activity, public Wakeable {
public:
@ -1165,7 +1162,101 @@ TEST(CallFiltersTest, UnaryCall) {
builder.Add(&f2);
auto arena = SimpleArenaAllocator()->MakeArena();
CallFilters filters(Arena::MakePooled<ClientMetadata>());
filters.SetStack(builder.Build());
filters.AddStack(builder.Build());
filters.Start();
promise_detail::Context<Arena> ctx(arena.get());
StrictMock<MockActivity> activity;
activity.Activate();
// Pull client initial metadata
auto pull_client_initial_metadata = filters.PullClientInitialMetadata();
EXPECT_THAT(pull_client_initial_metadata(), IsReady());
Mock::VerifyAndClearExpectations(&activity);
// Push client to server message
auto push_client_to_server_message = filters.PushClientToServerMessage(
Arena::MakePooled<Message>(SliceBuffer(), 0));
EXPECT_THAT(push_client_to_server_message(), IsPending());
auto pull_client_to_server_message = filters.PullClientToServerMessage();
// Pull client to server message, expect a wakeup
EXPECT_WAKEUP(activity,
EXPECT_THAT(pull_client_to_server_message(), IsReady()));
// Push should be done
EXPECT_THAT(push_client_to_server_message(), IsReady(Success{}));
// Push server initial metadata
filters.PushServerInitialMetadata(Arena::MakePooled<ServerMetadata>());
auto pull_server_initial_metadata = filters.PullServerInitialMetadata();
// Pull server initial metadata
EXPECT_THAT(pull_server_initial_metadata(), IsReady());
Mock::VerifyAndClearExpectations(&activity);
// Push server to client message
auto push_server_to_client_message = filters.PushServerToClientMessage(
Arena::MakePooled<Message>(SliceBuffer(), 0));
EXPECT_THAT(push_server_to_client_message(), IsPending());
auto pull_server_to_client_message = filters.PullServerToClientMessage();
// Pull server to client message, expect a wakeup
EXPECT_WAKEUP(activity,
EXPECT_THAT(pull_server_to_client_message(), IsReady()));
// Push should be done
EXPECT_THAT(push_server_to_client_message(), IsReady(Success{}));
// Push server trailing metadata
filters.PushServerTrailingMetadata(Arena::MakePooled<ServerMetadata>());
// Pull server trailing metadata
auto pull_server_trailing_metadata = filters.PullServerTrailingMetadata();
// Should be done
EXPECT_THAT(pull_server_trailing_metadata(), IsReady());
filters.Finalize(nullptr);
EXPECT_THAT(steps,
::testing::ElementsAre(
"f1:OnClientInitialMetadata", "f2:OnClientInitialMetadata",
"f1:OnClientToServerMessage", "f2:OnClientToServerMessage",
"f2:OnServerInitialMetadata", "f1:OnServerInitialMetadata",
"f2:OnServerToClientMessage", "f1:OnServerToClientMessage",
"f2:OnServerTrailingMetadata", "f1:OnServerTrailingMetadata",
"f1:OnFinalize", "f2:OnFinalize"));
}
TEST(CallFiltersTest, UnaryCallWithMultiStack) {
struct Filter {
struct Call {
void OnClientInitialMetadata(ClientMetadata&, Filter* f) {
f->steps.push_back(absl::StrCat(f->label, ":OnClientInitialMetadata"));
}
void OnServerInitialMetadata(ServerMetadata&, Filter* f) {
f->steps.push_back(absl::StrCat(f->label, ":OnServerInitialMetadata"));
}
void OnClientToServerMessage(Message&, Filter* f) {
f->steps.push_back(absl::StrCat(f->label, ":OnClientToServerMessage"));
}
void OnClientToServerHalfClose(Filter* f) {
f->steps.push_back(
absl::StrCat(f->label, ":OnClientToServerHalfClose"));
}
void OnServerToClientMessage(Message&, Filter* f) {
f->steps.push_back(absl::StrCat(f->label, ":OnServerToClientMessage"));
}
void OnServerTrailingMetadata(ServerMetadata&, Filter* f) {
f->steps.push_back(absl::StrCat(f->label, ":OnServerTrailingMetadata"));
}
void OnFinalize(const grpc_call_final_info*, Filter* f) {
f->steps.push_back(absl::StrCat(f->label, ":OnFinalize"));
}
std::unique_ptr<int> i = std::make_unique<int>(3);
};
const std::string label;
std::vector<std::string>& steps;
};
std::vector<std::string> steps;
Filter f1{"f1", steps};
Filter f2{"f2", steps};
CallFilters::StackBuilder builder1;
CallFilters::StackBuilder builder2;
builder1.Add(&f1);
builder2.Add(&f2);
auto arena = SimpleArenaAllocator()->MakeArena();
CallFilters filters(Arena::MakePooled<ClientMetadata>());
filters.AddStack(builder1.Build());
filters.AddStack(builder2.Build());
filters.Start();
promise_detail::Context<Arena> ctx(arena.get());
StrictMock<MockActivity> activity;
activity.Activate();

@ -243,7 +243,7 @@ class FilterFixture {
event_engine_.get());
auto p =
MakeCallPair(traits_.MakeClientInitialMetadata(), std::move(arena));
return {std::move(p.initiator), p.handler.StartCall(stack_)};
return {std::move(p.initiator), p.handler.StartCall()};
}
ServerMetadataHandle MakeServerInitialMetadata() {
@ -291,7 +291,7 @@ class UnstartedCallDestinationFixture {
absl::optional<CallHandler> started_handler;
Notification started;
handler.SpawnInfallible("handler_setup", [&]() {
started_handler = handler.StartCall(stack_);
started_handler = handler.StartCall();
started.Notify();
return Empty{};
});
@ -303,7 +303,6 @@ class UnstartedCallDestinationFixture {
~UnstartedCallDestinationFixture() {
// TODO(ctiller): entire destructor can be deleted once ExecCtx is gone.
ExecCtx exec_ctx;
stack_.reset();
top_destination_.reset();
bottom_destination_.reset();
arena_allocator_.reset();
@ -360,8 +359,6 @@ class UnstartedCallDestinationFixture {
MakeRefCounted<SinkDestination>();
RefCountedPtr<UnstartedCallDestination> top_destination_ =
traits_->CreateCallDestination(bottom_destination_);
RefCountedPtr<CallFilters::Stack> stack_ =
CallFilters::StackBuilder().Build();
};
} // namespace grpc_core

@ -146,13 +146,13 @@ void CallSpineTest::UnaryRequest(CallInitiator initiator, CallHandler handler) {
CALL_SPINE_TEST(UnaryRequest) {
auto call = MakeCall(MakeClientInitialMetadata());
UnaryRequest(call.initiator, call.handler.StartWithEmptyFilterStack());
UnaryRequest(call.initiator, call.handler.StartCall());
WaitForAllPendingWork();
}
CALL_SPINE_TEST(UnaryRequestThroughForwardCall) {
auto call1 = MakeCall(MakeClientInitialMetadata());
auto handler = call1.handler.StartWithEmptyFilterStack();
auto handler = call1.handler.StartCall();
SpawnTestSeq(
call1.initiator, "initiator",
[handler]() mutable { return handler.PullClientInitialMetadata(); },
@ -161,7 +161,7 @@ CALL_SPINE_TEST(UnaryRequestThroughForwardCall) {
EXPECT_TRUE(md.ok());
auto call2 = MakeCall(std::move(md.value()));
ForwardCall(handler, call2.initiator);
UnaryRequest(initiator, call2.handler.StartWithEmptyFilterStack());
UnaryRequest(initiator, call2.handler.StartCall());
return Empty{};
});
WaitForAllPendingWork();
@ -169,7 +169,7 @@ CALL_SPINE_TEST(UnaryRequestThroughForwardCall) {
CALL_SPINE_TEST(UnaryRequestThroughForwardCallWithServerTrailingMetadataHook) {
auto call1 = MakeCall(MakeClientInitialMetadata());
auto handler = call1.handler.StartWithEmptyFilterStack();
auto handler = call1.handler.StartCall();
bool got_md = false;
SpawnTestSeq(
call1.initiator, "initiator",
@ -180,7 +180,7 @@ CALL_SPINE_TEST(UnaryRequestThroughForwardCallWithServerTrailingMetadataHook) {
auto call2 = MakeCall(std::move(md.value()));
ForwardCall(handler, call2.initiator,
[&got_md](ServerMetadata&) { got_md = true; });
UnaryRequest(initiator, call2.handler.StartWithEmptyFilterStack());
UnaryRequest(initiator, call2.handler.StartCall());
return Empty{};
});
WaitForAllPendingWork();

@ -191,7 +191,7 @@ TEST_F(ClientTransportTest, AddOneStreamWithWriteFailed) {
std::move(data_endpoint.promise_endpoint), MakeChannelArgs(),
event_engine(), HPackParser(), HPackCompressor());
auto call = MakeCall(TestInitialMetadata());
transport->StartCall(call.handler.StartWithEmptyFilterStack());
transport->StartCall(call.handler.StartCall());
call.initiator.SpawnGuarded("test-send",
[initiator = call.initiator]() mutable {
return SendClientToServerMessages(initiator, 1);
@ -235,7 +235,7 @@ TEST_F(ClientTransportTest, AddOneStreamWithReadFailed) {
std::move(data_endpoint.promise_endpoint), MakeChannelArgs(),
event_engine(), HPackParser(), HPackCompressor());
auto call = MakeCall(TestInitialMetadata());
transport->StartCall(call.handler.StartWithEmptyFilterStack());
transport->StartCall(call.handler.StartCall());
call.initiator.SpawnGuarded("test-send",
[initiator = call.initiator]() mutable {
return SendClientToServerMessages(initiator, 1);
@ -287,9 +287,9 @@ TEST_F(ClientTransportTest, AddMultipleStreamWithWriteFailed) {
std::move(data_endpoint.promise_endpoint), MakeChannelArgs(),
event_engine(), HPackParser(), HPackCompressor());
auto call1 = MakeCall(TestInitialMetadata());
transport->StartCall(call1.handler.StartWithEmptyFilterStack());
transport->StartCall(call1.handler.StartCall());
auto call2 = MakeCall(TestInitialMetadata());
transport->StartCall(call2.handler.StartWithEmptyFilterStack());
transport->StartCall(call2.handler.StartCall());
call1.initiator.SpawnGuarded(
"test-send-1", [initiator = call1.initiator]() mutable {
return SendClientToServerMessages(initiator, 1);
@ -355,9 +355,9 @@ TEST_F(ClientTransportTest, AddMultipleStreamWithReadFailed) {
std::move(data_endpoint.promise_endpoint), MakeChannelArgs(),
event_engine(), HPackParser(), HPackCompressor());
auto call1 = MakeCall(TestInitialMetadata());
transport->StartCall(call1.handler.StartWithEmptyFilterStack());
transport->StartCall(call1.handler.StartCall());
auto call2 = MakeCall(TestInitialMetadata());
transport->StartCall(call2.handler.StartWithEmptyFilterStack());
transport->StartCall(call2.handler.StartCall());
call1.initiator.SpawnGuarded(
"test-send", [initiator = call1.initiator]() mutable {
return SendClientToServerMessages(initiator, 1);

@ -120,7 +120,7 @@ TEST_F(TransportTest, AddOneStream) {
std::move(data_endpoint.promise_endpoint), MakeChannelArgs(),
event_engine(), HPackParser(), HPackCompressor());
auto call = MakeCall(TestInitialMetadata());
transport->StartCall(call.handler.StartWithEmptyFilterStack());
transport->StartCall(call.handler.StartCall());
StrictMock<MockFunction<void()>> on_done;
EXPECT_CALL(on_done, Call());
control_endpoint.ExpectWrite(
@ -206,7 +206,7 @@ TEST_F(TransportTest, AddOneStreamMultipleMessages) {
std::move(data_endpoint.promise_endpoint), MakeChannelArgs(),
event_engine(), HPackParser(), HPackCompressor());
auto call = MakeCall(TestInitialMetadata());
transport->StartCall(call.handler.StartWithEmptyFilterStack());
transport->StartCall(call.handler.StartCall());
StrictMock<MockFunction<void()>> on_done;
EXPECT_CALL(on_done, Call());
control_endpoint.ExpectWrite(

@ -125,7 +125,7 @@ TEST_F(TransportTest, ReadAndWriteOneMessage) {
.get_pointer(HttpPathMetadata())
->as_string_view(),
"/demo.Service/Step");
auto handler = unstarted_call_handler.StartWithEmptyFilterStack();
auto handler = unstarted_call_handler.StartCall();
handler.SpawnInfallible("test-io", [&on_done, handler]() mutable {
return Seq(
handler.PullClientInitialMetadata(),

@ -158,7 +158,7 @@ const NoInterceptor FailsToInstantiateFilter<I>::Call::OnFinalize;
template <int I>
class TestConsumingInterceptor final : public Interceptor {
public:
void StartCall(UnstartedCallHandler unstarted_call_handler) override {
void InterceptCall(UnstartedCallHandler unstarted_call_handler) override {
Consume(std::move(unstarted_call_handler))
.PushServerTrailingMetadata(
ServerMetadataFromStatus(absl::InternalError("👊 consumed")));
@ -171,13 +171,30 @@ class TestConsumingInterceptor final : public Interceptor {
}
};
///////////////////////////////////////////////////////////////////////////////
// Test call interceptor - passes through calls
// Interceptor that neither consumes nor hijacks: it hands every unstarted
// call straight to the next destination via PassThrough(). Used to verify
// that a pass-through interceptor is invisible to the call.
template <int I>
class TestPassThroughInterceptor final : public Interceptor {
 public:
  void InterceptCall(UnstartedCallHandler unstarted_call_handler) override {
    PassThrough(std::move(unstarted_call_handler));
  }
  void Orphaned() override {}
  // Factory hook used by the interception chain builder; MaybeLogCreation
  // presumably records the construction so tests can observe it — see the
  // other test interceptors in this file for the same pattern.
  static absl::StatusOr<RefCountedPtr<TestPassThroughInterceptor<I>>> Create(
      const ChannelArgs& channel_args, ChannelFilter::Args filter_args) {
    MaybeLogCreation(channel_args, filter_args, I);
    return MakeRefCounted<TestPassThroughInterceptor<I>>();
  }
};
///////////////////////////////////////////////////////////////////////////////
// Test call interceptor - fails to instantiate
template <int I>
class TestFailingInterceptor final : public Interceptor {
public:
void StartCall(UnstartedCallHandler unstarted_call_handler) override {
void InterceptCall(UnstartedCallHandler unstarted_call_handler) override {
Crash("unreachable");
}
void Orphaned() override {}
@ -194,7 +211,7 @@ class TestFailingInterceptor final : public Interceptor {
template <int I>
class TestHijackingInterceptor final : public Interceptor {
public:
void StartCall(UnstartedCallHandler unstarted_call_handler) override {
void InterceptCall(UnstartedCallHandler unstarted_call_handler) override {
unstarted_call_handler.SpawnInfallible(
"hijack", [this, unstarted_call_handler]() mutable {
return Map(Hijack(std::move(unstarted_call_handler)),
@ -300,6 +317,20 @@ TEST_F(InterceptionChainTest, Empty) {
EXPECT_NE(finished_call.client_metadata, nullptr);
}
// A chain containing only a pass-through interceptor should behave as if the
// chain were empty: the call reaches the test destination and finishes with
// the INTERNAL / "👊 cancelled" outcome that destination produces.
TEST_F(InterceptionChainTest, PassThrough) {
  auto r = InterceptionChainBuilder(ChannelArgs())
               .Add<TestPassThroughInterceptor<1>>()
               .Build(destination());
  ASSERT_TRUE(r.ok()) << r.status();
  auto finished_call = RunCall(r.value().get());
  EXPECT_EQ(finished_call.server_metadata->get(GrpcStatusMetadata()),
            GRPC_STATUS_INTERNAL);
  EXPECT_EQ(finished_call.server_metadata->get_pointer(GrpcMessageMetadata())
                ->as_string_view(),
            "👊 cancelled");
  // Client initial metadata must have made it through the pass-through.
  EXPECT_NE(finished_call.client_metadata, nullptr);
}
TEST_F(InterceptionChainTest, Consumed) {
auto r = InterceptionChainBuilder(ChannelArgs())
.Add<TestConsumingInterceptor<1>>()

@ -34,7 +34,7 @@ CallInitiator TransportTest::CreateCall(
call.handler.SpawnInfallible(
"start-call", [this, handler = call.handler]() mutable {
transport_pair_.client->client_transport()->StartCall(
handler.StartWithEmptyFilterStack());
handler.StartCall());
return Empty{};
});
return std::move(call.initiator);
@ -54,7 +54,7 @@ CallHandler TransportTest::TickUntilServerCall() {
void TransportTest::ServerCallDestination::StartCall(
UnstartedCallHandler handler) {
handlers_.push(handler.StartWithEmptyFilterStack());
handlers_.push(handler.StartCall());
}
absl::optional<CallHandler> TransportTest::ServerCallDestination::PopHandler() {

@ -779,6 +779,7 @@ doc/fail_fast.md \
doc/fork_support.md \
doc/g_stands_for.md \
doc/grpc_release_schedule.md \
doc/grpc_xds_bootstrap_format.md \
doc/grpc_xds_features.md \
doc/health-checking.md \
doc/http-grpc-status-mapping.md \

@ -779,6 +779,7 @@ doc/fail_fast.md \
doc/fork_support.md \
doc/g_stands_for.md \
doc/grpc_release_schedule.md \
doc/grpc_xds_bootstrap_format.md \
doc/grpc_xds_features.md \
doc/health-checking.md \
doc/http-grpc-status-mapping.md \

@ -786,6 +786,7 @@ doc/fail_fast.md \
doc/fork_support.md \
doc/g_stands_for.md \
doc/grpc_release_schedule.md \
doc/grpc_xds_bootstrap_format.md \
doc/grpc_xds_features.md \
doc/health-checking.md \
doc/http-grpc-status-mapping.md \

@ -786,6 +786,7 @@ doc/fail_fast.md \
doc/fork_support.md \
doc/g_stands_for.md \
doc/grpc_release_schedule.md \
doc/grpc_xds_bootstrap_format.md \
doc/grpc_xds_features.md \
doc/health-checking.md \
doc/http-grpc-status-mapping.md \

@ -777,6 +777,7 @@ doc/fail_fast.md \
doc/fork_support.md \
doc/g_stands_for.md \
doc/grpc_release_schedule.md \
doc/grpc_xds_bootstrap_format.md \
doc/grpc_xds_features.md \
doc/health-checking.md \
doc/http-grpc-status-mapping.md \

@ -777,6 +777,7 @@ doc/fail_fast.md \
doc/fork_support.md \
doc/g_stands_for.md \
doc/grpc_release_schedule.md \
doc/grpc_xds_bootstrap_format.md \
doc/grpc_xds_features.md \
doc/health-checking.md \
doc/http-grpc-status-mapping.md \

@ -777,6 +777,7 @@ doc/fail_fast.md \
doc/fork_support.md \
doc/g_stands_for.md \
doc/grpc_release_schedule.md \
doc/grpc_xds_bootstrap_format.md \
doc/grpc_xds_features.md \
doc/health-checking.md \
doc/http-grpc-status-mapping.md \

Loading…
Cancel
Save