Merge branch 'master' into keepalivethrottling

pull/23313/head
Yash Tibrewal 5 years ago
commit 26cb8260c1
37 changed files:
  1. BUILD (20)
  2. src/core/ext/filters/client_channel/lb_policy/xds/eds.cc (1)
  3. src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc (388)
  4. src/core/ext/transport/chttp2/client/chttp2_connector.cc (9)
  5. src/core/ext/upb-generated/third_party/istio/security/proto/providers/google/meshca.upb.c (43)
  6. src/core/ext/upb-generated/third_party/istio/security/proto/providers/google/meshca.upb.h (103)
  7. src/core/ext/xds/xds_api.cc (318)
  8. src/core/ext/xds/xds_api.h (48)
  9. src/core/ext/xds/xds_client.cc (366)
  10. src/core/ext/xds/xds_client.h (51)
  11. src/core/lib/surface/channel.cc (48)
  12. src/core/lib/surface/channel.h (11)
  13. src/ruby/lib/grpc/generic/client_stub.rb (2)
  14. src/ruby/spec/user_agent_spec.rb (74)
  15. templates/tools/dockerfile/cmake.include (5)
  16. templates/tools/dockerfile/test/cxx_buster_x64/Dockerfile.template (1)
  17. templates/tools/dockerfile/test/cxx_sanitizers_jessie_x64/Dockerfile.template (32)
  18. templates/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile.template (25)
  19. templates/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile.template (1)
  20. templates/tools/dockerfile/test/cxx_ubuntu1804_x64/Dockerfile.template (1)
  21. test/core/iomgr/stranded_event_test.cc (16)
  22. test/cpp/end2end/client_callback_end2end_test.cc (94)
  23. test/cpp/naming/cancel_ares_query_test.cc (18)
  24. third_party/istio/security/proto/providers/google/meshca.proto (51)
  25. tools/buildgen/plugins/make_fuzzer_tests.py (2)
  26. tools/codegen/core/gen_upb_api.sh (1)
  27. tools/dockerfile/test/cxx_buster_x64/Dockerfile (6)
  28. tools/dockerfile/test/cxx_sanitizers_jessie_x64/Dockerfile (81)
  29. tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile (79)
  30. tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile (6)
  31. tools/dockerfile/test/cxx_ubuntu1804_x64/Dockerfile (6)
  32. tools/run_tests/generated/tests.json (5676)
  33. tools/run_tests/helper_scripts/pre_build_cmake.bat (5)
  34. tools/run_tests/helper_scripts/pre_build_cmake.sh (2)
  35. tools/run_tests/run_tests.py (147)
  36. tools/run_tests/run_tests_matrix.py (24)
  37. tools/run_tests/run_xds_tests.py (51)

BUILD (20)

@ -2830,6 +2830,26 @@ grpc_cc_library(
],
)
# Once upb code-gen issue is resolved, replace meshca_upb with this.
# meshca_upb_proto_library(
# name = "meshca_upb",
# deps = ["//third_party/istio/security/proto/providers/google:meshca_proto"],
# )
grpc_cc_library(
name = "meshca_upb",
srcs = [
"src/core/ext/upb-generated/third_party/istio/security/proto/providers/google/meshca.upb.c",
],
hdrs = [
"src/core/ext/upb-generated/third_party/istio/security/proto/providers/google/meshca.upb.h",
],
language = "c++",
deps = [
"google_api_upb",
],
)
# Once upb code-gen issue is resolved, replace alts_upb with this.
# grpc_upb_proto_library(
# name = "alts_upb",

src/core/ext/filters/client_channel/lb_policy/xds/eds.cc (1)

@ -457,6 +457,7 @@ void EdsLb::UpdateLocked(UpdateArgs args) {
grpc_error* error = GRPC_ERROR_NONE;
xds_client_ = MakeOrphanable<XdsClient>(
work_serializer(), interested_parties(), GetEdsResourceName(),
std::vector<grpc_resolved_address>{},
nullptr /* service config watcher */, *args_, &error);
// TODO(roth): If we decide that we care about EDS-only mode, add
// proper error handling here.

src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc (388)

@ -18,6 +18,8 @@
#include <grpc/support/port_platform.h>
#include "absl/strings/str_join.h"
#include "src/core/ext/filters/client_channel/config_selector.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
#include "src/core/ext/xds/xds_client.h"
@ -68,12 +70,11 @@ class XdsResolver : public Resolver {
}
private:
class ServiceConfigWatcher : public XdsClient::ServiceConfigWatcherInterface {
class ListenerWatcher : public XdsClient::ListenerWatcherInterface {
public:
explicit ServiceConfigWatcher(RefCountedPtr<XdsResolver> resolver)
explicit ListenerWatcher(RefCountedPtr<XdsResolver> resolver)
: resolver_(std::move(resolver)) {}
void OnServiceConfigChanged(
RefCountedPtr<ServiceConfig> service_config) override;
void OnListenerChanged(XdsApi::LdsUpdate listener_data) override;
void OnError(grpc_error* error) override;
void OnResourceDoesNotExist() override;
@ -88,32 +89,75 @@ class XdsResolver : public Resolver {
}
};
// Returns the weighted_clusters action name to use from
// weighted_cluster_index_map_ for a WeightedClusters route action.
std::string WeightedClustersActionName(
const std::vector<XdsApi::RdsUpdate::RdsRoute::ClusterWeight>&
weighted_clusters);
// Updates weighted_cluster_index_map_ that will
// determine the names of the WeightedCluster actions for the current update.
void UpdateWeightedClusterIndexMap(const XdsApi::RdsUpdate& rds_update);
// Create the service config generated by the RdsUpdate.
grpc_error* CreateServiceConfig(const XdsApi::RdsUpdate& rds_update,
RefCountedPtr<ServiceConfig>* service_config);
std::string server_name_;
const grpc_channel_args* args_;
grpc_pollset_set* interested_parties_;
OrphanablePtr<XdsClient> xds_client_;
RefCountedPtr<XdsConfigSelector> config_selector_;
// 2-level map to store WeightedCluster action names.
// Top level map is keyed by cluster names without weight like a_b_c; bottom
// level map is keyed by cluster names + weights like a10_b50_c40.
struct ClusterNamesInfo {
uint64_t next_index = 0;
std::map<std::string /*cluster names + weights*/,
uint64_t /*policy index number*/>
cluster_weights_map;
};
using WeightedClusterIndexMap =
std::map<std::string /*cluster names*/, ClusterNamesInfo>;
// Cache of action names for WeightedCluster targets in the current
// service config.
WeightedClusterIndexMap weighted_cluster_index_map_;
};
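
To make the naming scheme above concrete, here is a minimal standalone sketch (plain std:: types only, not the gRPC classes) of how a two-level map like `weighted_cluster_index_map_` can hand out stable action names of the form `<cluster_names_key>_<index>`. The reuse logic in `UpdateWeightedClusterIndexMap()` further below exists so that an unchanged weight split keeps its action name across updates.

```cpp
// Standalone sketch of the two-level index map described above.
// Top-level key: cluster names only ("a_b_c"); bottom-level key: names plus
// weights as built by GetWeightedClustersKey ("a_10_b_50_c_40").
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

struct ClusterNamesInfo {
  uint64_t next_index = 0;
  std::map<std::string, uint64_t> cluster_weights_map;
};

using WeightedClusterIndexMap = std::map<std::string, ClusterNamesInfo>;

// Returns a stable action name such as "a_b_c_0", allocating a new index only
// when this particular weights combination has not been seen before.
std::string ActionName(WeightedClusterIndexMap& index_map,
                       const std::string& cluster_names_key,
                       const std::string& cluster_weights_key) {
  ClusterNamesInfo& info = index_map[cluster_names_key];
  auto it = info.cluster_weights_map.find(cluster_weights_key);
  if (it == info.cluster_weights_map.end()) {
    it = info.cluster_weights_map
             .emplace(cluster_weights_key, info.next_index++)
             .first;
  }
  return cluster_names_key + "_" + std::to_string(it->second);
}

int main() {
  WeightedClusterIndexMap index_map;
  // Two different weight splits over the same clusters get distinct indices.
  std::cout << ActionName(index_map, "a_b_c", "a_10_b_50_c_40") << "\n";  // a_b_c_0
  std::cout << ActionName(index_map, "a_b_c", "a_20_b_40_c_40") << "\n";  // a_b_c_1
  // Repeating an existing split reuses its index, and thus its action name.
  std::cout << ActionName(index_map, "a_b_c", "a_10_b_50_c_40") << "\n";  // a_b_c_0
}
```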
void XdsResolver::ServiceConfigWatcher::OnServiceConfigChanged(
RefCountedPtr<ServiceConfig> service_config) {
//
// XdsResolver::ListenerWatcher
//
void XdsResolver::ListenerWatcher::OnListenerChanged(
XdsApi::LdsUpdate listener_data) {
if (resolver_->xds_client_ == nullptr) return;
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_resolver_trace)) {
gpr_log(GPR_INFO, "[xds_resolver %p] received updated service config: %s",
resolver_.get(), service_config->json_string().c_str());
gpr_log(GPR_INFO, "[xds_resolver %p] received updated listener data",
resolver_.get());
}
Result result;
grpc_error* error = resolver_->CreateServiceConfig(*listener_data.rds_update,
&result.service_config);
if (error != GRPC_ERROR_NONE) {
OnError(error);
return;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_resolver_trace)) {
gpr_log(GPR_INFO, "[xds_resolver %p] generated service config: %s",
resolver_.get(), result.service_config->json_string().c_str());
}
grpc_arg new_args[] = {
resolver_->xds_client_->MakeChannelArg(),
resolver_->config_selector_->MakeChannelArg(),
};
Result result;
result.args = grpc_channel_args_copy_and_add(resolver_->args_, new_args,
GPR_ARRAY_SIZE(new_args));
result.service_config = std::move(service_config);
resolver_->result_handler()->ReturnResult(std::move(result));
}
void XdsResolver::ServiceConfigWatcher::OnError(grpc_error* error) {
void XdsResolver::ListenerWatcher::OnError(grpc_error* error) {
if (resolver_->xds_client_ == nullptr) return;
gpr_log(GPR_ERROR, "[xds_resolver %p] received error: %s", resolver_.get(),
grpc_error_string(error));
@ -125,7 +169,7 @@ void XdsResolver::ServiceConfigWatcher::OnError(grpc_error* error) {
resolver_->result_handler()->ReturnResult(std::move(result));
}
void XdsResolver::ServiceConfigWatcher::OnResourceDoesNotExist() {
void XdsResolver::ListenerWatcher::OnResourceDoesNotExist() {
if (resolver_->xds_client_ == nullptr) return;
gpr_log(GPR_ERROR,
"[xds_resolver %p] LDS/RDS resource does not exist -- returning "
@ -139,11 +183,16 @@ void XdsResolver::ServiceConfigWatcher::OnResourceDoesNotExist() {
resolver_->result_handler()->ReturnResult(std::move(result));
}
//
// XdsResolver
//
void XdsResolver::StartLocked() {
grpc_error* error = GRPC_ERROR_NONE;
xds_client_ = MakeOrphanable<XdsClient>(
work_serializer(), interested_parties_, server_name_,
absl::make_unique<ServiceConfigWatcher>(Ref()), *args_, &error);
std::vector<grpc_resolved_address>{},
absl::make_unique<ListenerWatcher>(Ref()), *args_, &error);
if (error != GRPC_ERROR_NONE) {
gpr_log(GPR_ERROR,
"Failed to create xds client -- channel will remain in "
@ -153,6 +202,319 @@ void XdsResolver::StartLocked() {
}
}
std::string CreateServiceConfigActionCluster(const std::string& cluster_name) {
return absl::StrFormat(
" \"cds:%s\":{\n"
" \"childPolicy\":[ {\n"
" \"cds_experimental\":{\n"
" \"cluster\": \"%s\"\n"
" }\n"
" } ]\n"
" }",
cluster_name, cluster_name);
}
std::string CreateServiceConfigRoute(const std::string& action_name,
const XdsApi::RdsUpdate::RdsRoute& route) {
std::vector<std::string> headers;
for (const auto& header : route.matchers.header_matchers) {
std::string header_matcher;
switch (header.type) {
case XdsApi::RdsUpdate::RdsRoute::Matchers::HeaderMatcher::
HeaderMatcherType::EXACT:
header_matcher = absl::StrFormat(" \"exact_match\": \"%s\"",
header.string_matcher);
break;
case XdsApi::RdsUpdate::RdsRoute::Matchers::HeaderMatcher::
HeaderMatcherType::REGEX:
header_matcher = absl::StrFormat(" \"regex_match\": \"%s\"",
header.regex_match->pattern());
break;
case XdsApi::RdsUpdate::RdsRoute::Matchers::HeaderMatcher::
HeaderMatcherType::RANGE:
header_matcher = absl::StrFormat(
" \"range_match\":{\n"
" \"start\":%d,\n"
" \"end\":%d\n"
" }",
header.range_start, header.range_end);
break;
case XdsApi::RdsUpdate::RdsRoute::Matchers::HeaderMatcher::
HeaderMatcherType::PRESENT:
header_matcher =
absl::StrFormat(" \"present_match\": %s",
header.present_match ? "true" : "false");
break;
case XdsApi::RdsUpdate::RdsRoute::Matchers::HeaderMatcher::
HeaderMatcherType::PREFIX:
header_matcher = absl::StrFormat(
" \"prefix_match\": \"%s\"", header.string_matcher);
break;
case XdsApi::RdsUpdate::RdsRoute::Matchers::HeaderMatcher::
HeaderMatcherType::SUFFIX:
header_matcher = absl::StrFormat(
" \"suffix_match\": \"%s\"", header.string_matcher);
break;
default:
break;
}
std::vector<std::string> header_parts;
header_parts.push_back(
absl::StrFormat(" { \n"
" \"name\": \"%s\",\n",
header.name));
header_parts.push_back(header_matcher);
if (header.invert_match) {
header_parts.push_back(
absl::StrFormat(",\n"
" \"invert_match\": true"));
}
header_parts.push_back(
absl::StrFormat("\n"
" }"));
headers.push_back(absl::StrJoin(header_parts, ""));
}
std::vector<std::string> headers_service_config;
if (!headers.empty()) {
headers_service_config.push_back("\"headers\":[\n");
headers_service_config.push_back(absl::StrJoin(headers, ","));
headers_service_config.push_back(" ],\n");
}
std::string path_match_str;
switch (route.matchers.path_matcher.type) {
case XdsApi::RdsUpdate::RdsRoute::Matchers::PathMatcher::PathMatcherType::
PREFIX:
path_match_str = absl::StrFormat(
"\"prefix\": \"%s\",\n", route.matchers.path_matcher.string_matcher);
break;
case XdsApi::RdsUpdate::RdsRoute::Matchers::PathMatcher::PathMatcherType::
PATH:
path_match_str = absl::StrFormat(
"\"path\": \"%s\",\n", route.matchers.path_matcher.string_matcher);
break;
case XdsApi::RdsUpdate::RdsRoute::Matchers::PathMatcher::PathMatcherType::
REGEX:
path_match_str =
absl::StrFormat("\"regex\": \"%s\",\n",
route.matchers.path_matcher.regex_matcher->pattern());
break;
}
return absl::StrFormat(
" { \n"
" %s"
" %s"
" %s"
" \"action\": \"%s\"\n"
" }",
path_match_str, absl::StrJoin(headers_service_config, ""),
route.matchers.fraction_per_million.has_value()
? absl::StrFormat("\"match_fraction\":%d,\n",
route.matchers.fraction_per_million.value())
: "",
action_name);
}
// Create the service config for one weighted cluster.
std::string CreateServiceConfigActionWeightedCluster(
const std::string& name,
const std::vector<XdsApi::RdsUpdate::RdsRoute::ClusterWeight>& clusters) {
std::vector<std::string> config_parts;
config_parts.push_back(
absl::StrFormat(" \"weighted:%s\":{\n"
" \"childPolicy\":[ {\n"
" \"weighted_target_experimental\":{\n"
" \"targets\":{\n",
name));
std::vector<std::string> weighted_targets;
weighted_targets.reserve(clusters.size());
for (const auto& cluster_weight : clusters) {
weighted_targets.push_back(absl::StrFormat(
" \"%s\":{\n"
" \"weight\":%d,\n"
" \"childPolicy\":[ {\n"
" \"cds_experimental\":{\n"
" \"cluster\": \"%s\"\n"
" }\n"
" } ]\n"
" }",
cluster_weight.name, cluster_weight.weight, cluster_weight.name));
}
config_parts.push_back(absl::StrJoin(weighted_targets, ",\n"));
config_parts.push_back(
" }\n"
" }\n"
" } ]\n"
" }");
return absl::StrJoin(config_parts, "");
}
struct WeightedClustersKeys {
std::string cluster_names_key;
std::string cluster_weights_key;
};
// Returns the cluster names and weights key or the cluster names only key.
WeightedClustersKeys GetWeightedClustersKey(
const std::vector<XdsApi::RdsUpdate::RdsRoute::ClusterWeight>&
weighted_clusters) {
std::set<std::string> cluster_names;
std::set<std::string> cluster_weights;
for (const auto& cluster_weight : weighted_clusters) {
cluster_names.emplace(absl::StrFormat("%s", cluster_weight.name));
cluster_weights.emplace(
absl::StrFormat("%s_%d", cluster_weight.name, cluster_weight.weight));
}
return {absl::StrJoin(cluster_names, "_"),
absl::StrJoin(cluster_weights, "_")};
}
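
As a worked example of the key construction, the following standalone snippet (an assumed simplification using plain std:: types instead of `XdsApi::RdsUpdate::RdsRoute::ClusterWeight`) reproduces the two keys for a hypothetical split a=10, b=50, c=40. Note that the `"%s_%d"` format yields `a_10_b_50_c_40`, not the `a10_b50_c40` shorthand used in the comment on `weighted_cluster_index_map_` above.

```cpp
// Standalone illustration of the two keys GetWeightedClustersKey builds.
#include <iostream>
#include <set>
#include <string>
#include <vector>

struct ClusterWeight {
  std::string name;
  unsigned weight;
};

int main() {
  std::vector<ClusterWeight> weighted_clusters = {{"a", 10}, {"b", 50}, {"c", 40}};
  std::set<std::string> names;              // sorted, de-duplicated cluster names
  std::set<std::string> names_and_weights;  // sorted "name_weight" entries
  for (const auto& cw : weighted_clusters) {
    names.insert(cw.name);
    names_and_weights.insert(cw.name + "_" + std::to_string(cw.weight));
  }
  auto join = [](const std::set<std::string>& parts) {
    std::string out;
    for (const auto& p : parts) {
      if (!out.empty()) out += "_";
      out += p;
    }
    return out;
  };
  std::cout << join(names) << "\n";              // a_b_c
  std::cout << join(names_and_weights) << "\n";  // a_10_b_50_c_40
}
```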
std::string XdsResolver::WeightedClustersActionName(
const std::vector<XdsApi::RdsUpdate::RdsRoute::ClusterWeight>&
weighted_clusters) {
WeightedClustersKeys keys = GetWeightedClustersKey(weighted_clusters);
auto cluster_names_map_it =
weighted_cluster_index_map_.find(keys.cluster_names_key);
GPR_ASSERT(cluster_names_map_it != weighted_cluster_index_map_.end());
const auto& cluster_weights_map =
cluster_names_map_it->second.cluster_weights_map;
auto cluster_weights_map_it =
cluster_weights_map.find(keys.cluster_weights_key);
GPR_ASSERT(cluster_weights_map_it != cluster_weights_map.end());
return absl::StrFormat("%s_%d", keys.cluster_names_key,
cluster_weights_map_it->second);
}
void XdsResolver::UpdateWeightedClusterIndexMap(
const XdsApi::RdsUpdate& rds_update) {
// Construct a list of unique WeightedCluster actions that we need to
// process in order to find their action names.
std::map<std::string /* cluster_weights_key */,
std::string /* cluster_names_key */>
actions_to_process;
for (const auto& route : rds_update.routes) {
if (!route.weighted_clusters.empty()) {
WeightedClustersKeys keys =
GetWeightedClustersKey(route.weighted_clusters);
auto action_it = actions_to_process.find(keys.cluster_weights_key);
if (action_it == actions_to_process.end()) {
actions_to_process[std::move(keys.cluster_weights_key)] =
std::move(keys.cluster_names_key);
}
}
}
// First pass of all unique WeightedCluster actions: if the exact same
// weighted target policy (same clusters and weights) appears in the old map,
// then that old action name is taken again and should be moved to the new
// map; any other action names from the old set of actions are candidates for
// reuse.
XdsResolver::WeightedClusterIndexMap new_weighted_cluster_index_map;
for (auto action_it = actions_to_process.begin();
action_it != actions_to_process.end();) {
const std::string& cluster_names_key = action_it->second;
const std::string& cluster_weights_key = action_it->first;
auto old_cluster_names_map_it =
weighted_cluster_index_map_.find(cluster_names_key);
if (old_cluster_names_map_it != weighted_cluster_index_map_.end()) {
// Add cluster_names_key to the new map and copy next_index.
auto& new_cluster_names_info =
new_weighted_cluster_index_map[cluster_names_key];
new_cluster_names_info.next_index =
old_cluster_names_map_it->second.next_index;
// Lookup cluster_weights_key in old map.
auto& old_cluster_weights_map =
old_cluster_names_map_it->second.cluster_weights_map;
auto old_cluster_weights_map_it =
old_cluster_weights_map.find(cluster_weights_key);
if (old_cluster_weights_map_it != old_cluster_weights_map.end()) {
// same policy found, move from old map to new map.
new_cluster_names_info.cluster_weights_map[cluster_weights_key] =
old_cluster_weights_map_it->second;
old_cluster_weights_map.erase(old_cluster_weights_map_it);
// This action has been added to new map, so no need to process it
// again.
action_it = actions_to_process.erase(action_it);
continue;
}
}
++action_it;
}
// Second pass of all remaining unique WeightedCluster actions: if clusters
// for a new action are the same as an old unused action, reuse the name. If
// clusters differ, use a brand new name.
for (const auto& action : actions_to_process) {
const std::string& cluster_names_key = action.second;
const std::string& cluster_weights_key = action.first;
auto& new_cluster_names_info =
new_weighted_cluster_index_map[cluster_names_key];
auto& old_cluster_weights_map =
weighted_cluster_index_map_[cluster_names_key].cluster_weights_map;
auto old_cluster_weights_it = old_cluster_weights_map.begin();
if (old_cluster_weights_it != old_cluster_weights_map.end()) {
// There is something to reuse: this action uses the same set
// of clusters as a previous action and that action name is not
// already taken.
new_cluster_names_info.cluster_weights_map[cluster_weights_key] =
old_cluster_weights_it->second;
// Remove the name from being able to reuse again.
old_cluster_weights_map.erase(old_cluster_weights_it);
} else {
// There is nothing to reuse, take the next index to use and
// increment.
new_cluster_names_info.cluster_weights_map[cluster_weights_key] =
new_cluster_names_info.next_index++;
}
}
weighted_cluster_index_map_ = std::move(new_weighted_cluster_index_map);
}
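
A brief worked example (hypothetical data) of the two-pass reuse rules above. The snippet only hard-codes the before/after state of the index map to show the expected outcome of the algorithm; it does not re-run it.

```cpp
// Worked example of the action-name reuse rules, using standalone types.
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

int main() {
  struct Info { uint64_t next_index = 0; std::map<std::string, uint64_t> weights; };
  // Old map after a previous update over clusters {a, b, c}: two weight splits.
  std::map<std::string, Info> old_map;
  old_map["a_b_c"].next_index = 2;
  old_map["a_b_c"].weights = {{"a_25_b_25_c_50", 0}, {"a_10_b_50_c_40", 1}};
  // New update: "a_10_b_50_c_40" (unchanged) and "a_30_b_30_c_40" (new split).
  // Pass 1: the unchanged split keeps index 1, so action "a_b_c_1" is stable
  //         across the update; index 0 becomes free for reuse.
  // Pass 2: the new split takes the freed index 0 ("a_b_c_0") instead of
  //         minting index 2, so next_index stays at 2.
  std::map<std::string, Info> new_map;
  new_map["a_b_c"].next_index = 2;
  new_map["a_b_c"].weights = {{"a_10_b_50_c_40", 1}, {"a_30_b_30_c_40", 0}};
  for (const auto& p : new_map["a_b_c"].weights) {
    std::cout << "a_b_c_" << p.second << " <- " << p.first << "\n";
  }
}
```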
grpc_error* XdsResolver::CreateServiceConfig(
const XdsApi::RdsUpdate& rds_update,
RefCountedPtr<ServiceConfig>* service_config) {
UpdateWeightedClusterIndexMap(rds_update);
std::vector<std::string> actions_vector;
std::vector<std::string> route_table;
std::set<std::string> actions_set;
for (const auto& route : rds_update.routes) {
const std::string action_name =
route.weighted_clusters.empty()
? route.cluster_name
: WeightedClustersActionName(route.weighted_clusters);
if (actions_set.find(action_name) == actions_set.end()) {
actions_set.emplace(action_name);
actions_vector.push_back(
route.weighted_clusters.empty()
? CreateServiceConfigActionCluster(action_name)
: CreateServiceConfigActionWeightedCluster(
action_name, route.weighted_clusters));
}
route_table.push_back(CreateServiceConfigRoute(
absl::StrFormat("%s:%s",
route.weighted_clusters.empty() ? "cds" : "weighted",
action_name),
route));
}
std::vector<std::string> config_parts;
config_parts.push_back(
"{\n"
" \"loadBalancingConfig\":[\n"
" { \"xds_routing_experimental\":{\n"
" \"actions\":{\n");
config_parts.push_back(absl::StrJoin(actions_vector, ",\n"));
config_parts.push_back(
" },\n"
" \"routes\":[\n");
config_parts.push_back(absl::StrJoin(route_table, ",\n"));
config_parts.push_back(
" ]\n"
" } }\n"
" ]\n"
"}");
std::string json = absl::StrJoin(config_parts, "");
grpc_error* error = GRPC_ERROR_NONE;
*service_config = ServiceConfig::Create(json.c_str(), &error);
return error;
}
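
For orientation, this is roughly the service config that CreateServiceConfig() assembles for a hypothetical RouteConfiguration with one prefix route to cluster_a (guarded by an exact-match header) and one path route to a 60/40 split over cluster_b and cluster_c. All resource names are made up and the whitespace and field ordering are illustrative, not exact output.

```cpp
// Approximate shape of the generated config (hypothetical clusters/service).
constexpr char kExampleGeneratedServiceConfig[] = R"json({
  "loadBalancingConfig":[
    { "xds_routing_experimental":{
      "actions":{
        "cds:cluster_a":{
          "childPolicy":[ { "cds_experimental":{ "cluster": "cluster_a" } } ]
        },
        "weighted:cluster_b_cluster_c_0":{
          "childPolicy":[ { "weighted_target_experimental":{
            "targets":{
              "cluster_b":{
                "weight":60,
                "childPolicy":[ { "cds_experimental":{ "cluster": "cluster_b" } } ]
              },
              "cluster_c":{
                "weight":40,
                "childPolicy":[ { "cds_experimental":{ "cluster": "cluster_c" } } ]
              }
            }
          } } ]
        }
      },
      "routes":[
        {
          "prefix": "/pkg.Service/",
          "headers":[ { "name": "env", "exact_match": "prod" } ],
          "action": "cds:cluster_a"
        },
        {
          "path": "/pkg.Service/Method",
          "action": "weighted:cluster_b_cluster_c_0"
        }
      ]
    } }
  ]
})json";
```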
//
// Factory
//

src/core/ext/transport/chttp2/client/chttp2_connector.cc (9)

@ -199,6 +199,8 @@ void Chttp2Connector::OnReceiveSettings(void* arg, grpc_error* error) {
{
MutexLock lock(&self->mu_);
if (!self->notify_error_.has_value()) {
grpc_endpoint_delete_from_pollset_set(self->endpoint_,
self->args_.interested_parties);
if (error != GRPC_ERROR_NONE) {
// Transport got an error while waiting on SETTINGS frame.
// TODO(yashykt): The following two lines should be moved to
@ -225,6 +227,8 @@ void Chttp2Connector::OnTimeout(void* arg, grpc_error* error) {
if (!self->notify_error_.has_value()) {
// The transport did not receive the settings frame in time. Destroy the
// transport.
grpc_endpoint_delete_from_pollset_set(self->endpoint_,
self->args_.interested_parties);
// TODO(yashykt): The following two lines should be moved to
// SubchannelConnector::Result::Reset()
grpc_transport_destroy(self->result_->transport);
@ -245,11 +249,8 @@ void Chttp2Connector::MaybeNotify(grpc_error* error) {
if (notify_error_.has_value()) {
GRPC_ERROR_UNREF(error);
NullThenSchedClosure(DEBUG_LOCATION, &notify_, notify_error_.value());
// Clear out the endpoint, since it is the responsibility of the transport
// to shut it down.
// Clear state for a new Connect().
grpc_endpoint_delete_from_pollset_set(endpoint_, args_.interested_parties);
// We do not destroy the endpoint here, since it is the responsibility of
// Clear out the endpoint_, since it is the responsibility of
// the transport to shut it down.
endpoint_ = nullptr;
notify_error_.reset();

src/core/ext/upb-generated/third_party/istio/security/proto/providers/google/meshca.upb.c (43)

@ -0,0 +1,43 @@
/* This file was generated by upbc (the upb compiler) from the input
* file:
*
* third_party/istio/security/proto/providers/google/meshca.proto
*
* Do not edit -- your changes will be discarded when the file is
* regenerated. */
#include <stddef.h>
#include "upb/msg.h"
#include "third_party/istio/security/proto/providers/google/meshca.upb.h"
#include "google/protobuf/duration.upb.h"
#include "upb/port_def.inc"
static const upb_msglayout *const google_security_meshca_v1_MeshCertificateRequest_submsgs[1] = {
&google_protobuf_Duration_msginit,
};
static const upb_msglayout_field google_security_meshca_v1_MeshCertificateRequest__fields[3] = {
{1, UPB_SIZE(0, 0), 0, 0, 9, 1},
{2, UPB_SIZE(8, 16), 0, 0, 9, 1},
{3, UPB_SIZE(16, 32), 0, 0, 11, 1},
};
const upb_msglayout google_security_meshca_v1_MeshCertificateRequest_msginit = {
&google_security_meshca_v1_MeshCertificateRequest_submsgs[0],
&google_security_meshca_v1_MeshCertificateRequest__fields[0],
UPB_SIZE(24, 48), 3, false,
};
static const upb_msglayout_field google_security_meshca_v1_MeshCertificateResponse__fields[1] = {
{1, UPB_SIZE(0, 0), 0, 0, 9, 3},
};
const upb_msglayout google_security_meshca_v1_MeshCertificateResponse_msginit = {
NULL,
&google_security_meshca_v1_MeshCertificateResponse__fields[0],
UPB_SIZE(4, 8), 1, false,
};
#include "upb/port_undef.inc"

src/core/ext/upb-generated/third_party/istio/security/proto/providers/google/meshca.upb.h (103)

@ -0,0 +1,103 @@
/* This file was generated by upbc (the upb compiler) from the input
* file:
*
* third_party/istio/security/proto/providers/google/meshca.proto
*
* Do not edit -- your changes will be discarded when the file is
* regenerated. */
#ifndef THIRD_PARTY_ISTIO_SECURITY_PROTO_PROVIDERS_GOOGLE_MESHCA_PROTO_UPB_H_
#define THIRD_PARTY_ISTIO_SECURITY_PROTO_PROVIDERS_GOOGLE_MESHCA_PROTO_UPB_H_
#include "upb/msg.h"
#include "upb/decode.h"
#include "upb/encode.h"
#include "upb/port_def.inc"
#ifdef __cplusplus
extern "C" {
#endif
struct google_security_meshca_v1_MeshCertificateRequest;
struct google_security_meshca_v1_MeshCertificateResponse;
typedef struct google_security_meshca_v1_MeshCertificateRequest google_security_meshca_v1_MeshCertificateRequest;
typedef struct google_security_meshca_v1_MeshCertificateResponse google_security_meshca_v1_MeshCertificateResponse;
extern const upb_msglayout google_security_meshca_v1_MeshCertificateRequest_msginit;
extern const upb_msglayout google_security_meshca_v1_MeshCertificateResponse_msginit;
struct google_protobuf_Duration;
extern const upb_msglayout google_protobuf_Duration_msginit;
/* google.security.meshca.v1.MeshCertificateRequest */
UPB_INLINE google_security_meshca_v1_MeshCertificateRequest *google_security_meshca_v1_MeshCertificateRequest_new(upb_arena *arena) {
return (google_security_meshca_v1_MeshCertificateRequest *)_upb_msg_new(&google_security_meshca_v1_MeshCertificateRequest_msginit, arena);
}
UPB_INLINE google_security_meshca_v1_MeshCertificateRequest *google_security_meshca_v1_MeshCertificateRequest_parse(const char *buf, size_t size,
upb_arena *arena) {
google_security_meshca_v1_MeshCertificateRequest *ret = google_security_meshca_v1_MeshCertificateRequest_new(arena);
return (ret && upb_decode(buf, size, ret, &google_security_meshca_v1_MeshCertificateRequest_msginit, arena)) ? ret : NULL;
}
UPB_INLINE char *google_security_meshca_v1_MeshCertificateRequest_serialize(const google_security_meshca_v1_MeshCertificateRequest *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &google_security_meshca_v1_MeshCertificateRequest_msginit, arena, len);
}
UPB_INLINE upb_strview google_security_meshca_v1_MeshCertificateRequest_request_id(const google_security_meshca_v1_MeshCertificateRequest *msg) { return *UPB_PTR_AT(msg, UPB_SIZE(0, 0), upb_strview); }
UPB_INLINE upb_strview google_security_meshca_v1_MeshCertificateRequest_csr(const google_security_meshca_v1_MeshCertificateRequest *msg) { return *UPB_PTR_AT(msg, UPB_SIZE(8, 16), upb_strview); }
UPB_INLINE bool google_security_meshca_v1_MeshCertificateRequest_has_validity(const google_security_meshca_v1_MeshCertificateRequest *msg) { return _upb_has_submsg_nohasbit(msg, UPB_SIZE(16, 32)); }
UPB_INLINE const struct google_protobuf_Duration* google_security_meshca_v1_MeshCertificateRequest_validity(const google_security_meshca_v1_MeshCertificateRequest *msg) { return *UPB_PTR_AT(msg, UPB_SIZE(16, 32), const struct google_protobuf_Duration*); }
UPB_INLINE void google_security_meshca_v1_MeshCertificateRequest_set_request_id(google_security_meshca_v1_MeshCertificateRequest *msg, upb_strview value) {
*UPB_PTR_AT(msg, UPB_SIZE(0, 0), upb_strview) = value;
}
UPB_INLINE void google_security_meshca_v1_MeshCertificateRequest_set_csr(google_security_meshca_v1_MeshCertificateRequest *msg, upb_strview value) {
*UPB_PTR_AT(msg, UPB_SIZE(8, 16), upb_strview) = value;
}
UPB_INLINE void google_security_meshca_v1_MeshCertificateRequest_set_validity(google_security_meshca_v1_MeshCertificateRequest *msg, struct google_protobuf_Duration* value) {
*UPB_PTR_AT(msg, UPB_SIZE(16, 32), struct google_protobuf_Duration*) = value;
}
UPB_INLINE struct google_protobuf_Duration* google_security_meshca_v1_MeshCertificateRequest_mutable_validity(google_security_meshca_v1_MeshCertificateRequest *msg, upb_arena *arena) {
struct google_protobuf_Duration* sub = (struct google_protobuf_Duration*)google_security_meshca_v1_MeshCertificateRequest_validity(msg);
if (sub == NULL) {
sub = (struct google_protobuf_Duration*)_upb_msg_new(&google_protobuf_Duration_msginit, arena);
if (!sub) return NULL;
google_security_meshca_v1_MeshCertificateRequest_set_validity(msg, sub);
}
return sub;
}
/* google.security.meshca.v1.MeshCertificateResponse */
UPB_INLINE google_security_meshca_v1_MeshCertificateResponse *google_security_meshca_v1_MeshCertificateResponse_new(upb_arena *arena) {
return (google_security_meshca_v1_MeshCertificateResponse *)_upb_msg_new(&google_security_meshca_v1_MeshCertificateResponse_msginit, arena);
}
UPB_INLINE google_security_meshca_v1_MeshCertificateResponse *google_security_meshca_v1_MeshCertificateResponse_parse(const char *buf, size_t size,
upb_arena *arena) {
google_security_meshca_v1_MeshCertificateResponse *ret = google_security_meshca_v1_MeshCertificateResponse_new(arena);
return (ret && upb_decode(buf, size, ret, &google_security_meshca_v1_MeshCertificateResponse_msginit, arena)) ? ret : NULL;
}
UPB_INLINE char *google_security_meshca_v1_MeshCertificateResponse_serialize(const google_security_meshca_v1_MeshCertificateResponse *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &google_security_meshca_v1_MeshCertificateResponse_msginit, arena, len);
}
UPB_INLINE upb_strview const* google_security_meshca_v1_MeshCertificateResponse_cert_chain(const google_security_meshca_v1_MeshCertificateResponse *msg, size_t *len) { return (upb_strview const*)_upb_array_accessor(msg, UPB_SIZE(0, 0), len); }
UPB_INLINE upb_strview* google_security_meshca_v1_MeshCertificateResponse_mutable_cert_chain(google_security_meshca_v1_MeshCertificateResponse *msg, size_t *len) {
return (upb_strview*)_upb_array_mutable_accessor(msg, UPB_SIZE(0, 0), len);
}
UPB_INLINE upb_strview* google_security_meshca_v1_MeshCertificateResponse_resize_cert_chain(google_security_meshca_v1_MeshCertificateResponse *msg, size_t len, upb_arena *arena) {
return (upb_strview*)_upb_array_resize_accessor(msg, UPB_SIZE(0, 0), len, UPB_TYPE_STRING, arena);
}
UPB_INLINE bool google_security_meshca_v1_MeshCertificateResponse_add_cert_chain(google_security_meshca_v1_MeshCertificateResponse *msg, upb_strview val, upb_arena *arena) {
return _upb_array_append_accessor(msg, UPB_SIZE(0, 0), UPB_SIZE(8, 16), UPB_TYPE_STRING, &val,
arena);
}
#ifdef __cplusplus
} /* extern "C" */
#endif
#include "upb/port_undef.inc"
#endif /* THIRD_PARTY_ISTIO_SECURITY_PROTO_PROVIDERS_GOOGLE_MESHCA_PROTO_UPB_H_ */
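
A hedged usage sketch for the generated accessors above. It assumes upb's arena API from "upb/upb.h" and the google_protobuf_Duration setters generated into "google/protobuf/duration.upb.h"; the request id, CSR bytes, and 3600-second validity are made-up values.

```cpp
// Sketch: build and serialize a MeshCertificateRequest, then exercise the
// repeated cert_chain accessor on a MeshCertificateResponse.
#include <stdio.h>

#include "google/protobuf/duration.upb.h"
#include "third_party/istio/security/proto/providers/google/meshca.upb.h"
#include "upb/upb.h"

int main() {
  upb_arena* arena = upb_arena_new();

  // Build a MeshCertificateRequest: request_id, csr, and a validity duration.
  google_security_meshca_v1_MeshCertificateRequest* req =
      google_security_meshca_v1_MeshCertificateRequest_new(arena);
  google_security_meshca_v1_MeshCertificateRequest_set_request_id(
      req, upb_strview_makez("req-1234"));
  google_security_meshca_v1_MeshCertificateRequest_set_csr(
      req, upb_strview_makez("-----BEGIN CERTIFICATE REQUEST-----..."));
  google_protobuf_Duration* validity =
      google_security_meshca_v1_MeshCertificateRequest_mutable_validity(req,
                                                                        arena);
  google_protobuf_Duration_set_seconds(validity, 3600);

  // Serialize to wire format; the buffer is owned by the arena.
  size_t len;
  char* buf = google_security_meshca_v1_MeshCertificateRequest_serialize(
      req, arena, &len);
  (void)buf;  // would become the RPC request body
  printf("serialized %zu bytes\n", len);

  // Show the repeated field on the response side.
  google_security_meshca_v1_MeshCertificateResponse* resp =
      google_security_meshca_v1_MeshCertificateResponse_new(arena);
  google_security_meshca_v1_MeshCertificateResponse_add_cert_chain(
      resp, upb_strview_makez("leaf-cert-pem"), arena);
  size_t n;
  upb_strview const* chain =
      google_security_meshca_v1_MeshCertificateResponse_cert_chain(resp, &n);
  if (n > 0) printf("cert_chain[0] = %.*s\n", (int)chain[0].size, chain[0].data);

  upb_arena_free(arena);
  return 0;
}
```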

src/core/ext/xds/xds_api.cc (318)

@ -24,6 +24,7 @@
#include <cstdlib>
#include <string>
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
@ -39,6 +40,7 @@
#include "src/core/lib/gpr/env.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/host_port.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
@ -74,101 +76,44 @@
namespace grpc_core {
//
// XdsApi::PriorityListUpdate
// XdsApi::RdsUpdate::RdsRoute::Matchers::PathMatcher
//
bool XdsApi::PriorityListUpdate::operator==(
const XdsApi::PriorityListUpdate& other) const {
if (priorities_.size() != other.priorities_.size()) return false;
for (size_t i = 0; i < priorities_.size(); ++i) {
if (priorities_[i].localities != other.priorities_[i].localities) {
return false;
}
XdsApi::RdsUpdate::RdsRoute::Matchers::PathMatcher::PathMatcher(
const PathMatcher& other)
: type(other.type) {
if (type == PathMatcherType::REGEX) {
regex_matcher = absl::make_unique<RE2>(other.regex_matcher->pattern());
} else {
string_matcher = other.string_matcher;
}
return true;
}
void XdsApi::PriorityListUpdate::Add(
XdsApi::PriorityListUpdate::LocalityMap::Locality locality) {
// Pad the missing priorities in case the localities are not ordered by
// priority.
if (!Contains(locality.priority)) priorities_.resize(locality.priority + 1);
LocalityMap& locality_map = priorities_[locality.priority];
locality_map.localities.emplace(locality.name, std::move(locality));
}
const XdsApi::PriorityListUpdate::LocalityMap* XdsApi::PriorityListUpdate::Find(
uint32_t priority) const {
if (!Contains(priority)) return nullptr;
return &priorities_[priority];
}
bool XdsApi::PriorityListUpdate::Contains(
const RefCountedPtr<XdsLocalityName>& name) {
for (size_t i = 0; i < priorities_.size(); ++i) {
const LocalityMap& locality_map = priorities_[i];
if (locality_map.Contains(name)) return true;
XdsApi::RdsUpdate::RdsRoute::Matchers::PathMatcher&
XdsApi::RdsUpdate::RdsRoute::Matchers::PathMatcher::operator=(
const PathMatcher& other) {
type = other.type;
if (type == PathMatcherType::REGEX) {
regex_matcher = absl::make_unique<RE2>(other.regex_matcher->pattern());
} else {
string_matcher = other.string_matcher;
}
return false;
return *this;
}
//
// XdsApi::DropConfig
//
bool XdsApi::DropConfig::ShouldDrop(const std::string** category_name) const {
for (size_t i = 0; i < drop_category_list_.size(); ++i) {
const auto& drop_category = drop_category_list_[i];
// Generate a random number in [0, 1000000).
const uint32_t random = static_cast<uint32_t>(rand()) % 1000000;
if (random < drop_category.parts_per_million) {
*category_name = &drop_category.name;
return true;
bool XdsApi::RdsUpdate::RdsRoute::Matchers::PathMatcher::operator==(
const PathMatcher& other) const {
if (type != other.type) return false;
if (type == PathMatcherType::REGEX) {
// Should never be null.
if (regex_matcher == nullptr || other.regex_matcher == nullptr) {
return false;
}
return regex_matcher->pattern() == other.regex_matcher->pattern();
}
return false;
return string_matcher == other.string_matcher;
}
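
The hand-written copy constructor and copy assignment above are needed because the `std::unique_ptr<RE2>` member makes PathMatcher move-only by default; a copy must re-create the RE2 from its pattern(). A minimal standalone sketch of the same idiom (assuming RE2 is available; this is not the gRPC type):

```cpp
// Deep-copy pattern for a struct holding either a string or a compiled regex.
#include <memory>
#include <string>

#include "re2/re2.h"

struct RegexOrString {
  bool is_regex = false;
  std::string string_value;
  std::unique_ptr<RE2> regex;

  RegexOrString() = default;
  RegexOrString(const RegexOrString& other)
      : is_regex(other.is_regex), string_value(other.string_value) {
    // Re-compile the pattern instead of sharing the RE2 instance.
    if (is_regex) regex = std::make_unique<RE2>(other.regex->pattern());
  }
  RegexOrString& operator=(const RegexOrString& other) {
    is_regex = other.is_regex;
    string_value = other.string_value;
    regex = is_regex ? std::make_unique<RE2>(other.regex->pattern()) : nullptr;
    return *this;
  }
  bool operator==(const RegexOrString& other) const {
    if (is_regex != other.is_regex) return false;
    if (is_regex) return regex->pattern() == other.regex->pattern();
    return string_value == other.string_value;
  }
};
```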
//
// XdsApi
//
const char* XdsApi::kLdsTypeUrl =
"type.googleapis.com/envoy.config.listener.v3.Listener";
const char* XdsApi::kRdsTypeUrl =
"type.googleapis.com/envoy.config.route.v3.RouteConfiguration";
const char* XdsApi::kCdsTypeUrl =
"type.googleapis.com/envoy.config.cluster.v3.Cluster";
const char* XdsApi::kEdsTypeUrl =
"type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment";
namespace {
const char* kLdsV2TypeUrl = "type.googleapis.com/envoy.api.v2.Listener";
const char* kRdsV2TypeUrl =
"type.googleapis.com/envoy.api.v2.RouteConfiguration";
const char* kCdsV2TypeUrl = "type.googleapis.com/envoy.api.v2.Cluster";
const char* kEdsV2TypeUrl =
"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment";
bool IsLds(absl::string_view type_url) {
return type_url == XdsApi::kLdsTypeUrl || type_url == kLdsV2TypeUrl;
}
bool IsRds(absl::string_view type_url) {
return type_url == XdsApi::kRdsTypeUrl || type_url == kRdsV2TypeUrl;
}
bool IsCds(absl::string_view type_url) {
return type_url == XdsApi::kCdsTypeUrl || type_url == kCdsV2TypeUrl;
}
bool IsEds(absl::string_view type_url) {
return type_url == XdsApi::kEdsTypeUrl || type_url == kEdsV2TypeUrl;
}
} // namespace
std::string XdsApi::RdsUpdate::RdsRoute::Matchers::PathMatcher::ToString()
const {
std::string path_type_string;
@ -185,12 +130,75 @@ std::string XdsApi::RdsUpdate::RdsRoute::Matchers::PathMatcher::ToString()
default:
break;
}
return absl::StrFormat("Path %s:/%s/", path_type_string,
return absl::StrFormat("Path %s:%s", path_type_string,
type == PathMatcherType::REGEX
? regex_matcher->pattern()
: string_matcher);
}
//
// XdsApi::RdsUpdate::RdsRoute::Matchers::HeaderMatcher
//
XdsApi::RdsUpdate::RdsRoute::Matchers::HeaderMatcher::HeaderMatcher(
const HeaderMatcher& other)
: name(other.name), type(other.type), invert_match(other.invert_match) {
switch (type) {
case HeaderMatcherType::REGEX:
regex_match = absl::make_unique<RE2>(other.regex_match->pattern());
break;
case HeaderMatcherType::RANGE:
range_start = other.range_start;
range_end = other.range_end;
break;
case HeaderMatcherType::PRESENT:
present_match = other.present_match;
break;
default:
string_matcher = other.string_matcher;
}
}
XdsApi::RdsUpdate::RdsRoute::Matchers::HeaderMatcher&
XdsApi::RdsUpdate::RdsRoute::Matchers::HeaderMatcher::operator=(
const HeaderMatcher& other) {
name = other.name;
type = other.type;
invert_match = other.invert_match;
switch (type) {
case HeaderMatcherType::REGEX:
regex_match = absl::make_unique<RE2>(other.regex_match->pattern());
break;
case HeaderMatcherType::RANGE:
range_start = other.range_start;
range_end = other.range_end;
break;
case HeaderMatcherType::PRESENT:
present_match = other.present_match;
break;
default:
string_matcher = other.string_matcher;
}
return *this;
}
bool XdsApi::RdsUpdate::RdsRoute::Matchers::HeaderMatcher::operator==(
const HeaderMatcher& other) const {
if (name != other.name) return false;
if (type != other.type) return false;
if (invert_match != other.invert_match) return false;
switch (type) {
case HeaderMatcherType::REGEX:
return regex_match->pattern() == other.regex_match->pattern();
case HeaderMatcherType::RANGE:
return range_start == other.range_start && range_end == other.range_end;
case HeaderMatcherType::PRESENT:
return present_match == other.present_match;
default:
return string_matcher == other.string_matcher;
}
}
std::string XdsApi::RdsUpdate::RdsRoute::Matchers::HeaderMatcher::ToString()
const {
switch (type) {
@ -257,6 +265,102 @@ std::string XdsApi::RdsUpdate::ToString() const {
return absl::StrJoin(contents, ",\n");
}
//
// XdsApi::PriorityListUpdate
//
bool XdsApi::PriorityListUpdate::operator==(
const XdsApi::PriorityListUpdate& other) const {
if (priorities_.size() != other.priorities_.size()) return false;
for (size_t i = 0; i < priorities_.size(); ++i) {
if (priorities_[i].localities != other.priorities_[i].localities) {
return false;
}
}
return true;
}
void XdsApi::PriorityListUpdate::Add(
XdsApi::PriorityListUpdate::LocalityMap::Locality locality) {
// Pad the missing priorities in case the localities are not ordered by
// priority.
if (!Contains(locality.priority)) priorities_.resize(locality.priority + 1);
LocalityMap& locality_map = priorities_[locality.priority];
locality_map.localities.emplace(locality.name, std::move(locality));
}
const XdsApi::PriorityListUpdate::LocalityMap* XdsApi::PriorityListUpdate::Find(
uint32_t priority) const {
if (!Contains(priority)) return nullptr;
return &priorities_[priority];
}
bool XdsApi::PriorityListUpdate::Contains(
const RefCountedPtr<XdsLocalityName>& name) {
for (size_t i = 0; i < priorities_.size(); ++i) {
const LocalityMap& locality_map = priorities_[i];
if (locality_map.Contains(name)) return true;
}
return false;
}
//
// XdsApi::DropConfig
//
bool XdsApi::DropConfig::ShouldDrop(const std::string** category_name) const {
for (size_t i = 0; i < drop_category_list_.size(); ++i) {
const auto& drop_category = drop_category_list_[i];
// Generate a random number in [0, 1000000).
const uint32_t random = static_cast<uint32_t>(rand()) % 1000000;
if (random < drop_category.parts_per_million) {
*category_name = &drop_category.name;
return true;
}
}
return false;
}
//
// XdsApi
//
const char* XdsApi::kLdsTypeUrl =
"type.googleapis.com/envoy.config.listener.v3.Listener";
const char* XdsApi::kRdsTypeUrl =
"type.googleapis.com/envoy.config.route.v3.RouteConfiguration";
const char* XdsApi::kCdsTypeUrl =
"type.googleapis.com/envoy.config.cluster.v3.Cluster";
const char* XdsApi::kEdsTypeUrl =
"type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment";
namespace {
const char* kLdsV2TypeUrl = "type.googleapis.com/envoy.api.v2.Listener";
const char* kRdsV2TypeUrl =
"type.googleapis.com/envoy.api.v2.RouteConfiguration";
const char* kCdsV2TypeUrl = "type.googleapis.com/envoy.api.v2.Cluster";
const char* kEdsV2TypeUrl =
"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment";
bool IsLds(absl::string_view type_url) {
return type_url == XdsApi::kLdsTypeUrl || type_url == kLdsV2TypeUrl;
}
bool IsRds(absl::string_view type_url) {
return type_url == XdsApi::kRdsTypeUrl || type_url == kRdsV2TypeUrl;
}
bool IsCds(absl::string_view type_url) {
return type_url == XdsApi::kCdsTypeUrl || type_url == kCdsV2TypeUrl;
}
bool IsEds(absl::string_view type_url) {
return type_url == XdsApi::kEdsTypeUrl || type_url == kEdsV2TypeUrl;
}
} // namespace
XdsApi::XdsApi(XdsClient* client, TraceFlag* tracer,
const XdsBootstrap* bootstrap)
: client_(client),
@ -361,6 +465,7 @@ void PopulateNode(upb_arena* arena, const XdsBootstrap* bootstrap,
const std::string& build_version,
const std::string& user_agent_name,
const std::string& server_name,
const std::vector<grpc_resolved_address>& listening_addresses,
envoy_config_core_v3_Node* node_msg) {
const XdsBootstrap::Node* node = bootstrap->node();
if (node != nullptr) {
@ -408,6 +513,21 @@ void PopulateNode(upb_arena* arena, const XdsBootstrap* bootstrap,
if (!bootstrap->server().ShouldUseV3()) {
PopulateBuildVersion(arena, node_msg, build_version);
}
for (const grpc_resolved_address& address : listening_addresses) {
std::string address_str = grpc_sockaddr_to_string(&address, false);
absl::string_view addr_str;
absl::string_view port_str;
GPR_ASSERT(SplitHostPort(address_str, &addr_str, &port_str));
uint32_t port;
GPR_ASSERT(absl::SimpleAtoi(port_str, &port));
auto* addr_msg =
envoy_config_core_v3_Node_add_listening_addresses(node_msg, arena);
auto* socket_addr_msg =
envoy_config_core_v3_Address_mutable_socket_address(addr_msg, arena);
envoy_config_core_v3_SocketAddress_set_address(
socket_addr_msg, upb_strview_make(addr_str.data(), addr_str.size()));
envoy_config_core_v3_SocketAddress_set_port_value(socket_addr_msg, port);
}
envoy_config_core_v3_Node_set_user_agent_name(
node_msg,
upb_strview_make(user_agent_name.data(), user_agent_name.size()));
@ -526,6 +646,29 @@ void AddNodeLogFields(const envoy_config_core_v3_Node* node,
fields->emplace_back(
absl::StrCat(" build_version: \"", build_version, "\""));
}
// listening_addresses
size_t num_listening_addresses;
const envoy_config_core_v3_Address* const* listening_addresses =
envoy_config_core_v3_Node_listening_addresses(node,
&num_listening_addresses);
for (size_t i = 0; i < num_listening_addresses; ++i) {
fields->emplace_back(" listening_address {");
const auto* socket_addr_msg =
envoy_config_core_v3_Address_socket_address(listening_addresses[i]);
if (socket_addr_msg != nullptr) {
fields->emplace_back(" socket_address {");
AddStringField(
" address",
envoy_config_core_v3_SocketAddress_address(socket_addr_msg), fields);
if (envoy_config_core_v3_SocketAddress_has_port_value(socket_addr_msg)) {
fields->emplace_back(absl::StrCat(
" port_value: ",
envoy_config_core_v3_SocketAddress_port_value(socket_addr_msg)));
}
fields->emplace_back(" }");
}
fields->emplace_back(" }");
}
// user_agent_name
AddStringField(" user_agent_name",
envoy_config_core_v3_Node_user_agent_name(node), fields);
@ -628,7 +771,8 @@ grpc_slice XdsApi::CreateAdsRequest(
const std::string& type_url,
const std::set<absl::string_view>& resource_names,
const std::string& version, const std::string& nonce, grpc_error* error,
bool populate_node) {
bool populate_node,
const std::vector<grpc_resolved_address>& listening_addresses) {
upb::Arena arena;
// Create a request.
envoy_service_discovery_v3_DiscoveryRequest* request =
@ -669,7 +813,7 @@ grpc_slice XdsApi::CreateAdsRequest(
envoy_service_discovery_v3_DiscoveryRequest_mutable_node(request,
arena.ptr());
PopulateNode(arena.ptr(), bootstrap_, build_version_, user_agent_name_, "",
node_msg);
listening_addresses, node_msg);
}
// Add resource_names.
for (const auto& resource_name : resource_names) {
@ -2095,7 +2239,7 @@ grpc_slice XdsApi::CreateLrsInitialRequest(const std::string& server_name) {
envoy_service_load_stats_v3_LoadStatsRequest_mutable_node(request,
arena.ptr());
PopulateNode(arena.ptr(), bootstrap_, build_version_, user_agent_name_,
server_name, node_msg);
server_name, {}, node_msg);
envoy_config_core_v3_Node_add_client_features(
node_msg, upb_strview_makez("envoy.lrs.supports_send_all_clusters"),
arena.ptr());

src/core/ext/xds/xds_api.h (48)

@ -61,19 +61,14 @@ class XdsApi {
PathMatcherType type;
std::string string_matcher;
std::unique_ptr<RE2> regex_matcher;
bool operator==(const PathMatcher& other) const {
if (type != other.type) return false;
if (type == PathMatcherType::REGEX) {
// Should never be null.
if (regex_matcher == nullptr || other.regex_matcher == nullptr) {
return false;
}
return regex_matcher->pattern() == other.regex_matcher->pattern();
}
return string_matcher == other.string_matcher;
}
PathMatcher() = default;
PathMatcher(const PathMatcher& other);
PathMatcher& operator=(const PathMatcher& other);
bool operator==(const PathMatcher& other) const;
std::string ToString() const;
};
struct HeaderMatcher {
enum class HeaderMatcherType {
EXACT, // value stored in string_matcher field
@ -93,19 +88,18 @@ class XdsApi {
// invert_match field may or may not exist, so initialize it to
// false.
bool invert_match = false;
bool operator==(const HeaderMatcher& other) const {
return (name == other.name && type == other.type &&
range_start == other.range_start &&
range_end == other.range_end &&
string_matcher == other.string_matcher &&
present_match == other.present_match &&
invert_match == other.invert_match);
}
HeaderMatcher() = default;
HeaderMatcher(const HeaderMatcher& other);
HeaderMatcher& operator=(const HeaderMatcher& other);
bool operator==(const HeaderMatcher& other) const;
std::string ToString() const;
};
PathMatcher path_matcher;
std::vector<HeaderMatcher> header_matchers;
absl::optional<uint32_t> fraction_per_million;
bool operator==(const Matchers& other) const {
return (path_matcher == other.path_matcher &&
header_matchers == other.header_matchers &&
@ -113,13 +107,16 @@ class XdsApi {
}
std::string ToString() const;
};
Matchers matchers;
// Action for this route.
// TODO(roth): When we can use absl::variant<>, consider using that
// here, to enforce the fact that only one of the two fields can be set.
std::string cluster_name;
struct ClusterWeight {
std::string name;
uint32_t weight;
bool operator==(const ClusterWeight& other) const {
return (name == other.name && weight == other.weight);
}
@ -298,11 +295,12 @@ class XdsApi {
// Creates an ADS request.
// Takes ownership of \a error.
grpc_slice CreateAdsRequest(const std::string& type_url,
const std::set<absl::string_view>& resource_names,
const std::string& version,
const std::string& nonce, grpc_error* error,
bool populate_node);
grpc_slice CreateAdsRequest(
const std::string& type_url,
const std::set<absl::string_view>& resource_names,
const std::string& version, const std::string& nonce, grpc_error* error,
bool populate_node,
const std::vector<grpc_resolved_address>& listening_addresses);
// Parses an ADS response.
// If the response can't be parsed at the top level, the resulting

src/core/ext/xds/xds_client.cc (366)

@ -187,8 +187,7 @@ class XdsClient::ChannelState::AdsCallState
}
if (type_url_ == XdsApi::kLdsTypeUrl ||
type_url_ == XdsApi::kRdsTypeUrl) {
ads_calld_->xds_client()->service_config_watcher_->OnError(
watcher_error);
ads_calld_->xds_client()->listener_watcher_->OnError(watcher_error);
} else if (type_url_ == XdsApi::kCdsTypeUrl) {
ClusterState& state = ads_calld_->xds_client()->cluster_map_[name_];
for (const auto& p : state.watchers) {
@ -680,7 +679,6 @@ XdsClient::ChannelState::AdsCallState::AdsCallState(
// activity in xds_client()->interested_parties_, which is comprised of
// the polling entities from client_channel.
GPR_ASSERT(xds_client() != nullptr);
GPR_ASSERT(!xds_client()->server_name_.empty());
// Create a call with the specified method name.
const auto& method =
xds_client()->bootstrap_->server().ShouldUseV3()
@ -719,7 +717,7 @@ XdsClient::ChannelState::AdsCallState::AdsCallState(
// Op: send request message.
GRPC_CLOSURE_INIT(&on_request_sent_, OnRequestSent, this,
grpc_schedule_on_exec_ctx);
if (xds_client()->service_config_watcher_ != nullptr) {
if (xds_client()->listener_watcher_ != nullptr) {
Subscribe(XdsApi::kLdsTypeUrl, xds_client()->server_name_);
if (xds_client()->lds_result_.has_value() &&
!xds_client()->lds_result_->route_config_name.empty()) {
@ -807,7 +805,8 @@ void XdsClient::ChannelState::AdsCallState::SendMessageLocked(
ResourceNamesForRequest(type_url);
request_payload_slice = xds_client()->api_.CreateAdsRequest(
type_url, resource_names, state.version, state.nonce,
GRPC_ERROR_REF(state.error), !sent_initial_message_);
GRPC_ERROR_REF(state.error), !sent_initial_message_,
xds_client()->listening_addresses_);
if (type_url != XdsApi::kLdsTypeUrl && type_url != XdsApi::kRdsTypeUrl &&
type_url != XdsApi::kCdsTypeUrl && type_url != XdsApi::kEdsTypeUrl) {
state_map_.erase(type_url);
@ -882,7 +881,7 @@ void XdsClient::ChannelState::AdsCallState::AcceptLdsUpdate(
xds_client()->rds_result_.reset();
}
xds_client()->lds_result_.reset();
xds_client()->service_config_watcher_->OnResourceDoesNotExist();
xds_client()->listener_watcher_->OnResourceDoesNotExist();
return;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
@ -925,15 +924,8 @@ void XdsClient::ChannelState::AdsCallState::AcceptLdsUpdate(
if (xds_client()->lds_result_->rds_update.has_value()) {
// If the RouteConfiguration was found inlined in LDS response, notify
// the watcher immediately.
RefCountedPtr<ServiceConfig> service_config;
grpc_error* error = xds_client()->CreateServiceConfig(
xds_client()->lds_result_->rds_update.value(), &service_config);
if (error == GRPC_ERROR_NONE) {
xds_client()->service_config_watcher_->OnServiceConfigChanged(
std::move(service_config));
} else {
xds_client()->service_config_watcher_->OnError(error);
}
xds_client()->listener_watcher_->OnListenerChanged(
*xds_client()->lds_result_);
} else {
// Send RDS request for dynamic resolution.
Subscribe(XdsApi::kRdsTypeUrl,
@ -948,7 +940,7 @@ void XdsClient::ChannelState::AdsCallState::AcceptRdsUpdate(
"[xds_client %p] RDS update does not include requested resource",
xds_client());
xds_client()->rds_result_.reset();
xds_client()->service_config_watcher_->OnResourceDoesNotExist();
xds_client()->listener_watcher_->OnResourceDoesNotExist();
return;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
@ -977,15 +969,9 @@ void XdsClient::ChannelState::AdsCallState::AcceptRdsUpdate(
}
xds_client()->rds_result_ = std::move(rds_update);
// Notify the watcher.
RefCountedPtr<ServiceConfig> service_config;
grpc_error* error = xds_client()->CreateServiceConfig(
xds_client()->rds_result_.value(), &service_config);
if (error == GRPC_ERROR_NONE) {
xds_client()->service_config_watcher_->OnServiceConfigChanged(
std::move(service_config));
} else {
xds_client()->service_config_watcher_->OnError(error);
}
XdsApi::LdsUpdate lds_result = *xds_client()->lds_result_;
lds_result.rds_update = xds_client()->rds_result_;
xds_client()->listener_watcher_->OnListenerChanged(lds_result);
}
void XdsClient::ChannelState::AdsCallState::AcceptCdsUpdate(
@ -1759,7 +1745,8 @@ grpc_millis GetRequestTimeout(const grpc_channel_args& args) {
XdsClient::XdsClient(std::shared_ptr<WorkSerializer> work_serializer,
grpc_pollset_set* interested_parties,
absl::string_view server_name,
std::unique_ptr<ServiceConfigWatcherInterface> watcher,
std::vector<grpc_resolved_address> listening_addresses,
std::unique_ptr<ListenerWatcherInterface> watcher,
const grpc_channel_args& channel_args, grpc_error** error)
: InternallyRefCounted<XdsClient>(&grpc_xds_client_trace),
request_timeout_(GetRequestTimeout(channel_args)),
@ -1769,7 +1756,8 @@ XdsClient::XdsClient(std::shared_ptr<WorkSerializer> work_serializer,
XdsBootstrap::ReadFromFile(this, &grpc_xds_client_trace, error)),
api_(this, &grpc_xds_client_trace, bootstrap_.get()),
server_name_(server_name),
service_config_watcher_(std::move(watcher)) {
listening_addresses_(std::move(listening_addresses)),
listener_watcher_(std::move(watcher)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO, "[xds_client %p] creating xds client", this);
}
@ -1792,7 +1780,7 @@ XdsClient::XdsClient(std::shared_ptr<WorkSerializer> work_serializer,
}
chand_ = MakeOrphanable<ChannelState>(
Ref(DEBUG_LOCATION, "XdsClient+ChannelState"), channel);
if (service_config_watcher_ != nullptr) {
if (listener_watcher_ != nullptr) {
chand_->Subscribe(XdsApi::kLdsTypeUrl, std::string(server_name));
}
}
@ -1815,7 +1803,7 @@ void XdsClient::Orphan() {
// possible for ADS calls to be in progress. Unreffing the loadbalancing
// policies before those calls are done would lead to issues such as
// https://github.com/grpc/grpc/issues/20928.
if (service_config_watcher_ != nullptr) {
if (listener_watcher_ != nullptr) {
cluster_map_.clear();
endpoint_map_.clear();
}
@ -1990,322 +1978,6 @@ void XdsClient::ResetBackoff() {
}
}
namespace {
std::string CreateServiceConfigActionCluster(const std::string& cluster_name) {
return absl::StrFormat(
" \"cds:%s\":{\n"
" \"childPolicy\":[ {\n"
" \"cds_experimental\":{\n"
" \"cluster\": \"%s\"\n"
" }\n"
" } ]\n"
" }",
cluster_name, cluster_name);
}
std::string CreateServiceConfigRoute(const std::string& action_name,
const XdsApi::RdsUpdate::RdsRoute& route) {
std::vector<std::string> headers;
for (const auto& header : route.matchers.header_matchers) {
std::string header_matcher;
switch (header.type) {
case XdsApi::RdsUpdate::RdsRoute::Matchers::HeaderMatcher::
HeaderMatcherType::EXACT:
header_matcher = absl::StrFormat(" \"exact_match\": \"%s\"",
header.string_matcher);
break;
case XdsApi::RdsUpdate::RdsRoute::Matchers::HeaderMatcher::
HeaderMatcherType::REGEX:
header_matcher = absl::StrFormat(" \"regex_match\": \"%s\"",
header.regex_match->pattern());
break;
case XdsApi::RdsUpdate::RdsRoute::Matchers::HeaderMatcher::
HeaderMatcherType::RANGE:
header_matcher = absl::StrFormat(
" \"range_match\":{\n"
" \"start\":%d,\n"
" \"end\":%d\n"
" }",
header.range_start, header.range_end);
break;
case XdsApi::RdsUpdate::RdsRoute::Matchers::HeaderMatcher::
HeaderMatcherType::PRESENT:
header_matcher =
absl::StrFormat(" \"present_match\": %s",
header.present_match ? "true" : "false");
break;
case XdsApi::RdsUpdate::RdsRoute::Matchers::HeaderMatcher::
HeaderMatcherType::PREFIX:
header_matcher = absl::StrFormat(
" \"prefix_match\": \"%s\"", header.string_matcher);
break;
case XdsApi::RdsUpdate::RdsRoute::Matchers::HeaderMatcher::
HeaderMatcherType::SUFFIX:
header_matcher = absl::StrFormat(
" \"suffix_match\": \"%s\"", header.string_matcher);
break;
default:
break;
}
std::vector<std::string> header_parts;
header_parts.push_back(
absl::StrFormat(" { \n"
" \"name\": \"%s\",\n",
header.name));
header_parts.push_back(header_matcher);
if (header.invert_match) {
header_parts.push_back(
absl::StrFormat(",\n"
" \"invert_match\": true"));
}
header_parts.push_back(
absl::StrFormat("\n"
" }"));
headers.push_back(absl::StrJoin(header_parts, ""));
}
std::vector<std::string> headers_service_config;
if (!headers.empty()) {
headers_service_config.push_back("\"headers\":[\n");
headers_service_config.push_back(absl::StrJoin(headers, ","));
headers_service_config.push_back(" ],\n");
}
std::string path_match_str;
switch (route.matchers.path_matcher.type) {
case XdsApi::RdsUpdate::RdsRoute::Matchers::PathMatcher::PathMatcherType::
PREFIX:
path_match_str = absl::StrFormat(
"\"prefix\": \"%s\",\n", route.matchers.path_matcher.string_matcher);
break;
case XdsApi::RdsUpdate::RdsRoute::Matchers::PathMatcher::PathMatcherType::
PATH:
path_match_str = absl::StrFormat(
"\"path\": \"%s\",\n", route.matchers.path_matcher.string_matcher);
break;
case XdsApi::RdsUpdate::RdsRoute::Matchers::PathMatcher::PathMatcherType::
REGEX:
path_match_str =
absl::StrFormat("\"regex\": \"%s\",\n",
route.matchers.path_matcher.regex_matcher->pattern());
break;
}
return absl::StrFormat(
" { \n"
" %s"
" %s"
" %s"
" \"action\": \"%s\"\n"
" }",
path_match_str, absl::StrJoin(headers_service_config, ""),
route.matchers.fraction_per_million.has_value()
? absl::StrFormat("\"match_fraction\":%d,\n",
route.matchers.fraction_per_million.value())
: "",
action_name);
}
// Create the service config for one weighted cluster.
std::string CreateServiceConfigActionWeightedCluster(
const std::string& name,
const std::vector<XdsApi::RdsUpdate::RdsRoute::ClusterWeight>& clusters) {
std::vector<std::string> config_parts;
config_parts.push_back(
absl::StrFormat(" \"weighted:%s\":{\n"
" \"childPolicy\":[ {\n"
" \"weighted_target_experimental\":{\n"
" \"targets\":{\n",
name));
std::vector<std::string> weighted_targets;
weighted_targets.reserve(clusters.size());
for (const auto& cluster_weight : clusters) {
weighted_targets.push_back(absl::StrFormat(
" \"%s\":{\n"
" \"weight\":%d,\n"
" \"childPolicy\":[ {\n"
" \"cds_experimental\":{\n"
" \"cluster\": \"%s\"\n"
" }\n"
" } ]\n"
" }",
cluster_weight.name, cluster_weight.weight, cluster_weight.name));
}
config_parts.push_back(absl::StrJoin(weighted_targets, ",\n"));
config_parts.push_back(
" }\n"
" }\n"
" } ]\n"
" }");
return absl::StrJoin(config_parts, "");
}
struct WeightedClustersKeys {
std::string cluster_names_key;
std::string cluster_weights_key;
};
// Returns the cluster names and weights key or the cluster names only key.
WeightedClustersKeys GetWeightedClustersKey(
const std::vector<XdsApi::RdsUpdate::RdsRoute::ClusterWeight>&
weighted_clusters) {
std::set<std::string> cluster_names;
std::set<std::string> cluster_weights;
for (const auto& cluster_weight : weighted_clusters) {
cluster_names.emplace(absl::StrFormat("%s", cluster_weight.name));
cluster_weights.emplace(
absl::StrFormat("%s_%d", cluster_weight.name, cluster_weight.weight));
}
return {absl::StrJoin(cluster_names, "_"),
absl::StrJoin(cluster_weights, "_")};
}
} // namespace
std::string XdsClient::WeightedClustersActionName(
const std::vector<XdsApi::RdsUpdate::RdsRoute::ClusterWeight>&
weighted_clusters) {
WeightedClustersKeys keys = GetWeightedClustersKey(weighted_clusters);
auto cluster_names_map_it =
weighted_cluster_index_map_.find(keys.cluster_names_key);
GPR_ASSERT(cluster_names_map_it != weighted_cluster_index_map_.end());
const auto& cluster_weights_map =
cluster_names_map_it->second.cluster_weights_map;
auto cluster_weights_map_it =
cluster_weights_map.find(keys.cluster_weights_key);
GPR_ASSERT(cluster_weights_map_it != cluster_weights_map.end());
return absl::StrFormat("%s_%d", keys.cluster_names_key,
cluster_weights_map_it->second);
}
void XdsClient::UpdateWeightedClusterIndexMap(
const XdsApi::RdsUpdate& rds_update) {
// Construct a list of unique WeightedCluster actions that we need to
// process in order to find their action names.
std::map<std::string /* cluster_weights_key */,
std::string /* cluster_names_key */>
actions_to_process;
for (const auto& route : rds_update.routes) {
if (!route.weighted_clusters.empty()) {
WeightedClustersKeys keys =
GetWeightedClustersKey(route.weighted_clusters);
auto action_it = actions_to_process.find(keys.cluster_weights_key);
if (action_it == actions_to_process.end()) {
actions_to_process[std::move(keys.cluster_weights_key)] =
std::move(keys.cluster_names_key);
}
}
}
// First pass of all unique WeightedCluster actions: if the exact same
// weighted target policy (same clusters and weights) appears in the old map,
// then that old action name is taken again and should be moved to the new
// map; any other action names from the old set of actions are candidates for
// reuse.
XdsClient::WeightedClusterIndexMap new_weighted_cluster_index_map;
for (auto action_it = actions_to_process.begin();
action_it != actions_to_process.end();) {
const std::string& cluster_names_key = action_it->second;
const std::string& cluster_weights_key = action_it->first;
auto old_cluster_names_map_it =
weighted_cluster_index_map_.find(cluster_names_key);
if (old_cluster_names_map_it != weighted_cluster_index_map_.end()) {
// Add cluster_names_key to the new map and copy next_index.
auto& new_cluster_names_info =
new_weighted_cluster_index_map[cluster_names_key];
new_cluster_names_info.next_index =
old_cluster_names_map_it->second.next_index;
// Lookup cluster_weights_key in old map.
auto& old_cluster_weights_map =
old_cluster_names_map_it->second.cluster_weights_map;
auto old_cluster_weights_map_it =
old_cluster_weights_map.find(cluster_weights_key);
if (old_cluster_weights_map_it != old_cluster_weights_map.end()) {
// Same policy found; move it from the old map to the new map.
new_cluster_names_info.cluster_weights_map[cluster_weights_key] =
old_cluster_weights_map_it->second;
old_cluster_weights_map.erase(old_cluster_weights_map_it);
// This action has been added to the new map, so there is no need to
// process it again.
action_it = actions_to_process.erase(action_it);
continue;
}
}
++action_it;
}
// Second pass of all remaining unique WeightedCluster actions: if clusters
// for a new action are the same as an old unused action, reuse the name. If
// clusters differ, use a brand new name.
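// Illustrative trace (not part of this change): if the old map holds
// {"a_b" -> {next_index: 2, cluster_weights_map: {"a_10_b_90" -> 0,
// "a_50_b_50" -> 1}}} and the new update contains weighted actions keyed
// "a_10_b_90" and "a_30_b_70", the first pass carries "a_10_b_90" over with
// its old index 0, and the second pass reuses the now-unclaimed index 1 for
// "a_30_b_70".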
for (const auto& action : actions_to_process) {
const std::string& cluster_names_key = action.second;
const std::string& cluster_weights_key = action.first;
auto& new_cluster_names_info =
new_weighted_cluster_index_map[cluster_names_key];
auto& old_cluster_weights_map =
weighted_cluster_index_map_[cluster_names_key].cluster_weights_map;
auto old_cluster_weights_it = old_cluster_weights_map.begin();
if (old_cluster_weights_it != old_cluster_weights_map.end()) {
// There is something to reuse: this action uses the same set
// of clusters as a previous action and that action name is not
// already taken.
new_cluster_names_info.cluster_weights_map[cluster_weights_key] =
old_cluster_weights_it->second;
// Remove the name so that it cannot be reused again.
old_cluster_weights_map.erase(old_cluster_weights_it);
} else {
// There is nothing to reuse; take the next index to use and
// increment it.
new_cluster_names_info.cluster_weights_map[cluster_weights_key] =
new_cluster_names_info.next_index++;
}
}
weighted_cluster_index_map_ = std::move(new_weighted_cluster_index_map);
}
grpc_error* XdsClient::CreateServiceConfig(
const XdsApi::RdsUpdate& rds_update,
RefCountedPtr<ServiceConfig>* service_config) {
UpdateWeightedClusterIndexMap(rds_update);
std::vector<std::string> actions_vector;
std::vector<std::string> route_table;
std::set<std::string> actions_set;
for (const auto& route : rds_update.routes) {
const std::string action_name =
route.weighted_clusters.empty()
? route.cluster_name
: WeightedClustersActionName(route.weighted_clusters);
if (actions_set.find(action_name) == actions_set.end()) {
actions_set.emplace(action_name);
actions_vector.push_back(
route.weighted_clusters.empty()
? CreateServiceConfigActionCluster(action_name)
: CreateServiceConfigActionWeightedCluster(
action_name, route.weighted_clusters));
}
route_table.push_back(CreateServiceConfigRoute(
absl::StrFormat("%s:%s",
route.weighted_clusters.empty() ? "cds" : "weighted",
action_name),
route));
}
std::vector<std::string> config_parts;
config_parts.push_back(
"{\n"
" \"loadBalancingConfig\":[\n"
" { \"xds_routing_experimental\":{\n"
" \"actions\":{\n");
config_parts.push_back(absl::StrJoin(actions_vector, ",\n"));
config_parts.push_back(
" },\n"
" \"routes\":[\n");
config_parts.push_back(absl::StrJoin(route_table, ",\n"));
config_parts.push_back(
" ]\n"
" } }\n"
" ]\n"
"}");
std::string json = absl::StrJoin(config_parts, "");
grpc_error* error = GRPC_ERROR_NONE;
*service_config = ServiceConfig::Create(json.c_str(), &error);
return error;
}
XdsApi::ClusterLoadReportMap XdsClient::BuildLoadReportSnapshot(
bool send_all_clusters, const std::set<std::string>& clusters) {
XdsApi::ClusterLoadReportMap snapshot_map;
@ -2375,8 +2047,8 @@ XdsApi::ClusterLoadReportMap XdsClient::BuildLoadReportSnapshot(
}
void XdsClient::NotifyOnError(grpc_error* error) {
if (service_config_watcher_ != nullptr) {
service_config_watcher_->OnError(GRPC_ERROR_REF(error));
if (listener_watcher_ != nullptr) {
listener_watcher_->OnError(GRPC_ERROR_REF(error));
}
for (const auto& p : cluster_map_) {
const ClusterState& cluster_state = p.second;

@ -20,11 +20,11 @@
#include <grpc/support/port_platform.h>
#include <set>
#include <vector>
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "src/core/ext/filters/client_channel/service_config.h"
#include "src/core/ext/xds/xds_api.h"
#include "src/core/ext/xds/xds_bootstrap.h"
#include "src/core/ext/xds/xds_client_stats.h"
@ -33,6 +33,7 @@
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/work_serializer.h"
namespace grpc_core {
@ -41,13 +42,12 @@ extern TraceFlag xds_client_trace;
class XdsClient : public InternallyRefCounted<XdsClient> {
public:
// Service config watcher interface. Implemented by callers.
class ServiceConfigWatcherInterface {
// Listener data watcher interface. Implemented by callers.
class ListenerWatcherInterface {
public:
virtual ~ServiceConfigWatcherInterface() = default;
virtual ~ListenerWatcherInterface() = default;
virtual void OnServiceConfigChanged(
RefCountedPtr<ServiceConfig> service_config) = 0;
virtual void OnListenerChanged(XdsApi::LdsUpdate listener_data) = 0;
virtual void OnError(grpc_error* error) = 0;
@ -78,11 +78,14 @@ class XdsClient : public InternallyRefCounted<XdsClient> {
virtual void OnResourceDoesNotExist() = 0;
};
// gRPC client should populate server_name.
// gRPC server should populate listening_addresses.
// If *error is not GRPC_ERROR_NONE after construction, then there was
// an error initializing the client.
XdsClient(std::shared_ptr<WorkSerializer> work_serializer,
grpc_pollset_set* interested_parties, absl::string_view server_name,
std::unique_ptr<ServiceConfigWatcherInterface> watcher,
std::vector<grpc_resolved_address> listening_addresses,
std::unique_ptr<ListenerWatcherInterface> watcher,
const grpc_channel_args& channel_args, grpc_error** error);
~XdsClient();
@ -234,20 +237,6 @@ class XdsClient : public InternallyRefCounted<XdsClient> {
// Sends an error notification to all watchers.
void NotifyOnError(grpc_error* error);
// Returns the weighted_clusters action name to use from
// weighted_cluster_index_map_ for a WeightedClusters route action.
std::string WeightedClustersActionName(
const std::vector<XdsApi::RdsUpdate::RdsRoute::ClusterWeight>&
weighted_clusters);
// Updates weighted_cluster_index_map_ that will
// determine the names of the WeightedCluster actions for the current update.
void UpdateWeightedClusterIndexMap(const XdsApi::RdsUpdate& rds_update);
// Create the service config generated by the RdsUpdate.
grpc_error* CreateServiceConfig(const XdsApi::RdsUpdate& rds_update,
RefCountedPtr<ServiceConfig>* service_config);
XdsApi::ClusterLoadReportMap BuildLoadReportSnapshot(
bool send_all_clusters, const std::set<std::string>& clusters);
@ -267,8 +256,8 @@ class XdsClient : public InternallyRefCounted<XdsClient> {
XdsApi api_;
const std::string server_name_;
std::unique_ptr<ServiceConfigWatcherInterface> service_config_watcher_;
const std::vector<grpc_resolved_address> listening_addresses_;
std::unique_ptr<ListenerWatcherInterface> listener_watcher_;
// The channel for communicating with the xds server.
OrphanablePtr<ChannelState> chand_;
@ -285,22 +274,6 @@ class XdsClient : public InternallyRefCounted<XdsClient> {
LoadReportState>
load_report_map_;
// 2-level map to store WeightedCluster action names.
// Top level map is keyed by cluster names without weight like a_b_c; bottom
// level map is keyed by cluster names + weights like a10_b50_c40.
struct ClusterNamesInfo {
uint64_t next_index = 0;
std::map<std::string /*cluster names + weights*/,
uint64_t /*policy index number*/>
cluster_weights_map;
};
using WeightedClusterIndexMap =
std::map<std::string /*cluster names*/, ClusterNamesInfo>;
// Cache of action names for WeightedCluster targets in the current
// service config.
WeightedClusterIndexMap weighted_cluster_index_map_;
bool shutting_down_ = false;
};

@ -415,26 +415,33 @@ grpc_call* grpc_channel_create_pollset_set_call(
namespace grpc_core {
RegisteredCall::RegisteredCall(const char* method, const char* host) {
path = grpc_mdelem_from_slices(GRPC_MDSTR_PATH,
grpc_core::ExternallyManagedSlice(method));
authority =
host ? grpc_mdelem_from_slices(GRPC_MDSTR_AUTHORITY,
grpc_core::ExternallyManagedSlice(host))
: GRPC_MDNULL;
}
RegisteredCall::RegisteredCall(const char* method_arg, const char* host_arg)
: method(method_arg != nullptr ? method_arg : ""),
host(host_arg != nullptr ? host_arg : ""),
path(grpc_mdelem_from_slices(
GRPC_MDSTR_PATH, grpc_core::ExternallyManagedSlice(method.c_str()))),
authority(!host.empty()
? grpc_mdelem_from_slices(
GRPC_MDSTR_AUTHORITY,
grpc_core::ExternallyManagedSlice(host.c_str()))
: GRPC_MDNULL) {}
// TODO(vjpai): Delete copy-constructor when allowed by all supported compilers.
RegisteredCall::RegisteredCall(const RegisteredCall& other) {
path = other.path;
authority = other.authority;
GRPC_MDELEM_REF(path);
GRPC_MDELEM_REF(authority);
}
RegisteredCall::RegisteredCall(RegisteredCall&& other) noexcept {
path = other.path;
authority = other.authority;
RegisteredCall::RegisteredCall(const RegisteredCall& other)
: RegisteredCall(other.method.c_str(), other.host.c_str()) {}
RegisteredCall::RegisteredCall(RegisteredCall&& other) noexcept
: method(std::move(other.method)),
host(std::move(other.host)),
path(grpc_mdelem_from_slices(
GRPC_MDSTR_PATH, grpc_core::ExternallyManagedSlice(method.c_str()))),
authority(!host.empty()
? grpc_mdelem_from_slices(
GRPC_MDSTR_AUTHORITY,
grpc_core::ExternallyManagedSlice(host.c_str()))
: GRPC_MDNULL) {
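// This object's path and authority were rebuilt above from the moved-in
// method/host strings, so release the source's mdelems and null them out so
// that its destructor has nothing left to unref.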
GRPC_MDELEM_UNREF(other.path);
GRPC_MDELEM_UNREF(other.authority);
other.path = GRPC_MDNULL;
other.authority = GRPC_MDNULL;
}
@ -457,13 +464,14 @@ void* grpc_channel_register_call(grpc_channel* channel, const char* method,
grpc_core::MutexLock lock(&channel->registration_table->mu);
channel->registration_table->method_registration_attempts++;
auto key = std::make_pair(host, method);
auto key = std::make_pair(std::string(host != nullptr ? host : ""),
std::string(method != nullptr ? method : ""));
auto rc_posn = channel->registration_table->map.find(key);
if (rc_posn != channel->registration_table->map.end()) {
return &rc_posn->second;
}
auto insertion_result = channel->registration_table->map.insert(
{key, grpc_core::RegisteredCall(method, host)});
{std::move(key), grpc_core::RegisteredCall(method, host)});
return &insertion_result.first->second;
}

@ -69,21 +69,28 @@ void grpc_channel_update_call_size_estimate(grpc_channel* channel, size_t size);
namespace grpc_core {
struct RegisteredCall {
// The method and host are kept as part of this struct just to manage their
// lifetime since they must outlive the mdelem contents.
std::string method;
std::string host;
grpc_mdelem path;
grpc_mdelem authority;
explicit RegisteredCall(const char* method, const char* host);
explicit RegisteredCall(const char* method_arg, const char* host_arg);
// TODO(vjpai): delete copy constructor once all supported compilers allow
// std::map value_type to be MoveConstructible.
RegisteredCall(const RegisteredCall& other);
RegisteredCall(RegisteredCall&& other) noexcept;
RegisteredCall& operator=(const RegisteredCall&) = delete;
RegisteredCall& operator=(RegisteredCall&&) = delete;
~RegisteredCall();
};
struct CallRegistrationTable {
grpc_core::Mutex mu;
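// Keyed by the (host, method) pair passed to grpc_channel_register_call(),
// stored as owned std::strings so that lookups compare the strings by value
// rather than by pointer identity.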
std::map<std::pair<const char*, const char*>, RegisteredCall>
std::map<std::pair<std::string, std::string>, RegisteredCall>
map /* GUARDED_BY(mu) */;
int method_registration_attempts /* GUARDED_BY(mu) */ = 0;
};

@ -100,7 +100,7 @@ module GRPC
channel_args: {},
interceptors: [])
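# Pass a copy of the channel args so that any additions made internally
# while setting up the channel do not leak back into the caller's hash and
# accumulate across stubs that share it.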
@ch = ClientStub.setup_channel(channel_override, host, creds,
channel_args)
channel_args.dup)
alt_host = channel_args[Core::Channel::SSL_TARGET]
@host = alt_host.nil? ? host : alt_host
@propagate_mask = propagate_mask

@ -0,0 +1,74 @@
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require 'spec_helper'
# A test service that echoes back the user agent reported by its peer.
class UserAgentEchoService
include GRPC::GenericService
rpc :an_rpc, EchoMsg, EchoMsg
def an_rpc(_req, call)
EchoMsg.new(msg: call.metadata['user-agent'])
end
end
UserAgentEchoServiceStub = UserAgentEchoService.rpc_stub_class
describe 'user agent' do
RpcServer = GRPC::RpcServer
before(:all) do
server_opts = {
poll_period: 1
}
@srv = new_rpc_server_for_testing(**server_opts)
@port = @srv.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
@srv.handle(UserAgentEchoService)
@srv_thd = Thread.new { @srv.run }
@srv.wait_till_running
end
after(:all) do
expect(@srv.stopped?).to be(false)
@srv.stop
@srv_thd.join
end
it 'client sends expected user agent' do
stub = UserAgentEchoServiceStub.new("localhost:#{@port}",
:this_channel_is_insecure,
{})
response = stub.an_rpc(EchoMsg.new)
expected_user_agent_prefix = "grpc-ruby/#{GRPC::VERSION}"
expect(response.msg.start_with?(expected_user_agent_prefix)).to be true
# check that the expected user agent prefix occurs in the real user agent exactly once
expect(response.msg.split(expected_user_agent_prefix).size).to eq 2
end
it 'user agent header does not grow when the same channel args hash is used across multiple stubs' do
shared_channel_args_hash = {}
10.times do
stub = UserAgentEchoServiceStub.new("localhost:#{@port}",
:this_channel_is_insecure,
channel_args: shared_channel_args_hash)
response = stub.an_rpc(EchoMsg.new)
puts "got echo response: #{response.msg}"
expected_user_agent_prefix = "grpc-ruby/#{GRPC::VERSION}"
expect(response.msg.start_with?(expected_user_agent_prefix)).to be true
# check that the expected user agent prefix occurs in the real user agent exactly once
expect(response.msg.split(expected_user_agent_prefix).size).to eq 2
end
end
end

@ -0,0 +1,5 @@
#=================
# Install cmake
# Note that this step should only be used for distributions that have a new enough cmake to satisfy gRPC's cmake version requirement.
RUN apt-get update && apt-get install -y cmake && apt-get clean

@ -20,6 +20,7 @@
<%include file="../../python_deps.include"/>
<%include file="../../gcp_api_libraries.include"/>
<%include file="../../cxx_deps.include"/>
<%include file="../../cmake.include"/>
<%include file="../../run_tests_addons.include"/>
# Define the default command.

@ -1,32 +0,0 @@
%YAML 1.2
--- |
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is the base Docker image we use for running tests on RBE
FROM gcr.io/cloud-marketplace/google/rbe-debian8@sha256:1ede2a929b44d629ec5abe86eee6d7ffea1d5a4d247489a8867d46cfde3e38bd
RUN sed -i '/deb http:\/\/httpredir.debian.org\/debian jessie-updates main/d' /etc/apt/sources.list
<%include file="../../apt_get_basic.include"/>
<%include file="../../python_deps.include"/>
<%include file="../../gcp_api_libraries.include"/>
#=================
# C++ dependencies (purposely excluding Clang because it's part of the base image)
RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev && apt-get clean
# Link llvm-symbolizer to where our test scripts expect to find it
RUN ln -s /usr/local/bin/llvm-symbolizer /usr/bin/llvm-symbolizer
# Define the default command.
CMD ["bash"]

@ -1,25 +0,0 @@
%YAML 1.2
--- |
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:14.04
<%include file="../../apt_get_basic.include"/>
<%include file="../../python_deps.include"/>
<%include file="../../gcp_api_libraries.include"/>
<%include file="../../cxx_deps.include"/>
<%include file="../../run_tests_addons.include"/>
# Define the default command.
CMD ["bash"]

@ -20,6 +20,7 @@
<%include file="../../python_deps.include"/>
<%include file="../../gcp_api_libraries.include"/>
<%include file="../../cxx_deps.include"/>
<%include file="../../cmake.include"/>
<%include file="../../run_tests_addons.include"/>
# The clang-3.6 symlink for the default clang version was added

@ -20,6 +20,7 @@
<%include file="../../python_deps.include"/>
<%include file="../../gcp_api_libraries.include"/>
<%include file="../../cxx_deps.include"/>
<%include file="../../cmake.include"/>
<%include file="../../run_tests_addons.include"/>
# Define the default command.

@ -321,12 +321,22 @@ TEST(Pollers, TestReadabilityNotificationsDontGetStrandedOnOneCq) {
kSharedUnconnectableAddress.c_str());
std::vector<std::thread> threads;
threads.reserve(kNumCalls);
std::vector<std::unique_ptr<TestServer>> test_servers;
// Instantiate servers inline here, so that we get port allocation out of the
// way and don't depend on it during the actual test. It can sometimes take
// time to allocate kNumCalls ports from the port server, and we don't want to
// hit test timeouts because of that.
test_servers.reserve(kNumCalls);
for (int i = 0; i < kNumCalls; i++) {
test_servers.push_back(absl::make_unique<TestServer>());
}
for (int i = 0; i < kNumCalls; i++) {
auto test_server = test_servers[i].get();
threads.push_back(std::thread([kSharedUnconnectableAddress,
&ping_pong_round, &ping_pongs_done,
&ping_pong_round_mu, &ping_pong_round_cv]() {
auto test_server = absl::make_unique<TestServer>();
gpr_log(GPR_DEBUG, "created test_server with address:%s",
&ping_pong_round_mu, &ping_pong_round_cv,
test_server]() {
gpr_log(GPR_DEBUG, "using test_server with address:%s",
test_server->address().c_str());
std::vector<grpc_arg> args;
grpc_arg service_config_arg;

@ -423,51 +423,61 @@ TEST_P(ClientCallbackEnd2endTest, SimpleRpcExpectedError) {
TEST_P(ClientCallbackEnd2endTest, SimpleRpcUnderLockNested) {
MAYBE_SKIP_TEST;
ResetStub();
std::mutex mu1, mu2, mu3;
std::condition_variable cv;
bool done = false;
EchoRequest request1, request2, request3;
request1.set_message("Hello locked world1.");
request2.set_message("Hello locked world2.");
request3.set_message("Hello locked world3.");
EchoResponse response1, response2, response3;
ClientContext cli_ctx1, cli_ctx2, cli_ctx3;
{
std::lock_guard<std::mutex> l(mu1);
// The request/response state associated with an RPC and the synchronization
// variables needed to notify its completion.
struct RpcState {
std::mutex mu;
std::condition_variable cv;
bool done = false;
EchoRequest request;
EchoResponse response;
ClientContext cli_ctx;
RpcState() = default;
~RpcState() {
// Grab the lock to prevent destruction while another thread is still
// holding the lock.
std::lock_guard<std::mutex> lock(mu);
}
};
std::vector<RpcState> rpc_state(3);
for (size_t i = 0; i < rpc_state.size(); i++) {
std::string message = "Hello locked world";
message += std::to_string(i);
rpc_state[i].request.set_message(message);
}
// Grab a lock and then start an RPC whose callback grabs the same lock and
// then calls this function to start the next RPC under lock (up to a limit of
// the size of the rpc_state vector).
std::function<void(int)> nested_call = [this, &nested_call,
&rpc_state](int index) {
std::lock_guard<std::mutex> l(rpc_state[index].mu);
stub_->experimental_async()->Echo(
&cli_ctx1, &request1, &response1,
[this, &mu1, &mu2, &mu3, &cv, &done, &request1, &request2, &request3,
&response1, &response2, &response3, &cli_ctx2, &cli_ctx3](Status s1) {
std::lock_guard<std::mutex> l1(mu1);
EXPECT_TRUE(s1.ok());
EXPECT_EQ(request1.message(), response1.message());
// start the second level of nesting
std::unique_lock<std::mutex> l2(mu2);
this->stub_->experimental_async()->Echo(
&cli_ctx2, &request2, &response2,
[this, &mu2, &mu3, &cv, &done, &request2, &request3, &response2,
&response3, &cli_ctx3](Status s2) {
std::lock_guard<std::mutex> l2(mu2);
EXPECT_TRUE(s2.ok());
EXPECT_EQ(request2.message(), response2.message());
// start the third level of nesting
std::lock_guard<std::mutex> l3(mu3);
stub_->experimental_async()->Echo(
&cli_ctx3, &request3, &response3,
[&mu3, &cv, &done, &request3, &response3](Status s3) {
std::lock_guard<std::mutex> l(mu3);
EXPECT_TRUE(s3.ok());
EXPECT_EQ(request3.message(), response3.message());
done = true;
cv.notify_all();
});
});
&rpc_state[index].cli_ctx, &rpc_state[index].request,
&rpc_state[index].response,
[index, &nested_call, &rpc_state](Status s) {
std::lock_guard<std::mutex> l1(rpc_state[index].mu);
EXPECT_TRUE(s.ok());
rpc_state[index].done = true;
rpc_state[index].cv.notify_all();
// Call the next level of nesting if possible
if (index + 1 < rpc_state.size()) {
nested_call(index + 1);
}
});
}
};
std::unique_lock<std::mutex> l(mu3);
while (!done) {
cv.wait(l);
nested_call(0);
// Wait for completion notifications from all RPCs. Order doesn't matter.
for (RpcState& state : rpc_state) {
std::unique_lock<std::mutex> l(state.mu);
while (!state.done) {
state.cv.wait(l);
}
EXPECT_EQ(state.request.message(), state.response.message());
}
}

@ -287,10 +287,17 @@ void TestCancelDuringActiveQuery(
query_timeout_setting);
grpc_channel_args* client_args = nullptr;
grpc_status_code expected_status_code = GRPC_STATUS_OK;
gpr_timespec rpc_deadline;
if (query_timeout_setting == NONE) {
// The RPC deadline should go off well before the DNS resolution
// timeout fires.
expected_status_code = GRPC_STATUS_DEADLINE_EXCEEDED;
// Use the default DNS resolution timeout (which is over one minute).
client_args = nullptr;
rpc_deadline = grpc_timeout_milliseconds_to_deadline(100);
} else if (query_timeout_setting == SHORT) {
// The DNS resolution timeout should fire well before the
// RPC's deadline expires.
expected_status_code = GRPC_STATUS_UNAVAILABLE;
grpc_arg arg;
arg.type = GRPC_ARG_INTEGER;
@ -298,13 +305,21 @@ void TestCancelDuringActiveQuery(
arg.value.integer =
1; // Set this shorter than the call deadline so that it goes off.
client_args = grpc_channel_args_copy_and_add(nullptr, &arg, 1);
// Set the deadline high enough such that if we hit this and get
// a deadline exceeded status code, then we are confident that there's
// a bug causing cancellation of DNS resolutions to not happen in a timely
// manner.
rpc_deadline = grpc_timeout_seconds_to_deadline(10);
} else if (query_timeout_setting == ZERO) {
// The RPC deadline should go off well before the DNS resolution
// timeout fires.
expected_status_code = GRPC_STATUS_DEADLINE_EXCEEDED;
grpc_arg arg;
arg.type = GRPC_ARG_INTEGER;
arg.key = const_cast<char*>(GRPC_ARG_DNS_ARES_QUERY_TIMEOUT_MS);
arg.value.integer = 0; // Set this to zero to disable query timeouts.
client_args = grpc_channel_args_copy_and_add(nullptr, &arg, 1);
rpc_deadline = grpc_timeout_milliseconds_to_deadline(100);
} else {
abort();
}
@ -312,10 +327,9 @@ void TestCancelDuringActiveQuery(
grpc_insecure_channel_create(client_target.c_str(), client_args, nullptr);
grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
cq_verifier* cqv = cq_verifier_create(cq);
gpr_timespec deadline = grpc_timeout_milliseconds_to_deadline(100);
grpc_call* call = grpc_channel_create_call(
client, nullptr, GRPC_PROPAGATE_DEFAULTS, cq,
grpc_slice_from_static_string("/foo"), nullptr, deadline, nullptr);
grpc_slice_from_static_string("/foo"), nullptr, rpc_deadline, nullptr);
GPR_ASSERT(call);
grpc_metadata_array initial_metadata_recv;
grpc_metadata_array trailing_metadata_recv;

@ -0,0 +1,51 @@
// Copyright 2019 Istio Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.security.meshca.v1;
import "google/protobuf/duration.proto";
option java_multiple_files = true;
option java_outer_classname = "MeshCaProto";
option java_package = "com.google.security.meshca.v1";
// Certificate request message.
message MeshCertificateRequest {
// The request ID must be a valid UUID with the exception that zero UUID is
// not supported (00000000-0000-0000-0000-000000000000).
string request_id = 1;
// PEM-encoded certificate request.
string csr = 2;
// Optional: requested certificate validity period.
google.protobuf.Duration validity = 3;
// Reserved 4
}
// Certificate response message.
message MeshCertificateResponse {
// PEM-encoded certificate chain.
// Leaf cert is element '0'. Root cert is element 'n'.
repeated string cert_chain = 1;
}
// Service for managing certificates issued by the CSM CA.
service MeshCertificateService {
// Using provided CSR, returns a signed certificate that represents a GCP
// service account identity.
rpc CreateCertificate(MeshCertificateRequest)
returns (MeshCertificateResponse) {
}
}

@ -50,6 +50,6 @@ def mako_plugin(dictionary):
'platforms': ['mac', 'linux'],
'ci_platforms': ['linux'],
'flaky': False,
'language': 'c',
'language': 'c++',
'cpu_cost': 0.1,
})

@ -106,6 +106,7 @@ proto_files=( \
"src/proto/grpc/gcp/transport_security_common.proto" \
"src/proto/grpc/health/v1/health.proto" \
"src/proto/grpc/lb/v1/load_balancer.proto" \
"third_party/istio/security/proto/providers/google/meshca.proto" \
"udpa/data/orca/v1/orca_load_report.proto" \
"udpa/annotations/migrate.proto" \
"udpa/annotations/sensitive.proto" \

@ -72,6 +72,12 @@ RUN pip install --upgrade google-api-python-client oauth2client
# C++ dependencies
RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
#=================
# Install cmake
# Note that this step should only be used for distributions that have a new enough cmake to satisfy gRPC's cmake version requirement.
RUN apt-get update && apt-get install -y cmake && apt-get clean
RUN mkdir /var/local/jenkins

@ -1,81 +0,0 @@
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is the base Docker image we use for running tests on RBE
FROM gcr.io/cloud-marketplace/google/rbe-debian8@sha256:1ede2a929b44d629ec5abe86eee6d7ffea1d5a4d247489a8867d46cfde3e38bd
RUN sed -i '/deb http:\/\/httpredir.debian.org\/debian jessie-updates main/d' /etc/apt/sources.list
# Install Git and basic packages.
RUN apt-get update && apt-get install -y \
autoconf \
autotools-dev \
build-essential \
bzip2 \
ccache \
curl \
dnsutils \
gcc \
gcc-multilib \
git \
golang \
gyp \
lcov \
libc6 \
libc6-dbg \
libc6-dev \
libgtest-dev \
libtool \
make \
perl \
strace \
python-dev \
python-setuptools \
python-yaml \
telnet \
unzip \
wget \
zip && apt-get clean
#================
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
#====================
# Python dependencies
# Install dependencies
RUN apt-get update && apt-get install -y \
python-all-dev \
python3-all-dev \
python-setuptools
# Install Python packages from PyPI
RUN curl https://bootstrap.pypa.io/get-pip.py | python2.7
RUN pip install --upgrade pip==19.3.1
RUN pip install virtualenv==16.7.9
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.2.post1 six==1.10.0 twisted==17.5.0
# Google Cloud platform API libraries
RUN pip install --upgrade google-api-python-client oauth2client
#=================
# C++ dependencies (purposely excluding Clang because it's part of the base image)
RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev && apt-get clean
# Link llvm-symbolizer to where our test scripts expect to find it
RUN ln -s /usr/local/bin/llvm-symbolizer /usr/bin/llvm-symbolizer
# Define the default command.
CMD ["bash"]

@ -1,79 +0,0 @@
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:14.04
# Install Git and basic packages.
RUN apt-get update && apt-get install -y \
autoconf \
autotools-dev \
build-essential \
bzip2 \
ccache \
curl \
dnsutils \
gcc \
gcc-multilib \
git \
golang \
gyp \
lcov \
libc6 \
libc6-dbg \
libc6-dev \
libgtest-dev \
libtool \
make \
perl \
strace \
python-dev \
python-setuptools \
python-yaml \
telnet \
unzip \
wget \
zip && apt-get clean
#================
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
#====================
# Python dependencies
# Install dependencies
RUN apt-get update && apt-get install -y \
python-all-dev \
python3-all-dev \
python-setuptools
# Install Python packages from PyPI
RUN curl https://bootstrap.pypa.io/get-pip.py | python2.7
RUN pip install --upgrade pip==19.3.1
RUN pip install virtualenv==16.7.9
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.2.post1 six==1.10.0 twisted==17.5.0
# Google Cloud platform API libraries
RUN pip install --upgrade google-api-python-client oauth2client
#=================
# C++ dependencies
RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
RUN mkdir /var/local/jenkins
# Define the default command.
CMD ["bash"]

@ -72,6 +72,12 @@ RUN pip install --upgrade google-api-python-client oauth2client
# C++ dependencies
RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
#=================
# Install cmake
# Note that this step should only be used for distributions that have a new enough cmake to satisfy gRPC's cmake version requirement.
RUN apt-get update && apt-get install -y cmake && apt-get clean
RUN mkdir /var/local/jenkins

@ -72,6 +72,12 @@ RUN pip install --upgrade google-api-python-client oauth2client
# C++ dependencies
RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
#=================
# Install cmake
# Note that this step should only be used for distributions that have a new enough cmake to satisfy gRPC's cmake version requirement.
RUN apt-get update && apt-get install -y cmake && apt-get clean
RUN mkdir /var/local/jenkins

File diff suppressed because it is too large

@ -14,9 +14,6 @@
setlocal
set GENERATOR=%1
set ARCHITECTURE=%2
cd /d %~dp0\..\..\..
mkdir cmake
@ -24,7 +21,7 @@ cd cmake
mkdir build
cd build
cmake -G %GENERATOR% -A %ARCHITECTURE% -DgRPC_BUILD_TESTS=ON ../.. || goto :error
cmake -DgRPC_BUILD_TESTS=ON %* ../.. || goto :error
endlocal

@ -21,4 +21,4 @@ mkdir -p cmake/build
cd cmake/build
# MSBUILD_CONFIG's values are suitable for cmake as well
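# Any extra arguments passed to this script (e.g. the cmake configure args
# chosen by run_tests.py) are forwarded to the cmake invocation via "$@".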
cmake -DgRPC_BUILD_TESTS=ON -DCMAKE_BUILD_TYPE="${MSBUILD_CONFIG}" ../..
cmake -DgRPC_BUILD_TESTS=ON -DCMAKE_BUILD_TYPE="${MSBUILD_CONFIG}" "$@" ../..

@ -65,41 +65,6 @@ _POLLING_STRATEGIES = {
'mac': ['poll'],
}
BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu')
def get_bqtest_data(limit=None):
import big_query_utils
bq = big_query_utils.create_big_query()
query = """
SELECT
filtered_test_name,
SUM(result != 'PASSED' AND result != 'SKIPPED') > 0 as flaky,
MAX(cpu_measured) + 0.01 as cpu
FROM (
SELECT
REGEXP_REPLACE(test_name, r'/\d+', '') AS filtered_test_name,
result, cpu_measured
FROM
[grpc-testing:jenkins_test_results.aggregate_results]
WHERE
timestamp >= DATE_ADD(CURRENT_DATE(), -1, "WEEK")
AND platform = '""" + platform_string() + """'
AND NOT REGEXP_MATCH(job_name, '.*portability.*') )
GROUP BY
filtered_test_name"""
if limit:
query += " limit {}".format(limit)
query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
page = bq.jobs().getQueryResults(
pageToken=None, **query_job['jobReference']).execute(num_retries=3)
test_data = [
BigQueryTestData(row['f'][0]['v'], row['f'][1]['v'] == 'true',
float(row['f'][2]['v'])) for row in page['rows']
]
return test_data
def platform_string():
return jobset.platform_string()
@ -272,24 +237,34 @@ class CLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
self._make_options = []
self._use_cmake = True
if self.platform == 'windows':
_check_compiler(
self.args.compiler,
['default', 'cmake', 'cmake_vs2015', 'cmake_vs2017'])
_check_arch(self.args.arch, ['default', 'x64', 'x86'])
self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
self._use_cmake = True
self._make_options = []
elif self.args.compiler == 'cmake':
_check_arch(self.args.arch, ['default'])
self._use_cmake = True
self._docker_distro = 'jessie'
self._make_options = []
cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
self._cmake_configure_extra_args = [
'-G', cmake_generator_option, '-A', cmake_arch_option
]
else:
self._use_cmake = False
self._docker_distro, self._make_options = self._compiler_options(
if self.platform == 'linux':
# Allow all the known architectures. _check_arch_option has already checked that we're not doing
# something illegal when not running under docker.
_check_arch(self.args.arch, ['default', 'x64', 'x86'])
else:
_check_arch(self.args.arch, ['default'])
self._docker_distro, self._cmake_configure_extra_args = self._compiler_options(
self.args.use_docker, self.args.compiler)
if self.args.arch == 'x86':
# disable boringssl asm optimizations when on x86
# see https://github.com/grpc/grpc/blob/b5b8578b3f8b4a9ce61ed6677e19d546e43c5c68/tools/run_tests/artifacts/artifact_targets.py#L253
self._cmake_configure_extra_args.append('-DOPENSSL_NO_ASM=ON')
if args.iomgr_platform == "uv":
cflags = '-DGRPC_UV -DGRPC_CUSTOM_IOMGR_THREAD_CHECK -DGRPC_CUSTOM_SOCKET '
try:
@ -457,12 +432,11 @@ class CLanguage(object):
def pre_build_steps(self):
if self.platform == 'windows':
return [[
'tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
self._cmake_generator_option, self._cmake_arch_option
]]
return [['tools\\run_tests\\helper_scripts\\pre_build_cmake.bat'] +
self._cmake_configure_extra_args]
elif self._use_cmake:
return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
return [['tools/run_tests/helper_scripts/pre_build_cmake.sh'] +
self._cmake_configure_extra_args]
else:
return []
@ -481,36 +455,20 @@ class CLanguage(object):
else:
return 'Makefile'
def _clang_make_options(self, version_suffix=''):
if self.args.config == 'ubsan':
return [
'CC=clang%s' % version_suffix,
'CXX=clang++%s' % version_suffix,
'LD=clang++%s' % version_suffix,
'LDXX=clang++%s' % version_suffix
]
return [
'CC=clang%s' % version_suffix,
'CXX=clang++%s' % version_suffix,
'LD=clang%s' % version_suffix,
'LDXX=clang++%s' % version_suffix
]
def _gcc_make_options(self, version_suffix):
def _clang_cmake_configure_extra_args(self, version_suffix=''):
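"""Returns cmake configure args that select clang as the C/C++ compiler."""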
return [
'CC=gcc%s' % version_suffix,
'CXX=g++%s' % version_suffix,
'LD=gcc%s' % version_suffix,
'LDXX=g++%s' % version_suffix
'-DCMAKE_C_COMPILER=clang%s' % version_suffix,
'-DCMAKE_CXX_COMPILER=clang++%s' % version_suffix,
]
def _compiler_options(self, use_docker, compiler):
"""Returns docker distro and make options to use for given compiler."""
"""Returns docker distro and cmake configure args to use for given compiler."""
if not use_docker and not _is_use_docker_child():
_check_compiler(compiler, ['default'])
# if not running under docker, we cannot ensure the right compiler version will be used,
# so we only allow the non-specific choices.
_check_compiler(compiler, ['default', 'cmake'])
if compiler == 'gcc4.9' or compiler == 'default':
if compiler == 'gcc4.9' or compiler == 'default' or compiler == 'cmake':
return ('jessie', [])
elif compiler == 'gcc5.3':
return ('ubuntu1604', [])
@ -520,21 +478,14 @@ class CLanguage(object):
return ('buster', [])
elif compiler == 'gcc_musl':
return ('alpine', [])
elif compiler == 'clang3.4':
# on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
return ('ubuntu1404', self._clang_make_options())
elif compiler == 'clang3.5':
return ('jessie', self._clang_make_options(version_suffix='-3.5'))
elif compiler == 'clang3.6':
return ('ubuntu1604',
self._clang_make_options(version_suffix='-3.6'))
self._clang_cmake_configure_extra_args(
version_suffix='-3.6'))
elif compiler == 'clang3.7':
return ('ubuntu1604',
self._clang_make_options(version_suffix='-3.7'))
elif compiler == 'clang7.0':
# clang++-7.0 alias doesn't exist and there are no other clang versions
# installed.
return ('sanitizers_jessie', self._clang_make_options())
self._clang_cmake_configure_extra_args(
version_suffix='-3.7'))
else:
raise Exception('Compiler %s not supported.' % compiler)
@ -1496,10 +1447,10 @@ argp.add_argument(
'--compiler',
choices=[
'default', 'gcc4.9', 'gcc5.3', 'gcc7.4', 'gcc8.3', 'gcc_musl',
'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7', 'clang7.0', 'python2.7',
'python3.5', 'python3.6', 'python3.7', 'python3.8', 'pypy', 'pypy3',
'python_alpine', 'all_the_cpythons', 'electron1.3', 'electron1.6',
'coreclr', 'cmake', 'cmake_vs2015', 'cmake_vs2017'
'clang3.6', 'clang3.7', 'python2.7', 'python3.5', 'python3.6',
'python3.7', 'python3.8', 'pypy', 'pypy3', 'python_alpine',
'all_the_cpythons', 'electron1.3', 'electron1.6', 'coreclr', 'cmake',
'cmake_vs2015', 'cmake_vs2017'
],
default='default',
help=
@ -1575,26 +1526,10 @@ argp.add_argument('--bq_result_table',
type=str,
nargs='?',
help='Upload test results to a specified BQ table.')
argp.add_argument(
'--auto_set_flakes',
default=False,
const=True,
action='store_const',
help=
'Allow repeated runs for tests that have been failing recently (based on BQ historical data).'
)
args = argp.parse_args()
flaky_tests = set()
shortname_to_cpu = {}
if args.auto_set_flakes:
try:
for test in get_bqtest_data():
if test.flaky: flaky_tests.add(test.name)
if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
except:
print("Unexpected error getting flaky tests: %s" %
traceback.format_exc())
if args.force_default_poller:
_POLLING_STRATEGIES = {}

@ -275,8 +275,8 @@ def _create_portability_test_jobs(extra_args=[],
# portability C and C++ on x64
for compiler in [
'gcc4.9', 'gcc5.3', 'gcc7.4', 'gcc8.3', 'gcc_musl', 'clang3.5',
'clang3.6', 'clang3.7', 'clang7.0'
'gcc4.9', 'gcc5.3', 'gcc7.4', 'gcc8.3', 'gcc_musl', 'clang3.6',
'clang3.7'
]:
test_jobs += _generate_jobs(languages=['c', 'c++'],
configs=['dbg'],
@ -339,26 +339,6 @@ def _create_portability_test_jobs(extra_args=[],
extra_args=extra_args,
timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
# TODO(zyc): Turn on this test after adding c-ares support on windows.
# C with the c-ares DNS resolver on Windows
# test_jobs += _generate_jobs(languages=['c'],
# configs=['dbg'], platforms=['windows'],
# labels=['portability', 'corelang'],
# extra_args=extra_args,
# extra_envs={'GRPC_DNS_RESOLVER': 'ares'})
# C and C++ build with cmake on Linux
# TODO(jtattermusch): some of the tests are failing, so we force --build_only
# to make sure it's buildable at least.
test_jobs += _generate_jobs(languages=['c', 'c++'],
configs=['dbg'],
platforms=['linux'],
arch='default',
compiler='cmake',
labels=['portability', 'corelang'],
extra_args=extra_args + ['--build_only'],
inner_jobs=inner_jobs)
test_jobs += _generate_jobs(languages=['python'],
configs=['dbg'],
platforms=['linux'],

@ -828,12 +828,40 @@ def test_path_matching(gcp, original_backend_service, instance_group,
{
"UnaryCall": alternate_backend_instances,
"EmptyCall": original_backend_instances
}),
(
# This test case is similar to the one above (but with route
# services swapped). This test has two routes (full_path and
# the default) to match EmptyCall, and both routes set
# alternative_backend_service as the action. This forces the
# client to handle duplicate Clusters in the RDS response.
[
{
'priority': 0,
# Prefix UnaryCall -> original_backend_service.
'matchRules': [{
'prefixMatch': '/grpc.testing.TestService/Unary'
}],
'service': original_backend_service.url
},
{
'priority': 1,
# FullPath EmptyCall -> alternate_backend_service.
'matchRules': [{
'fullPathMatch':
'/grpc.testing.TestService/EmptyCall'
}],
'service': alternate_backend_service.url
}
],
{
"UnaryCall": original_backend_instances,
"EmptyCall": alternate_backend_instances
})
]
for (route_rules, expected_instances) in test_cases:
logger.info('patching url map with %s -> alternative',
route_rules[0]['matchRules'])
logger.info('patching url map with %s', route_rules)
patch_url_map_backend_service(gcp,
original_backend_service,
route_rules=route_rules)
@ -846,8 +874,8 @@ def test_path_matching(gcp, original_backend_service, instance_group,
original_backend_instances + alternate_backend_instances,
_WAIT_FOR_STATS_SEC)
retry_count = 10
# Each attempt takes about 10 seconds, 10 retries is equivalent to 100
retry_count = 20
# Each attempt takes about 10 seconds, 20 retries is equivalent to 200
# seconds timeout.
for i in range(retry_count):
stats = get_client_stats(_NUM_TEST_RPCS, _WAIT_FOR_STATS_SEC)
@ -1718,6 +1746,21 @@ try:
metadata_to_send = '--metadata=""'
if test_case in _TESTS_TO_FAIL_ON_RPC_FAILURE:
# TODO(ericgribkoff) Unconditional wait is recommended by TD
# team when reusing backend resources after config changes
# between test cases, as we are doing here. This should address
# flakiness issues with these tests; other attempts to deflake
# (such as waiting for the first successful RPC before failing
# on any subsequent failures) were insufficient because, due to
# propagation delays, we may initially see an RPC succeed to the
# expected backends but due to a stale configuration: e.g., test
# A (1) routes traffic to MIG A, then (2) switches to MIG B,
# then (3) back to MIG A. Test B begins running and sees RPCs
# going to MIG A, as expected. However, due to propagation
# delays, Test B is actually seeing the stale config from step
# (1), and then fails when it gets update (2) unexpectedly
# switching to MIG B.
time.sleep(200)
fail_on_failed_rpc = '--fail_on_failed_rpc=true'
else:
fail_on_failed_rpc = '--fail_on_failed_rpc=false'
