Merge remote-tracking branch 'upstream/master' into release

pull/24259/head
Donna Dionne 4 years ago
commit bad6ee7676
  1. src/core/ext/filters/client_channel/lb_policy/xds/cds.cc (7)
  2. src/core/ext/filters/client_channel/lb_policy/xds/eds.cc (100)
  3. src/core/ext/xds/xds_api.cc (27)
  4. src/core/ext/xds/xds_api.h (6)
  5. src/proto/grpc/testing/xds/BUILD (1)
  6. src/proto/grpc/testing/xds/cds_for_test.proto (17)
  7. src/proto/grpc/testing/xds/v3/BUILD (1)
  8. src/proto/grpc/testing/xds/v3/cluster.proto (17)
  9. src/python/grpcio_tests/tests/tests.json (3)
  10. src/python/grpcio_tests/tests/unit/BUILD.bazel (9)
  11. src/python/grpcio_tests/tests/unit/_rpc_part_1_test.py (232)
  12. src/python/grpcio_tests/tests/unit/_rpc_part_2_test.py (426)
  13. src/python/grpcio_tests/tests/unit/_rpc_test.py (1006)
  14. src/python/grpcio_tests/tests/unit/_rpc_test_helpers.py (417)
  15. test/cpp/end2end/test_service_impl.h (10)
  16. test/cpp/end2end/xds_end2end_test.cc (73)

@ -314,15 +314,18 @@ void CdsLb::OnClusterChanged(XdsApi::CdsUpdate cluster_data) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_cds_lb_trace)) {
gpr_log(GPR_INFO,
"[cdslb %p] received CDS update from xds client %p: "
"eds_service_name=%s lrs_load_reporting_server_name=%s",
"eds_service_name=%s lrs_load_reporting_server_name=%s "
"max_concurrent_requests=%d",
this, xds_client_.get(), cluster_data.eds_service_name.c_str(),
cluster_data.lrs_load_reporting_server_name.has_value()
? cluster_data.lrs_load_reporting_server_name.value().c_str()
: "(unset)");
: "(unset)",
cluster_data.max_concurrent_requests);
}
// Construct config for child policy.
Json::Object child_config = {
{"clusterName", config_->cluster()},
{"max_concurrent_requests", cluster_data.max_concurrent_requests},
{"localityPickingPolicy",
Json::Array{
Json::Object{

@ -36,6 +36,7 @@
#include "src/core/ext/xds/xds_client.h"
#include "src/core/ext/xds/xds_client_stats.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/timer.h"
@ -58,13 +59,15 @@ class EdsLbConfig : public LoadBalancingPolicy::Config {
public:
EdsLbConfig(std::string cluster_name, std::string eds_service_name,
absl::optional<std::string> lrs_load_reporting_server_name,
Json locality_picking_policy, Json endpoint_picking_policy)
Json locality_picking_policy, Json endpoint_picking_policy,
uint32_t max_concurrent_requests)
: cluster_name_(std::move(cluster_name)),
eds_service_name_(std::move(eds_service_name)),
lrs_load_reporting_server_name_(
std::move(lrs_load_reporting_server_name)),
locality_picking_policy_(std::move(locality_picking_policy)),
endpoint_picking_policy_(std::move(endpoint_picking_policy)) {}
endpoint_picking_policy_(std::move(endpoint_picking_policy)),
max_concurrent_requests_(max_concurrent_requests) {}
const char* name() const override { return kEds; }
@ -79,6 +82,9 @@ class EdsLbConfig : public LoadBalancingPolicy::Config {
const Json& endpoint_picking_policy() const {
return endpoint_picking_policy_;
}
const uint32_t max_concurrent_requests() const {
return max_concurrent_requests_;
}
private:
std::string cluster_name_;
@ -86,6 +92,7 @@ class EdsLbConfig : public LoadBalancingPolicy::Config {
absl::optional<std::string> lrs_load_reporting_server_name_;
Json locality_picking_policy_;
Json endpoint_picking_policy_;
uint32_t max_concurrent_requests_;
};
// EDS LB policy.
@ -145,14 +152,16 @@ class EdsLb : public LoadBalancingPolicy {
// A picker that handles drops.
class DropPicker : public SubchannelPicker {
public:
explicit DropPicker(EdsLb* eds_policy);
explicit DropPicker(RefCountedPtr<EdsLb> eds_policy);
PickResult Pick(PickArgs args) override;
private:
RefCountedPtr<EdsLb> eds_policy_;
RefCountedPtr<XdsApi::EdsUpdate::DropConfig> drop_config_;
RefCountedPtr<XdsClusterDropStats> drop_stats_;
RefCountedPtr<ChildPickerWrapper> child_picker_;
uint32_t max_concurrent_requests_;
};
class Helper : public ChannelControlHelper {
@ -236,6 +245,8 @@ class EdsLb : public LoadBalancingPolicy {
RefCountedPtr<XdsApi::EdsUpdate::DropConfig> drop_config_;
RefCountedPtr<XdsClusterDropStats> drop_stats_;
// Current number of concurrent requests.
Atomic<uint32_t> concurrent_requests_{0};
OrphanablePtr<LoadBalancingPolicy> child_policy_;
@ -249,13 +260,16 @@ class EdsLb : public LoadBalancingPolicy {
// EdsLb::DropPicker
//
EdsLb::DropPicker::DropPicker(EdsLb* eds_policy)
: drop_config_(eds_policy->drop_config_),
drop_stats_(eds_policy->drop_stats_),
child_picker_(eds_policy->child_picker_) {
EdsLb::DropPicker::DropPicker(RefCountedPtr<EdsLb> eds_policy)
: eds_policy_(std::move(eds_policy)),
drop_config_(eds_policy_->drop_config_),
drop_stats_(eds_policy_->drop_stats_),
child_picker_(eds_policy_->child_picker_),
max_concurrent_requests_(
eds_policy_->config_->max_concurrent_requests()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) {
gpr_log(GPR_INFO, "[edslb %p] constructed new drop picker %p", eds_policy,
this);
gpr_log(GPR_INFO, "[edslb %p] constructed new drop picker %p",
eds_policy_.get(), this);
}
}
@ -268,6 +282,17 @@ EdsLb::PickResult EdsLb::DropPicker::Pick(PickArgs args) {
result.type = PickResult::PICK_COMPLETE;
return result;
}
// Check and see if we exceeded the max concurrent requests count.
uint32_t current = eds_policy_->concurrent_requests_.FetchAdd(1);
if (current >= max_concurrent_requests_) {
eds_policy_->concurrent_requests_.FetchSub(1);
if (drop_stats_ != nullptr) {
drop_stats_->AddUncategorizedDrops();
}
PickResult result;
result.type = PickResult::PICK_COMPLETE;
return result;
}
// If we're not dropping all calls, we should always have a child picker.
if (child_picker_ == nullptr) { // Should never happen.
PickResult result;
@ -276,10 +301,30 @@ EdsLb::PickResult EdsLb::DropPicker::Pick(PickArgs args) {
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"eds drop picker not given any child picker"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_INTERNAL);
eds_policy_->concurrent_requests_.FetchSub(1);
return result;
}
// Not dropping, so delegate to child's picker.
return child_picker_->Pick(args);
PickResult result = child_picker_->Pick(args);
if (result.type == PickResult::PICK_COMPLETE) {
EdsLb* eds_policy = static_cast<EdsLb*>(
eds_policy_->Ref(DEBUG_LOCATION, "DropPickPicker+call").release());
auto original_recv_trailing_metadata_ready =
result.recv_trailing_metadata_ready;
result.recv_trailing_metadata_ready =
[original_recv_trailing_metadata_ready, eds_policy](
grpc_error* error, MetadataInterface* metadata,
CallState* call_state) {
if (original_recv_trailing_metadata_ready != nullptr) {
original_recv_trailing_metadata_ready(error, metadata, call_state);
}
eds_policy->concurrent_requests_.FetchSub(1);
eds_policy->Unref(DEBUG_LOCATION, "DropPickPicker+call");
};
} else {
eds_policy_->concurrent_requests_.FetchSub(1);
}
return result;
}
//
@ -469,9 +514,14 @@ void EdsLb::UpdateLocked(UpdateArgs args) {
grpc_channel_args_destroy(args_);
args_ = args.args;
args.args = nullptr;
const bool lrs_server_changed =
is_initial_update || config_->lrs_load_reporting_server_name() !=
old_config->lrs_load_reporting_server_name();
const bool max_concurrent_requests_changed =
is_initial_update || config_->max_concurrent_requests() !=
old_config->max_concurrent_requests();
// Update drop stats for load reporting if needed.
if (is_initial_update || config_->lrs_load_reporting_server_name() !=
old_config->lrs_load_reporting_server_name()) {
if (lrs_server_changed) {
drop_stats_.reset();
if (config_->lrs_load_reporting_server_name().has_value()) {
const auto key = GetLrsClusterKey();
@ -479,6 +529,8 @@ void EdsLb::UpdateLocked(UpdateArgs args) {
config_->lrs_load_reporting_server_name().value(),
key.first /*cluster_name*/, key.second /*eds_service_name*/);
}
}
if (lrs_server_changed || max_concurrent_requests_changed) {
MaybeUpdateDropPickerLocked();
}
// Update child policy if needed.
@ -815,14 +867,16 @@ void EdsLb::MaybeUpdateDropPickerLocked() {
// If we're dropping all calls, report READY, regardless of what (or
// whether) the child has reported.
if (drop_config_ != nullptr && drop_config_->drop_all()) {
channel_control_helper()->UpdateState(GRPC_CHANNEL_READY, absl::Status(),
absl::make_unique<DropPicker>(this));
channel_control_helper()->UpdateState(
GRPC_CHANNEL_READY, absl::Status(),
absl::make_unique<DropPicker>(Ref(DEBUG_LOCATION, "DropPicker")));
return;
}
// Update only if we have a child picker.
if (child_picker_ != nullptr) {
channel_control_helper()->UpdateState(child_state_, child_status_,
absl::make_unique<DropPicker>(this));
channel_control_helper()->UpdateState(
child_state_, child_status_,
absl::make_unique<DropPicker>(Ref(DEBUG_LOCATION, "DropPicker")));
}
}
@ -938,13 +992,25 @@ class EdsLbFactory : public LoadBalancingPolicyFactory {
"endpointPickingPolicy", &parse_error, 1));
GRPC_ERROR_UNREF(parse_error);
}
// Max concurrent requests.
uint32_t max_concurrent_requests = 1024;
it = json.object_value().find("max_concurrent_requests");
if (it != json.object_value().end()) {
if (it->second.type() != Json::Type::NUMBER) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:max_concurrent_requests error:must be of type number"));
} else {
max_concurrent_requests =
gpr_parse_nonnegative_int(it->second.string_value().c_str());
}
}
// Construct config.
if (error_list.empty()) {
return MakeRefCounted<EdsLbConfig>(
std::move(cluster_name), std::move(eds_service_name),
std::move(lrs_load_reporting_server_name),
std::move(locality_picking_policy),
std::move(endpoint_picking_policy));
std::move(endpoint_picking_policy), max_concurrent_requests);
} else {
*error = GRPC_ERROR_CREATE_FROM_VECTOR(
"eds_experimental LB policy config", &error_list);

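For reference, the picker change above amounts to a simple admission-control pattern: atomically claim a slot before delegating the pick, release it immediately on drop or failure, and otherwise release it only when the call completes. Below is a minimal standalone C++ sketch of that pattern, with std::atomic and a plain callback standing in for gRPC's internal Atomic wrapper and the recv_trailing_metadata_ready hook (all names are illustrative, not gRPC APIs).

#include <atomic>
#include <cstdint>
#include <functional>
#include <iostream>

// Illustrative stand-in for the real picker result type.
struct PickResult {
  enum Type { kComplete, kDropped } type = kComplete;
  // Invoked when the call finishes (analogous to recv_trailing_metadata_ready).
  std::function<void()> on_call_complete;
};

class ConcurrencyLimitedPicker {
 public:
  explicit ConcurrencyLimitedPicker(uint32_t max_concurrent_requests)
      : max_concurrent_requests_(max_concurrent_requests) {}

  PickResult Pick() {
    // Optimistically claim a slot, then back out if we are over the limit.
    uint32_t current = concurrent_requests_.fetch_add(1);
    if (current >= max_concurrent_requests_) {
      concurrent_requests_.fetch_sub(1);
      return {PickResult::kDropped, nullptr};
    }
    // On success, the slot is released only when the call completes.
    return {PickResult::kComplete,
            [this]() { concurrent_requests_.fetch_sub(1); }};
  }

 private:
  const uint32_t max_concurrent_requests_;
  std::atomic<uint32_t> concurrent_requests_{0};
};

int main() {
  ConcurrencyLimitedPicker picker(2);
  PickResult a = picker.Pick();  // accepted
  PickResult b = picker.Pick();  // accepted
  PickResult c = picker.Pick();  // dropped: 2 calls already in flight
  std::cout << (c.type == PickResult::kDropped) << "\n";  // prints 1
  if (a.on_call_complete) a.on_call_complete();  // call finishes, slot freed
  PickResult d = picker.Pick();  // accepted again
  std::cout << (d.type == PickResult::kComplete) << "\n";  // prints 1
}

Releasing the slot from the completion callback, rather than right after the pick, is what makes the counter track calls actually in flight instead of picks issued.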
@ -42,6 +42,7 @@
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "envoy/config/cluster/v3/circuit_breaker.upb.h"
#include "envoy/config/cluster/v3/cluster.upb.h"
#include "envoy/config/core/v3/address.upb.h"
#include "envoy/config/core/v3/base.upb.h"
@ -1838,6 +1839,32 @@ grpc_error* CdsResponseParse(
}
cds_update.lrs_load_reporting_server_name.emplace("");
}
// The Cluster resource encodes the circuit-breaking parameters in a list of
// Thresholds messages, where each message specifies the parameters for a
// particular RoutingPriority. We look only at the first entry in the list
// with priority DEFAULT and default to 1024 if no such entry is found.
if (envoy_config_cluster_v3_Cluster_has_circuit_breakers(cluster)) {
const envoy_config_cluster_v3_CircuitBreakers* circuit_breakers =
envoy_config_cluster_v3_Cluster_circuit_breakers(cluster);
size_t num_thresholds;
const envoy_config_cluster_v3_CircuitBreakers_Thresholds* const*
thresholds = envoy_config_cluster_v3_CircuitBreakers_thresholds(
circuit_breakers, &num_thresholds);
for (size_t i = 0; i < num_thresholds; ++i) {
const auto* threshold = thresholds[i];
if (envoy_config_cluster_v3_CircuitBreakers_Thresholds_priority(
threshold) == envoy_config_core_v3_DEFAULT) {
const google_protobuf_UInt32Value* max_requests =
envoy_config_cluster_v3_CircuitBreakers_Thresholds_max_requests(
threshold);
if (max_requests != nullptr) {
cds_update.max_concurrent_requests =
google_protobuf_UInt32Value_value(max_requests);
}
break;
}
}
}
}
return GRPC_ERROR_NONE;
}

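As the comment in CdsResponseParse above explains, the limit is taken from the first Thresholds entry with DEFAULT priority and falls back to 1024 otherwise. Below is a standalone C++ sketch of just that selection rule, with plain structs standing in for the upb-generated envoy_config_cluster_v3 types (names are illustrative).

#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

// Illustrative stand-ins for the CircuitBreakers.Thresholds message.
enum class RoutingPriority { kDefault, kHigh };

struct Thresholds {
  RoutingPriority priority = RoutingPriority::kDefault;
  std::optional<uint32_t> max_requests;  // unset wrapper => no limit given
};

// Mirrors the parsing rule: use the first DEFAULT-priority entry that sets
// max_requests; otherwise keep the 1024 default.
uint32_t MaxConcurrentRequests(const std::vector<Thresholds>& thresholds) {
  uint32_t max_concurrent_requests = 1024;
  for (const Thresholds& threshold : thresholds) {
    if (threshold.priority == RoutingPriority::kDefault) {
      if (threshold.max_requests.has_value()) {
        max_concurrent_requests = *threshold.max_requests;
      }
      break;  // only the first DEFAULT entry is consulted
    }
  }
  return max_concurrent_requests;
}

int main() {
  std::cout << MaxConcurrentRequests({}) << "\n";  // 1024
  std::cout << MaxConcurrentRequests({{RoutingPriority::kHigh, 50},
                                      {RoutingPriority::kDefault, 10}})
            << "\n";  // 10
}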
@ -178,11 +178,15 @@ class XdsApi {
// If set to the empty string, will use the same server we obtained the CDS
// data from.
absl::optional<std::string> lrs_load_reporting_server_name;
// Maximum number of outstanding requests that can be made to the upstream
// cluster.
uint32_t max_concurrent_requests = 1024;
bool operator==(const CdsUpdate& other) const {
return eds_service_name == other.eds_service_name &&
lrs_load_reporting_server_name ==
other.lrs_load_reporting_server_name;
other.lrs_load_reporting_server_name &&
max_concurrent_requests == other.max_concurrent_requests;
}
};

@ -35,6 +35,7 @@ grpc_proto_library(
srcs = [
"cds_for_test.proto",
],
well_known_protos = True,
)
grpc_proto_library(

@ -27,6 +27,8 @@ syntax = "proto3";
package envoy.api.v2;
import "google/protobuf/wrappers.proto";
// Aggregated Discovery Service (ADS) options. This is currently empty, but when
// set in :ref:`ConfigSource <envoy_api_msg_core.ConfigSource>` can be used to
// specify that ADS is to be used.
@ -57,6 +59,19 @@ message ConfigSource {
}
}
enum RoutingPriority {
DEFAULT = 0;
HIGH = 1;
}
message CircuitBreakers {
message Thresholds {
RoutingPriority priority = 1;
google.protobuf.UInt32Value max_requests = 4;
}
repeated Thresholds thresholds = 1;
}
message Cluster {
// Refer to :ref:`service discovery type <arch_overview_service_discovery_types>`
// for an explanation on each type.
@ -153,5 +168,7 @@ message Cluster {
// Configuration to use for EDS updates for the Cluster.
EdsClusterConfig eds_cluster_config = 3;
CircuitBreakers circuit_breakers = 10;
ConfigSource lrs_server = 42;
}

@ -81,6 +81,7 @@ grpc_proto_library(
srcs = [
"cluster.proto",
],
well_known_protos = True,
deps = [
"config_source_proto",
],

@ -20,6 +20,21 @@ package envoy.config.cluster.v3;
import "src/proto/grpc/testing/xds/v3/config_source.proto";
import "google/protobuf/wrappers.proto";
enum RoutingPriority {
DEFAULT = 0;
HIGH = 1;
}
message CircuitBreakers {
message Thresholds {
RoutingPriority priority = 1;
google.protobuf.UInt32Value max_requests = 4;
}
repeated Thresholds thresholds = 1;
}
// [#protodoc-title: Cluster configuration]
// Configuration for a single upstream cluster.
@ -127,6 +142,8 @@ message Cluster {
// when picking a host in the cluster.
LbPolicy lb_policy = 6;
CircuitBreakers circuit_breakers = 10;
// [#not-implemented-hide:]
// If present, tells the client where to send load reports via LRS. If not present, the
// client will fall back to a client-side default, which may be either (a) don't send any

@ -65,7 +65,8 @@
"unit._metadata_test.MetadataTest",
"unit._reconnect_test.ReconnectTest",
"unit._resource_exhausted_test.ResourceExhaustedTest",
"unit._rpc_test.RPCTest",
"unit._rpc_part_1_test.RPCPart1Test",
"unit._rpc_part_2_test.RPCPart2Test",
"unit._server_shutdown_test.ServerShutdown",
"unit._server_ssl_cert_config_test.ServerSSLCertConfigFetcherParamsChecks",
"unit._server_ssl_cert_config_test.ServerSSLCertReloadTestCertConfigReuse",

@ -31,7 +31,8 @@ GRPCIO_TESTS_UNIT = [
"_metadata_test.py",
"_reconnect_test.py",
"_resource_exhausted_test.py",
"_rpc_test.py",
"_rpc_part_1_test.py",
"_rpc_part_2_test.py",
"_signal_handling_test.py",
# TODO(ghostwriternr): To be added later.
# "_server_ssl_cert_config_test.py",
@ -74,6 +75,11 @@ py_library(
srcs = ["_exit_scenarios.py"],
)
py_library(
name = "_rpc_test_helpers",
srcs = ["_rpc_test_helpers.py"],
)
py_library(
name = "_server_shutdown_scenarios",
srcs = ["_server_shutdown_scenarios.py"],
@ -97,6 +103,7 @@ py_library(
deps = [
":_exit_scenarios",
":_from_grpc_import_star",
":_rpc_test_helpers",
":_server_shutdown_scenarios",
":_signal_client",
":_tcp_proxy",

@ -0,0 +1,232 @@
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test of RPCs made against gRPC Python's application-layer API."""
import itertools
import threading
import unittest
import logging
from concurrent import futures
import grpc
from grpc.framework.foundation import logging_pool
from tests.unit._rpc_test_helpers import (
TIMEOUT_SHORT, Callback, unary_unary_multi_callable,
unary_stream_multi_callable, unary_stream_non_blocking_multi_callable,
stream_unary_multi_callable, stream_stream_multi_callable,
stream_stream_non_blocking_multi_callable, BaseRPCTest)
from tests.unit.framework.common import test_constants
class RPCPart1Test(BaseRPCTest, unittest.TestCase):
def testExpiredStreamRequestBlockingUnaryResponse(self):
requests = tuple(
b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = stream_unary_multi_callable(self._channel)
with self._control.pause():
with self.assertRaises(grpc.RpcError) as exception_context:
multi_callable(
request_iterator,
timeout=TIMEOUT_SHORT,
metadata=(('test',
'ExpiredStreamRequestBlockingUnaryResponse'),))
self.assertIsInstance(exception_context.exception, grpc.RpcError)
self.assertIsInstance(exception_context.exception, grpc.Call)
self.assertIsNotNone(exception_context.exception.initial_metadata())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
exception_context.exception.code())
self.assertIsNotNone(exception_context.exception.details())
self.assertIsNotNone(exception_context.exception.trailing_metadata())
def testExpiredStreamRequestFutureUnaryResponse(self):
requests = tuple(
b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
callback = Callback()
multi_callable = stream_unary_multi_callable(self._channel)
with self._control.pause():
response_future = multi_callable.future(
request_iterator,
timeout=TIMEOUT_SHORT,
metadata=(('test', 'ExpiredStreamRequestFutureUnaryResponse'),))
with self.assertRaises(grpc.FutureTimeoutError):
response_future.result(timeout=TIMEOUT_SHORT / 2.0)
response_future.add_done_callback(callback)
value_passed_to_callback = callback.value()
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
exception_context.exception.code())
self.assertIsInstance(response_future.exception(), grpc.RpcError)
self.assertIsNotNone(response_future.traceback())
self.assertIs(response_future, value_passed_to_callback)
self.assertIsNotNone(response_future.initial_metadata())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
self.assertIsNotNone(response_future.details())
self.assertIsNotNone(response_future.trailing_metadata())
def testExpiredStreamRequestStreamResponse(self):
self._expired_stream_request_stream_response(
stream_stream_multi_callable(self._channel))
def testExpiredStreamRequestStreamResponseNonBlocking(self):
self._expired_stream_request_stream_response(
stream_stream_non_blocking_multi_callable(self._channel))
def testFailedUnaryRequestBlockingUnaryResponse(self):
request = b'\x37\x17'
multi_callable = unary_unary_multi_callable(self._channel)
with self._control.fail():
with self.assertRaises(grpc.RpcError) as exception_context:
multi_callable.with_call(
request,
metadata=(('test',
'FailedUnaryRequestBlockingUnaryResponse'),))
self.assertIs(grpc.StatusCode.UNKNOWN,
exception_context.exception.code())
# Sanity checks to make sure the returned string contains the default
# members of the error.
debug_error_string = exception_context.exception.debug_error_string()
self.assertIn('created', debug_error_string)
self.assertIn('description', debug_error_string)
self.assertIn('file', debug_error_string)
self.assertIn('file_line', debug_error_string)
def testFailedUnaryRequestFutureUnaryResponse(self):
request = b'\x37\x17'
callback = Callback()
multi_callable = unary_unary_multi_callable(self._channel)
with self._control.fail():
response_future = multi_callable.future(
request,
metadata=(('test', 'FailedUnaryRequestFutureUnaryResponse'),))
response_future.add_done_callback(callback)
value_passed_to_callback = callback.value()
self.assertIsInstance(response_future, grpc.Future)
self.assertIsInstance(response_future, grpc.Call)
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertIs(grpc.StatusCode.UNKNOWN,
exception_context.exception.code())
self.assertIsInstance(response_future.exception(), grpc.RpcError)
self.assertIsNotNone(response_future.traceback())
self.assertIs(grpc.StatusCode.UNKNOWN,
response_future.exception().code())
self.assertIs(response_future, value_passed_to_callback)
def testFailedUnaryRequestStreamResponse(self):
self._failed_unary_request_stream_response(
unary_stream_multi_callable(self._channel))
def testFailedUnaryRequestStreamResponseNonBlocking(self):
self._failed_unary_request_stream_response(
unary_stream_non_blocking_multi_callable(self._channel))
def testFailedStreamRequestBlockingUnaryResponse(self):
requests = tuple(
b'\x47\x58' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = stream_unary_multi_callable(self._channel)
with self._control.fail():
with self.assertRaises(grpc.RpcError) as exception_context:
multi_callable(
request_iterator,
metadata=(('test',
'FailedStreamRequestBlockingUnaryResponse'),))
self.assertIs(grpc.StatusCode.UNKNOWN,
exception_context.exception.code())
def testFailedStreamRequestFutureUnaryResponse(self):
requests = tuple(
b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
callback = Callback()
multi_callable = stream_unary_multi_callable(self._channel)
with self._control.fail():
response_future = multi_callable.future(
request_iterator,
metadata=(('test', 'FailedStreamRequestFutureUnaryResponse'),))
response_future.add_done_callback(callback)
value_passed_to_callback = callback.value()
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertIs(grpc.StatusCode.UNKNOWN, response_future.code())
self.assertIs(grpc.StatusCode.UNKNOWN,
exception_context.exception.code())
self.assertIsInstance(response_future.exception(), grpc.RpcError)
self.assertIsNotNone(response_future.traceback())
self.assertIs(response_future, value_passed_to_callback)
def testFailedStreamRequestStreamResponse(self):
self._failed_stream_request_stream_response(
stream_stream_multi_callable(self._channel))
def testFailedStreamRequestStreamResponseNonBlocking(self):
self._failed_stream_request_stream_response(
stream_stream_non_blocking_multi_callable(self._channel))
def testIgnoredUnaryRequestFutureUnaryResponse(self):
request = b'\x37\x17'
multi_callable = unary_unary_multi_callable(self._channel)
multi_callable.future(
request,
metadata=(('test', 'IgnoredUnaryRequestFutureUnaryResponse'),))
def testIgnoredUnaryRequestStreamResponse(self):
self._ignored_unary_stream_request_future_unary_response(
unary_stream_multi_callable(self._channel))
def testIgnoredUnaryRequestStreamResponseNonBlocking(self):
self._ignored_unary_stream_request_future_unary_response(
unary_stream_non_blocking_multi_callable(self._channel))
def testIgnoredStreamRequestFutureUnaryResponse(self):
requests = tuple(
b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = stream_unary_multi_callable(self._channel)
multi_callable.future(
request_iterator,
metadata=(('test', 'IgnoredStreamRequestFutureUnaryResponse'),))
def testIgnoredStreamRequestStreamResponse(self):
self._ignored_stream_request_stream_response(
stream_stream_multi_callable(self._channel))
def testIgnoredStreamRequestStreamResponseNonBlocking(self):
self._ignored_stream_request_stream_response(
stream_stream_non_blocking_multi_callable(self._channel))
if __name__ == '__main__':
logging.basicConfig()
unittest.main(verbosity=2)

@ -0,0 +1,426 @@
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test of RPCs made against gRPC Python's application-layer API."""
import itertools
import threading
import unittest
import logging
from concurrent import futures
import grpc
from grpc.framework.foundation import logging_pool
from tests.unit._rpc_test_helpers import (
TIMEOUT_SHORT, Callback, unary_unary_multi_callable,
unary_stream_multi_callable, unary_stream_non_blocking_multi_callable,
stream_unary_multi_callable, stream_stream_multi_callable,
stream_stream_non_blocking_multi_callable, BaseRPCTest)
from tests.unit.framework.common import test_constants
class RPCPart2Test(BaseRPCTest, unittest.TestCase):
def testDefaultThreadPoolIsUsed(self):
self._consume_one_stream_response_unary_request(
unary_stream_multi_callable(self._channel))
self.assertFalse(self._thread_pool.was_used())
def testExperimentalThreadPoolIsUsed(self):
self._consume_one_stream_response_unary_request(
unary_stream_non_blocking_multi_callable(self._channel))
self.assertTrue(self._thread_pool.was_used())
def testUnrecognizedMethod(self):
request = b'abc'
with self.assertRaises(grpc.RpcError) as exception_context:
self._channel.unary_unary('NoSuchMethod')(request)
self.assertEqual(grpc.StatusCode.UNIMPLEMENTED,
exception_context.exception.code())
def testSuccessfulUnaryRequestBlockingUnaryResponse(self):
request = b'\x07\x08'
expected_response = self._handler.handle_unary_unary(request, None)
multi_callable = unary_unary_multi_callable(self._channel)
response = multi_callable(
request,
metadata=(('test', 'SuccessfulUnaryRequestBlockingUnaryResponse'),))
self.assertEqual(expected_response, response)
def testSuccessfulUnaryRequestBlockingUnaryResponseWithCall(self):
request = b'\x07\x08'
expected_response = self._handler.handle_unary_unary(request, None)
multi_callable = unary_unary_multi_callable(self._channel)
response, call = multi_callable.with_call(
request,
metadata=(('test',
'SuccessfulUnaryRequestBlockingUnaryResponseWithCall'),))
self.assertEqual(expected_response, response)
self.assertIs(grpc.StatusCode.OK, call.code())
self.assertEqual('', call.debug_error_string())
def testSuccessfulUnaryRequestFutureUnaryResponse(self):
request = b'\x07\x08'
expected_response = self._handler.handle_unary_unary(request, None)
multi_callable = unary_unary_multi_callable(self._channel)
response_future = multi_callable.future(
request,
metadata=(('test', 'SuccessfulUnaryRequestFutureUnaryResponse'),))
response = response_future.result()
self.assertIsInstance(response_future, grpc.Future)
self.assertIsInstance(response_future, grpc.Call)
self.assertEqual(expected_response, response)
self.assertIsNone(response_future.exception())
self.assertIsNone(response_future.traceback())
def testSuccessfulUnaryRequestStreamResponse(self):
request = b'\x37\x58'
expected_responses = tuple(
self._handler.handle_unary_stream(request, None))
multi_callable = unary_stream_multi_callable(self._channel)
response_iterator = multi_callable(
request,
metadata=(('test', 'SuccessfulUnaryRequestStreamResponse'),))
responses = tuple(response_iterator)
self.assertSequenceEqual(expected_responses, responses)
def testSuccessfulStreamRequestBlockingUnaryResponse(self):
requests = tuple(
b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
expected_response = self._handler.handle_stream_unary(
iter(requests), None)
request_iterator = iter(requests)
multi_callable = stream_unary_multi_callable(self._channel)
response = multi_callable(
request_iterator,
metadata=(('test',
'SuccessfulStreamRequestBlockingUnaryResponse'),))
self.assertEqual(expected_response, response)
def testSuccessfulStreamRequestBlockingUnaryResponseWithCall(self):
requests = tuple(
b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
expected_response = self._handler.handle_stream_unary(
iter(requests), None)
request_iterator = iter(requests)
multi_callable = stream_unary_multi_callable(self._channel)
response, call = multi_callable.with_call(
request_iterator,
metadata=(
('test',
'SuccessfulStreamRequestBlockingUnaryResponseWithCall'),))
self.assertEqual(expected_response, response)
self.assertIs(grpc.StatusCode.OK, call.code())
def testSuccessfulStreamRequestFutureUnaryResponse(self):
requests = tuple(
b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
expected_response = self._handler.handle_stream_unary(
iter(requests), None)
request_iterator = iter(requests)
multi_callable = stream_unary_multi_callable(self._channel)
response_future = multi_callable.future(
request_iterator,
metadata=(('test', 'SuccessfulStreamRequestFutureUnaryResponse'),))
response = response_future.result()
self.assertEqual(expected_response, response)
self.assertIsNone(response_future.exception())
self.assertIsNone(response_future.traceback())
def testSuccessfulStreamRequestStreamResponse(self):
requests = tuple(
b'\x77\x58' for _ in range(test_constants.STREAM_LENGTH))
expected_responses = tuple(
self._handler.handle_stream_stream(iter(requests), None))
request_iterator = iter(requests)
multi_callable = stream_stream_multi_callable(self._channel)
response_iterator = multi_callable(
request_iterator,
metadata=(('test', 'SuccessfulStreamRequestStreamResponse'),))
responses = tuple(response_iterator)
self.assertSequenceEqual(expected_responses, responses)
def testSequentialInvocations(self):
first_request = b'\x07\x08'
second_request = b'\x0809'
expected_first_response = self._handler.handle_unary_unary(
first_request, None)
expected_second_response = self._handler.handle_unary_unary(
second_request, None)
multi_callable = unary_unary_multi_callable(self._channel)
first_response = multi_callable(first_request,
metadata=(('test',
'SequentialInvocations'),))
second_response = multi_callable(second_request,
metadata=(('test',
'SequentialInvocations'),))
self.assertEqual(expected_first_response, first_response)
self.assertEqual(expected_second_response, second_response)
def testConcurrentBlockingInvocations(self):
pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
requests = tuple(
b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
expected_response = self._handler.handle_stream_unary(
iter(requests), None)
expected_responses = [expected_response
] * test_constants.THREAD_CONCURRENCY
response_futures = [None] * test_constants.THREAD_CONCURRENCY
multi_callable = stream_unary_multi_callable(self._channel)
for index in range(test_constants.THREAD_CONCURRENCY):
request_iterator = iter(requests)
response_future = pool.submit(
multi_callable,
request_iterator,
metadata=(('test', 'ConcurrentBlockingInvocations'),))
response_futures[index] = response_future
responses = tuple(
response_future.result() for response_future in response_futures)
pool.shutdown(wait=True)
self.assertSequenceEqual(expected_responses, responses)
def testConcurrentFutureInvocations(self):
requests = tuple(
b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
expected_response = self._handler.handle_stream_unary(
iter(requests), None)
expected_responses = [expected_response
] * test_constants.THREAD_CONCURRENCY
response_futures = [None] * test_constants.THREAD_CONCURRENCY
multi_callable = stream_unary_multi_callable(self._channel)
for index in range(test_constants.THREAD_CONCURRENCY):
request_iterator = iter(requests)
response_future = multi_callable.future(
request_iterator,
metadata=(('test', 'ConcurrentFutureInvocations'),))
response_futures[index] = response_future
responses = tuple(
response_future.result() for response_future in response_futures)
self.assertSequenceEqual(expected_responses, responses)
def testWaitingForSomeButNotAllConcurrentFutureInvocations(self):
pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
request = b'\x67\x68'
expected_response = self._handler.handle_unary_unary(request, None)
response_futures = [None] * test_constants.THREAD_CONCURRENCY
lock = threading.Lock()
test_is_running_cell = [True]
def wrap_future(future):
def wrap():
try:
return future.result()
except grpc.RpcError:
with lock:
if test_is_running_cell[0]:
raise
return None
return wrap
multi_callable = unary_unary_multi_callable(self._channel)
for index in range(test_constants.THREAD_CONCURRENCY):
inner_response_future = multi_callable.future(
request,
metadata=(
('test',
'WaitingForSomeButNotAllConcurrentFutureInvocations'),))
outer_response_future = pool.submit(
wrap_future(inner_response_future))
response_futures[index] = outer_response_future
some_completed_response_futures_iterator = itertools.islice(
futures.as_completed(response_futures),
test_constants.THREAD_CONCURRENCY // 2)
for response_future in some_completed_response_futures_iterator:
self.assertEqual(expected_response, response_future.result())
with lock:
test_is_running_cell[0] = False
def testConsumingOneStreamResponseUnaryRequest(self):
self._consume_one_stream_response_unary_request(
unary_stream_multi_callable(self._channel))
def testConsumingOneStreamResponseUnaryRequestNonBlocking(self):
self._consume_one_stream_response_unary_request(
unary_stream_non_blocking_multi_callable(self._channel))
def testConsumingSomeButNotAllStreamResponsesUnaryRequest(self):
self._consume_some_but_not_all_stream_responses_unary_request(
unary_stream_multi_callable(self._channel))
def testConsumingSomeButNotAllStreamResponsesUnaryRequestNonBlocking(self):
self._consume_some_but_not_all_stream_responses_unary_request(
unary_stream_non_blocking_multi_callable(self._channel))
def testConsumingSomeButNotAllStreamResponsesStreamRequest(self):
self._consume_some_but_not_all_stream_responses_stream_request(
stream_stream_multi_callable(self._channel))
def testConsumingSomeButNotAllStreamResponsesStreamRequestNonBlocking(self):
self._consume_some_but_not_all_stream_responses_stream_request(
stream_stream_non_blocking_multi_callable(self._channel))
def testConsumingTooManyStreamResponsesStreamRequest(self):
self._consume_too_many_stream_responses_stream_request(
stream_stream_multi_callable(self._channel))
def testConsumingTooManyStreamResponsesStreamRequestNonBlocking(self):
self._consume_too_many_stream_responses_stream_request(
stream_stream_non_blocking_multi_callable(self._channel))
def testCancelledUnaryRequestUnaryResponse(self):
request = b'\x07\x17'
multi_callable = unary_unary_multi_callable(self._channel)
with self._control.pause():
response_future = multi_callable.future(
request,
metadata=(('test', 'CancelledUnaryRequestUnaryResponse'),))
response_future.cancel()
self.assertIs(grpc.StatusCode.CANCELLED, response_future.code())
self.assertTrue(response_future.cancelled())
with self.assertRaises(grpc.FutureCancelledError):
response_future.result()
with self.assertRaises(grpc.FutureCancelledError):
response_future.exception()
with self.assertRaises(grpc.FutureCancelledError):
response_future.traceback()
def testCancelledUnaryRequestStreamResponse(self):
self._cancelled_unary_request_stream_response(
unary_stream_multi_callable(self._channel))
def testCancelledUnaryRequestStreamResponseNonBlocking(self):
self._cancelled_unary_request_stream_response(
unary_stream_non_blocking_multi_callable(self._channel))
def testCancelledStreamRequestUnaryResponse(self):
requests = tuple(
b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = stream_unary_multi_callable(self._channel)
with self._control.pause():
response_future = multi_callable.future(
request_iterator,
metadata=(('test', 'CancelledStreamRequestUnaryResponse'),))
self._control.block_until_paused()
response_future.cancel()
self.assertIs(grpc.StatusCode.CANCELLED, response_future.code())
self.assertTrue(response_future.cancelled())
with self.assertRaises(grpc.FutureCancelledError):
response_future.result()
with self.assertRaises(grpc.FutureCancelledError):
response_future.exception()
with self.assertRaises(grpc.FutureCancelledError):
response_future.traceback()
self.assertIsNotNone(response_future.initial_metadata())
self.assertIsNotNone(response_future.details())
self.assertIsNotNone(response_future.trailing_metadata())
def testCancelledStreamRequestStreamResponse(self):
self._cancelled_stream_request_stream_response(
stream_stream_multi_callable(self._channel))
def testCancelledStreamRequestStreamResponseNonBlocking(self):
self._cancelled_stream_request_stream_response(
stream_stream_non_blocking_multi_callable(self._channel))
def testExpiredUnaryRequestBlockingUnaryResponse(self):
request = b'\x07\x17'
multi_callable = unary_unary_multi_callable(self._channel)
with self._control.pause():
with self.assertRaises(grpc.RpcError) as exception_context:
multi_callable.with_call(
request,
timeout=TIMEOUT_SHORT,
metadata=(('test',
'ExpiredUnaryRequestBlockingUnaryResponse'),))
self.assertIsInstance(exception_context.exception, grpc.Call)
self.assertIsNotNone(exception_context.exception.initial_metadata())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
exception_context.exception.code())
self.assertIsNotNone(exception_context.exception.details())
self.assertIsNotNone(exception_context.exception.trailing_metadata())
def testExpiredUnaryRequestFutureUnaryResponse(self):
request = b'\x07\x17'
callback = Callback()
multi_callable = unary_unary_multi_callable(self._channel)
with self._control.pause():
response_future = multi_callable.future(
request,
timeout=TIMEOUT_SHORT,
metadata=(('test', 'ExpiredUnaryRequestFutureUnaryResponse'),))
response_future.add_done_callback(callback)
value_passed_to_callback = callback.value()
self.assertIs(response_future, value_passed_to_callback)
self.assertIsNotNone(response_future.initial_metadata())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
self.assertIsNotNone(response_future.details())
self.assertIsNotNone(response_future.trailing_metadata())
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
exception_context.exception.code())
self.assertIsInstance(response_future.exception(), grpc.RpcError)
self.assertIsNotNone(response_future.traceback())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
response_future.exception().code())
def testExpiredUnaryRequestStreamResponse(self):
self._expired_unary_request_stream_response(
unary_stream_multi_callable(self._channel))
def testExpiredUnaryRequestStreamResponseNonBlocking(self):
self._expired_unary_request_stream_response(
unary_stream_non_blocking_multi_callable(self._channel))
if __name__ == '__main__':
logging.basicConfig()
unittest.main(verbosity=2)

Diff of src/python/grpcio_tests/tests/unit/_rpc_test.py suppressed because it is too large.

@ -0,0 +1,417 @@
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test helpers for RPC invocation tests."""
import datetime
import threading
import grpc
from grpc.framework.foundation import logging_pool
from tests.unit import test_common
from tests.unit import thread_pool
from tests.unit.framework.common import test_constants
from tests.unit.framework.common import test_control
_SERIALIZE_REQUEST = lambda bytestring: bytestring * 2
_DESERIALIZE_REQUEST = lambda bytestring: bytestring[len(bytestring) // 2:]
_SERIALIZE_RESPONSE = lambda bytestring: bytestring * 3
_DESERIALIZE_RESPONSE = lambda bytestring: bytestring[:len(bytestring) // 3]
_UNARY_UNARY = '/test/UnaryUnary'
_UNARY_STREAM = '/test/UnaryStream'
_UNARY_STREAM_NON_BLOCKING = '/test/UnaryStreamNonBlocking'
_STREAM_UNARY = '/test/StreamUnary'
_STREAM_STREAM = '/test/StreamStream'
_STREAM_STREAM_NON_BLOCKING = '/test/StreamStreamNonBlocking'
TIMEOUT_SHORT = datetime.timedelta(seconds=1).total_seconds()
class Callback(object):
def __init__(self):
self._condition = threading.Condition()
self._value = None
self._called = False
def __call__(self, value):
with self._condition:
self._value = value
self._called = True
self._condition.notify_all()
def value(self):
with self._condition:
while not self._called:
self._condition.wait()
return self._value
class _Handler(object):
def __init__(self, control, thread_pool):
self._control = control
self._thread_pool = thread_pool
non_blocking_functions = (self.handle_unary_stream_non_blocking,
self.handle_stream_stream_non_blocking)
for non_blocking_function in non_blocking_functions:
non_blocking_function.__func__.experimental_non_blocking = True
non_blocking_function.__func__.experimental_thread_pool = self._thread_pool
def handle_unary_unary(self, request, servicer_context):
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata(((
'testkey',
'testvalue',
),))
# TODO(https://github.com/grpc/grpc/issues/8483): test the values
# returned by these methods rather than only "smoke" testing that
# they return after having been called.
servicer_context.is_active()
servicer_context.time_remaining()
return request
def handle_unary_stream(self, request, servicer_context):
for _ in range(test_constants.STREAM_LENGTH):
self._control.control()
yield request
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata(((
'testkey',
'testvalue',
),))
def handle_unary_stream_non_blocking(self, request, servicer_context,
on_next):
for _ in range(test_constants.STREAM_LENGTH):
self._control.control()
on_next(request)
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata(((
'testkey',
'testvalue',
),))
on_next(None)
def handle_stream_unary(self, request_iterator, servicer_context):
if servicer_context is not None:
servicer_context.invocation_metadata()
self._control.control()
response_elements = []
for request in request_iterator:
self._control.control()
response_elements.append(request)
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata(((
'testkey',
'testvalue',
),))
return b''.join(response_elements)
def handle_stream_stream(self, request_iterator, servicer_context):
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata(((
'testkey',
'testvalue',
),))
for request in request_iterator:
self._control.control()
yield request
self._control.control()
def handle_stream_stream_non_blocking(self, request_iterator,
servicer_context, on_next):
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata(((
'testkey',
'testvalue',
),))
for request in request_iterator:
self._control.control()
on_next(request)
self._control.control()
on_next(None)
class _MethodHandler(grpc.RpcMethodHandler):
def __init__(self, request_streaming, response_streaming,
request_deserializer, response_serializer, unary_unary,
unary_stream, stream_unary, stream_stream):
self.request_streaming = request_streaming
self.response_streaming = response_streaming
self.request_deserializer = request_deserializer
self.response_serializer = response_serializer
self.unary_unary = unary_unary
self.unary_stream = unary_stream
self.stream_unary = stream_unary
self.stream_stream = stream_stream
class _GenericHandler(grpc.GenericRpcHandler):
def __init__(self, handler):
self._handler = handler
def service(self, handler_call_details):
if handler_call_details.method == _UNARY_UNARY:
return _MethodHandler(False, False, None, None,
self._handler.handle_unary_unary, None, None,
None)
elif handler_call_details.method == _UNARY_STREAM:
return _MethodHandler(False, True, _DESERIALIZE_REQUEST,
_SERIALIZE_RESPONSE, None,
self._handler.handle_unary_stream, None, None)
elif handler_call_details.method == _UNARY_STREAM_NON_BLOCKING:
return _MethodHandler(
False, True, _DESERIALIZE_REQUEST, _SERIALIZE_RESPONSE, None,
self._handler.handle_unary_stream_non_blocking, None, None)
elif handler_call_details.method == _STREAM_UNARY:
return _MethodHandler(True, False, _DESERIALIZE_REQUEST,
_SERIALIZE_RESPONSE, None, None,
self._handler.handle_stream_unary, None)
elif handler_call_details.method == _STREAM_STREAM:
return _MethodHandler(True, True, None, None, None, None, None,
self._handler.handle_stream_stream)
elif handler_call_details.method == _STREAM_STREAM_NON_BLOCKING:
return _MethodHandler(
True, True, None, None, None, None, None,
self._handler.handle_stream_stream_non_blocking)
else:
return None
def unary_unary_multi_callable(channel):
return channel.unary_unary(_UNARY_UNARY)
def unary_stream_multi_callable(channel):
return channel.unary_stream(_UNARY_STREAM,
request_serializer=_SERIALIZE_REQUEST,
response_deserializer=_DESERIALIZE_RESPONSE)
def unary_stream_non_blocking_multi_callable(channel):
return channel.unary_stream(_UNARY_STREAM_NON_BLOCKING,
request_serializer=_SERIALIZE_REQUEST,
response_deserializer=_DESERIALIZE_RESPONSE)
def stream_unary_multi_callable(channel):
return channel.stream_unary(_STREAM_UNARY,
request_serializer=_SERIALIZE_REQUEST,
response_deserializer=_DESERIALIZE_RESPONSE)
def stream_stream_multi_callable(channel):
return channel.stream_stream(_STREAM_STREAM)
def stream_stream_non_blocking_multi_callable(channel):
return channel.stream_stream(_STREAM_STREAM_NON_BLOCKING)
class BaseRPCTest(object):
def setUp(self):
self._control = test_control.PauseFailControl()
self._thread_pool = thread_pool.RecordingThreadPool(max_workers=None)
self._handler = _Handler(self._control, self._thread_pool)
self._server = test_common.test_server()
port = self._server.add_insecure_port('[::]:0')
self._server.add_generic_rpc_handlers((_GenericHandler(self._handler),))
self._server.start()
self._channel = grpc.insecure_channel('localhost:%d' % port)
def tearDown(self):
self._server.stop(None)
self._channel.close()
def _consume_one_stream_response_unary_request(self, multi_callable):
request = b'\x57\x38'
response_iterator = multi_callable(
request,
metadata=(('test', 'ConsumingOneStreamResponseUnaryRequest'),))
next(response_iterator)
def _consume_some_but_not_all_stream_responses_unary_request(
self, multi_callable):
request = b'\x57\x38'
response_iterator = multi_callable(
request,
metadata=(('test',
'ConsumingSomeButNotAllStreamResponsesUnaryRequest'),))
for _ in range(test_constants.STREAM_LENGTH // 2):
next(response_iterator)
def _consume_some_but_not_all_stream_responses_stream_request(
self, multi_callable):
requests = tuple(
b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
response_iterator = multi_callable(
request_iterator,
metadata=(('test',
'ConsumingSomeButNotAllStreamResponsesStreamRequest'),))
for _ in range(test_constants.STREAM_LENGTH // 2):
next(response_iterator)
def _consume_too_many_stream_responses_stream_request(self, multi_callable):
requests = tuple(
b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
response_iterator = multi_callable(
request_iterator,
metadata=(('test',
'ConsumingTooManyStreamResponsesStreamRequest'),))
for _ in range(test_constants.STREAM_LENGTH):
next(response_iterator)
for _ in range(test_constants.STREAM_LENGTH):
with self.assertRaises(StopIteration):
next(response_iterator)
self.assertIsNotNone(response_iterator.initial_metadata())
self.assertIs(grpc.StatusCode.OK, response_iterator.code())
self.assertIsNotNone(response_iterator.details())
self.assertIsNotNone(response_iterator.trailing_metadata())
def _cancelled_unary_request_stream_response(self, multi_callable):
request = b'\x07\x19'
with self._control.pause():
response_iterator = multi_callable(
request,
metadata=(('test', 'CancelledUnaryRequestStreamResponse'),))
self._control.block_until_paused()
response_iterator.cancel()
with self.assertRaises(grpc.RpcError) as exception_context:
next(response_iterator)
self.assertIs(grpc.StatusCode.CANCELLED,
exception_context.exception.code())
self.assertIsNotNone(response_iterator.initial_metadata())
self.assertIs(grpc.StatusCode.CANCELLED, response_iterator.code())
self.assertIsNotNone(response_iterator.details())
self.assertIsNotNone(response_iterator.trailing_metadata())
def _cancelled_stream_request_stream_response(self, multi_callable):
requests = tuple(
b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
with self._control.pause():
response_iterator = multi_callable(
request_iterator,
metadata=(('test', 'CancelledStreamRequestStreamResponse'),))
response_iterator.cancel()
with self.assertRaises(grpc.RpcError):
next(response_iterator)
self.assertIsNotNone(response_iterator.initial_metadata())
self.assertIs(grpc.StatusCode.CANCELLED, response_iterator.code())
self.assertIsNotNone(response_iterator.details())
self.assertIsNotNone(response_iterator.trailing_metadata())
def _expired_unary_request_stream_response(self, multi_callable):
request = b'\x07\x19'
with self._control.pause():
with self.assertRaises(grpc.RpcError) as exception_context:
response_iterator = multi_callable(
request,
timeout=test_constants.SHORT_TIMEOUT,
metadata=(('test', 'ExpiredUnaryRequestStreamResponse'),))
next(response_iterator)
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
exception_context.exception.code())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
response_iterator.code())
def _expired_stream_request_stream_response(self, multi_callable):
requests = tuple(
b'\x67\x18' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
with self._control.pause():
with self.assertRaises(grpc.RpcError) as exception_context:
response_iterator = multi_callable(
request_iterator,
timeout=test_constants.SHORT_TIMEOUT,
metadata=(('test', 'ExpiredStreamRequestStreamResponse'),))
next(response_iterator)
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
exception_context.exception.code())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
response_iterator.code())
def _failed_unary_request_stream_response(self, multi_callable):
request = b'\x37\x17'
with self.assertRaises(grpc.RpcError) as exception_context:
with self._control.fail():
response_iterator = multi_callable(
request,
metadata=(('test', 'FailedUnaryRequestStreamResponse'),))
next(response_iterator)
self.assertIs(grpc.StatusCode.UNKNOWN,
exception_context.exception.code())
def _failed_stream_request_stream_response(self, multi_callable):
requests = tuple(
b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
with self._control.fail():
with self.assertRaises(grpc.RpcError) as exception_context:
response_iterator = multi_callable(
request_iterator,
metadata=(('test', 'FailedStreamRequestStreamResponse'),))
tuple(response_iterator)
self.assertIs(grpc.StatusCode.UNKNOWN,
exception_context.exception.code())
self.assertIs(grpc.StatusCode.UNKNOWN, response_iterator.code())
def _ignored_unary_stream_request_future_unary_response(
self, multi_callable):
request = b'\x37\x17'
multi_callable(request,
metadata=(('test',
'IgnoredUnaryRequestStreamResponse'),))
def _ignored_stream_request_stream_response(self, multi_callable):
requests = tuple(
b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable(request_iterator,
metadata=(('test',
'IgnoredStreamRequestStreamResponse'),))

@ -167,6 +167,7 @@ class TestMultipleServiceImpl : public RpcService {
{
std::unique_lock<std::mutex> lock(mu_);
signal_client_ = true;
++rpcs_waiting_for_client_cancel_;
}
while (!context->IsCancelled()) {
gpr_sleep_until(gpr_time_add(
@ -174,6 +175,10 @@ class TestMultipleServiceImpl : public RpcService {
gpr_time_from_micros(request->param().client_cancel_after_us(),
GPR_TIMESPAN)));
}
{
std::unique_lock<std::mutex> lock(mu_);
--rpcs_waiting_for_client_cancel_;
}
return Status::CANCELLED;
} else if (request->has_param() &&
request->param().server_cancel_after_us()) {
@ -425,12 +430,17 @@ class TestMultipleServiceImpl : public RpcService {
}
void ClientWaitUntilRpcStarted() { signaller_.ClientWaitUntilRpcStarted(); }
void SignalServerToContinue() { signaller_.SignalServerToContinue(); }
uint64_t RpcsWaitingForClientCancel() {
std::unique_lock<std::mutex> lock(mu_);
return rpcs_waiting_for_client_cancel_;
}
private:
bool signal_client_;
std::mutex mu_;
TestServiceSignaller signaller_;
std::unique_ptr<std::string> host_;
uint64_t rpcs_waiting_for_client_cancel_ = 0;
};
class CallbackTestServiceImpl

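The test_service_impl.h change above exposes a mutex-guarded count of handlers that are parked waiting for client cancellation, which the end2end test later polls to know when all RPCs are in flight. Below is a small standalone C++ sketch of that synchronization pattern (illustrative names; the real counter lives inside TestMultipleServiceImpl).

#include <atomic>
#include <chrono>
#include <cstdint>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

// Handlers bump the counter while they block waiting for the client to
// cancel; the test polls it to know when every RPC is in flight.
class InFlightTracker {
 public:
  void Enter() {
    std::unique_lock<std::mutex> lock(mu_);
    ++rpcs_waiting_for_client_cancel_;
  }
  void Exit() {
    std::unique_lock<std::mutex> lock(mu_);
    --rpcs_waiting_for_client_cancel_;
  }
  uint64_t RpcsWaitingForClientCancel() {
    std::unique_lock<std::mutex> lock(mu_);
    return rpcs_waiting_for_client_cancel_;
  }

 private:
  std::mutex mu_;
  uint64_t rpcs_waiting_for_client_cancel_ = 0;
};

int main() {
  InFlightTracker tracker;
  std::atomic<bool> cancelled{false};  // plays the role of context->IsCancelled()
  constexpr uint64_t kRpcs = 10;
  std::vector<std::thread> handlers;
  for (uint64_t i = 0; i < kRpcs; ++i) {
    handlers.emplace_back([&] {
      tracker.Enter();
      while (!cancelled) {  // handler parks until the "client" cancels
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
      }
      tracker.Exit();
    });
  }
  // Poll until every handler is in flight, as the end2end test does.
  while (tracker.RpcsWaitingForClientCancel() < kRpcs) {
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }
  std::cout << "all " << kRpcs << " RPCs in flight\n";
  cancelled = true;  // "cancel" every RPC so the handlers can finish
  for (std::thread& t : handlers) t.join();
}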
@ -86,7 +86,9 @@ namespace {
using std::chrono::system_clock;
using ::envoy::config::cluster::v3::CircuitBreakers;
using ::envoy::config::cluster::v3::Cluster;
using ::envoy::config::cluster::v3::RoutingPriority;
using ::envoy::config::endpoint::v3::ClusterLoadAssignment;
using ::envoy::config::endpoint::v3::HealthStatus;
using ::envoy::config::listener::v3::Listener;
@ -2259,6 +2261,77 @@ TEST_P(XdsResolverOnlyTest, DefaultRouteSpecifiesSlashPrefix) {
WaitForAllBackends();
}
TEST_P(XdsResolverOnlyTest, CircuitBreaking) {
class TestRpc {
public:
TestRpc() {}
void StartRpc(grpc::testing::EchoTestService::Stub* stub) {
sender_thread_ = std::thread([this, stub]() {
EchoResponse response;
EchoRequest request;
request.mutable_param()->set_client_cancel_after_us(1 * 1000 * 1000);
request.set_message(kRequestMessage);
status_ = stub->Echo(&context_, request, &response);
});
}
void CancelRpc() {
context_.TryCancel();
sender_thread_.join();
}
private:
std::thread sender_thread_;
ClientContext context_;
Status status_;
};
const char* kNewClusterName = "new_cluster";
constexpr size_t kMaxConcurrentRequests = 10;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
balancers_[0]->ads_service()->SetEdsResource(
AdsServiceImpl::BuildEdsResource(args));
// Update the CDS resource to set max concurrent requests.
CircuitBreakers circuit_breaks;
Cluster cluster = balancers_[0]->ads_service()->default_cluster();
auto* threshold = cluster.mutable_circuit_breakers()->add_thresholds();
threshold->set_priority(RoutingPriority::DEFAULT);
threshold->mutable_max_requests()->set_value(kMaxConcurrentRequests);
balancers_[0]->ads_service()->SetCdsResource(cluster);
// Send exactly kMaxConcurrentRequests long-running RPCs.
TestRpc rpcs[kMaxConcurrentRequests];
for (size_t i = 0; i < kMaxConcurrentRequests; ++i) {
rpcs[i].StartRpc(stub_.get());
}
// Wait for all RPCs to be in flight.
while (backends_[0]->backend_service()->RpcsWaitingForClientCancel() <
kMaxConcurrentRequests) {
gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_micros(1 * 1000, GPR_TIMESPAN)));
}
// Sending an RPC now should fail; the error message should tell us that
// we hit the max concurrent requests limit and got dropped.
Status status = SendRpc();
EXPECT_FALSE(status.ok());
EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy");
// Cancel one RPC to allow another one through
rpcs[0].CancelRpc();
status = SendRpc();
EXPECT_TRUE(status.ok());
for (size_t i = 1; i < kMaxConcurrentRequests; ++i) {
rpcs[i].CancelRpc();
}
// Make sure RPCs go to the correct backend:
EXPECT_EQ(kMaxConcurrentRequests + 1,
backends_[0]->backend_service()->request_count());
}
TEST_P(XdsResolverOnlyTest, MultipleChannelsShareXdsClient) {
const char* kNewServerName = "new-server.example.com";
Listener listener = balancers_[0]->ads_service()->default_listener();
