Merge remote-tracking branch 'upstream/master' into upb_upgrade

pull/23140/head
Mark D. Roth 5 years ago
commit 9a5b47c3cc
24 changed files (lines changed in parentheses):
  1. doc/unit_testing.md (2)
  2. examples/csharp/Xds/Greeter.sln (34)
  3. examples/csharp/Xds/Greeter/Greeter.csproj (20)
  4. examples/csharp/Xds/GreeterClient/GreeterClient.csproj (12)
  5. examples/csharp/Xds/GreeterClient/Program.cs (51)
  6. examples/csharp/Xds/GreeterServer/GreeterServer.csproj (12)
  7. examples/csharp/Xds/GreeterServer/Program.cs (93)
  8. examples/csharp/Xds/README.md (99)
  9. include/grpcpp/impl/codegen/client_callback_impl.h (359)
  10. src/compiler/python_generator.cc (4)
  11. src/core/ext/filters/client_channel/xds/xds_api.cc (184)
  12. src/core/ext/filters/client_channel/xds/xds_api.h (34)
  13. src/core/ext/filters/client_channel/xds/xds_client.cc (77)
  14. src/core/lib/debug/stats.h (2)
  15. test/cpp/end2end/client_callback_end2end_test.cc (110)
  16. test/cpp/end2end/xds_end2end_test.cc (48)
  17. test/cpp/util/grpc_cli.cc (3)
  18. test/cpp/util/grpc_tool.cc (9)
  19. test/cpp/util/grpc_tool_test.cc (23)
  20. tools/internal_ci/linux/grpc_xds_bazel_python_test_in_docker.sh (2)
  21. tools/internal_ci/linux/grpc_xds_bazel_test_in_docker.sh (2)
  22. tools/internal_ci/linux/grpc_xds_csharp_test_in_docker.sh (2)
  23. tools/internal_ci/linux/grpc_xds_php_test_in_docker.sh (2)
  24. tools/internal_ci/linux/grpc_xds_ruby_test_in_docker.sh (2)

@@ -144,7 +144,7 @@ Unary RPC:
MockEchoTestServiceStub stub;
EchoResponse resp;
resp.set_message("hello world");
Expect_CALL(stub, Echo(_,_,_)).Times(Atleast(1)).WillOnce(DoAll(SetArgPointee<2>(resp), Return(Status::OK)));
EXPECT_CALL(stub, Echo(_,_,_)).Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(resp), Return(Status::OK)));
FakeClient client(stub);
client.DoEcho();
```
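
The change above fixes two typos in the snippet: the gMock macro is `EXPECT_CALL` (not `Expect_CALL`) and the cardinality helper is `AtLeast` (not `Atleast`). To make the expectation concrete, here is a minimal sketch of a `FakeClient` that the snippet could be testing. It assumes the generated echo service lives in namespace `grpc::testing` (as in gRPC's test protos) and that the client is written against the abstract `StubInterface` so the mock can be injected; this is an illustration, not the exact class from the doc:
```
#include <cassert>

#include <grpcpp/grpcpp.h>

#include "echo_mock.grpc.pb.h"  // hypothetical path to the generated mock header

// Illustrative client under test. Depending on StubInterface (rather than
// the concrete Stub) is what allows MockEchoTestServiceStub to be injected.
class FakeClient {
 public:
  explicit FakeClient(grpc::testing::EchoTestService::StubInterface& stub)
      : stub_(&stub) {}

  void DoEcho() {
    grpc::ClientContext context;
    grpc::testing::EchoRequest request;
    grpc::testing::EchoResponse response;
    request.set_message("hello world");
    // The third argument is the one the mock fills in via SetArgPointee<2>(resp).
    grpc::Status status = stub_->Echo(&context, request, &response);
    assert(status.ok());
    assert(response.message() == "hello world");
  }

 private:
  grpc::testing::EchoTestService::StubInterface* stub_;
};
```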

@@ -0,0 +1,34 @@
Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio 15
VisualStudioVersion = 15.0.26228.4
MinimumVisualStudioVersion = 10.0.40219.1
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Greeter", "Greeter\Greeter.csproj", "{13B6DFC8-F5F6-4CC2-99DF-57A7CF042033}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "GreeterClient", "GreeterClient\GreeterClient.csproj", "{B754FB02-D501-4308-8B89-33AB7119C80D}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "GreeterServer", "GreeterServer\GreeterServer.csproj", "{DDBFF994-E076-43AD-B18D-049DFC1B670C}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
Release|Any CPU = Release|Any CPU
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{13B6DFC8-F5F6-4CC2-99DF-57A7CF042033}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{13B6DFC8-F5F6-4CC2-99DF-57A7CF042033}.Debug|Any CPU.Build.0 = Debug|Any CPU
{13B6DFC8-F5F6-4CC2-99DF-57A7CF042033}.Release|Any CPU.ActiveCfg = Release|Any CPU
{13B6DFC8-F5F6-4CC2-99DF-57A7CF042033}.Release|Any CPU.Build.0 = Release|Any CPU
{B754FB02-D501-4308-8B89-33AB7119C80D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{B754FB02-D501-4308-8B89-33AB7119C80D}.Debug|Any CPU.Build.0 = Debug|Any CPU
{B754FB02-D501-4308-8B89-33AB7119C80D}.Release|Any CPU.ActiveCfg = Release|Any CPU
{B754FB02-D501-4308-8B89-33AB7119C80D}.Release|Any CPU.Build.0 = Release|Any CPU
{DDBFF994-E076-43AD-B18D-049DFC1B670C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{DDBFF994-E076-43AD-B18D-049DFC1B670C}.Debug|Any CPU.Build.0 = Debug|Any CPU
{DDBFF994-E076-43AD-B18D-049DFC1B670C}.Release|Any CPU.ActiveCfg = Release|Any CPU
{DDBFF994-E076-43AD-B18D-049DFC1B670C}.Release|Any CPU.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
EndGlobal

@@ -0,0 +1,20 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>netstandard2.0</TargetFramework>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Google.Protobuf" Version="3.12.2" />
<PackageReference Include="Grpc.Core" Version="2.29.0" />
<PackageReference Include="Grpc.HealthCheck" Version="2.29.0" />
<PackageReference Include="Grpc.Reflection" Version="2.29.0"/>
<PackageReference Include="CommandLineParser" Version="2.8.0" />
<PackageReference Include="Grpc.Tools" Version="2.29.0" PrivateAssets="All" />
</ItemGroup>
<ItemGroup>
<Protobuf Include="../../../protos/helloworld.proto" Link="helloworld.proto" />
</ItemGroup>
</Project>

@@ -0,0 +1,12 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>netcoreapp2.1</TargetFramework>
<OutputType>Exe</OutputType>
</PropertyGroup>
<ItemGroup>
<ProjectReference Include="..\Greeter\Greeter.csproj" />
</ItemGroup>
</Project>

@@ -0,0 +1,51 @@
// Copyright 2015 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using System;
using Grpc.Core;
using Helloworld;
using CommandLine;
namespace GreeterClient
{
class Program
{
private class Options
{
[Option("server", Default = "localhost:50051", HelpText = "The address of the server")]
public string Server { get; set; }
}
public static void Main(string[] args)
{
Parser.Default.ParseArguments<Options>(args)
.WithParsed<Options>(options => RunClient(options));
}
private static void RunClient(Options options)
{
Channel channel = new Channel(options.Server, ChannelCredentials.Insecure);
var client = new Greeter.GreeterClient(channel);
String user = "you";
var reply = client.SayHello(new HelloRequest { Name = user });
Console.WriteLine("Greeter client received: " + reply.Message);
channel.ShutdownAsync().Wait();
Console.WriteLine("Press any key to exit...");
Console.ReadKey();
}
}
}

@@ -0,0 +1,12 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>netcoreapp2.1</TargetFramework>
<OutputType>Exe</OutputType>
</PropertyGroup>
<ItemGroup>
<ProjectReference Include="..\Greeter\Greeter.csproj" />
</ItemGroup>
</Project>

@@ -0,0 +1,93 @@
// Copyright 2020 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using System;
using System.Net;
using System.Threading.Tasks;
using Grpc.Core;
using Grpc.HealthCheck;
using Helloworld;
using Grpc.Health;
using Grpc.Health.V1;
using Grpc.Reflection;
using Grpc.Reflection.V1Alpha;
using CommandLine;
namespace GreeterServer
{
class GreeterImpl : Greeter.GreeterBase
{
private string hostname;
public GreeterImpl(string hostname)
{
this.hostname = hostname;
}
// Server side handler of the SayHello RPC
public override Task<HelloReply> SayHello(HelloRequest request, ServerCallContext context)
{
return Task.FromResult(new HelloReply { Message = $"Hello {request.Name} from {hostname}!"});
}
}
class Program
{
class Options
{
[Option("port", Default = 50051, HelpText = "The port to listen on.")]
public int Port { get; set; }
[Option("hostname", Required = false, HelpText = "The name clients will see in responses. If not specified, machine's hostname will obtain automatically.")]
public string Hostname { get; set; }
}
public static void Main(string[] args)
{
Parser.Default.ParseArguments<Options>(args)
.WithParsed<Options>(options => RunServer(options));
}
private static void RunServer(Options options)
{
var hostName = options.Hostname ?? Dns.GetHostName();
var serviceDescriptors = new [] {Greeter.Descriptor, Health.Descriptor, ServerReflection.Descriptor};
var greeterImpl = new GreeterImpl(hostName);
var healthServiceImpl = new HealthServiceImpl();
var reflectionImpl = new ReflectionServiceImpl(serviceDescriptors);
Server server = new Server
{
Services = { Greeter.BindService(greeterImpl), Health.BindService(healthServiceImpl), ServerReflection.BindService(reflectionImpl) },
Ports = { new ServerPort("[::]", options.Port, ServerCredentials.Insecure) }
};
server.Start();
// Mark all services as healthy.
foreach (var serviceDescriptor in serviceDescriptors)
{
healthServiceImpl.SetStatus(serviceDescriptor.FullName, HealthCheckResponse.Types.ServingStatus.Serving);
}
// Mark overall server status as healthy.
healthServiceImpl.SetStatus("", HealthCheckResponse.Types.ServingStatus.Serving);
Console.WriteLine("Greeter server listening on port " + options.Port);
Console.WriteLine("Press any key to stop the server...");
Console.ReadKey();
server.ShutdownAsync().Wait();
}
}
}

@@ -0,0 +1,99 @@
gRPC Hostname example (C#)
========================
BACKGROUND
-------------
This is a version of the helloworld example whose server response includes its hostname. The server also exposes the health checking and reflection services, which makes it a good backend for testing infrastructure, such as xDS load balancing.
PREREQUISITES
-------------
- The [.NET Core SDK 2.1+](https://www.microsoft.com/net/core)
You can also build the solution `Greeter.sln` using Visual Studio 2019,
but it's not a requirement.
RUN THE EXAMPLE
-------------
First, build and run the server, then verify that it is running and
behaving as expected (more on that below).
```
cd GreeterServer
dotnet run
```
After configuring your xDS server to track the gRPC server we just started,
create a bootstrap file as described in [gRFC A27](https://github.com/grpc/proposal/blob/master/A27-xds-global-load-balancing.md):
```
{
xds_servers": [
{
"server_uri": <string containing URI of xds server>,
"channel_creds": [
{
"type": <string containing channel cred type>,
"config": <JSON object containing config for the type>
}
]
}
],
"node": <JSON form of Node proto>
}
```
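For illustration only, a filled-in bootstrap file might look like the following; the server URI, credential type, and node values here are placeholders, not values shipped with this example:
```
{
  "xds_servers": [
    {
      "server_uri": "localhost:15000",
      "channel_creds": [
        { "type": "insecure" }
      ]
    }
  ],
  "node": {
    "id": "example-xds-client",
    "locality": { "zone": "example-zone" }
  }
}
```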
Then point the `GRPC_XDS_BOOTSTRAP` environment variable at the bootstrap file:
```
export GRPC_XDS_BOOTSTRAP=/etc/xds-bootstrap.json
```
Finally, run your client:
```
cd GreeterClient
dotnet run --server xds-experimental:///my-backend
```
VERIFYING THE SERVER
-------------
You can use `grpcurl` to manually test your server. If you don't have it,
install it from the [`grpcurl` releases page](https://github.com/fullstorydev/grpcurl/releases).
Exercise your server's application-layer service:
```sh
> grpcurl --plaintext -d '{"name": "you"}' localhost:50051
{
"message": "Hello you from jtatt.muc.corp.google.com!"
}
```
Make sure that all of your server's services are available via reflection:
```sh
> grpcurl --plaintext localhost:50051 list
grpc.health.v1.Health
grpc.reflection.v1alpha.ServerReflection
helloworld.Greeter
```
Make sure that your services are reporting healthy:
```sh
> grpcurl --plaintext -d '{"service": "helloworld.Greeter"}' localhost:50051
grpc.health.v1.Health/Check
{
"status": "SERVING"
}
> grpcurl --plaintext -d '{"service": ""}' localhost:50051
grpc.health.v1.Health/Check
{
"status": "SERVING"
}
```

@@ -461,51 +461,76 @@ class ClientCallbackReaderWriterImpl
// 1. Send initial metadata (unless corked) + recv initial metadata
// 2. Any read backlog
// 3. Any write backlog
// 4. Recv trailing metadata (unless corked)
// 4. Recv trailing metadata, on_completion callback
started_ = true;
start_tag_.Set(call_.call(),
[this](bool ok) {
reactor_->OnReadInitialMetadataDone(ok);
MaybeFinish();
},
&start_ops_, /*can_inline=*/false);
if (!start_corked_) {
start_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
context_->initial_metadata_flags());
}
start_ops_.RecvInitialMetadata(context_);
start_ops_.set_core_cq_tag(&start_tag_);
call_.PerformOps(&start_ops_);
{
grpc::internal::MutexLock lock(&start_mu_);
if (backlog_.read_ops) {
call_.PerformOps(&read_ops_);
}
if (backlog_.write_ops) {
call_.PerformOps(&write_ops_);
}
if (backlog_.writes_done_ops) {
call_.PerformOps(&writes_done_ops_);
}
call_.PerformOps(&finish_ops_);
// The last thing in this critical section is to set started_ so that it
// can be used lock-free as well.
started_.store(true, std::memory_order_release);
// Also set up the read and write tags so that they don't have to be set up
// each time
write_tag_.Set(call_.call(),
[this](bool ok) {
reactor_->OnWriteDone(ok);
MaybeFinish();
},
&write_ops_, /*can_inline=*/false);
write_ops_.set_core_cq_tag(&write_tag_);
read_tag_.Set(call_.call(),
[this](bool ok) {
reactor_->OnReadDone(ok);
MaybeFinish();
},
&read_ops_, /*can_inline=*/false);
read_ops_.set_core_cq_tag(&read_tag_);
if (read_ops_at_start_) {
call_.PerformOps(&read_ops_);
}
if (write_ops_at_start_) {
call_.PerformOps(&write_ops_);
}
if (writes_done_ops_at_start_) {
call_.PerformOps(&writes_done_ops_);
}
// MaybeFinish outside the lock to make sure that destruction of this object
// doesn't take place while holding the lock (which would cause the lock to
// be released after destruction)
this->MaybeFinish();
finish_tag_.Set(call_.call(), [this](bool /*ok*/) { MaybeFinish(); },
&finish_ops_, /*can_inline=*/false);
finish_ops_.ClientRecvStatus(context_, &finish_status_);
finish_ops_.set_core_cq_tag(&finish_tag_);
call_.PerformOps(&finish_ops_);
}
void Read(Response* msg) override {
read_ops_.RecvMessage(msg);
callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
grpc::internal::MutexLock lock(&start_mu_);
if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
backlog_.read_ops = true;
return;
}
if (started_) {
call_.PerformOps(&read_ops_);
} else {
read_ops_at_start_ = true;
}
call_.PerformOps(&read_ops_);
}
void Write(const Request* msg, ::grpc::WriteOptions options) override {
if (start_corked_) {
write_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
context_->initial_metadata_flags());
start_corked_ = false;
}
if (options.is_last_message()) {
options.set_buffer_hint();
write_ops_.ClientSendClose();
@@ -513,22 +538,18 @@ class ClientCallbackReaderWriterImpl
// TODO(vjpai): don't assert
GPR_CODEGEN_ASSERT(write_ops_.SendMessagePtr(msg, options).ok());
callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
if (GPR_UNLIKELY(corked_write_needed_)) {
write_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
context_->initial_metadata_flags());
corked_write_needed_ = false;
}
if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
grpc::internal::MutexLock lock(&start_mu_);
if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
backlog_.write_ops = true;
return;
}
if (started_) {
call_.PerformOps(&write_ops_);
} else {
write_ops_at_start_ = true;
}
call_.PerformOps(&write_ops_);
}
void WritesDone() override {
if (start_corked_) {
writes_done_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
context_->initial_metadata_flags());
start_corked_ = false;
}
writes_done_ops_.ClientSendClose();
writes_done_tag_.Set(call_.call(),
[this](bool ok) {
@@ -538,19 +559,11 @@ class ClientCallbackReaderWriterImpl
&writes_done_ops_, /*can_inline=*/false);
writes_done_ops_.set_core_cq_tag(&writes_done_tag_);
callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
if (GPR_UNLIKELY(corked_write_needed_)) {
writes_done_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
context_->initial_metadata_flags());
corked_write_needed_ = false;
if (started_) {
call_.PerformOps(&writes_done_ops_);
} else {
writes_done_ops_at_start_ = true;
}
if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
grpc::internal::MutexLock lock(&start_mu_);
if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
backlog_.writes_done_ops = true;
return;
}
}
call_.PerformOps(&writes_done_ops_);
}
void AddHold(int holds) override {
@@ -567,42 +580,8 @@ class ClientCallbackReaderWriterImpl
: context_(context),
call_(call),
reactor_(reactor),
start_corked_(context_->initial_metadata_corked_),
corked_write_needed_(start_corked_) {
start_corked_(context_->initial_metadata_corked_) {
this->BindReactor(reactor);
// Set up the unchanging parts of the start, read, and write tags and ops.
start_tag_.Set(call_.call(),
[this](bool ok) {
reactor_->OnReadInitialMetadataDone(ok);
MaybeFinish();
},
&start_ops_, /*can_inline=*/false);
start_ops_.RecvInitialMetadata(context_);
start_ops_.set_core_cq_tag(&start_tag_);
write_tag_.Set(call_.call(),
[this](bool ok) {
reactor_->OnWriteDone(ok);
MaybeFinish();
},
&write_ops_, /*can_inline=*/false);
write_ops_.set_core_cq_tag(&write_tag_);
read_tag_.Set(call_.call(),
[this](bool ok) {
reactor_->OnReadDone(ok);
MaybeFinish();
},
&read_ops_, /*can_inline=*/false);
read_ops_.set_core_cq_tag(&read_tag_);
// Also set up the Finish tag and op set.
finish_tag_.Set(call_.call(), [this](bool /*ok*/) { MaybeFinish(); },
&finish_ops_,
/*can_inline=*/false);
finish_ops_.ClientRecvStatus(context_, &finish_status_);
finish_ops_.set_core_cq_tag(&finish_tag_);
}
::grpc_impl::ClientContext* const context_;
@@ -613,9 +592,7 @@ class ClientCallbackReaderWriterImpl
grpc::internal::CallOpRecvInitialMetadata>
start_ops_;
grpc::internal::CallbackWithSuccessTag start_tag_;
const bool start_corked_;
bool corked_write_needed_; // no lock needed since only accessed in
// Write/WritesDone which cannot be concurrent
bool start_corked_;
grpc::internal::CallOpSet<grpc::internal::CallOpClientRecvStatus> finish_ops_;
grpc::internal::CallbackWithSuccessTag finish_tag_;
@@ -626,27 +603,22 @@ class ClientCallbackReaderWriterImpl
grpc::internal::CallOpClientSendClose>
write_ops_;
grpc::internal::CallbackWithSuccessTag write_tag_;
bool write_ops_at_start_{false};
grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
grpc::internal::CallOpClientSendClose>
writes_done_ops_;
grpc::internal::CallbackWithSuccessTag writes_done_tag_;
bool writes_done_ops_at_start_{false};
grpc::internal::CallOpSet<grpc::internal::CallOpRecvMessage<Response>>
read_ops_;
grpc::internal::CallbackWithSuccessTag read_tag_;
bool read_ops_at_start_{false};
struct StartCallBacklog {
bool write_ops = false;
bool writes_done_ops = false;
bool read_ops = false;
};
StartCallBacklog backlog_ /* GUARDED_BY(start_mu_) */;
// Minimum of 3 callbacks to pre-register for start ops, StartCall, and finish
std::atomic<intptr_t> callbacks_outstanding_{3};
std::atomic_bool started_{false};
grpc::internal::Mutex start_mu_;
// Minimum of 2 callbacks to pre-register for start and finish
std::atomic<intptr_t> callbacks_outstanding_{2};
bool started_{false};
};
template <class Request, class Response>
@@ -698,7 +670,8 @@ class ClientCallbackReaderImpl : public ClientCallbackReader<Response> {
// This call initiates two batches, plus any backlog, each with a callback
// 1. Send initial metadata (unless corked) + recv initial metadata
// 2. Any backlog
// 3. Recv trailing metadata
// 3. Recv trailing metadata, on_completion callback
started_ = true;
start_tag_.Set(call_.call(),
[this](bool ok) {
@@ -720,13 +693,8 @@ class ClientCallbackReaderImpl : public ClientCallbackReader<Response> {
},
&read_ops_, /*can_inline=*/false);
read_ops_.set_core_cq_tag(&read_tag_);
{
grpc::internal::MutexLock lock(&start_mu_);
if (backlog_.read_ops) {
call_.PerformOps(&read_ops_);
}
started_.store(true, std::memory_order_release);
if (read_ops_at_start_) {
call_.PerformOps(&read_ops_);
}
finish_tag_.Set(call_.call(), [this](bool /*ok*/) { MaybeFinish(); },
@@ -739,14 +707,11 @@ class ClientCallbackReaderImpl : public ClientCallbackReader<Response> {
void Read(Response* msg) override {
read_ops_.RecvMessage(msg);
callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
grpc::internal::MutexLock lock(&start_mu_);
if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
backlog_.read_ops = true;
return;
}
if (started_) {
call_.PerformOps(&read_ops_);
} else {
read_ops_at_start_ = true;
}
call_.PerformOps(&read_ops_);
}
void AddHold(int holds) override {
@@ -787,16 +752,11 @@ class ClientCallbackReaderImpl : public ClientCallbackReader<Response> {
grpc::internal::CallOpSet<grpc::internal::CallOpRecvMessage<Response>>
read_ops_;
grpc::internal::CallbackWithSuccessTag read_tag_;
struct StartCallBacklog {
bool read_ops = false;
};
StartCallBacklog backlog_ /* GUARDED_BY(start_mu_) */;
bool read_ops_at_start_{false};
// Minimum of 2 callbacks to pre-register for start and finish
std::atomic<intptr_t> callbacks_outstanding_{2};
std::atomic_bool started_{false};
grpc::internal::Mutex start_mu_;
bool started_{false};
};
template <class Response>
@@ -849,60 +809,74 @@ class ClientCallbackWriterImpl : public ClientCallbackWriter<Request> {
// This call initiates two batches, plus any backlog, each with a callback
// 1. Send initial metadata (unless corked) + recv initial metadata
// 2. Any backlog
// 3. Recv trailing metadata
// 3. Recv trailing metadata, on_completion callback
started_ = true;
start_tag_.Set(call_.call(),
[this](bool ok) {
reactor_->OnReadInitialMetadataDone(ok);
MaybeFinish();
},
&start_ops_, /*can_inline=*/false);
if (!start_corked_) {
start_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
context_->initial_metadata_flags());
}
start_ops_.RecvInitialMetadata(context_);
start_ops_.set_core_cq_tag(&start_tag_);
call_.PerformOps(&start_ops_);
{
grpc::internal::MutexLock lock(&start_mu_);
if (backlog_.write_ops) {
call_.PerformOps(&write_ops_);
}
if (backlog_.writes_done_ops) {
call_.PerformOps(&writes_done_ops_);
}
call_.PerformOps(&finish_ops_);
// The last thing in this critical section is to set started_ so that it
// can be used lock-free as well.
started_.store(true, std::memory_order_release);
// Also set up the read and write tags so that they don't have to be set up
// each time
write_tag_.Set(call_.call(),
[this](bool ok) {
reactor_->OnWriteDone(ok);
MaybeFinish();
},
&write_ops_, /*can_inline=*/false);
write_ops_.set_core_cq_tag(&write_tag_);
if (write_ops_at_start_) {
call_.PerformOps(&write_ops_);
}
// MaybeFinish outside the lock to make sure that destruction of this object
// doesn't take place while holding the lock (which would cause the lock to
// be released after destruction)
this->MaybeFinish();
if (writes_done_ops_at_start_) {
call_.PerformOps(&writes_done_ops_);
}
finish_tag_.Set(call_.call(), [this](bool /*ok*/) { MaybeFinish(); },
&finish_ops_, /*can_inline=*/false);
finish_ops_.ClientRecvStatus(context_, &finish_status_);
finish_ops_.set_core_cq_tag(&finish_tag_);
call_.PerformOps(&finish_ops_);
}
void Write(const Request* msg, ::grpc::WriteOptions options) override {
if (GPR_UNLIKELY(options.is_last_message())) {
if (start_corked_) {
write_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
context_->initial_metadata_flags());
start_corked_ = false;
}
if (options.is_last_message()) {
options.set_buffer_hint();
write_ops_.ClientSendClose();
}
// TODO(vjpai): don't assert
GPR_CODEGEN_ASSERT(write_ops_.SendMessagePtr(msg, options).ok());
callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
if (GPR_UNLIKELY(corked_write_needed_)) {
write_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
context_->initial_metadata_flags());
corked_write_needed_ = false;
if (started_) {
call_.PerformOps(&write_ops_);
} else {
write_ops_at_start_ = true;
}
if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
grpc::internal::MutexLock lock(&start_mu_);
if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
backlog_.write_ops = true;
return;
}
}
call_.PerformOps(&write_ops_);
}
void WritesDone() override {
if (start_corked_) {
writes_done_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
context_->initial_metadata_flags());
start_corked_ = false;
}
writes_done_ops_.ClientSendClose();
writes_done_tag_.Set(call_.call(),
[this](bool ok) {
@@ -912,21 +886,11 @@ class ClientCallbackWriterImpl : public ClientCallbackWriter<Request> {
&writes_done_ops_, /*can_inline=*/false);
writes_done_ops_.set_core_cq_tag(&writes_done_tag_);
callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
if (GPR_UNLIKELY(corked_write_needed_)) {
writes_done_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
context_->initial_metadata_flags());
corked_write_needed_ = false;
}
if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
grpc::internal::MutexLock lock(&start_mu_);
if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
backlog_.writes_done_ops = true;
return;
}
if (started_) {
call_.PerformOps(&writes_done_ops_);
} else {
writes_done_ops_at_start_ = true;
}
call_.PerformOps(&writes_done_ops_);
}
void AddHold(int holds) override {
@@ -945,36 +909,10 @@ class ClientCallbackWriterImpl : public ClientCallbackWriter<Request> {
: context_(context),
call_(call),
reactor_(reactor),
start_corked_(context_->initial_metadata_corked_),
corked_write_needed_(start_corked_) {
start_corked_(context_->initial_metadata_corked_) {
this->BindReactor(reactor);
// Set up the unchanging parts of the start and write tags and ops.
start_tag_.Set(call_.call(),
[this](bool ok) {
reactor_->OnReadInitialMetadataDone(ok);
MaybeFinish();
},
&start_ops_, /*can_inline=*/false);
start_ops_.RecvInitialMetadata(context_);
start_ops_.set_core_cq_tag(&start_tag_);
write_tag_.Set(call_.call(),
[this](bool ok) {
reactor_->OnWriteDone(ok);
MaybeFinish();
},
&write_ops_, /*can_inline=*/false);
write_ops_.set_core_cq_tag(&write_tag_);
// Also set up the Finish tag and op set.
finish_ops_.RecvMessage(response);
finish_ops_.AllowNoMessage();
finish_tag_.Set(call_.call(), [this](bool /*ok*/) { MaybeFinish(); },
&finish_ops_,
/*can_inline=*/false);
finish_ops_.ClientRecvStatus(context_, &finish_status_);
finish_ops_.set_core_cq_tag(&finish_tag_);
}
::grpc_impl::ClientContext* const context_;
@@ -985,9 +923,7 @@ class ClientCallbackWriterImpl : public ClientCallbackWriter<Request> {
grpc::internal::CallOpRecvInitialMetadata>
start_ops_;
grpc::internal::CallbackWithSuccessTag start_tag_;
const bool start_corked_;
bool corked_write_needed_; // no lock needed since only accessed in
// Write/WritesDone which cannot be concurrent
bool start_corked_;
grpc::internal::CallOpSet<grpc::internal::CallOpGenericRecvMessage,
grpc::internal::CallOpClientRecvStatus>
@@ -1000,22 +936,17 @@ class ClientCallbackWriterImpl : public ClientCallbackWriter<Request> {
grpc::internal::CallOpClientSendClose>
write_ops_;
grpc::internal::CallbackWithSuccessTag write_tag_;
bool write_ops_at_start_{false};
grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
grpc::internal::CallOpClientSendClose>
writes_done_ops_;
grpc::internal::CallbackWithSuccessTag writes_done_tag_;
bool writes_done_ops_at_start_{false};
struct StartCallBacklog {
bool write_ops = false;
bool writes_done_ops = false;
};
StartCallBacklog backlog_ /* GUARDED_BY(start_mu_) */;
// Minimum of 3 callbacks to pre-register for start ops, StartCall, and finish
std::atomic<intptr_t> callbacks_outstanding_{3};
std::atomic_bool started_{false};
grpc::internal::Mutex start_mu_;
// Minimum of 2 callbacks to pre-register for start and finish
std::atomic<intptr_t> callbacks_outstanding_{2};
bool started_{false};
};
template <class Request>
@@ -1054,6 +985,7 @@ class ClientCallbackUnaryImpl final : public ClientCallbackUnary {
// This call initiates two batches, each with a callback
// 1. Send initial metadata + write + writes done + recv initial metadata
// 2. Read message, recv trailing metadata
started_ = true;
start_tag_.Set(call_.call(),
[this](bool ok) {
@@ -1121,6 +1053,7 @@ class ClientCallbackUnaryImpl final : public ClientCallbackUnary {
// This call will have 2 callbacks: start and finish
std::atomic<intptr_t> callbacks_outstanding_{2};
bool started_{false};
};
class ClientCallbackUnaryFactory {

@@ -777,7 +777,9 @@ pair<bool, grpc::string> PrivateGenerator::GetGrpcServices() {
if (generate_in_pb2_grpc) {
out->Print(
"# Generated by the gRPC Python protocol compiler plugin. "
"DO NOT EDIT!\n");
"DO NOT EDIT!\n\"\"\""
"Client and server classes corresponding to protobuf-defined "
"services.\"\"\"\n");
if (!PrintPreamble(out.get())) {
return make_pair(false, "");
}
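
With this change, the header that the plugin emits at the top of each generated `*_pb2_grpc.py` module should look roughly like the following (derived directly from the strings printed above, with the module docstring immediately after the do-not-edit comment):
```
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
```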

@@ -270,42 +270,6 @@ void PopulateNode(upb_arena* arena, const XdsBootstrap::Node* node,
arena);
}
envoy_api_v2_DiscoveryRequest* CreateDiscoveryRequest(
upb_arena* arena, const char* type_url, const std::string& version,
const std::string& nonce, grpc_error* error) {
// Create a request.
envoy_api_v2_DiscoveryRequest* request =
envoy_api_v2_DiscoveryRequest_new(arena);
// Set type_url.
envoy_api_v2_DiscoveryRequest_set_type_url(request,
upb_strview_makez(type_url));
// Set version_info.
if (!version.empty()) {
envoy_api_v2_DiscoveryRequest_set_version_info(
request, upb_strview_makez(version.c_str()));
}
// Set nonce.
if (!nonce.empty()) {
envoy_api_v2_DiscoveryRequest_set_response_nonce(
request, upb_strview_makez(nonce.c_str()));
}
// Set error_detail if it's a NACK.
if (error != GRPC_ERROR_NONE) {
grpc_slice error_description_slice;
GPR_ASSERT(grpc_error_get_str(error, GRPC_ERROR_STR_DESCRIPTION,
&error_description_slice));
upb_strview error_description_strview =
upb_strview_make(reinterpret_cast<const char*>(
GPR_SLICE_START_PTR(error_description_slice)),
GPR_SLICE_LENGTH(error_description_slice));
google_rpc_Status* error_detail =
envoy_api_v2_DiscoveryRequest_mutable_error_detail(request, arena);
google_rpc_Status_set_message(error_detail, error_description_strview);
GRPC_ERROR_UNREF(error);
}
return request;
}
inline absl::string_view UpbStringToAbsl(const upb_strview& str) {
return absl::string_view(str.data, str.size);
}
@@ -476,92 +440,43 @@ grpc_slice SerializeDiscoveryRequest(upb_arena* arena,
} // namespace
grpc_slice XdsApi::CreateUnsupportedTypeNackRequest(const std::string& type_url,
const std::string& nonce,
grpc_error* error) {
upb::Arena arena;
envoy_api_v2_DiscoveryRequest* request = CreateDiscoveryRequest(
arena.ptr(), type_url.c_str(), /*version=*/"", nonce, error);
MaybeLogDiscoveryRequest(client_, tracer_, request);
return SerializeDiscoveryRequest(arena.ptr(), request);
}
grpc_slice XdsApi::CreateLdsRequest(const std::string& server_name,
const std::string& version,
const std::string& nonce, grpc_error* error,
bool populate_node) {
upb::Arena arena;
envoy_api_v2_DiscoveryRequest* request =
CreateDiscoveryRequest(arena.ptr(), kLdsTypeUrl, version, nonce, error);
// Populate node.
if (populate_node) {
envoy_api_v2_core_Node* node_msg =
envoy_api_v2_DiscoveryRequest_mutable_node(request, arena.ptr());
PopulateNode(arena.ptr(), node_, build_version_, user_agent_name_, "",
node_msg);
}
// Add resource_name.
envoy_api_v2_DiscoveryRequest_add_resource_names(
request, upb_strview_make(server_name.data(), server_name.size()),
arena.ptr());
MaybeLogDiscoveryRequest(client_, tracer_, request);
return SerializeDiscoveryRequest(arena.ptr(), request);
}
grpc_slice XdsApi::CreateRdsRequest(const std::string& route_config_name,
const std::string& version,
const std::string& nonce, grpc_error* error,
bool populate_node) {
upb::Arena arena;
envoy_api_v2_DiscoveryRequest* request =
CreateDiscoveryRequest(arena.ptr(), kRdsTypeUrl, version, nonce, error);
// Populate node.
if (populate_node) {
envoy_api_v2_core_Node* node_msg =
envoy_api_v2_DiscoveryRequest_mutable_node(request, arena.ptr());
PopulateNode(arena.ptr(), node_, build_version_, user_agent_name_, "",
node_msg);
}
// Add resource_name.
envoy_api_v2_DiscoveryRequest_add_resource_names(
request,
upb_strview_make(route_config_name.data(), route_config_name.size()),
arena.ptr());
MaybeLogDiscoveryRequest(client_, tracer_, request);
return SerializeDiscoveryRequest(arena.ptr(), request);
}
grpc_slice XdsApi::CreateCdsRequest(
const std::set<absl::string_view>& cluster_names,
grpc_slice XdsApi::CreateAdsRequest(
const std::string& type_url,
const std::set<absl::string_view>& resource_names,
const std::string& version, const std::string& nonce, grpc_error* error,
bool populate_node) {
upb::Arena arena;
// Create a request.
envoy_api_v2_DiscoveryRequest* request =
CreateDiscoveryRequest(arena.ptr(), kCdsTypeUrl, version, nonce, error);
// Populate node.
if (populate_node) {
envoy_api_v2_core_Node* node_msg =
envoy_api_v2_DiscoveryRequest_mutable_node(request, arena.ptr());
PopulateNode(arena.ptr(), node_, build_version_, user_agent_name_, "",
node_msg);
envoy_api_v2_DiscoveryRequest_new(arena.ptr());
// Set type_url.
envoy_api_v2_DiscoveryRequest_set_type_url(
request, upb_strview_make(type_url.data(), type_url.size()));
// Set version_info.
if (!version.empty()) {
envoy_api_v2_DiscoveryRequest_set_version_info(
request, upb_strview_make(version.data(), version.size()));
}
// Add resource_names.
for (const auto& cluster_name : cluster_names) {
envoy_api_v2_DiscoveryRequest_add_resource_names(
request, upb_strview_make(cluster_name.data(), cluster_name.size()),
arena.ptr());
// Set nonce.
if (!nonce.empty()) {
envoy_api_v2_DiscoveryRequest_set_response_nonce(
request, upb_strview_make(nonce.data(), nonce.size()));
}
// Set error_detail if it's a NACK.
if (error != GRPC_ERROR_NONE) {
grpc_slice error_description_slice;
GPR_ASSERT(grpc_error_get_str(error, GRPC_ERROR_STR_DESCRIPTION,
&error_description_slice));
upb_strview error_description_strview =
upb_strview_make(reinterpret_cast<const char*>(
GPR_SLICE_START_PTR(error_description_slice)),
GPR_SLICE_LENGTH(error_description_slice));
google_rpc_Status* error_detail =
envoy_api_v2_DiscoveryRequest_mutable_error_detail(request,
arena.ptr());
google_rpc_Status_set_message(error_detail, error_description_strview);
GRPC_ERROR_UNREF(error);
}
MaybeLogDiscoveryRequest(client_, tracer_, request);
return SerializeDiscoveryRequest(arena.ptr(), request);
}
grpc_slice XdsApi::CreateEdsRequest(
const std::set<absl::string_view>& eds_service_names,
const std::string& version, const std::string& nonce, grpc_error* error,
bool populate_node) {
upb::Arena arena;
envoy_api_v2_DiscoveryRequest* request =
CreateDiscoveryRequest(arena.ptr(), kEdsTypeUrl, version, nonce, error);
// Populate node.
if (populate_node) {
envoy_api_v2_core_Node* node_msg =
@@ -570,10 +485,9 @@ grpc_slice XdsApi::CreateEdsRequest(
node_msg);
}
// Add resource_names.
for (const auto& eds_service_name : eds_service_names) {
for (const auto& resource_name : resource_names) {
envoy_api_v2_DiscoveryRequest_add_resource_names(
request,
upb_strview_make(eds_service_name.data(), eds_service_name.size()),
request, upb_strview_make(resource_name.data(), resource_name.size()),
arena.ptr());
}
MaybeLogDiscoveryRequest(client_, tracer_, request);
@@ -1285,13 +1199,13 @@ grpc_error* LdsResponseParse(XdsClient* client, TraceFlag* tracer,
return GRPC_ERROR_NONE;
}
grpc_error* RdsResponseParse(XdsClient* client, TraceFlag* tracer,
const envoy_api_v2_DiscoveryResponse* response,
const std::string& expected_server_name,
const std::string& expected_route_config_name,
const bool xds_routing_enabled,
absl::optional<XdsApi::RdsUpdate>* rds_update,
upb_arena* arena) {
grpc_error* RdsResponseParse(
XdsClient* client, TraceFlag* tracer,
const envoy_api_v2_DiscoveryResponse* response,
const std::string& expected_server_name,
const std::set<absl::string_view>& expected_route_configuration_names,
const bool xds_routing_enabled,
absl::optional<XdsApi::RdsUpdate>* rds_update, upb_arena* arena) {
// Get the resources from the response.
size_t size;
const google_protobuf_Any* const* resources =
@@ -1312,10 +1226,14 @@ grpc_error* RdsResponseParse(XdsClient* client, TraceFlag* tracer,
return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Can't decode route_config.");
}
// Check route_config_name. Ignore unexpected route_config.
const upb_strview name = envoy_api_v2_RouteConfiguration_name(route_config);
const upb_strview expected_name =
upb_strview_makez(expected_route_config_name.c_str());
if (!upb_strview_eql(name, expected_name)) continue;
const upb_strview route_config_name =
envoy_api_v2_RouteConfiguration_name(route_config);
absl::string_view route_config_name_strview(route_config_name.data,
route_config_name.size);
if (expected_route_configuration_names.find(route_config_name_strview) ==
expected_route_configuration_names.end()) {
continue;
}
// Parse the route_config.
XdsApi::RdsUpdate local_rds_update;
grpc_error* error =
@@ -1600,7 +1518,7 @@ grpc_error* EdsResponseParse(
grpc_error* XdsApi::ParseAdsResponse(
const grpc_slice& encoded_response, const std::string& expected_server_name,
const std::string& expected_route_config_name,
const std::set<absl::string_view>& expected_route_configuration_names,
const std::set<absl::string_view>& expected_cluster_names,
const std::set<absl::string_view>& expected_eds_service_names,
absl::optional<LdsUpdate>* lds_update,
@@ -1635,8 +1553,8 @@ grpc_error* XdsApi::ParseAdsResponse(
xds_routing_enabled_, lds_update, arena.ptr());
} else if (*type_url == kRdsTypeUrl) {
return RdsResponseParse(client_, tracer_, response, expected_server_name,
expected_route_config_name, xds_routing_enabled_,
rds_update, arena.ptr());
expected_route_configuration_names,
xds_routing_enabled_, rds_update, arena.ptr());
} else if (*type_url == kCdsTypeUrl) {
return CdsResponseParse(client_, tracer_, response, expected_cluster_names,
cds_update_map, arena.ptr());

@@ -230,47 +230,21 @@ class XdsApi {
XdsApi(XdsClient* client, TraceFlag* tracer, const XdsBootstrap::Node* node);
// Creates a request to nack an unsupported resource type.
// Creates an ADS request.
// Takes ownership of \a error.
grpc_slice CreateUnsupportedTypeNackRequest(const std::string& type_url,
const std::string& nonce,
grpc_error* error);
// Creates an LDS request querying \a server_name.
// Takes ownership of \a error.
grpc_slice CreateLdsRequest(const std::string& server_name,
const std::string& version,
const std::string& nonce, grpc_error* error,
bool populate_node);
// Creates an RDS request querying \a route_config_name.
// Takes ownership of \a error.
grpc_slice CreateRdsRequest(const std::string& route_config_name,
grpc_slice CreateAdsRequest(const std::string& type_url,
const std::set<absl::string_view>& resource_names,
const std::string& version,
const std::string& nonce, grpc_error* error,
bool populate_node);
// Creates a CDS request querying \a cluster_names.
// Takes ownership of \a error.
grpc_slice CreateCdsRequest(const std::set<absl::string_view>& cluster_names,
const std::string& version,
const std::string& nonce, grpc_error* error,
bool populate_node);
// Creates an EDS request querying \a eds_service_names.
// Takes ownership of \a error.
grpc_slice CreateEdsRequest(
const std::set<absl::string_view>& eds_service_names,
const std::string& version, const std::string& nonce, grpc_error* error,
bool populate_node);
// Parses the ADS response and outputs the validated update for either CDS or
// EDS. If the response can't be parsed at the top level, \a type_url will
// point to an empty string; otherwise, it will point to the received data.
grpc_error* ParseAdsResponse(
const grpc_slice& encoded_response,
const std::string& expected_server_name,
const std::string& expected_route_config_name,
const std::set<absl::string_view>& expected_route_configuration_names,
const std::set<absl::string_view>& expected_cluster_names,
const std::set<absl::string_view>& expected_eds_service_names,
absl::optional<LdsUpdate>* lds_update,
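
As a sketch of how the consolidated `CreateAdsRequest` replaces the four per-type creators above, a hypothetical call site might look like this; `api` stands for an `XdsApi` instance, and the cluster names are invented for illustration:
```
// Build a CDS request for two subscribed clusters via the unified API,
// using the signature declared above.
std::set<absl::string_view> resource_names = {"cluster_a", "cluster_b"};
grpc_slice request_payload = api.CreateAdsRequest(
    XdsApi::kCdsTypeUrl, resource_names,
    /*version=*/"1", /*nonce=*/"A",
    /*error=*/GRPC_ERROR_NONE,  // ownership of a real error would transfer
    /*populate_node=*/false);
```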

@@ -255,8 +255,8 @@ class XdsClient::ChannelState::AdsCallState
bool IsCurrentCallOnChannel() const;
std::set<absl::string_view> ClusterNamesForRequest();
std::set<absl::string_view> EdsServiceNamesForRequest();
std::set<absl::string_view> ResourceNamesForRequest(
const std::string& type_url);
// The owning RetryableCall<>.
RefCountedPtr<RetryableCall<AdsCallState>> parent_;
@@ -804,33 +804,13 @@ void XdsClient::ChannelState::AdsCallState::SendMessageLocked(
}
auto& state = state_map_[type_url];
grpc_slice request_payload_slice;
std::set<absl::string_view> resource_names;
if (type_url == XdsApi::kLdsTypeUrl) {
resource_names.insert(xds_client()->server_name_);
request_payload_slice = xds_client()->api_.CreateLdsRequest(
xds_client()->server_name_, state.version, state.nonce,
GRPC_ERROR_REF(state.error), !sent_initial_message_);
state.subscribed_resources[xds_client()->server_name_]->Start(Ref());
} else if (type_url == XdsApi::kRdsTypeUrl) {
resource_names.insert(xds_client()->lds_result_->route_config_name);
request_payload_slice = xds_client()->api_.CreateRdsRequest(
xds_client()->lds_result_->route_config_name, state.version,
state.nonce, GRPC_ERROR_REF(state.error), !sent_initial_message_);
state.subscribed_resources[xds_client()->lds_result_->route_config_name]
->Start(Ref());
} else if (type_url == XdsApi::kCdsTypeUrl) {
resource_names = ClusterNamesForRequest();
request_payload_slice = xds_client()->api_.CreateCdsRequest(
resource_names, state.version, state.nonce, GRPC_ERROR_REF(state.error),
!sent_initial_message_);
} else if (type_url == XdsApi::kEdsTypeUrl) {
resource_names = EdsServiceNamesForRequest();
request_payload_slice = xds_client()->api_.CreateEdsRequest(
resource_names, state.version, state.nonce, GRPC_ERROR_REF(state.error),
!sent_initial_message_);
} else {
request_payload_slice = xds_client()->api_.CreateUnsupportedTypeNackRequest(
type_url, state.nonce, GRPC_ERROR_REF(state.error));
std::set<absl::string_view> resource_names =
ResourceNamesForRequest(type_url);
request_payload_slice = xds_client()->api_.CreateAdsRequest(
type_url, resource_names, state.version, state.nonce,
GRPC_ERROR_REF(state.error), !sent_initial_message_);
if (type_url != XdsApi::kLdsTypeUrl && type_url != XdsApi::kRdsTypeUrl &&
type_url != XdsApi::kCdsTypeUrl && type_url != XdsApi::kEdsTypeUrl) {
state_map_.erase(type_url);
}
sent_initial_message_ = true;
@@ -1242,12 +1222,10 @@ void XdsClient::ChannelState::AdsCallState::OnResponseReceivedLocked() {
// Note that ParseAdsResponse() also validates the response.
grpc_error* parse_error = xds_client()->api_.ParseAdsResponse(
response_slice, xds_client()->server_name_,
(xds_client()->lds_result_.has_value()
? xds_client()->lds_result_->route_config_name
: ""),
ClusterNamesForRequest(), EdsServiceNamesForRequest(), &lds_update,
&rds_update, &cds_update_map, &eds_update_map, &version, &nonce,
&type_url);
ResourceNamesForRequest(XdsApi::kRdsTypeUrl),
ResourceNamesForRequest(XdsApi::kCdsTypeUrl),
ResourceNamesForRequest(XdsApi::kEdsTypeUrl), &lds_update, &rds_update,
&cds_update_map, &eds_update_map, &version, &nonce, &type_url);
grpc_slice_unref_internal(response_slice);
if (type_url.empty()) {
// Ignore unparsable response.
@@ -1351,25 +1329,18 @@ bool XdsClient::ChannelState::AdsCallState::IsCurrentCallOnChannel() const {
}
std::set<absl::string_view>
XdsClient::ChannelState::AdsCallState::ClusterNamesForRequest() {
std::set<absl::string_view> cluster_names;
for (auto& p : state_map_[XdsApi::kCdsTypeUrl].subscribed_resources) {
cluster_names.insert(p.first);
OrphanablePtr<ResourceState>& state = p.second;
state->Start(Ref());
XdsClient::ChannelState::AdsCallState::ResourceNamesForRequest(
const std::string& type_url) {
std::set<absl::string_view> resource_names;
auto it = state_map_.find(type_url);
if (it != state_map_.end()) {
for (auto& p : it->second.subscribed_resources) {
resource_names.insert(p.first);
OrphanablePtr<ResourceState>& state = p.second;
state->Start(Ref());
}
}
return cluster_names;
}
std::set<absl::string_view>
XdsClient::ChannelState::AdsCallState::EdsServiceNamesForRequest() {
std::set<absl::string_view> eds_names;
for (auto& p : state_map_[XdsApi::kEdsTypeUrl].subscribed_resources) {
eds_names.insert(p.first);
OrphanablePtr<ResourceState>& state = p.second;
state->Start(Ref());
}
return eds_names;
return resource_names;
}
//

@@ -21,6 +21,8 @@
#include <grpc/support/port_platform.h>
#include <string>
#include <grpc/support/atm.h>
#include "src/core/lib/debug/stats_data.h"
#include "src/core/lib/iomgr/exec_ctx.h"

@@ -16,6 +16,12 @@
*
*/
#include <algorithm>
#include <functional>
#include <mutex>
#include <sstream>
#include <thread>
#include <grpcpp/channel.h>
#include <grpcpp/client_context.h>
#include <grpcpp/create_channel.h>
@@ -25,14 +31,6 @@
#include <grpcpp/server_builder.h>
#include <grpcpp/server_context.h>
#include <grpcpp/support/client_callback.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <condition_variable>
#include <functional>
#include <mutex>
#include <sstream>
#include <thread>
#include "src/core/lib/gpr/env.h"
#include "src/core/lib/iomgr/iomgr.h"
@@ -45,6 +43,8 @@
#include "test/cpp/util/string_ref_helper.h"
#include "test/cpp/util/test_credentials_provider.h"
#include <gtest/gtest.h>
// MAYBE_SKIP_TEST is a macro to determine if this particular test configuration
// should be skipped based on a decision made at SetUp time. In particular, any
// callback tests can only be run if the iomgr can run in the background or if
@@ -1079,8 +1079,7 @@ class BidiClient
public:
BidiClient(grpc::testing::EchoTestService::Stub* stub,
ServerTryCancelRequestPhase server_try_cancel,
int num_msgs_to_send, bool cork_metadata, bool first_write_async,
ClientCancelInfo client_cancel = {})
int num_msgs_to_send, ClientCancelInfo client_cancel = {})
: server_try_cancel_(server_try_cancel),
msgs_to_send_{num_msgs_to_send},
client_cancel_{client_cancel} {
@@ -1090,9 +1089,8 @@ class BidiClient
grpc::to_string(server_try_cancel));
}
request_.set_message("Hello fren ");
context_.set_initial_metadata_corked(cork_metadata);
stub->experimental_async()->BidiStream(&context_, this);
MaybeAsyncWrite(first_write_async);
MaybeWrite();
StartRead(&response_);
StartCall();
}
@@ -1113,10 +1111,6 @@ class BidiClient
}
}
void OnWriteDone(bool ok) override {
if (async_write_thread_.joinable()) {
async_write_thread_.join();
RemoveHold();
}
if (server_try_cancel_ == DO_NOT_CANCEL) {
EXPECT_TRUE(ok);
} else if (!ok) {
@@ -1181,26 +1175,6 @@ class BidiClient
}
private:
void MaybeAsyncWrite(bool first_write_async) {
if (first_write_async) {
// Make sure that we have a write to issue.
// TODO(vjpai): Make this work with 0 writes case as well.
assert(msgs_to_send_ >= 1);
AddHold();
async_write_thread_ = std::thread([this] {
std::unique_lock<std::mutex> lock(async_write_thread_mu_);
async_write_thread_cv_.wait(
lock, [this] { return async_write_thread_start_; });
MaybeWrite();
});
std::lock_guard<std::mutex> lock(async_write_thread_mu_);
async_write_thread_start_ = true;
async_write_thread_cv_.notify_one();
return;
}
MaybeWrite();
}
void MaybeWrite() {
if (client_cancel_.cancel &&
writes_complete_ == client_cancel_.ops_before_cancel) {
@@ -1222,57 +1196,13 @@ class BidiClient
std::mutex mu_;
std::condition_variable cv_;
bool done_ = false;
std::thread async_write_thread_;
bool async_write_thread_start_ = false;
std::mutex async_write_thread_mu_;
std::condition_variable async_write_thread_cv_;
};
TEST_P(ClientCallbackEnd2endTest, BidiStream) {
MAYBE_SKIP_TEST;
ResetStub();
BidiClient test(stub_.get(), DO_NOT_CANCEL,
kServerDefaultResponseStreamsToSend,
/*cork_metadata=*/false, /*first_write_async=*/false);
test.Await();
// Make sure that the server interceptors were not notified of a cancel
if (GetParam().use_interceptors) {
EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
}
}
TEST_P(ClientCallbackEnd2endTest, BidiStreamFirstWriteAsync) {
MAYBE_SKIP_TEST;
ResetStub();
BidiClient test(stub_.get(), DO_NOT_CANCEL,
kServerDefaultResponseStreamsToSend,
/*cork_metadata=*/false, /*first_write_async=*/true);
test.Await();
// Make sure that the server interceptors were not notified of a cancel
if (GetParam().use_interceptors) {
EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
}
}
TEST_P(ClientCallbackEnd2endTest, BidiStreamCorked) {
MAYBE_SKIP_TEST;
ResetStub();
BidiClient test(stub_.get(), DO_NOT_CANCEL,
kServerDefaultResponseStreamsToSend,
/*cork_metadata=*/true, /*first_write_async=*/false);
test.Await();
// Make sure that the server interceptors were not notified of a cancel
if (GetParam().use_interceptors) {
EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
}
}
TEST_P(ClientCallbackEnd2endTest, BidiStreamCorkedFirstWriteAsync) {
MAYBE_SKIP_TEST;
ResetStub();
BidiClient test(stub_.get(), DO_NOT_CANCEL,
kServerDefaultResponseStreamsToSend,
/*cork_metadata=*/true, /*first_write_async=*/true);
BidiClient test{stub_.get(), DO_NOT_CANCEL,
kServerDefaultResponseStreamsToSend};
test.Await();
// Make sure that the server interceptors were not notified of a cancel
if (GetParam().use_interceptors) {
@@ -1283,10 +1213,8 @@ TEST_P(ClientCallbackEnd2endTest, BidiStreamCorkedFirstWriteAsync) {
TEST_P(ClientCallbackEnd2endTest, ClientCancelsBidiStream) {
MAYBE_SKIP_TEST;
ResetStub();
BidiClient test(stub_.get(), DO_NOT_CANCEL,
kServerDefaultResponseStreamsToSend,
/*cork_metadata=*/false, /*first_write_async=*/false,
ClientCancelInfo(2));
BidiClient test{stub_.get(), DO_NOT_CANCEL,
kServerDefaultResponseStreamsToSend, ClientCancelInfo{2}};
test.Await();
// Make sure that the server interceptors were notified of a cancel
if (GetParam().use_interceptors) {
@@ -1298,8 +1226,7 @@ TEST_P(ClientCallbackEnd2endTest, ClientCancelsBidiStream) {
TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelBefore) {
MAYBE_SKIP_TEST;
ResetStub();
BidiClient test(stub_.get(), CANCEL_BEFORE_PROCESSING, /*num_msgs_to_send=*/2,
/*cork_metadata=*/false, /*first_write_async=*/false);
BidiClient test{stub_.get(), CANCEL_BEFORE_PROCESSING, 2};
test.Await();
// Make sure that the server interceptors were notified
if (GetParam().use_interceptors) {
@@ -1312,9 +1239,7 @@ TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelBefore) {
TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelDuring) {
MAYBE_SKIP_TEST;
ResetStub();
BidiClient test(stub_.get(), CANCEL_DURING_PROCESSING,
/*num_msgs_to_send=*/10, /*cork_metadata=*/false,
/*first_write_async=*/false);
BidiClient test{stub_.get(), CANCEL_DURING_PROCESSING, 10};
test.Await();
// Make sure that the server interceptors were notified
if (GetParam().use_interceptors) {
@@ -1327,8 +1252,7 @@ TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelDuring) {
TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelAfter) {
MAYBE_SKIP_TEST;
ResetStub();
BidiClient test(stub_.get(), CANCEL_AFTER_PROCESSING, /*num_msgs_to_send=*/5,
/*cork_metadata=*/false, /*first_write_async=*/false);
BidiClient test{stub_.get(), CANCEL_AFTER_PROCESSING, 5};
test.Await();
// Make sure that the server interceptors were notified
if (GetParam().use_interceptors) {

@@ -1916,30 +1916,6 @@ TEST_P(XdsResolverOnlyTest, ChangeClusters) {
EXPECT_EQ(0, std::get<1>(counts));
}
// Tests that we go into TRANSIENT_FAILURE if the Listener is removed.
TEST_P(XdsResolverOnlyTest, ListenerRemoved) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
AdsServiceImpl::BuildEdsResource(args));
// We need to wait for all backends to come online.
WaitForAllBackends();
// Unset LDS resource.
balancers_[0]->ads_service()->UnsetResource(kLdsTypeUrl,
kDefaultResourceName);
// Wait for RPCs to start failing.
do {
} while (SendRpc(RpcOptions(), nullptr).ok());
// Make sure RPCs are still failing.
CheckRpcSendFailure(1000);
// Make sure we ACK'ed the update.
EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state().state,
AdsServiceImpl::ResponseState::ACKED);
}
// Tests that we go into TRANSIENT_FAILURE if the Cluster disappears.
TEST_P(XdsResolverOnlyTest, ClusterRemoved) {
SetNextResolution({});
@@ -2294,6 +2270,30 @@ TEST_P(LdsRdsTest, Vanilla) {
AdsServiceImpl::ResponseState::ACKED);
}
// Tests that we go into TRANSIENT_FAILURE if the Listener is removed.
TEST_P(LdsRdsTest, ListenerRemoved) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
AdsServiceImpl::BuildEdsResource(args));
// We need to wait for all backends to come online.
WaitForAllBackends();
// Unset LDS resource.
balancers_[0]->ads_service()->UnsetResource(kLdsTypeUrl,
kDefaultResourceName);
// Wait for RPCs to start failing.
do {
} while (SendRpc(RpcOptions(), nullptr).ok());
// Make sure RPCs are still failing.
CheckRpcSendFailure(1000);
// Make sure we ACK'ed the update.
EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state().state,
AdsServiceImpl::ResponseState::ACKED);
}
// Tests that LDS client should send a NACK if matching domain can't be found in
// the LDS response.
TEST_P(LdsRdsTest, NoMatchedDomain) {

@@ -51,6 +51,9 @@
--decode=grpc.testing.SimpleResponse \
src/proto/grpc/testing/messages.proto \
< output.bin > output.txt
10. --default_service_config, optional default service config to use
on the channel. Note that this may be ignored if the name resolver
returns a service config.
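    Example (hypothetical address; the service config shown simply
    selects pick_first, so it only demonstrates the flag):
      grpc_cli ls localhost:50051 \
        --default_service_config='{"loadBalancingConfig":[{"pick_first":{}}]}'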
*/
#include <fstream>

@@ -57,6 +57,11 @@ DEFINE_string(proto_path, ".", "Path to look for the proto file.");
DEFINE_string(protofiles, "", "Name of the proto file.");
DEFINE_bool(binary_input, false, "Input in binary format");
DEFINE_bool(binary_output, false, "Output in binary format");
DEFINE_string(
default_service_config, "",
"Default service config to use on the channel, if non-empty. Note "
"that this will be ignored if the name resolver returns a service "
"config.");
DEFINE_bool(json_input, false, "Input in json format");
DEFINE_bool(json_output, false, "Output in json format");
DEFINE_string(infile, "", "Input file (default is stdin)");
@@ -217,6 +222,10 @@ std::shared_ptr<grpc::Channel> CreateCliChannel(
if (!cred.GetSslTargetNameOverride().empty()) {
args.SetSslTargetNameOverride(cred.GetSslTargetNameOverride());
}
if (!FLAGS_default_service_config.empty()) {
args.SetString(GRPC_ARG_SERVICE_CONFIG,
FLAGS_default_service_config.c_str());
}
return ::grpc::CreateCustomChannel(server_address, cred.GetCredentials(),
args);
}

@@ -118,6 +118,7 @@ DECLARE_bool(batch);
DECLARE_string(metadata);
DECLARE_string(protofiles);
DECLARE_string(proto_path);
DECLARE_string(default_service_config);
namespace {
@@ -1192,6 +1193,28 @@ TEST_F(GrpcToolTest, ListCommand_OverrideSslHostName) {
ShutdownServer();
}
TEST_F(GrpcToolTest, ConfiguringDefaultServiceConfig) {
// Test input "grpc_cli list localhost:<port>
// --default_service_config={\"loadBalancingConfig\":[{\"pick_first\":{}}]}"
std::stringstream output_stream;
const grpc::string server_address = SetUpServer();
const char* argv[] = {"grpc_cli", "ls", server_address.c_str()};
// Just check that the tool is still operational when --default_service_config
// is configured. This particular service config is in reality redundant with
// the channel's default configuration.
FLAGS_l = false;
FLAGS_default_service_config =
"{\"loadBalancingConfig\":[{\"pick_first\":{}}]}";
EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv, TestCliCredentials(),
std::bind(PrintStream, &output_stream,
std::placeholders::_1)));
FLAGS_default_service_config = "";
EXPECT_TRUE(0 == strcmp(output_stream.str().c_str(),
"grpc.testing.EchoTestService\n"
"grpc.reflection.v1alpha.ServerReflection\n"));
ShutdownServer();
}
} // namespace testing
} // namespace grpc

@@ -48,7 +48,7 @@ touch "$TOOLS_DIR"/src/proto/grpc/testing/__init__.py
bazel build //src/python/grpcio_tests/tests_py3_only/interop:xds_interop_client
GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,cds_lb,eds_lb,priority_lb,weighted_target_lb,lrs_lb "$PYTHON" \
GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,xds_routing_lb,cds_lb,eds_lb,priority_lb,weighted_target_lb,lrs_lb "$PYTHON" \
tools/run_tests/run_xds_tests.py \
--test_case=all \
--project_id=grpc-testing \

@@ -48,7 +48,7 @@ touch "$TOOLS_DIR"/src/proto/grpc/testing/__init__.py
bazel build test/cpp/interop:xds_interop_client
GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,cds_lb,eds_lb,priority_lb,weighted_target_lb,lrs_lb "$PYTHON" \
GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,xds_routing_lb,cds_lb,eds_lb,priority_lb,weighted_target_lb,lrs_lb "$PYTHON" \
tools/run_tests/run_xds_tests.py \
--test_case=all \
--project_id=grpc-testing \

@@ -48,7 +48,7 @@ touch "$TOOLS_DIR"/src/proto/grpc/testing/__init__.py
python tools/run_tests/run_tests.py -l csharp -c opt --build_only
GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,cds_lb,eds_lb,priority_lb,weighted_target_lb,lrs_lb "$PYTHON" \
GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,xds_routing_lb,cds_lb,eds_lb,priority_lb,weighted_target_lb,lrs_lb "$PYTHON" \
tools/run_tests/run_xds_tests.py \
--test_case=all \
--project_id=grpc-testing \

@@ -61,7 +61,7 @@ export CC=/usr/bin/gcc
composer install && \
./bin/generate_proto_php.sh)
GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,cds_lb,eds_lb,priority_lb,weighted_target_lb,lrs_lb "$PYTHON" \
GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,xds_routing_lb,cds_lb,eds_lb,priority_lb,weighted_target_lb,lrs_lb "$PYTHON" \
tools/run_tests/run_xds_tests.py \
--test_case=all \
--project_id=grpc-testing \

@@ -48,7 +48,7 @@ touch "$TOOLS_DIR"/src/proto/grpc/testing/__init__.py
(cd src/ruby && bundle && rake compile)
GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,cds_lb,eds_lb,priority_lb,weighted_target_lb,lrs_lb "$PYTHON" \
GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,xds_routing_lb,cds_lb,eds_lb,priority_lb,weighted_target_lb,lrs_lb "$PYTHON" \
tools/run_tests/run_xds_tests.py \
--test_case=all \
--project_id=grpc-testing \
