Merge branch 'master' into failhijackedsend

pull/17220/head
Yash Tibrewal 6 years ago
commit 5a6183f1bd
Changed files (number of changed lines in parentheses):

  1. .pylintrc (10)
  2. .pylintrc-tests (9)
  3. BUILD (5)
  4. CMakeLists.txt (441)
  5. Makefile (1977)
  6. build.yaml (315)
  7. config.m4 (1)
  8. config.w32 (1)
  9. examples/BUILD (27)
  10. examples/cpp/load_balancing/Makefile (110)
  11. examples/cpp/load_balancing/README.md (64)
  12. examples/cpp/load_balancing/greeter_client.cc (90)
  13. examples/cpp/load_balancing/greeter_server.cc (72)
  14. examples/ruby/greeter_server.rb (5)
  15. examples/ruby/route_guide/route_guide_server.rb (5)
  16. gRPC-C++.podspec (4)
  17. gRPC-Core.podspec (9)
  18. grpc.gemspec (2)
  19. grpc.gyp (25)
  20. include/grpc/impl/codegen/compression_types.h (3)
  21. include/grpcpp/impl/codegen/client_context.h (7)
  22. include/grpcpp/impl/codegen/client_interceptor.h (30)
  23. include/grpcpp/impl/codegen/interceptor.h (133)
  24. include/grpcpp/impl/codegen/server_context.h (14)
  25. include/grpcpp/impl/codegen/server_interceptor.h (29)
  26. include/grpcpp/support/client_interceptor.h (24)
  27. include/grpcpp/support/interceptor.h (24)
  28. include/grpcpp/support/server_interceptor.h (24)
  29. package.xml (2)
  30. src/core/ext/filters/client_channel/client_channel.cc (1035)
  31. src/core/ext/filters/client_channel/lb_policy.h (13)
  32. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (83)
  33. src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc (8)
  34. src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc (8)
  35. src/core/ext/filters/client_channel/lb_policy/xds/xds.cc (8)
  36. src/core/ext/filters/client_channel/request_routing.cc (936)
  37. src/core/ext/filters/client_channel/request_routing.h (177)
  38. src/core/ext/filters/client_channel/resolver_result_parsing.cc (14)
  39. src/core/ext/filters/client_channel/resolver_result_parsing.h (30)
  40. src/core/ext/transport/chttp2/transport/chttp2_transport.cc (7)
  41. src/core/ext/transport/chttp2/transport/context_list.cc (38)
  42. src/core/ext/transport/chttp2/transport/context_list.h (35)
  43. src/core/lib/iomgr/tcp_posix.cc (29)
  44. src/core/lib/security/credentials/composite/composite_credentials.cc (61)
  45. src/core/lib/security/credentials/composite/composite_credentials.h (43)
  46. src/core/lib/security/credentials/jwt/jwt_verifier.cc (2)
  47. src/core/tsi/ssl_transport_security.cc (12)
  48. src/cpp/common/channel_arguments.cc (2)
  49. src/cpp/ext/filters/census/context.cc (6)
  50. src/csharp/Grpc.IntegrationTesting/InteropClient.cs (2)
  51. src/csharp/Grpc.Tools/build/_grpc/_Grpc.Tools.targets (3)
  52. src/csharp/Grpc.Tools/build/_protobuf/Google.Protobuf.Tools.targets (3)
  53. src/objective-c/README.md (9)
  54. src/php/tests/interop/interop_client.php (8)
  55. src/python/grpcio/grpc/_auth.py (2)
  56. src/python/grpcio/grpc/_channel.py (14)
  57. src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi (2)
  58. src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi (11)
  59. src/python/grpcio/grpc/_server.py (106)
  60. src/python/grpcio/grpc/_utilities.py (3)
  61. src/python/grpcio/grpc_core_dependencies.py (1)
  62. src/python/grpcio_status/grpc_status/rpc_status.py (2)
  63. src/python/grpcio_testing/grpc_testing/_server/_handler.py (2)
  64. src/python/grpcio_tests/commands.py (2)
  65. src/python/grpcio_tests/setup.py (12)
  66. src/python/grpcio_tests/tests/_runner.py (2)
  67. src/python/grpcio_tests/tests/channelz/_channelz_servicer_test.py (3)
  68. src/python/grpcio_tests/tests/health_check/_health_servicer_test.py (8)
  69. src/python/grpcio_tests/tests/interop/client.py (12)
  70. src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py (8)
  71. src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py (2)
  72. src/python/grpcio_tests/tests/qps/benchmark_client.py (2)
  73. src/python/grpcio_tests/tests/qps/client_runner.py (2)
  74. src/python/grpcio_tests/tests/qps/worker_server.py (2)
  75. src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py (8)
  76. src/python/grpcio_tests/tests/stress/client.py (5)
  77. src/python/grpcio_tests/tests/testing/_client_application.py (4)
  78. src/python/grpcio_tests/tests/tests.json (1)
  79. src/python/grpcio_tests/tests/unit/BUILD.bazel (7)
  80. src/python/grpcio_tests/tests/unit/_api_test.py (1)
  81. src/python/grpcio_tests/tests/unit/_auth_context_test.py (6)
  82. src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py (6)
  83. src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py (5)
  84. src/python/grpcio_tests/tests/unit/_compression_test.py (5)
  85. src/python/grpcio_tests/tests/unit/_empty_message_test.py (1)
  86. src/python/grpcio_tests/tests/unit/_error_message_encoding_test.py (1)
  87. src/python/grpcio_tests/tests/unit/_from_grpc_import_star.py (2)
  88. src/python/grpcio_tests/tests/unit/_interceptor_test.py (1)
  89. src/python/grpcio_tests/tests/unit/_invalid_metadata_test.py (3)
  90. src/python/grpcio_tests/tests/unit/_invocation_defects_test.py (1)
  91. src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py (14)
  92. src/python/grpcio_tests/tests/unit/_metadata_flags_test.py (29)
  93. src/python/grpcio_tests/tests/unit/_metadata_test.py (1)
  94. src/python/grpcio_tests/tests/unit/_reconnect_test.py (2)
  95. src/python/grpcio_tests/tests/unit/_resource_exhausted_test.py (1)
  96. src/python/grpcio_tests/tests/unit/_rpc_test.py (1)
  97. src/python/grpcio_tests/tests/unit/_server_shutdown_scenarios.py (97)
  98. src/python/grpcio_tests/tests/unit/_server_shutdown_test.py (90)
  99. src/ruby/end2end/graceful_sig_handling_client.rb (61)
  100. src/ruby/end2end/graceful_sig_handling_driver.rb (83)

Some files were not shown because too many files have changed in this diff.

@ -1,3 +1,11 @@
[MASTER]
ignore=
src/python/grpcio/grpc/beta,
src/python/grpcio/grpc/framework,
src/python/grpcio/grpc/framework/common,
src/python/grpcio/grpc/framework/foundation,
src/python/grpcio/grpc/framework/interfaces,
[VARIABLES]
# TODO(https://github.com/PyCQA/pylint/issues/1345): How does the inspection
@ -82,3 +90,5 @@ disable=
# if:/else: and for:/else:.
useless-else-on-loop,
no-else-return,
# NOTE(lidiz): Python 3 makes object inheritance the default, but PY2 does not
useless-object-inheritance,

@ -1,3 +1,10 @@
[MASTER]
ignore=
src/python/grpcio_tests/tests/unit/beta,
src/python/grpcio_tests/tests/unit/framework,
src/python/grpcio_tests/tests/unit/framework/common,
src/python/grpcio_tests/tests/unit/framework/foundation,
[VARIABLES]
# TODO(https://github.com/PyCQA/pylint/issues/1345): How does the inspection
@ -115,3 +122,5 @@ disable=
# if:/else: and for:/else:.
useless-else-on-loop,
no-else-return,
# NOTE(lidiz): Python 3 makes object inheritance the default, but PY2 does not
useless-object-inheritance,

@ -244,10 +244,13 @@ GRPCXX_PUBLIC_HDRS = [
"include/grpcpp/support/byte_buffer.h",
"include/grpcpp/support/channel_arguments.h",
"include/grpcpp/support/client_callback.h",
"include/grpcpp/support/client_interceptor.h",
"include/grpcpp/support/config.h",
"include/grpcpp/support/interceptor.h",
"include/grpcpp/support/proto_buffer_reader.h",
"include/grpcpp/support/proto_buffer_writer.h",
"include/grpcpp/support/server_callback.h",
"include/grpcpp/support/server_interceptor.h",
"include/grpcpp/support/slice.h",
"include/grpcpp/support/status.h",
"include/grpcpp/support/status_code_enum.h",
@ -1053,6 +1056,7 @@ grpc_cc_library(
"src/core/ext/filters/client_channel/parse_address.cc",
"src/core/ext/filters/client_channel/proxy_mapper.cc",
"src/core/ext/filters/client_channel/proxy_mapper_registry.cc",
"src/core/ext/filters/client_channel/request_routing.cc",
"src/core/ext/filters/client_channel/resolver.cc",
"src/core/ext/filters/client_channel/resolver_registry.cc",
"src/core/ext/filters/client_channel/resolver_result_parsing.cc",
@ -1076,6 +1080,7 @@ grpc_cc_library(
"src/core/ext/filters/client_channel/parse_address.h",
"src/core/ext/filters/client_channel/proxy_mapper.h",
"src/core/ext/filters/client_channel/proxy_mapper_registry.h",
"src/core/ext/filters/client_channel/request_routing.h",
"src/core/ext/filters/client_channel/resolver.h",
"src/core/ext/filters/client_channel/resolver_factory.h",
"src/core/ext/filters/client_channel/resolver_registry.h",

File diff suppressed because it is too large (CMakeLists.txt).

File diff suppressed because it is too large (Makefile, 1977 changed lines).

File diff suppressed because it is too large (build.yaml).

@ -352,6 +352,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/ext/filters/client_channel/parse_address.cc \
src/core/ext/filters/client_channel/proxy_mapper.cc \
src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
src/core/ext/filters/client_channel/request_routing.cc \
src/core/ext/filters/client_channel/resolver.cc \
src/core/ext/filters/client_channel/resolver_registry.cc \
src/core/ext/filters/client_channel/resolver_result_parsing.cc \

@ -327,6 +327,7 @@ if (PHP_GRPC != "no") {
"src\\core\\ext\\filters\\client_channel\\parse_address.cc " +
"src\\core\\ext\\filters\\client_channel\\proxy_mapper.cc " +
"src\\core\\ext\\filters\\client_channel\\proxy_mapper_registry.cc " +
"src\\core\\ext\\filters\\client_channel\\request_routing.cc " +
"src\\core\\ext\\filters\\client_channel\\resolver.cc " +
"src\\core\\ext\\filters\\client_channel\\resolver_registry.cc " +
"src\\core\\ext\\filters\\client_channel\\resolver_result_parsing.cc " +

@ -66,3 +66,30 @@ cc_binary(
deps = [":helloworld", "//:grpc++"],
)
cc_binary(
name = "lb_client",
srcs = ["cpp/load_balancing/greeter_client.cc"],
defines = ["BAZEL_BUILD"],
deps = [":helloworld", "//:grpc++"],
)
cc_binary(
name = "lb_server",
srcs = ["cpp/load_balancing/greeter_server.cc"],
defines = ["BAZEL_BUILD"],
deps = [":helloworld", "//:grpc++"],
)
cc_binary(
name = "compression_client",
srcs = ["cpp/compression/greeter_client.cc"],
defines = ["BAZEL_BUILD"],
deps = [":helloworld", "//:grpc++"],
)
cc_binary(
name = "compression_server",
srcs = ["cpp/compression/greeter_server.cc"],
defines = ["BAZEL_BUILD"],
deps = [":helloworld", "//:grpc++"],
)

@ -0,0 +1,110 @@
#
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
HOST_SYSTEM = $(shell uname | cut -f 1 -d_)
SYSTEM ?= $(HOST_SYSTEM)
CXX = g++
CPPFLAGS += `pkg-config --cflags protobuf grpc`
CXXFLAGS += -std=c++11
ifeq ($(SYSTEM),Darwin)
LDFLAGS += -L/usr/local/lib `pkg-config --libs protobuf grpc++ grpc`\
-lgrpc++_reflection\
-ldl
else
LDFLAGS += -L/usr/local/lib `pkg-config --libs protobuf grpc++ grpc`\
-Wl,--no-as-needed -lgrpc++_reflection -Wl,--as-needed\
-ldl
endif
PROTOC = protoc
GRPC_CPP_PLUGIN = grpc_cpp_plugin
GRPC_CPP_PLUGIN_PATH ?= `which $(GRPC_CPP_PLUGIN)`
PROTOS_PATH = ../../protos
vpath %.proto $(PROTOS_PATH)
all: system-check greeter_client greeter_server
greeter_client: helloworld.pb.o helloworld.grpc.pb.o greeter_client.o
$(CXX) $^ $(LDFLAGS) -o $@
greeter_server: helloworld.pb.o helloworld.grpc.pb.o greeter_server.o
$(CXX) $^ $(LDFLAGS) -o $@
.PRECIOUS: %.grpc.pb.cc
%.grpc.pb.cc: %.proto
$(PROTOC) -I $(PROTOS_PATH) --grpc_out=. --plugin=protoc-gen-grpc=$(GRPC_CPP_PLUGIN_PATH) $<
.PRECIOUS: %.pb.cc
%.pb.cc: %.proto
$(PROTOC) -I $(PROTOS_PATH) --cpp_out=. $<
clean:
rm -f *.o *.pb.cc *.pb.h greeter_client greeter_server
# The following is to test your system and ensure a smoother experience.
# They are by no means necessary to actually compile grpc-enabled software.
PROTOC_CMD = which $(PROTOC)
PROTOC_CHECK_CMD = $(PROTOC) --version | grep -q libprotoc.3
PLUGIN_CHECK_CMD = which $(GRPC_CPP_PLUGIN)
HAS_PROTOC = $(shell $(PROTOC_CMD) > /dev/null && echo true || echo false)
ifeq ($(HAS_PROTOC),true)
HAS_VALID_PROTOC = $(shell $(PROTOC_CHECK_CMD) 2> /dev/null && echo true || echo false)
endif
HAS_PLUGIN = $(shell $(PLUGIN_CHECK_CMD) > /dev/null && echo true || echo false)
SYSTEM_OK = false
ifeq ($(HAS_VALID_PROTOC),true)
ifeq ($(HAS_PLUGIN),true)
SYSTEM_OK = true
endif
endif
system-check:
ifneq ($(HAS_VALID_PROTOC),true)
@echo " DEPENDENCY ERROR"
@echo
@echo "You don't have protoc 3.0.0 installed in your path."
@echo "Please install Google protocol buffers 3.0.0 and its compiler."
@echo "You can find it here:"
@echo
@echo " https://github.com/google/protobuf/releases/tag/v3.0.0"
@echo
@echo "Here is what I get when trying to evaluate your version of protoc:"
@echo
-$(PROTOC) --version
@echo
@echo
endif
ifneq ($(HAS_PLUGIN),true)
@echo " DEPENDENCY ERROR"
@echo
@echo "You don't have the grpc c++ protobuf plugin installed in your path."
@echo "Please install grpc. You can find it here:"
@echo
@echo " https://github.com/grpc/grpc"
@echo
@echo "Here is what I get when trying to detect if you have the plugin:"
@echo
-which $(GRPC_CPP_PLUGIN)
@echo
@echo
endif
ifneq ($(SYSTEM_OK),true)
@false
endif

@ -0,0 +1,64 @@
# gRPC C++ Load Balancing Tutorial
### Prerequisite
Make sure you have run the [hello world example](../helloworld) or understand the basics of gRPC. We will not dive into details that are already covered in the hello world example.
### Get the tutorial source code
The example code for this and our other examples lives in the `examples` directory. Clone this repository to your local machine by running the following command:
```sh
$ git clone -b $(curl -L https://grpc.io/release) https://github.com/grpc/grpc
```
Change your current directory to `examples/cpp/load_balancing`:
```sh
$ cd examples/cpp/load_balancing/
```
### Generating gRPC code
To generate the client and server side interfaces:
```sh
$ make helloworld.grpc.pb.cc helloworld.pb.cc
```
This internally invokes the proto-compiler as follows:
```sh
$ protoc -I ../../protos/ --grpc_out=. --plugin=protoc-gen-grpc=grpc_cpp_plugin ../../protos/helloworld.proto
$ protoc -I ../../protos/ --cpp_out=. ../../protos/helloworld.proto
```
### Writing a client and a server
The client and the server can be based on the hello world example.
Additionally, we can configure the load balancing policy. (To see what load balancing policies are available, check out [this folder](https://github.com/grpc/grpc/tree/master/src/core/ext/filters/client_channel/lb_policy).)
In the client, set the load balancing policy of the channel via a channel argument (for example, to round robin):
```cpp
ChannelArguments args;
// Set the load balancing policy for the channel.
args.SetLoadBalancingPolicyName("round_robin");
GreeterClient greeter(grpc::CreateCustomChannel(
"localhost:50051", grpc::InsecureChannelCredentials(), args));
```
For a working example, refer to [greeter_client.cc](greeter_client.cc) and [greeter_server.cc](greeter_server.cc).
Build and run the client and the server with the following commands.
```sh
make
./greeter_server
```
```sh
./greeter_client
```
(Note that load balancing in this example is trivial, since only one server address is resolved from the name.)
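Round robin only becomes observable when the target resolves to more than one backend. As a sketch, gRPC's `ipv4:` resolver scheme accepts a comma-separated address list (the second port here is arbitrary and assumes another server instance is listening on it):

```cpp
ChannelArguments args;
args.SetLoadBalancingPolicyName("round_robin");
// Two backends behind one target; picks will alternate between them.
GreeterClient greeter(grpc::CreateCustomChannel(
    "ipv4:127.0.0.1:50051,127.0.0.1:50052",
    grpc::InsecureChannelCredentials(), args));
```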

@ -0,0 +1,90 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <iostream>
#include <memory>
#include <string>
#include <grpcpp/grpcpp.h>
#ifdef BAZEL_BUILD
#include "examples/protos/helloworld.grpc.pb.h"
#else
#include "helloworld.grpc.pb.h"
#endif
using grpc::Channel;
using grpc::ChannelArguments;
using grpc::ClientContext;
using grpc::Status;
using helloworld::HelloRequest;
using helloworld::HelloReply;
using helloworld::Greeter;
class GreeterClient {
public:
GreeterClient(std::shared_ptr<Channel> channel)
: stub_(Greeter::NewStub(channel)) {}
// Assembles the client's payload, sends it and presents the response back
// from the server.
std::string SayHello(const std::string& user) {
// Data we are sending to the server.
HelloRequest request;
request.set_name(user);
// Container for the data we expect from the server.
HelloReply reply;
// Context for the client. It could be used to convey extra information to
// the server and/or tweak certain RPC behaviors.
ClientContext context;
// The actual RPC.
Status status = stub_->SayHello(&context, request, &reply);
// Act upon its status.
if (status.ok()) {
return reply.message();
} else {
std::cout << status.error_code() << ": " << status.error_message()
<< std::endl;
return "RPC failed";
}
}
private:
std::unique_ptr<Greeter::Stub> stub_;
};
int main(int argc, char** argv) {
// Instantiate the client. It requires a channel, out of which the actual RPCs
// are created. This channel models a connection to an endpoint (in this case,
// localhost at port 50051). We indicate that the channel isn't authenticated
// (use of InsecureChannelCredentials()).
ChannelArguments args;
// Set the load balancing policy for the channel.
args.SetLoadBalancingPolicyName("round_robin");
GreeterClient greeter(grpc::CreateCustomChannel(
"localhost:50051", grpc::InsecureChannelCredentials(), args));
std::string user("world");
std::string reply = greeter.SayHello(user);
std::cout << "Greeter received: " << reply << std::endl;
return 0;
}

@ -0,0 +1,72 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <iostream>
#include <memory>
#include <string>
#include <grpcpp/grpcpp.h>
#ifdef BAZEL_BUILD
#include "examples/protos/helloworld.grpc.pb.h"
#else
#include "helloworld.grpc.pb.h"
#endif
using grpc::Server;
using grpc::ServerBuilder;
using grpc::ServerContext;
using grpc::Status;
using helloworld::HelloRequest;
using helloworld::HelloReply;
using helloworld::Greeter;
// Logic and data behind the server's behavior.
class GreeterServiceImpl final : public Greeter::Service {
Status SayHello(ServerContext* context, const HelloRequest* request,
HelloReply* reply) override {
std::string prefix("Hello ");
reply->set_message(prefix + request->name());
return Status::OK;
}
};
void RunServer() {
std::string server_address("0.0.0.0:50051");
GreeterServiceImpl service;
ServerBuilder builder;
// Listen on the given address without any authentication mechanism.
builder.AddListeningPort(server_address, grpc::InsecureServerCredentials());
// Register "service" as the instance through which we'll communicate with
// clients. In this case it corresponds to a *synchronous* service.
builder.RegisterService(&service);
// Finally assemble the server.
std::unique_ptr<Server> server(builder.BuildAndStart());
std::cout << "Server listening on " << server_address << std::endl;
// Wait for the server to shut down. Note that some other thread must be
// responsible for shutting down the server for this call to ever return.
server->Wait();
}
int main(int argc, char** argv) {
RunServer();
return 0;
}

@ -39,7 +39,10 @@ def main
s = GRPC::RpcServer.new
s.add_http2_port('0.0.0.0:50051', :this_port_is_insecure)
s.handle(GreeterServer)
s.run_till_terminated
# Run the server with SIGHUP, SIGINT and SIGQUIT signal handlers so that it
# shuts down gracefully.
# The user could also choose to run the server via a call to run_till_terminated.
s.run_till_terminated_or_interrupted([1, 'int', 'SIGQUIT'])
end
main

@ -172,7 +172,10 @@ def main
s.add_http2_port(port, :this_port_is_insecure)
GRPC.logger.info("... running insecurely on #{port}")
s.handle(ServerImpl.new(feature_db))
s.run_till_terminated
# Run the server with SIGHUP, SIGINT and SIGQUIT signal handlers so that it
# shuts down gracefully.
# The user could also choose to run the server via a call to run_till_terminated.
s.run_till_terminated_or_interrupted([1, 'int', 'SIGQUIT'])
end
main

@ -116,10 +116,13 @@ Pod::Spec.new do |s|
'include/grpcpp/support/byte_buffer.h',
'include/grpcpp/support/channel_arguments.h',
'include/grpcpp/support/client_callback.h',
'include/grpcpp/support/client_interceptor.h',
'include/grpcpp/support/config.h',
'include/grpcpp/support/interceptor.h',
'include/grpcpp/support/proto_buffer_reader.h',
'include/grpcpp/support/proto_buffer_writer.h',
'include/grpcpp/support/server_callback.h',
'include/grpcpp/support/server_interceptor.h',
'include/grpcpp/support/slice.h',
'include/grpcpp/support/status.h',
'include/grpcpp/support/status_code_enum.h',
@ -352,6 +355,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/parse_address.h',
'src/core/ext/filters/client_channel/proxy_mapper.h',
'src/core/ext/filters/client_channel/proxy_mapper_registry.h',
'src/core/ext/filters/client_channel/request_routing.h',
'src/core/ext/filters/client_channel/resolver.h',
'src/core/ext/filters/client_channel/resolver_factory.h',
'src/core/ext/filters/client_channel/resolver_registry.h',

@ -349,6 +349,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/parse_address.h',
'src/core/ext/filters/client_channel/proxy_mapper.h',
'src/core/ext/filters/client_channel/proxy_mapper_registry.h',
'src/core/ext/filters/client_channel/request_routing.h',
'src/core/ext/filters/client_channel/resolver.h',
'src/core/ext/filters/client_channel/resolver_factory.h',
'src/core/ext/filters/client_channel/resolver_registry.h',
@ -791,6 +792,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/parse_address.cc',
'src/core/ext/filters/client_channel/proxy_mapper.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/request_routing.cc',
'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
@ -970,6 +972,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/parse_address.h',
'src/core/ext/filters/client_channel/proxy_mapper.h',
'src/core/ext/filters/client_channel/proxy_mapper_registry.h',
'src/core/ext/filters/client_channel/request_routing.h',
'src/core/ext/filters/client_channel/resolver.h',
'src/core/ext/filters/client_channel/resolver_factory.h',
'src/core/ext/filters/client_channel/resolver_registry.h',
@ -1198,9 +1201,7 @@ Pod::Spec.new do |s|
ss.dependency "#{s.name}/Interface", version
ss.dependency "#{s.name}/Implementation", version
ss.source_files = 'test/core/util/test_config.cc',
'test/core/util/test_config.h',
'test/core/end2end/data/client_certs.cc',
ss.source_files = 'test/core/end2end/data/client_certs.cc',
'test/core/end2end/data/server1_cert.cc',
'test/core/end2end/data/server1_key.cc',
'test/core/end2end/data/test_root_cert.cc',
@ -1224,6 +1225,7 @@ Pod::Spec.new do |s|
'test/core/util/slice_splitter.cc',
'test/core/util/subprocess_posix.cc',
'test/core/util/subprocess_windows.cc',
'test/core/util/test_config.cc',
'test/core/util/tracer_util.cc',
'test/core/util/trickle_endpoint.cc',
'test/core/util/cmdline.cc',
@ -1249,6 +1251,7 @@ Pod::Spec.new do |s|
'test/core/util/port_server_client.h',
'test/core/util/slice_splitter.h',
'test/core/util/subprocess.h',
'test/core/util/test_config.h',
'test/core/util/tracer_util.h',
'test/core/util/trickle_endpoint.h',
'test/core/util/cmdline.h',

@ -285,6 +285,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/filters/client_channel/parse_address.h )
s.files += %w( src/core/ext/filters/client_channel/proxy_mapper.h )
s.files += %w( src/core/ext/filters/client_channel/proxy_mapper_registry.h )
s.files += %w( src/core/ext/filters/client_channel/request_routing.h )
s.files += %w( src/core/ext/filters/client_channel/resolver.h )
s.files += %w( src/core/ext/filters/client_channel/resolver_factory.h )
s.files += %w( src/core/ext/filters/client_channel/resolver_registry.h )
@ -730,6 +731,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/filters/client_channel/parse_address.cc )
s.files += %w( src/core/ext/filters/client_channel/proxy_mapper.cc )
s.files += %w( src/core/ext/filters/client_channel/proxy_mapper_registry.cc )
s.files += %w( src/core/ext/filters/client_channel/request_routing.cc )
s.files += %w( src/core/ext/filters/client_channel/resolver.cc )
s.files += %w( src/core/ext/filters/client_channel/resolver_registry.cc )
s.files += %w( src/core/ext/filters/client_channel/resolver_result_parsing.cc )

@ -258,16 +258,6 @@
'src/core/lib/profiling/stap_timers.cc',
],
},
{
'target_name': 'gpr_test_util',
'type': 'static_library',
'dependencies': [
'gpr',
],
'sources': [
'test/core/util/test_config.cc',
],
},
{
'target_name': 'grpc',
'type': 'static_library',
@ -544,6 +534,7 @@
'src/core/ext/filters/client_channel/parse_address.cc',
'src/core/ext/filters/client_channel/proxy_mapper.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/request_routing.cc',
'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
@ -604,7 +595,6 @@
'target_name': 'grpc_test_util',
'type': 'static_library',
'dependencies': [
'gpr_test_util',
'gpr',
'grpc',
],
@ -634,6 +624,7 @@
'test/core/util/slice_splitter.cc',
'test/core/util/subprocess_posix.cc',
'test/core/util/subprocess_windows.cc',
'test/core/util/test_config.cc',
'test/core/util/tracer_util.cc',
'test/core/util/trickle_endpoint.cc',
'test/core/util/cmdline.cc',
@ -804,6 +795,7 @@
'src/core/ext/filters/client_channel/parse_address.cc',
'src/core/ext/filters/client_channel/proxy_mapper.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/request_routing.cc',
'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
@ -851,7 +843,6 @@
'type': 'static_library',
'dependencies': [
'gpr',
'gpr_test_util',
'grpc_unsecure',
],
'sources': [
@ -875,6 +866,7 @@
'test/core/util/slice_splitter.cc',
'test/core/util/subprocess_posix.cc',
'test/core/util/subprocess_windows.cc',
'test/core/util/test_config.cc',
'test/core/util/tracer_util.cc',
'test/core/util/trickle_endpoint.cc',
'test/core/util/cmdline.cc',
@ -1045,6 +1037,7 @@
'src/core/ext/filters/client_channel/parse_address.cc',
'src/core/ext/filters/client_channel/proxy_mapper.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/request_routing.cc',
'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
@ -1298,6 +1291,7 @@
'src/core/ext/filters/client_channel/parse_address.cc',
'src/core/ext/filters/client_channel/proxy_mapper.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/request_routing.cc',
'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
@ -1353,7 +1347,6 @@
'test_tcp_server',
'grpc_test_util',
'grpc',
'gpr_test_util',
'gpr',
],
'sources': [
@ -1366,7 +1359,6 @@
'dependencies': [
'grpc_test_util',
'grpc',
'gpr_test_util',
'gpr',
],
'sources': [
@ -1683,7 +1675,6 @@
'grpc_test_util',
'grpc++',
'grpc',
'gpr_test_util',
'gpr',
'grpc++_test_config',
],
@ -1718,7 +1709,6 @@
'grpc_test_util',
'grpc++',
'grpc',
'gpr_test_util',
'gpr',
'grpc++_test_config',
],
@ -2671,7 +2661,6 @@
'dependencies': [
'grpc_test_util_unsecure',
'grpc_unsecure',
'gpr_test_util',
'gpr',
],
'sources': [
@ -2684,7 +2673,6 @@
'dependencies': [
'grpc_test_util',
'grpc',
'gpr_test_util',
'gpr',
],
'sources': [
@ -2776,7 +2764,6 @@
'dependencies': [
'grpc_test_util_unsecure',
'grpc_unsecure',
'gpr_test_util',
'gpr',
],
'sources': [

@ -52,7 +52,8 @@ extern "C" {
"grpc.compression_enabled_algorithms_bitset"
/** \} */
/** The various compression algorithms supported by gRPC */
/** The various compression algorithms supported by gRPC (not sorted by
* compression level) */
typedef enum {
GRPC_COMPRESS_NONE = 0,
GRPC_COMPRESS_DEFLATE,

@ -200,6 +200,13 @@ class ClientContext {
/// end in "-bin".
/// \param meta_value The metadata value. If its value is binary, the key name
/// must end in "-bin".
///
/// Metadata must conform to the following format:
/// Custom-Metadata -> Binary-Header / ASCII-Header
/// Binary-Header -> {Header-Name "-bin" } {binary value}
/// ASCII-Header -> Header-Name ASCII-Value
/// Header-Name -> 1*( %x30-39 / %x61-7A / "_" / "-" / ".") ; 0-9 a-z _ - .
/// ASCII-Value -> 1*( %x20-%x7E ) ; space and printable ASCII
void AddMetadata(const grpc::string& meta_key,
const grpc::string& meta_value);
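As a quick illustration of that grammar on the client side (a sketch; the keys and values are invented for the example):

```cpp
#include <string>

#include <grpcpp/client_context.h>

grpc::ClientContext context;
// ASCII-Header: key built from 0-9 a-z _ - . ; value is printable ASCII.
context.AddMetadata("x-trace-id", "abc123");
// Binary-Header: the key must end in "-bin"; the value may be arbitrary bytes.
context.AddMetadata("x-auth-token-bin", std::string("\x01\x02\x03", 3));
```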

@ -38,9 +38,17 @@ class InterceptorBatchMethodsImpl;
namespace experimental {
class ClientRpcInfo;
// A factory interface for creation of client interceptors. A vector of
// factories can be provided at channel creation, which will be used to create
// a new vector of client interceptors per RPC. Client interceptor authors
// should create a subclass of ClientInterceptorFactoryInterface that creates
// objects of their interceptor.
class ClientInterceptorFactoryInterface {
public:
virtual ~ClientInterceptorFactoryInterface() {}
// Returns a pointer to an Interceptor object on successful creation, or
// nullptr otherwise. If nullptr is returned, this client interceptor factory
// is ignored for the purposes of that RPC.
virtual Interceptor* CreateClientInterceptor(ClientRpcInfo* info) = 0;
};
} // namespace experimental
@ -50,11 +58,16 @@ extern experimental::ClientInterceptorFactoryInterface*
g_global_client_interceptor_factory;
}
/// ClientRpcInfo represents the state of a particular RPC as it
/// appears to an interceptor. It is created and owned by the library and
/// passed to the CreateClientInterceptor method of the application's
/// ClientInterceptorFactoryInterface implementation.
namespace experimental {
class ClientRpcInfo {
public:
// TODO(yashykt): Stop default-constructing ClientRpcInfo and remove UNKNOWN
// from the list of possible Types.
/// Type categorizes RPCs by unary or streaming type
enum class Type {
UNARY,
CLIENT_STREAMING,
@ -65,13 +78,23 @@ class ClientRpcInfo {
~ClientRpcInfo(){};
// Delete copy constructor but allow default move constructor
ClientRpcInfo(const ClientRpcInfo&) = delete;
ClientRpcInfo(ClientRpcInfo&&) = default;
// Getter methods
/// Return the fully-specified method name
const char* method() const { return method_; }
/// Return a pointer to the channel on which the RPC is being sent
ChannelInterface* channel() { return channel_; }
/// Return a pointer to the underlying ClientContext structure associated
/// with the RPC to support features that apply to it
grpc::ClientContext* client_context() { return ctx_; }
/// Return the type of the RPC (unary or a streaming flavor)
Type type() const { return type_; }
private:
@ -120,8 +143,11 @@ class ClientRpcInfo {
}
for (auto it = creators.begin() + interceptor_pos; it != creators.end();
++it) {
interceptors_.push_back(std::unique_ptr<experimental::Interceptor>(
(*it)->CreateClientInterceptor(this)));
auto* interceptor = (*it)->CreateClientInterceptor(this);
if (interceptor != nullptr) {
interceptors_.push_back(
std::unique_ptr<experimental::Interceptor>(interceptor));
}
}
if (internal::g_global_client_interceptor_factory != nullptr) {
interceptors_.push_back(std::unique_ptr<experimental::Interceptor>(

@ -31,96 +31,135 @@ class ChannelInterface;
class Status;
namespace experimental {
class InterceptedMessage {
public:
template <class M>
bool Extract(M* msg); // returns false if definitely invalid extraction
template <class M>
M* MutableExtract();
uint64_t length(); // length on wire
};
/// An enumeration of different possible points at which the \a Intercept
/// method of the \a Interceptor interface may be called. Any given call
/// to \a Intercept will include one or more of these hook points, and
/// each hook point makes certain types of information available to the
/// interceptor.
/// In these enumeration names, PRE_SEND means that an interception has taken
/// place between the time the application provided a certain type of data
/// (e.g., initial metadata, status) and the time that that data goes to the
/// other side. POST_SEND means that the data has been committed for going to
/// the other side (even if it has not yet been received at the other side).
/// PRE_RECV means an interception between the time that a certain
/// operation has been requested and it is available. POST_RECV means that a
/// result is available but has not yet been passed back to the application.
enum class InterceptionHookPoints {
/* The first three in this list are for clients and servers */
/// The first three in this list are for clients and servers
PRE_SEND_INITIAL_METADATA,
PRE_SEND_MESSAGE,
POST_SEND_MESSAGE,
PRE_SEND_STATUS /* server only */,
PRE_SEND_CLOSE /* client only */,
/* The following three are for hijacked clients only and can only be
registered by the global interceptor */
PRE_SEND_STATUS, // server only
PRE_SEND_CLOSE, // client only: WritesDone for stream; after write in unary
/// The following three are for hijacked clients only and can only be
/// registered by the global interceptor
PRE_RECV_INITIAL_METADATA,
PRE_RECV_MESSAGE,
PRE_RECV_STATUS,
/* The following two are for all clients and servers */
/// The following two are for all clients and servers
POST_RECV_INITIAL_METADATA,
POST_RECV_MESSAGE,
POST_RECV_STATUS /* client only */,
POST_RECV_CLOSE /* server only */,
/* This is a special hook point available to both clients and servers when
TryCancel() is performed.
- No other hook points will be present along with this.
- It is illegal for an interceptor to block/delay this operation.
- ALL interceptors see this hook point irrespective of whether the RPC was
hijacked or not. */
POST_RECV_STATUS, // client only
POST_RECV_CLOSE, // server only
/// This is a special hook point available to both clients and servers when
/// TryCancel() is performed.
/// - No other hook points will be present along with this.
/// - It is illegal for an interceptor to block/delay this operation.
/// - ALL interceptors see this hook point irrespective of whether the
/// RPC was hijacked or not.
PRE_SEND_CANCEL,
NUM_INTERCEPTION_HOOKS
};
/// Class that is passed as an argument to the \a Intercept method
/// of the application's \a Interceptor interface implementation. It has five
/// purposes:
/// 1. Indicate which hook points are present at a specific interception
/// 2. Allow an interceptor to inform the library that an RPC should
/// continue to the next stage of its processing (which may be another
/// interceptor or the main path of the library)
/// 3. Allow an interceptor to hijack the processing of the RPC (only for
/// client-side RPCs with PRE_SEND_INITIAL_METADATA) so that it does not
/// proceed with normal processing beyond that stage
/// 4. Access the relevant fields of an RPC at each interception point
/// 5. Set some fields of an RPC at each interception point, when possible
class InterceptorBatchMethods {
public:
virtual ~InterceptorBatchMethods(){};
// Queries to check whether the current batch has an interception hook point
// of type \a type
/// Determine whether the current batch has an interception hook point
/// of type \a type
virtual bool QueryInterceptionHookPoint(InterceptionHookPoints type) = 0;
// Calling this will signal that the interceptor is done intercepting the
// current batch of the RPC.
// Proceed is a no-op if the batch contains PRE_SEND_CANCEL. Simply returning
// from the Intercept method does the job of continuing the RPC in this case.
/// Signal that the interceptor is done intercepting the current batch of the
/// RPC. Every interceptor must either call Proceed or Hijack on each
/// interception. In most cases, only Proceed will be used. Explicit use of
/// Proceed is what enables interceptors to delay the processing of RPCs
/// while they perform other work.
/// Proceed is a no-op if the batch contains PRE_SEND_CANCEL. Simply returning
/// from the Intercept method does the job of continuing the RPC in this case.
/// This is because PRE_SEND_CANCEL is always in a separate batch and is not
/// allowed to be delayed.
virtual void Proceed() = 0;
// Calling this indicates that the interceptor has hijacked the RPC (only
// valid if the batch contains send_initial_metadata on the client side)
/// Indicate that the interceptor has hijacked the RPC (only valid if the
/// batch contains send_initial_metadata on the client side). Later
/// interceptors in the interceptor list will not be called. Later batches
/// on the same RPC will go through interception, but only up to the point
/// of the hijacking interceptor.
virtual void Hijack() = 0;
// Returns a modifiable ByteBuffer holding serialized form of the message to be
// sent
/// Returns a modifiable ByteBuffer holding the serialized form of the message
/// that is going to be sent. Valid for PRE_SEND_MESSAGE interceptions.
/// A return value of nullptr indicates that this ByteBuffer is not valid.
virtual ByteBuffer* GetSendMessage() = 0;
// Checks whether the SEND MESSAGE op succeeded
/// Checks whether the SEND MESSAGE op succeeded. Valid for POST_SEND_MESSAGE
/// interceptions.
virtual bool GetSendMessageStatus() = 0;
// Returns a modifiable multimap of the initial metadata to be sent
/// Returns a modifiable multimap of the initial metadata to be sent. Valid
/// for PRE_SEND_INITIAL_METADATA interceptions. A value of nullptr indicates
/// that this field is not valid.
virtual std::multimap<grpc::string, grpc::string>*
GetSendInitialMetadata() = 0;
// Returns the status to be sent
/// Returns the status to be sent. Valid for PRE_SEND_STATUS interceptions.
virtual Status GetSendStatus() = 0;
// Modifies the status with \a status
/// Overwrites the status with \a status. Valid for PRE_SEND_STATUS
/// interceptions.
virtual void ModifySendStatus(const Status& status) = 0;
// Returns a modifiable multimap of the trailing metadata to be sent
/// Returns a modifiable multimap of the trailing metadata to be sent. Valid
/// for PRE_SEND_STATUS interceptions. A value of nullptr indicates
/// that this field is not valid.
virtual std::multimap<grpc::string, grpc::string>*
GetSendTrailingMetadata() = 0;
// Returns a pointer to the modifiable received message. Note that the message
// is already deserialized
/// Returns a pointer to the modifiable received message. Note that the
/// message is already deserialized but the type is not set; the interceptor
/// should static_cast to the appropriate type before using it. This is valid
/// for POST_RECV_MESSAGE interceptions; nullptr if not valid
virtual void* GetRecvMessage() = 0;
// Returns a modifiable multimap of the received initial metadata
/// Returns a modifiable multimap of the received initial metadata.
/// Valid for POST_RECV_INITIAL_METADATA interceptions; nullptr if not valid
virtual std::multimap<grpc::string_ref, grpc::string_ref>*
GetRecvInitialMetadata() = 0;
// Returns a modifiable view of the received status
/// Returns a modifiable view of the received status on POST_RECV_STATUS
/// interceptions; nullptr if not valid.
virtual Status* GetRecvStatus() = 0;
// Returns a modifiable multimap of the received trailing metadata
/// Returns a modifiable multimap of the received trailing metadata on
/// POST_RECV_STATUS interceptions; nullptr if not valid
virtual std::multimap<grpc::string_ref, grpc::string_ref>*
GetRecvTrailingMetadata() = 0;
// Gets an intercepted channel. When a call is started on this interceptor,
// only interceptors after the current interceptor are created from the
// factory objects registered with the channel.
/// Gets an intercepted channel. When a call is started on this interceptor,
/// only interceptors after the current interceptor are created from the
/// factory objects registered with the channel. This allows calls to be
/// started from interceptors without infinite regress through the interceptor
/// list.
virtual std::unique_ptr<ChannelInterface> GetInterceptedChannel() = 0;
// On a hijacked RPC / to-be-hijacked RPC, this can be called to fail a SEND
@ -128,10 +167,14 @@ class InterceptorBatchMethods {
virtual void FailHijackedSendMessage() = 0;
};
/// Interface for an interceptor. Interceptor authors must create a class
/// that derives from this parent class.
class Interceptor {
public:
virtual ~Interceptor() {}
/// The one public method of an Interceptor interface. Override this to
/// trigger the desired actions at the hook points described above.
virtual void Intercept(InterceptorBatchMethods* methods) = 0;
};
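To make the hook-point flow concrete, here is a minimal sketch of a client-side interceptor and its factory against the experimental API above. The names LoggingInterceptor and LoggingInterceptorFactory are invented; the support headers are the ones added by this change:

```cpp
#include <iostream>

#include <grpcpp/support/client_interceptor.h>
#include <grpcpp/support/interceptor.h>

class LoggingInterceptor : public grpc::experimental::Interceptor {
 public:
  void Intercept(grpc::experimental::InterceptorBatchMethods* methods) override {
    // Check which hook points are present in this batch before acting.
    if (methods->QueryInterceptionHookPoint(
            grpc::experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) {
      std::cout << "about to send a message" << std::endl;
    }
    // Every batch must be continued with Proceed() (or taken over with
    // Hijack()); returning without either would stall the RPC.
    methods->Proceed();
  }
};

class LoggingInterceptorFactory
    : public grpc::experimental::ClientInterceptorFactoryInterface {
 public:
  grpc::experimental::Interceptor* CreateClientInterceptor(
      grpc::experimental::ClientRpcInfo* info) override {
    // The library takes ownership of the returned interceptor; returning
    // nullptr would make it skip this factory for the RPC, per the contract
    // documented above.
    return new LoggingInterceptor;
  }
};
```

A vector of such factories is supplied when the channel is created (for example through the experimental CreateCustomChannelWithInterceptors entry point), and the library then instantiates one interceptor per factory for each RPC.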

@ -131,6 +131,13 @@ class ServerContext {
/// end in "-bin".
/// \param value The metadata value. If its value is binary, the key name
/// must end in "-bin".
///
/// Metadata must conform to the following format:
/// Custom-Metadata -> Binary-Header / ASCII-Header
/// Binary-Header -> {Header-Name "-bin" } {binary value}
/// ASCII-Header -> Header-Name ASCII-Value
/// Header-Name -> 1*( %x30-39 / %x61-7A / "_" / "-" / ".") ; 0-9 a-z _ - .
/// ASCII-Value -> 1*( %x20-%x7E ) ; space and printable ASCII
void AddInitialMetadata(const grpc::string& key, const grpc::string& value);
/// Add the (\a key, \a value) pair to the initial metadata
@ -145,6 +152,13 @@ class ServerContext {
/// it must end in "-bin".
/// \param value The metadata value. If its value is binary, the key name
/// must end in "-bin".
///
/// Metadata must conform to the following format:
/// Custom-Metadata -> Binary-Header / ASCII-Header
/// Binary-Header -> {Header-Name "-bin" } {binary value}
/// ASCII-Header -> Header-Name ASCII-Value
/// Header-Name -> 1*( %x30-39 / %x61-7A / "_" / "-" / ".") ; 0-9 a-z _ - .
/// ASCII-Value -> 1*( %x20-%x7E ) ; space and printable ASCII
void AddTrailingMetadata(const grpc::string& key, const grpc::string& value);
/// IsCancelled is always safe to call when using sync or callback API.
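The same grammar applies on the server side. A sketch of the SayHello handler from greeter_server.cc above, assuming that file's using-declarations (the keys are invented):

```cpp
Status SayHello(ServerContext* context, const HelloRequest* request,
                HelloReply* reply) override {
  // ASCII-Header attached to the initial metadata.
  context->AddInitialMetadata("x-server-region", "us-east1");
  // Binary-Header (key ends in "-bin") attached to the trailing metadata.
  context->AddTrailingMetadata("x-debug-info-bin", std::string("\x00\x2a", 2));
  reply->set_message("Hello " + request->name());
  return Status::OK;
}
```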

@ -37,25 +37,47 @@ class InterceptorBatchMethodsImpl;
namespace experimental {
class ServerRpcInfo;
// A factory interface for creation of server interceptors. A vector of
// factories can be provided to ServerBuilder, which will be used to create a
// new vector of server interceptors per RPC. Server interceptor authors
// should create a subclass of ServerInterceptorFactoryInterface that creates
// objects of their interceptor.
class ServerInterceptorFactoryInterface {
public:
virtual ~ServerInterceptorFactoryInterface() {}
// Returns a pointer to an Interceptor object on successful creation, nullptr
// otherwise. If nullptr is returned, this server interceptor factory is
// ignored for the purposes of that RPC.
virtual Interceptor* CreateServerInterceptor(ServerRpcInfo* info) = 0;
};
/// ServerRpcInfo represents the state of a particular RPC as it
/// appears to an interceptor. It is created and owned by the library and
/// passed to the CreateServerInterceptor method of the application's
/// ServerInterceptorFactoryInterface implementation
class ServerRpcInfo {
public:
/// Type categorizes RPCs by unary or streaming type
enum class Type { UNARY, CLIENT_STREAMING, SERVER_STREAMING, BIDI_STREAMING };
~ServerRpcInfo(){};
// Delete all copy and move constructors and assignments
ServerRpcInfo(const ServerRpcInfo&) = delete;
ServerRpcInfo& operator=(const ServerRpcInfo&) = delete;
ServerRpcInfo(ServerRpcInfo&&) = delete;
ServerRpcInfo& operator=(ServerRpcInfo&&) = delete;
// Getter methods
/// Return the fully-specified method name
const char* method() const { return method_; }
/// Return the type of the RPC (unary or a streaming flavor)
Type type() const { return type_; }
/// Return a pointer to the underlying ServerContext structure associated
/// with the RPC to support features that apply to it
grpc::ServerContext* server_context() { return ctx_; }
private:
@ -90,8 +112,11 @@ class ServerRpcInfo {
std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>&
creators) {
for (const auto& creator : creators) {
interceptors_.push_back(std::unique_ptr<experimental::Interceptor>(
creator->CreateServerInterceptor(this)));
auto* interceptor = creator->CreateServerInterceptor(this);
if (interceptor != nullptr) {
interceptors_.push_back(
std::unique_ptr<experimental::Interceptor>(interceptor));
}
}
}
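A parallel sketch on the server side, showing a factory that opts out of non-unary RPCs by returning nullptr (the class names are invented):

```cpp
#include <grpcpp/support/interceptor.h>
#include <grpcpp/support/server_interceptor.h>

class NoopInterceptor : public grpc::experimental::Interceptor {
 public:
  void Intercept(grpc::experimental::InterceptorBatchMethods* methods) override {
    methods->Proceed();  // pass every batch straight through
  }
};

class UnaryOnlyFactory
    : public grpc::experimental::ServerInterceptorFactoryInterface {
 public:
  grpc::experimental::Interceptor* CreateServerInterceptor(
      grpc::experimental::ServerRpcInfo* info) override {
    // nullptr means this factory is ignored for this RPC, as documented above.
    if (info->type() != grpc::experimental::ServerRpcInfo::Type::UNARY) {
      return nullptr;
    }
    return new NoopInterceptor;
  }
};
```

On the server, such factories are registered with ServerBuilder (through its experimental SetInterceptorCreators hook) before BuildAndStart.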

@ -0,0 +1,24 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPCPP_SUPPORT_CLIENT_INTERCEPTOR_H
#define GRPCPP_SUPPORT_CLIENT_INTERCEPTOR_H
#include <grpcpp/impl/codegen/client_interceptor.h>
#endif // GRPCPP_SUPPORT_CLIENT_INTERCEPTOR_H

@ -0,0 +1,24 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPCPP_SUPPORT_INTERCEPTOR_H
#define GRPCPP_SUPPORT_INTERCEPTOR_H
#include <grpcpp/impl/codegen/interceptor.h>
#endif // GRPCPP_SUPPORT_INTERCEPTOR_H

@ -0,0 +1,24 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPCPP_SUPPORT_SERVER_INTERCEPTOR_H
#define GRPCPP_SUPPORT_SERVER_INTERCEPTOR_H
#include <grpcpp/impl/codegen/server_interceptor.h>
#endif // GRPCPP_SUPPORT_SERVER_INTERCEPTOR_H

@ -290,6 +290,7 @@
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/parse_address.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/proxy_mapper.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/proxy_mapper_registry.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/request_routing.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver_factory.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver_registry.h" role="src" />
@ -735,6 +736,7 @@
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/parse_address.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/proxy_mapper.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/proxy_mapper_registry.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/request_routing.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver_registry.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver_result_parsing.cc" role="src" />

File diff suppressed because it is too large (src/core/ext/filters/client_channel/client_channel.cc).

@ -65,10 +65,10 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
struct PickState {
/// Initial metadata associated with the picking call.
grpc_metadata_batch* initial_metadata = nullptr;
/// Bitmask used for selective cancelling. See
/// Pointer to bitmask used for selective cancelling. See
/// \a CancelMatchingPicksLocked() and \a GRPC_INITIAL_METADATA_* in
/// grpc_types.h.
uint32_t initial_metadata_flags = 0;
uint32_t* initial_metadata_flags = nullptr;
/// Storage for LB token in \a initial_metadata, or nullptr if not used.
grpc_linked_mdelem lb_token_mdelem_storage;
/// Closure to run when pick is complete, if not completed synchronously.
@ -88,6 +88,9 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
LoadBalancingPolicy(const LoadBalancingPolicy&) = delete;
LoadBalancingPolicy& operator=(const LoadBalancingPolicy&) = delete;
/// Returns the name of the LB policy.
virtual const char* name() const GRPC_ABSTRACT;
/// Updates the policy with a new set of \a args and a new \a lb_config from
/// the resolver. Note that the LB policy gets the set of addresses from the
/// GRPC_ARG_SERVER_ADDRESS_LIST channel arg.
@ -205,12 +208,6 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
grpc_pollset_set* interested_parties_;
/// Callback to force a re-resolution.
grpc_closure* request_reresolution_;
// Dummy classes needed for alignment issues.
// See https://github.com/grpc/grpc/issues/16032 for context.
// TODO(ncteisen): remove this as soon as the issue is resolved.
channelz::ChildRefsList dummy_list_foo;
channelz::ChildRefsList dummy_list_bar;
};
} // namespace grpc_core
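The switch from a flags value to a pointer means the cancel path reads the call's current metadata flags instead of a snapshot taken at pick time. A minimal sketch of the matching test (the struct here is an invented stand-in reduced to the one field this change touches):

```cpp
#include <cstdint>

// Invented stand-in for LoadBalancingPolicy::PickState.
struct PickState {
  uint32_t* initial_metadata_flags = nullptr;
};

// Mirrors the (*pick->initial_metadata_flags & mask) == eq test that
// CancelMatchingPicksLocked now performs in the policies below.
bool PickMatches(const PickState* pick, uint32_t mask, uint32_t eq) {
  // The flags are read through the pointer at cancel time, so the test sees
  // the call's current flags rather than a copy made when the pick started.
  return (*pick->initial_metadata_flags & mask) == eq;
}
```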

@ -122,10 +122,14 @@ TraceFlag grpc_lb_glb_trace(false, "glb");
namespace {
constexpr char kGrpclb[] = "grpclb";
class GrpcLb : public LoadBalancingPolicy {
public:
explicit GrpcLb(const Args& args);
const char* name() const override { return kGrpclb; }
void UpdateLocked(const grpc_channel_args& args,
grpc_json* lb_config) override;
bool PickLocked(PickState* pick, grpc_error** error) override;
@ -361,7 +365,9 @@ void lb_token_destroy(void* token) {
}
}
int lb_token_cmp(void* token1, void* token2) {
return GPR_ICMP(token1, token2);
// Always indicate a match, since we don't want this channel arg to
// affect the subchannel's key in the index.
return 0;
}
const grpc_arg_pointer_vtable lb_token_arg_vtable = {
lb_token_copy, lb_token_destroy, lb_token_cmp};
@ -422,7 +428,7 @@ ServerAddressList ProcessServerlist(const grpc_grpclb_serverlist* serverlist) {
grpc_resolved_address addr;
ParseServer(server, &addr);
// LB token processing.
void* lb_token;
grpc_mdelem lb_token;
if (server->has_load_balance_token) {
const size_t lb_token_max_length =
GPR_ARRAY_SIZE(server->load_balance_token);
@ -430,9 +436,7 @@ ServerAddressList ProcessServerlist(const grpc_grpclb_serverlist* serverlist) {
strnlen(server->load_balance_token, lb_token_max_length);
grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer(
server->load_balance_token, lb_token_length);
lb_token =
(void*)grpc_mdelem_from_slices(GRPC_MDSTR_LB_TOKEN, lb_token_mdstr)
.payload;
lb_token = grpc_mdelem_from_slices(GRPC_MDSTR_LB_TOKEN, lb_token_mdstr);
} else {
char* uri = grpc_sockaddr_to_uri(&addr);
gpr_log(GPR_INFO,
@ -440,14 +444,16 @@ ServerAddressList ProcessServerlist(const grpc_grpclb_serverlist* serverlist) {
"be used instead",
uri);
gpr_free(uri);
lb_token = (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
lb_token = GRPC_MDELEM_LB_TOKEN_EMPTY;
}
// Add address.
grpc_arg arg = grpc_channel_arg_pointer_create(
const_cast<char*>(GRPC_ARG_GRPCLB_ADDRESS_LB_TOKEN), lb_token,
&lb_token_arg_vtable);
const_cast<char*>(GRPC_ARG_GRPCLB_ADDRESS_LB_TOKEN),
(void*)lb_token.payload, &lb_token_arg_vtable);
grpc_channel_args* args = grpc_channel_args_copy_and_add(nullptr, &arg, 1);
addresses.emplace_back(addr, args);
// Clean up.
GRPC_MDELEM_UNREF(lb_token);
}
return addresses;
}
@ -525,8 +531,7 @@ void GrpcLb::BalancerCallState::Orphan() {
void GrpcLb::BalancerCallState::StartQuery() {
GPR_ASSERT(lb_call_ != nullptr);
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] Starting LB call (lb_calld: %p, lb_call: %p)",
gpr_log(GPR_INFO, "[grpclb %p] lb_calld=%p: Starting LB call %p",
grpclb_policy_.get(), this, lb_call_);
}
// Create the ops.
@ -670,8 +675,9 @@ void GrpcLb::BalancerCallState::SendClientLoadReportLocked() {
grpc_call_error call_error = grpc_call_start_batch_and_execute(
lb_call_, &op, 1, &client_load_report_closure_);
if (GPR_UNLIKELY(call_error != GRPC_CALL_OK)) {
gpr_log(GPR_ERROR, "[grpclb %p] call_error=%d", grpclb_policy_.get(),
call_error);
gpr_log(GPR_ERROR,
"[grpclb %p] lb_calld=%p call_error=%d sending client load report",
grpclb_policy_.get(), this, call_error);
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
}
@ -732,15 +738,17 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked(
&initial_response->client_stats_report_interval));
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] Received initial LB response message; "
"client load reporting interval = %" PRId64 " milliseconds",
grpclb_policy, lb_calld->client_stats_report_interval_);
"[grpclb %p] lb_calld=%p: Received initial LB response "
"message; client load reporting interval = %" PRId64
" milliseconds",
grpclb_policy, lb_calld,
lb_calld->client_stats_report_interval_);
}
} else if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] Received initial LB response message; client load "
"reporting NOT enabled",
grpclb_policy);
"[grpclb %p] lb_calld=%p: Received initial LB response message; "
"client load reporting NOT enabled",
grpclb_policy, lb_calld);
}
grpc_grpclb_initial_response_destroy(initial_response);
lb_calld->seen_initial_response_ = true;
@ -750,15 +758,17 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked(
GPR_ASSERT(lb_calld->lb_call_ != nullptr);
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] Serverlist with %" PRIuPTR " servers received",
grpclb_policy, serverlist->num_servers);
"[grpclb %p] lb_calld=%p: Serverlist with %" PRIuPTR
" servers received",
grpclb_policy, lb_calld, serverlist->num_servers);
for (size_t i = 0; i < serverlist->num_servers; ++i) {
grpc_resolved_address addr;
ParseServer(serverlist->servers[i], &addr);
char* ipport;
grpc_sockaddr_to_string(&ipport, &addr, false);
gpr_log(GPR_INFO, "[grpclb %p] Serverlist[%" PRIuPTR "]: %s",
grpclb_policy, i, ipport);
gpr_log(GPR_INFO,
"[grpclb %p] lb_calld=%p: Serverlist[%" PRIuPTR "]: %s",
grpclb_policy, lb_calld, i, ipport);
gpr_free(ipport);
}
}
@ -778,9 +788,9 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked(
if (grpc_grpclb_serverlist_equals(grpclb_policy->serverlist_, serverlist)) {
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] Incoming server list identical to current, "
"ignoring.",
grpclb_policy);
"[grpclb %p] lb_calld=%p: Incoming server list identical to "
"current, ignoring.",
grpclb_policy, lb_calld);
}
grpc_grpclb_destroy_serverlist(serverlist);
} else { // New serverlist.
@ -806,8 +816,9 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked(
char* response_slice_str =
grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX);
gpr_log(GPR_ERROR,
"[grpclb %p] Invalid LB response received: '%s'. Ignoring.",
grpclb_policy, response_slice_str);
"[grpclb %p] lb_calld=%p: Invalid LB response received: '%s'. "
"Ignoring.",
grpclb_policy, lb_calld, response_slice_str);
gpr_free(response_slice_str);
}
grpc_slice_unref_internal(response_slice);
@ -838,9 +849,9 @@ void GrpcLb::BalancerCallState::OnBalancerStatusReceivedLocked(
char* status_details =
grpc_slice_to_c_string(lb_calld->lb_call_status_details_);
gpr_log(GPR_INFO,
"[grpclb %p] Status from LB server received. Status = %d, details "
"= '%s', (lb_calld: %p, lb_call: %p), error '%s'",
grpclb_policy, lb_calld->lb_call_status_, status_details, lb_calld,
"[grpclb %p] lb_calld=%p: Status from LB server received. "
"Status = %d, details = '%s', (lb_call: %p), error '%s'",
grpclb_policy, lb_calld, lb_calld->lb_call_status_, status_details,
lb_calld->lb_call_, grpc_error_string(error));
gpr_free(status_details);
}
@ -1129,7 +1140,7 @@ void GrpcLb::CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
pending_picks_ = nullptr;
while (pp != nullptr) {
PendingPick* next = pp->next;
- if ((pp->pick->initial_metadata_flags & initial_metadata_flags_mask) ==
+ if ((*pp->pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
// Note: pp is deleted in this callback.
GRPC_CLOSURE_SCHED(&pp->on_complete,
@ -1592,6 +1603,10 @@ void GrpcLb::CreateRoundRobinPolicyLocked(const Args& args) {
this);
return;
}
+ if (grpc_lb_glb_trace.enabled()) {
+ gpr_log(GPR_INFO, "[grpclb %p] Created new RR policy %p", this,
+ rr_policy_.get());
+ }
// TODO(roth): We currently track this ref manually. Once the new
// ClosureRef API is done, pass the RefCountedPtr<> along with the closure.
auto self = Ref(DEBUG_LOCATION, "on_rr_reresolution_requested");
@ -1685,10 +1700,6 @@ void GrpcLb::CreateOrUpdateRoundRobinPolicyLocked() {
lb_policy_args.client_channel_factory = client_channel_factory();
lb_policy_args.args = args;
CreateRoundRobinPolicyLocked(lb_policy_args);
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_INFO, "[grpclb %p] Created new RR policy %p", this,
- rr_policy_.get());
- }
}
grpc_channel_args_destroy(args);
}
@ -1812,7 +1823,7 @@ class GrpcLbFactory : public LoadBalancingPolicyFactory {
return OrphanablePtr<LoadBalancingPolicy>(New<GrpcLb>(args));
}
const char* name() const override { return "grpclb"; }
const char* name() const override { return kGrpclb; }
};
} // namespace
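Note on the flags change above: the pick's initial_metadata_flags is now a pointer into the call's metadata batch rather than a copied value, so the cancellation filter dereferences it. A minimal standalone sketch of that mask/check test; the type and constant here are hypothetical stand-ins, not the gRPC API:

#include <cstdint>

// Hypothetical stand-ins for the real PickState and flag definitions.
struct PickState {
  uint32_t* initial_metadata_flags;  // points into the call; not owned here
};
constexpr uint32_t kWaitForReady = 0x20;

// A pick matches when its masked flag bits equal `check`; e.g.
// mask=kWaitForReady, check=0 selects picks with wait_for_ready unset.
inline bool PickMatches(const PickState& pick, uint32_t mask, uint32_t check) {
  return (*pick.initial_metadata_flags & mask) == check;
}

This mirrors how SetConnectivityStateLocked() in request_routing.cc below cancels only the wait_for_ready=false picks on TRANSIENT_FAILURE.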

@ -43,10 +43,14 @@ namespace {
// pick_first LB policy
//
constexpr char kPickFirst[] = "pick_first";
class PickFirst : public LoadBalancingPolicy {
public:
explicit PickFirst(const Args& args);
const char* name() const override { return kPickFirst; }
void UpdateLocked(const grpc_channel_args& args,
grpc_json* lb_config) override;
bool PickLocked(PickState* pick, grpc_error** error) override;
@ -234,7 +238,7 @@ void PickFirst::CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
pending_picks_ = nullptr;
while (pick != nullptr) {
PickState* next = pick->next;
- if ((pick->initial_metadata_flags & initial_metadata_flags_mask) ==
+ if ((*pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
@ -622,7 +626,7 @@ class PickFirstFactory : public LoadBalancingPolicyFactory {
return OrphanablePtr<LoadBalancingPolicy>(New<PickFirst>(args));
}
const char* name() const override { return "pick_first"; }
const char* name() const override { return kPickFirst; }
};
} // namespace

@ -53,10 +53,14 @@ namespace {
// round_robin LB policy
//
constexpr char kRoundRobin[] = "round_robin";
class RoundRobin : public LoadBalancingPolicy {
public:
explicit RoundRobin(const Args& args);
const char* name() const override { return kRoundRobin; }
void UpdateLocked(const grpc_channel_args& args,
grpc_json* lb_config) override;
bool PickLocked(PickState* pick, grpc_error** error) override;
@ -291,7 +295,7 @@ void RoundRobin::CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
pending_picks_ = nullptr;
while (pick != nullptr) {
PickState* next = pick->next;
- if ((pick->initial_metadata_flags & initial_metadata_flags_mask) ==
+ if ((*pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
pick->connected_subchannel.reset();
GRPC_CLOSURE_SCHED(pick->on_complete,
@ -700,7 +704,7 @@ class RoundRobinFactory : public LoadBalancingPolicyFactory {
return OrphanablePtr<LoadBalancingPolicy>(New<RoundRobin>(args));
}
const char* name() const override { return "round_robin"; }
const char* name() const override { return kRoundRobin; }
};
} // namespace

@ -115,10 +115,14 @@ TraceFlag grpc_lb_xds_trace(false, "xds");
namespace {
constexpr char kXds[] = "xds_experimental";
class XdsLb : public LoadBalancingPolicy {
public:
explicit XdsLb(const Args& args);
const char* name() const override { return kXds; }
void UpdateLocked(const grpc_channel_args& args,
grpc_json* lb_config) override;
bool PickLocked(PickState* pick, grpc_error** error) override;
@ -1053,7 +1057,7 @@ void XdsLb::CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
pending_picks_ = nullptr;
while (pp != nullptr) {
PendingPick* next = pp->next;
- if ((pp->pick->initial_metadata_flags & initial_metadata_flags_mask) ==
+ if ((*pp->pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
// Note: pp is deleted in this callback.
GRPC_CLOSURE_SCHED(&pp->on_complete,
@ -1651,7 +1655,7 @@ class XdsFactory : public LoadBalancingPolicyFactory {
return OrphanablePtr<LoadBalancingPolicy>(New<XdsLb>(args));
}
const char* name() const override { return "xds_experimental"; }
const char* name() const override { return kXds; }
};
} // namespace
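All four policies above now return a single shared constant from both the policy's name() and its factory's name(). A distilled sketch of the pattern, with hypothetical class names and no gRPC types:

// One constant, two call sites: the policy and its factory can no longer
// disagree about the registered name.
constexpr char kExampleLb[] = "example_lb";

class ExampleLb {
 public:
  const char* name() const { return kExampleLb; }
};

class ExampleLbFactory {
 public:
  const char* name() const { return kExampleLb; }
};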

@ -0,0 +1,936 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/port_platform.h>
#include "src/core/ext/filters/client_channel/request_routing.h"
#include <inttypes.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include "src/core/ext/filters/client_channel/backup_poller.h"
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
#include "src/core/ext/filters/client_channel/retry_throttle.h"
#include "src/core/ext/filters/client_channel/server_address.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/ext/filters/deadline/deadline_filter.h"
#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/channel/status_util.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/service_config.h"
#include "src/core/lib/transport/static_metadata.h"
#include "src/core/lib/transport/status_metadata.h"
namespace grpc_core {
//
// RequestRouter::Request::ResolverResultWaiter
//
// Handles waiting for a resolver result.
// Used only for the first call on an idle channel.
class RequestRouter::Request::ResolverResultWaiter {
public:
explicit ResolverResultWaiter(Request* request)
: request_router_(request->request_router_),
request_(request),
tracer_enabled_(request_router_->tracer_->enabled()) {
if (tracer_enabled_) {
gpr_log(GPR_INFO,
"request_router=%p request=%p: deferring pick pending resolver "
"result",
request_router_, request);
}
// Add closure to be run when a resolver result is available.
GRPC_CLOSURE_INIT(&done_closure_, &DoneLocked, this,
grpc_combiner_scheduler(request_router_->combiner_));
AddToWaitingList();
// Set cancellation closure, so that we abort if the call is cancelled.
GRPC_CLOSURE_INIT(&cancel_closure_, &CancelLocked, this,
grpc_combiner_scheduler(request_router_->combiner_));
grpc_call_combiner_set_notify_on_cancel(request->call_combiner_,
&cancel_closure_);
}
private:
// Adds done_closure_ to
// request_router_->waiting_for_resolver_result_closures_.
void AddToWaitingList() {
grpc_closure_list_append(
&request_router_->waiting_for_resolver_result_closures_, &done_closure_,
GRPC_ERROR_NONE);
}
// Invoked when a resolver result is available.
static void DoneLocked(void* arg, grpc_error* error) {
ResolverResultWaiter* self = static_cast<ResolverResultWaiter*>(arg);
RequestRouter* request_router = self->request_router_;
// If CancelLocked() has already run, delete ourselves without doing
// anything. Note that the call stack may have already been destroyed,
// so it's not safe to access anything in the request here.
if (GPR_UNLIKELY(self->finished_)) {
if (self->tracer_enabled_) {
gpr_log(GPR_INFO,
"request_router=%p: call cancelled before resolver result",
request_router);
}
Delete(self);
return;
}
// Otherwise, process the resolver result.
Request* request = self->request_;
if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
if (self->tracer_enabled_) {
gpr_log(GPR_INFO,
"request_router=%p request=%p: resolver failed to return data",
request_router, request);
}
GRPC_CLOSURE_RUN(request->on_route_done_, GRPC_ERROR_REF(error));
} else if (GPR_UNLIKELY(request_router->resolver_ == nullptr)) {
// Shutting down.
if (self->tracer_enabled_) {
gpr_log(GPR_INFO, "request_router=%p request=%p: resolver disconnected",
request_router, request);
}
GRPC_CLOSURE_RUN(request->on_route_done_,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
} else if (GPR_UNLIKELY(request_router->lb_policy_ == nullptr)) {
// Transient resolver failure.
// If call has wait_for_ready=true, try again; otherwise, fail.
if (*request->pick_.initial_metadata_flags &
GRPC_INITIAL_METADATA_WAIT_FOR_READY) {
if (self->tracer_enabled_) {
gpr_log(GPR_INFO,
"request_router=%p request=%p: resolver returned but no LB "
"policy; wait_for_ready=true; trying again",
request_router, request);
}
// Re-add ourselves to the waiting list.
self->AddToWaitingList();
// Return early so that we don't set finished_ to true below.
return;
} else {
if (self->tracer_enabled_) {
gpr_log(GPR_INFO,
"request_router=%p request=%p: resolver returned but no LB "
"policy; wait_for_ready=false; failing",
request_router, request);
}
GRPC_CLOSURE_RUN(
request->on_route_done_,
grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Name resolution failure"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
}
} else {
if (self->tracer_enabled_) {
gpr_log(GPR_INFO,
"request_router=%p request=%p: resolver returned, doing LB "
"pick",
request_router, request);
}
request->ProcessServiceConfigAndStartLbPickLocked();
}
self->finished_ = true;
}
// Invoked when the call is cancelled.
// Note: This runs under the client_channel combiner, but will NOT be
// holding the call combiner.
static void CancelLocked(void* arg, grpc_error* error) {
ResolverResultWaiter* self = static_cast<ResolverResultWaiter*>(arg);
RequestRouter* request_router = self->request_router_;
// If DoneLocked() has already run, delete ourselves without doing anything.
if (self->finished_) {
Delete(self);
return;
}
Request* request = self->request_;
// If we are being cancelled, immediately invoke on_route_done_
// to propagate the error back to the caller.
if (error != GRPC_ERROR_NONE) {
if (self->tracer_enabled_) {
gpr_log(GPR_INFO,
"request_router=%p request=%p: cancelling call waiting for "
"name resolution",
request_router, request);
}
// Note: Although we are not in the call combiner here, we are
// basically stealing the call combiner from the pending pick, so
// it's safe to run on_route_done_ here -- we are essentially
// calling it here instead of calling it in DoneLocked().
GRPC_CLOSURE_RUN(request->on_route_done_,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1));
}
self->finished_ = true;
}
RequestRouter* request_router_;
Request* request_;
const bool tracer_enabled_;
grpc_closure done_closure_;
grpc_closure cancel_closure_;
bool finished_ = false;
};
//
// RequestRouter::Request::AsyncPickCanceller
//
// Handles the call combiner cancellation callback for an async LB pick.
class RequestRouter::Request::AsyncPickCanceller {
public:
explicit AsyncPickCanceller(Request* request)
: request_router_(request->request_router_),
request_(request),
tracer_enabled_(request_router_->tracer_->enabled()) {
GRPC_CALL_STACK_REF(request->owning_call_, "pick_callback_cancel");
// Set cancellation closure, so that we abort if the call is cancelled.
GRPC_CLOSURE_INIT(&cancel_closure_, &CancelLocked, this,
grpc_combiner_scheduler(request_router_->combiner_));
grpc_call_combiner_set_notify_on_cancel(request->call_combiner_,
&cancel_closure_);
}
void MarkFinishedLocked() {
finished_ = true;
GRPC_CALL_STACK_UNREF(request_->owning_call_, "pick_callback_cancel");
}
private:
// Invoked when the call is cancelled.
// Note: This runs under the client_channel combiner, but will NOT be
// holding the call combiner.
static void CancelLocked(void* arg, grpc_error* error) {
AsyncPickCanceller* self = static_cast<AsyncPickCanceller*>(arg);
Request* request = self->request_;
RequestRouter* request_router = self->request_router_;
if (!self->finished_) {
// Note: request_router->lb_policy_ may have changed since we started our
// pick, in which case we will be cancelling the pick on a policy other
// than the one we started it on. However, this will just be a no-op.
if (error != GRPC_ERROR_NONE && request_router->lb_policy_ != nullptr) {
if (self->tracer_enabled_) {
gpr_log(GPR_INFO,
"request_router=%p request=%p: cancelling pick from LB "
"policy %p",
request_router, request, request_router->lb_policy_.get());
}
request_router->lb_policy_->CancelPickLocked(&request->pick_,
GRPC_ERROR_REF(error));
}
request->pick_canceller_ = nullptr;
GRPC_CALL_STACK_UNREF(request->owning_call_, "pick_callback_cancel");
}
Delete(self);
}
RequestRouter* request_router_;
Request* request_;
const bool tracer_enabled_;
grpc_closure cancel_closure_;
bool finished_ = false;
};
//
// RequestRouter::Request
//
RequestRouter::Request::Request(grpc_call_stack* owning_call,
grpc_call_combiner* call_combiner,
grpc_polling_entity* pollent,
grpc_metadata_batch* send_initial_metadata,
uint32_t* send_initial_metadata_flags,
ApplyServiceConfigCallback apply_service_config,
void* apply_service_config_user_data,
grpc_closure* on_route_done)
: owning_call_(owning_call),
call_combiner_(call_combiner),
pollent_(pollent),
apply_service_config_(apply_service_config),
apply_service_config_user_data_(apply_service_config_user_data),
on_route_done_(on_route_done) {
pick_.initial_metadata = send_initial_metadata;
pick_.initial_metadata_flags = send_initial_metadata_flags;
}
RequestRouter::Request::~Request() {
if (pick_.connected_subchannel != nullptr) {
pick_.connected_subchannel.reset();
}
for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
if (pick_.subchannel_call_context[i].destroy != nullptr) {
pick_.subchannel_call_context[i].destroy(
pick_.subchannel_call_context[i].value);
}
}
}
// Invoked once resolver results are available.
void RequestRouter::Request::ProcessServiceConfigAndStartLbPickLocked() {
// Get service config data if needed.
if (!apply_service_config_(apply_service_config_user_data_)) return;
// Start LB pick.
StartLbPickLocked();
}
void RequestRouter::Request::MaybeAddCallToInterestedPartiesLocked() {
if (!pollent_added_to_interested_parties_) {
pollent_added_to_interested_parties_ = true;
grpc_polling_entity_add_to_pollset_set(
pollent_, request_router_->interested_parties_);
}
}
void RequestRouter::Request::MaybeRemoveCallFromInterestedPartiesLocked() {
if (pollent_added_to_interested_parties_) {
pollent_added_to_interested_parties_ = false;
grpc_polling_entity_del_from_pollset_set(
pollent_, request_router_->interested_parties_);
}
}
// Starts a pick on the LB policy.
void RequestRouter::Request::StartLbPickLocked() {
if (request_router_->tracer_->enabled()) {
gpr_log(GPR_INFO,
"request_router=%p request=%p: starting pick on lb_policy=%p",
request_router_, this, request_router_->lb_policy_.get());
}
GRPC_CLOSURE_INIT(&on_pick_done_, &LbPickDoneLocked, this,
grpc_combiner_scheduler(request_router_->combiner_));
pick_.on_complete = &on_pick_done_;
GRPC_CALL_STACK_REF(owning_call_, "pick_callback");
grpc_error* error = GRPC_ERROR_NONE;
const bool pick_done =
request_router_->lb_policy_->PickLocked(&pick_, &error);
if (pick_done) {
// Pick completed synchronously.
if (request_router_->tracer_->enabled()) {
gpr_log(GPR_INFO,
"request_router=%p request=%p: pick completed synchronously",
request_router_, this);
}
GRPC_CLOSURE_RUN(on_route_done_, error);
GRPC_CALL_STACK_UNREF(owning_call_, "pick_callback");
} else {
// Pick will be returned asynchronously.
// Add the request's polling entity to the request_router's
// interested_parties, so that the I/O of the LB policy can be done
// under it. It will be removed in LbPickDoneLocked().
MaybeAddCallToInterestedPartiesLocked();
// Request notification on call cancellation.
// We allocate a separate object to track cancellation, since the
// cancellation closure might still be pending when we need to reuse
// the memory in which this Request object is stored for a subsequent
// retry attempt.
pick_canceller_ = New<AsyncPickCanceller>(this);
}
}
// Callback invoked by LoadBalancingPolicy::PickLocked() for async picks.
// Unrefs the LB policy and invokes on_route_done_.
void RequestRouter::Request::LbPickDoneLocked(void* arg, grpc_error* error) {
Request* self = static_cast<Request*>(arg);
RequestRouter* request_router = self->request_router_;
if (request_router->tracer_->enabled()) {
gpr_log(GPR_INFO,
"request_router=%p request=%p: pick completed asynchronously",
request_router, self);
}
self->MaybeRemoveCallFromInterestedPartiesLocked();
if (self->pick_canceller_ != nullptr) {
self->pick_canceller_->MarkFinishedLocked();
}
GRPC_CLOSURE_RUN(self->on_route_done_, GRPC_ERROR_REF(error));
GRPC_CALL_STACK_UNREF(self->owning_call_, "pick_callback");
}
//
// RequestRouter::LbConnectivityWatcher
//
class RequestRouter::LbConnectivityWatcher {
public:
LbConnectivityWatcher(RequestRouter* request_router,
grpc_connectivity_state state,
LoadBalancingPolicy* lb_policy,
grpc_channel_stack* owning_stack,
grpc_combiner* combiner)
: request_router_(request_router),
state_(state),
lb_policy_(lb_policy),
owning_stack_(owning_stack) {
GRPC_CHANNEL_STACK_REF(owning_stack_, "LbConnectivityWatcher");
GRPC_CLOSURE_INIT(&on_changed_, &OnLbPolicyStateChangedLocked, this,
grpc_combiner_scheduler(combiner));
lb_policy_->NotifyOnStateChangeLocked(&state_, &on_changed_);
}
~LbConnectivityWatcher() {
GRPC_CHANNEL_STACK_UNREF(owning_stack_, "LbConnectivityWatcher");
}
private:
static void OnLbPolicyStateChangedLocked(void* arg, grpc_error* error) {
LbConnectivityWatcher* self = static_cast<LbConnectivityWatcher*>(arg);
// If the notification is not for the current policy, we're stale,
// so delete ourselves.
if (self->lb_policy_ != self->request_router_->lb_policy_.get()) {
Delete(self);
return;
}
// Otherwise, process notification.
if (self->request_router_->tracer_->enabled()) {
gpr_log(GPR_INFO, "request_router=%p: lb_policy=%p state changed to %s",
self->request_router_, self->lb_policy_,
grpc_connectivity_state_name(self->state_));
}
self->request_router_->SetConnectivityStateLocked(
self->state_, GRPC_ERROR_REF(error), "lb_changed");
// If shutting down, terminate watch.
if (self->state_ == GRPC_CHANNEL_SHUTDOWN) {
Delete(self);
return;
}
// Renew watch.
self->lb_policy_->NotifyOnStateChangeLocked(&self->state_,
&self->on_changed_);
}
RequestRouter* request_router_;
grpc_connectivity_state state_;
// LB policy address. No ref held, so not safe to dereference unless
// it happens to match request_router->lb_policy_.
LoadBalancingPolicy* lb_policy_;
grpc_channel_stack* owning_stack_;
grpc_closure on_changed_;
};
//
// RequestRouter::ReresolutionRequestHandler
//
class RequestRouter::ReresolutionRequestHandler {
public:
ReresolutionRequestHandler(RequestRouter* request_router,
LoadBalancingPolicy* lb_policy,
grpc_channel_stack* owning_stack,
grpc_combiner* combiner)
: request_router_(request_router),
lb_policy_(lb_policy),
owning_stack_(owning_stack) {
GRPC_CHANNEL_STACK_REF(owning_stack_, "ReresolutionRequestHandler");
GRPC_CLOSURE_INIT(&closure_, &OnRequestReresolutionLocked, this,
grpc_combiner_scheduler(combiner));
lb_policy_->SetReresolutionClosureLocked(&closure_);
}
private:
static void OnRequestReresolutionLocked(void* arg, grpc_error* error) {
ReresolutionRequestHandler* self =
static_cast<ReresolutionRequestHandler*>(arg);
RequestRouter* request_router = self->request_router_;
// If this invocation is for a stale LB policy, treat it as an LB shutdown
// signal.
if (self->lb_policy_ != request_router->lb_policy_.get() ||
error != GRPC_ERROR_NONE || request_router->resolver_ == nullptr) {
GRPC_CHANNEL_STACK_UNREF(request_router->owning_stack_,
"ReresolutionRequestHandler");
Delete(self);
return;
}
if (request_router->tracer_->enabled()) {
gpr_log(GPR_INFO, "request_router=%p: started name re-resolving",
request_router);
}
request_router->resolver_->RequestReresolutionLocked();
// Give back the closure to the LB policy.
self->lb_policy_->SetReresolutionClosureLocked(&self->closure_);
}
RequestRouter* request_router_;
// LB policy address. No ref held, so not safe to dereference unless
// it happens to match request_router->lb_policy_.
LoadBalancingPolicy* lb_policy_;
grpc_channel_stack* owning_stack_;
grpc_closure closure_;
};
//
// RequestRouter
//
RequestRouter::RequestRouter(
grpc_channel_stack* owning_stack, grpc_combiner* combiner,
grpc_client_channel_factory* client_channel_factory,
grpc_pollset_set* interested_parties, TraceFlag* tracer,
ProcessResolverResultCallback process_resolver_result,
void* process_resolver_result_user_data, const char* target_uri,
const grpc_channel_args* args, grpc_error** error)
: owning_stack_(owning_stack),
combiner_(combiner),
client_channel_factory_(client_channel_factory),
interested_parties_(interested_parties),
tracer_(tracer),
process_resolver_result_(process_resolver_result),
process_resolver_result_user_data_(process_resolver_result_user_data) {
GRPC_CLOSURE_INIT(&on_resolver_result_changed_,
&RequestRouter::OnResolverResultChangedLocked, this,
grpc_combiner_scheduler(combiner));
grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE,
"request_router");
grpc_channel_args* new_args = nullptr;
if (process_resolver_result == nullptr) {
grpc_arg arg = grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_SERVICE_CONFIG_DISABLE_RESOLUTION), 0);
new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
}
resolver_ = ResolverRegistry::CreateResolver(
target_uri, (new_args == nullptr ? args : new_args), interested_parties_,
combiner_);
grpc_channel_args_destroy(new_args);
if (resolver_ == nullptr) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("resolver creation failed");
}
}
RequestRouter::~RequestRouter() {
if (resolver_ != nullptr) {
// The only way we can get here is if we never started resolving,
// because we take a ref to the channel stack when we start
// resolving and do not release it until the resolver callback is
// invoked after the resolver shuts down.
resolver_.reset();
}
if (lb_policy_ != nullptr) {
grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
interested_parties_);
lb_policy_.reset();
}
if (client_channel_factory_ != nullptr) {
grpc_client_channel_factory_unref(client_channel_factory_);
}
grpc_connectivity_state_destroy(&state_tracker_);
}
namespace {
const char* GetChannelConnectivityStateChangeString(
grpc_connectivity_state state) {
switch (state) {
case GRPC_CHANNEL_IDLE:
return "Channel state change to IDLE";
case GRPC_CHANNEL_CONNECTING:
return "Channel state change to CONNECTING";
case GRPC_CHANNEL_READY:
return "Channel state change to READY";
case GRPC_CHANNEL_TRANSIENT_FAILURE:
return "Channel state change to TRANSIENT_FAILURE";
case GRPC_CHANNEL_SHUTDOWN:
return "Channel state change to SHUTDOWN";
}
GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
} // namespace
void RequestRouter::SetConnectivityStateLocked(grpc_connectivity_state state,
grpc_error* error,
const char* reason) {
if (lb_policy_ != nullptr) {
if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
// Cancel picks with wait_for_ready=false.
lb_policy_->CancelMatchingPicksLocked(
/* mask= */ GRPC_INITIAL_METADATA_WAIT_FOR_READY,
/* check= */ 0, GRPC_ERROR_REF(error));
} else if (state == GRPC_CHANNEL_SHUTDOWN) {
// Cancel all picks.
lb_policy_->CancelMatchingPicksLocked(/* mask= */ 0, /* check= */ 0,
GRPC_ERROR_REF(error));
}
}
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "request_router=%p: setting connectivity state to %s",
this, grpc_connectivity_state_name(state));
}
if (channelz_node_ != nullptr) {
channelz_node_->AddTraceEvent(
channelz::ChannelTrace::Severity::Info,
grpc_slice_from_static_string(
GetChannelConnectivityStateChangeString(state)));
}
grpc_connectivity_state_set(&state_tracker_, state, error, reason);
}
void RequestRouter::StartResolvingLocked() {
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "request_router=%p: starting name resolution", this);
}
GPR_ASSERT(!started_resolving_);
started_resolving_ = true;
GRPC_CHANNEL_STACK_REF(owning_stack_, "resolver");
resolver_->NextLocked(&resolver_result_, &on_resolver_result_changed_);
}
// Invoked from the resolver NextLocked() callback when the resolver
// is shutting down.
void RequestRouter::OnResolverShutdownLocked(grpc_error* error) {
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "request_router=%p: shutting down", this);
}
if (lb_policy_ != nullptr) {
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "request_router=%p: shutting down lb_policy=%p", this,
lb_policy_.get());
}
grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
interested_parties_);
lb_policy_.reset();
}
if (resolver_ != nullptr) {
// This should never happen; it can only be triggered by a resolver
// implementation spontaneously deciding to report shutdown without
// being orphaned. This code is included just to be defensive.
if (tracer_->enabled()) {
gpr_log(GPR_INFO,
"request_router=%p: spontaneous shutdown from resolver %p", this,
resolver_.get());
}
resolver_.reset();
SetConnectivityStateLocked(GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Resolver spontaneous shutdown", &error, 1),
"resolver_spontaneous_shutdown");
}
grpc_closure_list_fail_all(&waiting_for_resolver_result_closures_,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Channel disconnected", &error, 1));
GRPC_CLOSURE_LIST_SCHED(&waiting_for_resolver_result_closures_);
GRPC_CHANNEL_STACK_UNREF(owning_stack_, "resolver");
grpc_channel_args_destroy(resolver_result_);
resolver_result_ = nullptr;
GRPC_ERROR_UNREF(error);
}
// Creates a new LB policy, replacing any previous one.
// If the new policy is created successfully, sets *connectivity_state and
// *connectivity_error to its initial connectivity state; otherwise,
// leaves them unchanged.
void RequestRouter::CreateNewLbPolicyLocked(
const char* lb_policy_name, grpc_json* lb_config,
grpc_connectivity_state* connectivity_state,
grpc_error** connectivity_error, TraceStringVector* trace_strings) {
LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = combiner_;
lb_policy_args.client_channel_factory = client_channel_factory_;
lb_policy_args.args = resolver_result_;
lb_policy_args.lb_config = lb_config;
OrphanablePtr<LoadBalancingPolicy> new_lb_policy =
LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(lb_policy_name,
lb_policy_args);
if (GPR_UNLIKELY(new_lb_policy == nullptr)) {
gpr_log(GPR_ERROR, "could not create LB policy \"%s\"", lb_policy_name);
if (channelz_node_ != nullptr) {
char* str;
gpr_asprintf(&str, "Could not create LB policy \'%s\'", lb_policy_name);
trace_strings->push_back(str);
}
} else {
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "request_router=%p: created new LB policy \"%s\" (%p)",
this, lb_policy_name, new_lb_policy.get());
}
if (channelz_node_ != nullptr) {
char* str;
gpr_asprintf(&str, "Created new LB policy \'%s\'", lb_policy_name);
trace_strings->push_back(str);
}
// Swap out the LB policy and update the fds in interested_parties_.
if (lb_policy_ != nullptr) {
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "request_router=%p: shutting down lb_policy=%p", this,
lb_policy_.get());
}
grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
interested_parties_);
lb_policy_->HandOffPendingPicksLocked(new_lb_policy.get());
}
lb_policy_ = std::move(new_lb_policy);
grpc_pollset_set_add_pollset_set(lb_policy_->interested_parties(),
interested_parties_);
// Create re-resolution request handler for the new LB policy. It
// will delete itself when no longer needed.
New<ReresolutionRequestHandler>(this, lb_policy_.get(), owning_stack_,
combiner_);
// Get the new LB policy's initial connectivity state and start a
// connectivity watch.
GRPC_ERROR_UNREF(*connectivity_error);
*connectivity_state =
lb_policy_->CheckConnectivityLocked(connectivity_error);
if (exit_idle_when_lb_policy_arrives_) {
lb_policy_->ExitIdleLocked();
exit_idle_when_lb_policy_arrives_ = false;
}
// Create new watcher. It will delete itself when done.
New<LbConnectivityWatcher>(this, *connectivity_state, lb_policy_.get(),
owning_stack_, combiner_);
}
}
void RequestRouter::MaybeAddTraceMessagesForAddressChangesLocked(
TraceStringVector* trace_strings) {
const ServerAddressList* addresses =
FindServerAddressListChannelArg(resolver_result_);
const bool resolution_contains_addresses =
addresses != nullptr && addresses->size() > 0;
if (!resolution_contains_addresses &&
previous_resolution_contained_addresses_) {
trace_strings->push_back(gpr_strdup("Address list became empty"));
} else if (resolution_contains_addresses &&
!previous_resolution_contained_addresses_) {
trace_strings->push_back(gpr_strdup("Address list became non-empty"));
}
previous_resolution_contained_addresses_ = resolution_contains_addresses;
}
void RequestRouter::ConcatenateAndAddChannelTraceLocked(
TraceStringVector* trace_strings) const {
if (!trace_strings->empty()) {
gpr_strvec v;
gpr_strvec_init(&v);
gpr_strvec_add(&v, gpr_strdup("Resolution event: "));
bool is_first = true;
for (size_t i = 0; i < trace_strings->size(); ++i) {
if (!is_first) gpr_strvec_add(&v, gpr_strdup(", "));
is_first = false;
gpr_strvec_add(&v, (*trace_strings)[i]);
}
char* flat;
size_t flat_len = 0;
flat = gpr_strvec_flatten(&v, &flat_len);
channelz_node_->AddTraceEvent(
grpc_core::channelz::ChannelTrace::Severity::Info,
grpc_slice_new(flat, flat_len, gpr_free));
gpr_strvec_destroy(&v);
}
}
// Callback invoked when a resolver result is available.
void RequestRouter::OnResolverResultChangedLocked(void* arg,
grpc_error* error) {
RequestRouter* self = static_cast<RequestRouter*>(arg);
if (self->tracer_->enabled()) {
const char* disposition =
self->resolver_result_ != nullptr
? ""
: (error == GRPC_ERROR_NONE ? " (transient error)"
: " (resolver shutdown)");
gpr_log(GPR_INFO,
"request_router=%p: got resolver result: resolver_result=%p "
"error=%s%s",
self, self->resolver_result_, grpc_error_string(error),
disposition);
}
// Handle shutdown.
if (error != GRPC_ERROR_NONE || self->resolver_ == nullptr) {
self->OnResolverShutdownLocked(GRPC_ERROR_REF(error));
return;
}
// Data used to set the channel's connectivity state.
bool set_connectivity_state = true;
// We only want to trace the address resolution in the following cases:
// (a) Address resolution resulted in service config change.
// (b) Address resolution that causes number of backends to go from
// zero to non-zero.
// (c) Address resolution that causes number of backends to go from
// non-zero to zero.
// (d) Address resolution that causes a new LB policy to be created.
//
// We track a list of strings to eventually be concatenated and traced.
TraceStringVector trace_strings;
grpc_connectivity_state connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
grpc_error* connectivity_error =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
// resolver_result_ will be null in the case of a transient
// resolution error. In that case, we don't have any new result to
// process, which means that we keep using the previous result (if any).
if (self->resolver_result_ == nullptr) {
if (self->tracer_->enabled()) {
gpr_log(GPR_INFO, "request_router=%p: resolver transient failure", self);
}
// Don't override connectivity state if we already have an LB policy.
if (self->lb_policy_ != nullptr) set_connectivity_state = false;
} else {
// Parse the resolver result.
const char* lb_policy_name = nullptr;
grpc_json* lb_policy_config = nullptr;
const bool service_config_changed = self->process_resolver_result_(
self->process_resolver_result_user_data_, *self->resolver_result_,
&lb_policy_name, &lb_policy_config);
GPR_ASSERT(lb_policy_name != nullptr);
// Check to see if we're already using the right LB policy.
const bool lb_policy_name_changed =
self->lb_policy_ == nullptr ||
strcmp(self->lb_policy_->name(), lb_policy_name) != 0;
if (self->lb_policy_ != nullptr && !lb_policy_name_changed) {
// Continue using the same LB policy. Update with new addresses.
if (self->tracer_->enabled()) {
gpr_log(GPR_INFO,
"request_router=%p: updating existing LB policy \"%s\" (%p)",
self, lb_policy_name, self->lb_policy_.get());
}
self->lb_policy_->UpdateLocked(*self->resolver_result_, lb_policy_config);
// No need to set the channel's connectivity state; the existing
// watch on the LB policy will take care of that.
set_connectivity_state = false;
} else {
// Instantiate new LB policy.
self->CreateNewLbPolicyLocked(lb_policy_name, lb_policy_config,
&connectivity_state, &connectivity_error,
&trace_strings);
}
// Add channel trace event.
if (self->channelz_node_ != nullptr) {
if (service_config_changed) {
// TODO(ncteisen): might be worth somehow including a snippet of the
// config in the trace, at the risk of bloating the trace logs.
trace_strings.push_back(gpr_strdup("Service config changed"));
}
self->MaybeAddTraceMessagesForAddressChangesLocked(&trace_strings);
self->ConcatenateAndAddChannelTraceLocked(&trace_strings);
}
// Clean up.
grpc_channel_args_destroy(self->resolver_result_);
self->resolver_result_ = nullptr;
}
// Set the channel's connectivity state if needed.
if (set_connectivity_state) {
self->SetConnectivityStateLocked(connectivity_state, connectivity_error,
"resolver_result");
} else {
GRPC_ERROR_UNREF(connectivity_error);
}
// Invoke closures that were waiting for results and renew the watch.
GRPC_CLOSURE_LIST_SCHED(&self->waiting_for_resolver_result_closures_);
self->resolver_->NextLocked(&self->resolver_result_,
&self->on_resolver_result_changed_);
}
void RequestRouter::RouteCallLocked(Request* request) {
GPR_ASSERT(request->pick_.connected_subchannel == nullptr);
request->request_router_ = this;
if (lb_policy_ != nullptr) {
// We already have resolver results, so process the service config
// and start an LB pick.
request->ProcessServiceConfigAndStartLbPickLocked();
} else if (resolver_ == nullptr) {
GRPC_CLOSURE_RUN(request->on_route_done_,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
} else {
// We do not yet have an LB policy, so wait for a resolver result.
if (!started_resolving_) {
StartResolvingLocked();
}
// Create a new waiter, which will delete itself when done.
New<Request::ResolverResultWaiter>(request);
// Add the request's polling entity to the request_router's
// interested_parties, so that the I/O of the resolver can be done
// under it. It will be removed in LbPickDoneLocked().
request->MaybeAddCallToInterestedPartiesLocked();
}
}
void RequestRouter::ShutdownLocked(grpc_error* error) {
if (resolver_ != nullptr) {
SetConnectivityStateLocked(GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
"disconnect");
resolver_.reset();
if (!started_resolving_) {
grpc_closure_list_fail_all(&waiting_for_resolver_result_closures_,
GRPC_ERROR_REF(error));
GRPC_CLOSURE_LIST_SCHED(&waiting_for_resolver_result_closures_);
}
if (lb_policy_ != nullptr) {
grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
interested_parties_);
lb_policy_.reset();
}
}
GRPC_ERROR_UNREF(error);
}
grpc_connectivity_state RequestRouter::GetConnectivityState() {
return grpc_connectivity_state_check(&state_tracker_);
}
void RequestRouter::NotifyOnConnectivityStateChange(
grpc_connectivity_state* state, grpc_closure* closure) {
grpc_connectivity_state_notify_on_state_change(&state_tracker_, state,
closure);
}
void RequestRouter::ExitIdleLocked() {
if (lb_policy_ != nullptr) {
lb_policy_->ExitIdleLocked();
} else {
exit_idle_when_lb_policy_arrives_ = true;
if (!started_resolving_ && resolver_ != nullptr) {
StartResolvingLocked();
}
}
}
void RequestRouter::ResetConnectionBackoffLocked() {
if (resolver_ != nullptr) {
resolver_->ResetBackoffLocked();
resolver_->RequestReresolutionLocked();
}
if (lb_policy_ != nullptr) {
lb_policy_->ResetBackoffLocked();
}
}
} // namespace grpc_core
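The wait_for_ready branch in ResolverResultWaiter::DoneLocked() above is the core of the routing retry decision. Distilled into a standalone function (all names here are invented for illustration; the real flag is GRPC_INITIAL_METADATA_WAIT_FOR_READY, and the error/shutdown branches are omitted):

#include <cstdint>

constexpr uint32_t kWaitForReadyFlag = 0x20;  // stand-in for the real flag

enum class RouteAction { kRequeue, kFailUnavailable, kStartPick };

// When the resolver returns but no LB policy exists yet (transient resolver
// failure), only wait_for_ready calls stay queued; the rest fail UNAVAILABLE.
RouteAction DecideAfterResolverResult(bool have_lb_policy,
                                      uint32_t initial_metadata_flags) {
  if (have_lb_policy) return RouteAction::kStartPick;
  return (initial_metadata_flags & kWaitForReadyFlag) != 0
             ? RouteAction::kRequeue
             : RouteAction::kFailUnavailable;
}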

@ -0,0 +1,177 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_REQUEST_ROUTING_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_REQUEST_ROUTING_H
#include <grpc/support/port_platform.h>
#include "src/core/ext/filters/client_channel/client_channel_channelz.h"
#include "src/core/ext/filters/client_channel/client_channel_factory.h"
#include "src/core/ext/filters/client_channel/lb_policy.h"
#include "src/core/ext/filters/client_channel/resolver.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/metadata_batch.h"
namespace grpc_core {
class RequestRouter {
public:
class Request {
public:
// Synchronous callback that applies the service config to a call.
// Returns false if the call should be failed.
typedef bool (*ApplyServiceConfigCallback)(void* user_data);
Request(grpc_call_stack* owning_call, grpc_call_combiner* call_combiner,
grpc_polling_entity* pollent,
grpc_metadata_batch* send_initial_metadata,
uint32_t* send_initial_metadata_flags,
ApplyServiceConfigCallback apply_service_config,
void* apply_service_config_user_data, grpc_closure* on_route_done);
~Request();
// TODO(roth): It seems a bit ugly to expose this member in a
// non-const way. Find a better API to avoid this.
LoadBalancingPolicy::PickState* pick() { return &pick_; }
private:
friend class RequestRouter;
class ResolverResultWaiter;
class AsyncPickCanceller;
void ProcessServiceConfigAndStartLbPickLocked();
void StartLbPickLocked();
static void LbPickDoneLocked(void* arg, grpc_error* error);
void MaybeAddCallToInterestedPartiesLocked();
void MaybeRemoveCallFromInterestedPartiesLocked();
// Populated by caller.
grpc_call_stack* owning_call_;
grpc_call_combiner* call_combiner_;
grpc_polling_entity* pollent_;
ApplyServiceConfigCallback apply_service_config_;
void* apply_service_config_user_data_;
grpc_closure* on_route_done_;
LoadBalancingPolicy::PickState pick_;
// Internal state.
RequestRouter* request_router_ = nullptr;
bool pollent_added_to_interested_parties_ = false;
grpc_closure on_pick_done_;
AsyncPickCanceller* pick_canceller_ = nullptr;
};
// Synchronous callback that extracts the LB policy name and config from
// the resolver result.
// Returns true if the service config has changed since the last result.
typedef bool (*ProcessResolverResultCallback)(void* user_data,
const grpc_channel_args& args,
const char** lb_policy_name,
grpc_json** lb_policy_config);
RequestRouter(grpc_channel_stack* owning_stack, grpc_combiner* combiner,
grpc_client_channel_factory* client_channel_factory,
grpc_pollset_set* interested_parties, TraceFlag* tracer,
ProcessResolverResultCallback process_resolver_result,
void* process_resolver_result_user_data, const char* target_uri,
const grpc_channel_args* args, grpc_error** error);
~RequestRouter();
void set_channelz_node(channelz::ClientChannelNode* channelz_node) {
channelz_node_ = channelz_node;
}
void RouteCallLocked(Request* request);
// TODO(roth): Add methods to cancel picks.
void ShutdownLocked(grpc_error* error);
void ExitIdleLocked();
void ResetConnectionBackoffLocked();
grpc_connectivity_state GetConnectivityState();
void NotifyOnConnectivityStateChange(grpc_connectivity_state* state,
grpc_closure* closure);
LoadBalancingPolicy* lb_policy() const { return lb_policy_.get(); }
private:
using TraceStringVector = grpc_core::InlinedVector<char*, 3>;
class ReresolutionRequestHandler;
class LbConnectivityWatcher;
void StartResolvingLocked();
void OnResolverShutdownLocked(grpc_error* error);
void CreateNewLbPolicyLocked(const char* lb_policy_name, grpc_json* lb_config,
grpc_connectivity_state* connectivity_state,
grpc_error** connectivity_error,
TraceStringVector* trace_strings);
void MaybeAddTraceMessagesForAddressChangesLocked(
TraceStringVector* trace_strings);
void ConcatenateAndAddChannelTraceLocked(
TraceStringVector* trace_strings) const;
static void OnResolverResultChangedLocked(void* arg, grpc_error* error);
void SetConnectivityStateLocked(grpc_connectivity_state state,
grpc_error* error, const char* reason);
// Passed in from caller at construction time.
grpc_channel_stack* owning_stack_;
grpc_combiner* combiner_;
grpc_client_channel_factory* client_channel_factory_;
grpc_pollset_set* interested_parties_;
TraceFlag* tracer_;
channelz::ClientChannelNode* channelz_node_ = nullptr;
// Resolver and associated state.
OrphanablePtr<Resolver> resolver_;
ProcessResolverResultCallback process_resolver_result_;
void* process_resolver_result_user_data_;
bool started_resolving_ = false;
grpc_channel_args* resolver_result_ = nullptr;
bool previous_resolution_contained_addresses_ = false;
grpc_closure_list waiting_for_resolver_result_closures_;
grpc_closure on_resolver_result_changed_;
// LB policy and associated state.
OrphanablePtr<LoadBalancingPolicy> lb_policy_;
bool exit_idle_when_lb_policy_arrives_ = false;
grpc_connectivity_state_tracker state_tracker_;
};
} // namespace grpc_core
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_REQUEST_ROUTING_H */
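For reference, a hypothetical minimal ProcessResolverResultCallback matching the typedef above. The real implementation lives in client_channel.cc and parses the service config, but the contract can be satisfied this simply (assumes the includes from request_routing.h):

// Always selects pick_first; a static string sidesteps ownership questions,
// and returning false reports "service config unchanged".
bool MinimalProcessResolverResult(void* user_data,
                                  const grpc_channel_args& args,
                                  const char** lb_policy_name,
                                  grpc_json** lb_policy_config) {
  (void)user_data;
  (void)args;
  *lb_policy_name = "pick_first";
  *lb_policy_config = nullptr;
  return false;
}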

@ -43,16 +43,16 @@ namespace grpc_core {
namespace internal {
ProcessedResolverResult::ProcessedResolverResult(
- const grpc_channel_args* resolver_result, bool parse_retry) {
+ const grpc_channel_args& resolver_result, bool parse_retry) {
ProcessServiceConfig(resolver_result, parse_retry);
// If no LB config was found above, just find the LB policy name then.
if (lb_policy_name_ == nullptr) ProcessLbPolicyName(resolver_result);
}
void ProcessedResolverResult::ProcessServiceConfig(
- const grpc_channel_args* resolver_result, bool parse_retry) {
+ const grpc_channel_args& resolver_result, bool parse_retry) {
const grpc_arg* channel_arg =
- grpc_channel_args_find(resolver_result, GRPC_ARG_SERVICE_CONFIG);
+ grpc_channel_args_find(&resolver_result, GRPC_ARG_SERVICE_CONFIG);
const char* service_config_json = grpc_channel_arg_get_string(channel_arg);
if (service_config_json != nullptr) {
service_config_json_.reset(gpr_strdup(service_config_json));
@ -60,7 +60,7 @@ void ProcessedResolverResult::ProcessServiceConfig(
if (service_config_ != nullptr) {
if (parse_retry) {
channel_arg =
- grpc_channel_args_find(resolver_result, GRPC_ARG_SERVER_URI);
+ grpc_channel_args_find(&resolver_result, GRPC_ARG_SERVER_URI);
const char* server_uri = grpc_channel_arg_get_string(channel_arg);
GPR_ASSERT(server_uri != nullptr);
grpc_uri* uri = grpc_uri_parse(server_uri, true);
@ -78,7 +78,7 @@ void ProcessedResolverResult::ProcessServiceConfig(
}
void ProcessedResolverResult::ProcessLbPolicyName(
- const grpc_channel_args* resolver_result) {
+ const grpc_channel_args& resolver_result) {
// Prefer the LB policy name found in the service config. Note that this is
// checking the deprecated loadBalancingPolicy field, rather than the new
// loadBalancingConfig field.
@ -96,13 +96,13 @@ void ProcessedResolverResult::ProcessLbPolicyName(
// Otherwise, find the LB policy name set by the client API.
if (lb_policy_name_ == nullptr) {
const grpc_arg* channel_arg =
- grpc_channel_args_find(resolver_result, GRPC_ARG_LB_POLICY_NAME);
+ grpc_channel_args_find(&resolver_result, GRPC_ARG_LB_POLICY_NAME);
lb_policy_name_.reset(gpr_strdup(grpc_channel_arg_get_string(channel_arg)));
}
// Special case: If at least one balancer address is present, we use
// the grpclb policy, regardless of what the resolver has returned.
const ServerAddressList* addresses =
- FindServerAddressListChannelArg(resolver_result);
+ FindServerAddressListChannelArg(&resolver_result);
if (addresses != nullptr) {
bool found_balancer_address = false;
for (size_t i = 0; i < addresses->size(); ++i) {

@ -36,8 +36,7 @@ namespace internal {
class ClientChannelMethodParams;
// A table mapping from a method name to its method parameters.
- typedef grpc_core::SliceHashTable<
- grpc_core::RefCountedPtr<ClientChannelMethodParams>>
+ typedef SliceHashTable<RefCountedPtr<ClientChannelMethodParams>>
ClientChannelMethodParamsTable;
// A container of processed fields from the resolver result. Simplifies the
@ -47,33 +46,30 @@ class ProcessedResolverResult {
// Processes the resolver result and populates the relative members
// for later consumption. Tries to parse retry parameters only if parse_retry
// is true.
- ProcessedResolverResult(const grpc_channel_args* resolver_result,
+ ProcessedResolverResult(const grpc_channel_args& resolver_result,
bool parse_retry);
// Getters. Any managed object's ownership is transferred.
- grpc_core::UniquePtr<char> service_config_json() {
+ UniquePtr<char> service_config_json() {
return std::move(service_config_json_);
}
- grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data() {
+ RefCountedPtr<ServerRetryThrottleData> retry_throttle_data() {
return std::move(retry_throttle_data_);
}
- grpc_core::RefCountedPtr<ClientChannelMethodParamsTable>
- method_params_table() {
+ RefCountedPtr<ClientChannelMethodParamsTable> method_params_table() {
return std::move(method_params_table_);
}
- grpc_core::UniquePtr<char> lb_policy_name() {
- return std::move(lb_policy_name_);
- }
+ UniquePtr<char> lb_policy_name() { return std::move(lb_policy_name_); }
grpc_json* lb_policy_config() { return lb_policy_config_; }
private:
// Finds the service config; extracts LB config and (maybe) retry throttle
// params from it.
- void ProcessServiceConfig(const grpc_channel_args* resolver_result,
+ void ProcessServiceConfig(const grpc_channel_args& resolver_result,
bool parse_retry);
// Finds the LB policy name (when no LB config was found).
- void ProcessLbPolicyName(const grpc_channel_args* resolver_result);
+ void ProcessLbPolicyName(const grpc_channel_args& resolver_result);
// Parses the service config. Intended to be used by
// ServiceConfig::ParseGlobalParams.
@ -85,16 +81,16 @@ class ProcessedResolverResult {
void ParseRetryThrottleParamsFromServiceConfig(const grpc_json* field);
// Service config.
- grpc_core::UniquePtr<char> service_config_json_;
- grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config_;
+ UniquePtr<char> service_config_json_;
+ UniquePtr<grpc_core::ServiceConfig> service_config_;
// LB policy.
grpc_json* lb_policy_config_ = nullptr;
- grpc_core::UniquePtr<char> lb_policy_name_;
+ UniquePtr<char> lb_policy_name_;
// Retry throttle data.
char* server_name_ = nullptr;
- grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data_;
+ RefCountedPtr<ServerRetryThrottleData> retry_throttle_data_;
// Method params table.
- grpc_core::RefCountedPtr<ClientChannelMethodParamsTable> method_params_table_;
+ RefCountedPtr<ClientChannelMethodParamsTable> method_params_table_;
};
// The parameters of a method.
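Call-site impact of the pointer-to-reference change above, sketched with illustrative variable names: a caller holding a grpc_channel_args* now dereferences it, and the getters still transfer ownership.

#include "src/core/ext/filters/client_channel/resolver_result_parsing.h"

void OnResolverResult(const grpc_channel_args* resolver_result) {
  // before: ProcessedResolverResult result(resolver_result, true);
  grpc_core::internal::ProcessedResolverResult result(*resolver_result,
                                                      /* parse_retry */ true);
  // Ownership of the parsed name moves out of `result` here.
  grpc_core::UniquePtr<char> lb_policy_name = result.lb_policy_name();
}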

@ -170,7 +170,12 @@ grpc_chttp2_transport::~grpc_chttp2_transport() {
grpc_slice_buffer_destroy_internal(&outbuf);
grpc_chttp2_hpack_compressor_destroy(&hpack_compressor);
- grpc_core::ContextList::Execute(cl, nullptr, GRPC_ERROR_NONE);
+ grpc_error* error =
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport destroyed");
+ // ContextList::Execute follows the semantics of a callback function and
+ // does not take a ref on error.
+ grpc_core::ContextList::Execute(cl, nullptr, error);
+ GRPC_ERROR_UNREF(error);
cl = nullptr;
grpc_slice_buffer_destroy_internal(&read_buffer);

@ -21,31 +21,47 @@
#include "src/core/ext/transport/chttp2/transport/context_list.h"
namespace {
- void (*write_timestamps_callback_g)(void*, grpc_core::Timestamps*) = nullptr;
- }
+ void (*write_timestamps_callback_g)(void*, grpc_core::Timestamps*,
+ grpc_error* error) = nullptr;
+ void* (*get_copied_context_fn_g)(void*) = nullptr;
+ } // namespace
namespace grpc_core {
void ContextList::Append(ContextList** head, grpc_chttp2_stream* s) {
if (get_copied_context_fn_g == nullptr ||
write_timestamps_callback_g == nullptr) {
return;
}
/* Create a new element in the list and add it at the front */
ContextList* elem = grpc_core::New<ContextList>();
elem->trace_context_ = get_copied_context_fn_g(s->context);
elem->byte_offset_ = s->byte_counter;
elem->next_ = *head;
*head = elem;
}
void ContextList::Execute(void* arg, grpc_core::Timestamps* ts,
grpc_error* error) {
ContextList* head = static_cast<ContextList*>(arg);
ContextList* to_be_freed;
while (head != nullptr) {
- if (error == GRPC_ERROR_NONE && ts != nullptr) {
- if (write_timestamps_callback_g) {
- ts->byte_offset = static_cast<uint32_t>(head->byte_offset_);
- write_timestamps_callback_g(head->s_->context, ts);
- }
- }
- GRPC_CHTTP2_STREAM_UNREF(static_cast<grpc_chttp2_stream*>(head->s_),
- "timestamp");
+ if (write_timestamps_callback_g) {
+ ts->byte_offset = static_cast<uint32_t>(head->byte_offset_);
+ write_timestamps_callback_g(head->trace_context_, ts, error);
+ }
to_be_freed = head;
head = head->next_;
grpc_core::Delete(to_be_freed);
}
}
- void grpc_http2_set_write_timestamps_callback(
- void (*fn)(void*, grpc_core::Timestamps*)) {
+ void grpc_http2_set_write_timestamps_callback(void (*fn)(void*,
+ grpc_core::Timestamps*,
+ grpc_error* error)) {
write_timestamps_callback_g = fn;
}
void grpc_http2_set_fn_get_copied_context(void* (*fn)(void*)) {
get_copied_context_fn_g = fn;
}
} /* namespace grpc_core */
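A hedged sketch of how a tracing layer might install the two hooks above. The setters are the real functions from this file; the callbacks themselves are invented here. Note the new contract: the timestamps callback receives the write error but does not own a ref on it.

void MyWriteTimestamps(void* trace_context, grpc_core::Timestamps* ts,
                       grpc_error* error) {
  // Record ts->byte_offset etc. against trace_context. `error` is borrowed:
  // do not GRPC_ERROR_UNREF it here.
}

void* MyCopyContext(void* stream_context) {
  // A real implementation returns a heap copy that outlives the stream;
  // returning the input unchanged is only a placeholder for this sketch.
  return stream_context;
}

void InstallTimestampHooks() {
  grpc_core::grpc_http2_set_write_timestamps_callback(MyWriteTimestamps);
  grpc_core::grpc_http2_set_fn_get_copied_context(MyCopyContext);
}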

@ -31,42 +31,23 @@ class ContextList {
public:
/* Creates a new element with \a context as the value and appends it to the
* list. */
- static void Append(ContextList** head, grpc_chttp2_stream* s) {
- /* Make sure context is not already present */
- GRPC_CHTTP2_STREAM_REF(s, "timestamp");
- #ifndef NDEBUG
- ContextList* ptr = *head;
- while (ptr != nullptr) {
- if (ptr->s_ == s) {
- GPR_ASSERT(
- false &&
- "Trying to append a stream that is already present in the list");
- }
- ptr = ptr->next_;
- }
- #endif
- /* Create a new element in the list and add it at the front */
- ContextList* elem = grpc_core::New<ContextList>();
- elem->s_ = s;
- elem->byte_offset_ = s->byte_counter;
- elem->next_ = *head;
- *head = elem;
- }
+ static void Append(ContextList** head, grpc_chttp2_stream* s);
/* Executes a function \a fn with each context in the list and \a ts. It also
- * frees up the entire list after this operation. */
+ * frees up the entire list after this operation. It is intended as a callback
+ * and hence does not take a ref on \a error */
static void Execute(void* arg, grpc_core::Timestamps* ts, grpc_error* error);
private:
- grpc_chttp2_stream* s_ = nullptr;
+ void* trace_context_ = nullptr;
ContextList* next_ = nullptr;
size_t byte_offset_ = 0;
};
- void grpc_http2_set_write_timestamps_callback(
- void (*fn)(void*, grpc_core::Timestamps*));
+ void grpc_http2_set_write_timestamps_callback(void (*fn)(void*,
+ grpc_core::Timestamps*,
+ grpc_error* error));
void grpc_http2_set_fn_get_copied_context(void* (*fn)(void*));
} /* namespace grpc_core */
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_CONTEXT_LIST_H */

@ -126,6 +126,7 @@ struct grpc_tcp {
int bytes_counter;
bool socket_ts_enabled; /* True if timestamping options are set on the socket
*/
bool ts_capable; /* Cache whether we can set timestamping options */
gpr_atm
stop_error_notification; /* Set to 1 if we do not want to be notified on
errors anymore */
@ -589,7 +590,7 @@ ssize_t tcp_send(int fd, const struct msghdr* msg) {
*/
static bool tcp_write_with_timestamps(grpc_tcp* tcp, struct msghdr* msg,
size_t sending_length,
- ssize_t* sent_length, grpc_error** error);
+ ssize_t* sent_length);
/** The callback function to be invoked when we get an error on the socket. */
static void tcp_handle_error(void* arg /* grpc_tcp */, grpc_error* error);
@ -597,13 +598,11 @@ static void tcp_handle_error(void* arg /* grpc_tcp */, grpc_error* error);
#ifdef GRPC_LINUX_ERRQUEUE
static bool tcp_write_with_timestamps(grpc_tcp* tcp, struct msghdr* msg,
size_t sending_length,
- ssize_t* sent_length,
- grpc_error** error) {
+ ssize_t* sent_length) {
if (!tcp->socket_ts_enabled) {
uint32_t opt = grpc_core::kTimestampingSocketOptions;
if (setsockopt(tcp->fd, SOL_SOCKET, SO_TIMESTAMPING,
static_cast<void*>(&opt), sizeof(opt)) != 0) {
*error = tcp_annotate_error(GRPC_OS_ERROR(errno, "setsockopt"), tcp);
grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_ERROR, "Failed to set timestamping options on the socket.");
@ -784,8 +783,7 @@ static void tcp_handle_error(void* arg /* grpc_tcp */, grpc_error* error) {
#else /* GRPC_LINUX_ERRQUEUE */
static bool tcp_write_with_timestamps(grpc_tcp* tcp, struct msghdr* msg,
size_t sending_length,
- ssize_t* sent_length,
- grpc_error** error) {
+ ssize_t* sent_length) {
gpr_log(GPR_ERROR, "Write with timestamps not supported for this platform");
GPR_ASSERT(0);
return false;
@ -804,7 +802,7 @@ void tcp_shutdown_buffer_list(grpc_tcp* tcp) {
gpr_mu_lock(&tcp->tb_mu);
grpc_core::TracedBuffer::Shutdown(
&tcp->tb_head, tcp->outgoing_buffer_arg,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("endpoint destroyed"));
GRPC_ERROR_CREATE_FROM_STATIC_STRING("TracedBuffer list shutdown"));
gpr_mu_unlock(&tcp->tb_mu);
tcp->outgoing_buffer_arg = nullptr;
}
@ -820,7 +818,7 @@ static bool tcp_flush(grpc_tcp* tcp, grpc_error** error) {
struct msghdr msg;
struct iovec iov[MAX_WRITE_IOVEC];
msg_iovlen_type iov_size;
- ssize_t sent_length;
+ ssize_t sent_length = 0;
size_t sending_length;
size_t trailing;
size_t unwind_slice_idx;
@ -855,13 +853,19 @@ static bool tcp_flush(grpc_tcp* tcp, grpc_error** error) {
msg.msg_iov = iov;
msg.msg_iovlen = iov_size;
msg.msg_flags = 0;
+ bool tried_sending_message = false;
if (tcp->outgoing_buffer_arg != nullptr) {
- if (!tcp_write_with_timestamps(tcp, &msg, sending_length, &sent_length,
- error)) {
- return true; /* something went wrong with timestamps */
+ if (!tcp->ts_capable ||
+ !tcp_write_with_timestamps(tcp, &msg, sending_length, &sent_length)) {
+ /* We could not set socket options to collect Fathom timestamps.
+ * Fallback on writing without timestamps. */
+ tcp->ts_capable = false;
+ tcp_shutdown_buffer_list(tcp);
+ } else {
+ tried_sending_message = true;
}
- } else {
+ }
+ if (!tried_sending_message) {
msg.msg_control = nullptr;
msg.msg_controllen = 0;
@ -1117,6 +1121,7 @@ grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
tcp->is_first_read = true;
tcp->bytes_counter = -1;
tcp->socket_ts_enabled = false;
tcp->ts_capable = true;
tcp->outgoing_buffer_arg = nullptr;
/* paired with unref in grpc_tcp_destroy */
gpr_ref_init(&tcp->refcount, 1);
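The new `ts_capable` flag turns the per-write error path into a one-time capability probe: the first time setting the timestamping socket options fails, the transport remembers that and all later writes go straight to the plain send path instead of failing the whole write. A minimal sketch of that capability-caching pattern (in Python, with hypothetical names; the real logic lives in `tcp_posix.cc`):

```python
class TimestampingWriter:
    """Sketch of the capability-caching fallback (hypothetical API)."""

    def __init__(self, sock):
        self._sock = sock
        self._ts_capable = True  # optimistic until the first failure

    def write(self, data):
        if self._ts_capable:
            try:
                return self._write_with_timestamps(data)
            except OSError:
                # Remember the failure so the probe is never retried.
                self._ts_capable = False
        return self._sock.send(data)  # plain fallback path

    def _write_with_timestamps(self, data):
        # Stand-in for setsockopt(SO_TIMESTAMPING) + sendmsg with a cmsg.
        raise OSError("timestamping not supported here")
```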

@ -35,44 +35,6 @@
static void composite_call_metadata_cb(void* arg, grpc_error* error);
grpc_call_credentials_array::~grpc_call_credentials_array() {
for (size_t i = 0; i < num_creds_; ++i) {
creds_array_[i].~RefCountedPtr<grpc_call_credentials>();
}
if (creds_array_ != nullptr) {
gpr_free(creds_array_);
}
}
grpc_call_credentials_array::grpc_call_credentials_array(
const grpc_call_credentials_array& that)
: num_creds_(that.num_creds_) {
reserve(that.capacity_);
for (size_t i = 0; i < num_creds_; ++i) {
new (&creds_array_[i])
grpc_core::RefCountedPtr<grpc_call_credentials>(that.creds_array_[i]);
}
}
void grpc_call_credentials_array::reserve(size_t capacity) {
if (capacity_ >= capacity) {
return;
}
grpc_core::RefCountedPtr<grpc_call_credentials>* new_arr =
static_cast<grpc_core::RefCountedPtr<grpc_call_credentials>*>(gpr_malloc(
sizeof(grpc_core::RefCountedPtr<grpc_call_credentials>) * capacity));
if (creds_array_ != nullptr) {
for (size_t i = 0; i < num_creds_; ++i) {
new (&new_arr[i]) grpc_core::RefCountedPtr<grpc_call_credentials>(
std::move(creds_array_[i]));
creds_array_[i].~RefCountedPtr<grpc_call_credentials>();
}
gpr_free(creds_array_);
}
creds_array_ = new_arr;
capacity_ = capacity;
}
namespace {
struct grpc_composite_call_credentials_metadata_context {
grpc_composite_call_credentials_metadata_context(
@ -103,13 +65,13 @@ static void composite_call_metadata_cb(void* arg, grpc_error* error) {
grpc_composite_call_credentials_metadata_context* ctx =
static_cast<grpc_composite_call_credentials_metadata_context*>(arg);
if (error == GRPC_ERROR_NONE) {
const grpc_call_credentials_array& inner = ctx->composite_creds->inner();
const grpc_composite_call_credentials::CallCredentialsList& inner =
ctx->composite_creds->inner();
/* See if we need to get some more metadata. */
if (ctx->creds_index < inner.size()) {
if (inner.get(ctx->creds_index++)
->get_request_metadata(
ctx->pollent, ctx->auth_md_context, ctx->md_array,
&ctx->internal_on_request_metadata, &error)) {
if (inner[ctx->creds_index++]->get_request_metadata(
ctx->pollent, ctx->auth_md_context, ctx->md_array,
&ctx->internal_on_request_metadata, &error)) {
// Synchronous response, so call ourselves recursively.
composite_call_metadata_cb(arg, error);
GRPC_ERROR_UNREF(error);
@ -130,12 +92,11 @@ bool grpc_composite_call_credentials::get_request_metadata(
ctx = grpc_core::New<grpc_composite_call_credentials_metadata_context>(
this, pollent, auth_md_context, md_array, on_request_metadata);
bool synchronous = true;
const grpc_call_credentials_array& inner = ctx->composite_creds->inner();
const CallCredentialsList& inner = ctx->composite_creds->inner();
while (ctx->creds_index < inner.size()) {
if (inner.get(ctx->creds_index++)
->get_request_metadata(ctx->pollent, ctx->auth_md_context,
ctx->md_array,
&ctx->internal_on_request_metadata, error)) {
if (inner[ctx->creds_index++]->get_request_metadata(
ctx->pollent, ctx->auth_md_context, ctx->md_array,
&ctx->internal_on_request_metadata, error)) {
if (*error != GRPC_ERROR_NONE) break;
} else {
synchronous = false; // Async return.
@ -149,7 +110,7 @@ bool grpc_composite_call_credentials::get_request_metadata(
void grpc_composite_call_credentials::cancel_get_request_metadata(
grpc_credentials_mdelem_array* md_array, grpc_error* error) {
for (size_t i = 0; i < inner_.size(); ++i) {
inner_.get(i)->cancel_get_request_metadata(md_array, GRPC_ERROR_REF(error));
inner_[i]->cancel_get_request_metadata(md_array, GRPC_ERROR_REF(error));
}
GRPC_ERROR_UNREF(error);
}
@ -172,7 +133,7 @@ void grpc_composite_call_credentials::push_to_inner(
auto composite_creds =
static_cast<grpc_composite_call_credentials*>(creds.get());
for (size_t i = 0; i < composite_creds->inner().size(); ++i) {
inner_.push_back(std::move(composite_creds->inner_.get_mutable(i)));
inner_.push_back(std::move(composite_creds->inner_[i]));
}
}

@ -21,43 +21,10 @@
#include <grpc/support/port_platform.h>
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/security/credentials/credentials.h"
// TODO(soheil): Replace this with InlinedVector once #16032 is resolved.
class grpc_call_credentials_array {
public:
grpc_call_credentials_array() = default;
grpc_call_credentials_array(const grpc_call_credentials_array& that);
~grpc_call_credentials_array();
void reserve(size_t capacity);
// Must reserve before pushing any data.
void push_back(grpc_core::RefCountedPtr<grpc_call_credentials> cred) {
GPR_DEBUG_ASSERT(capacity_ > num_creds_);
new (&creds_array_[num_creds_++])
grpc_core::RefCountedPtr<grpc_call_credentials>(std::move(cred));
}
const grpc_core::RefCountedPtr<grpc_call_credentials>& get(size_t i) const {
GPR_DEBUG_ASSERT(i < num_creds_);
return creds_array_[i];
}
grpc_core::RefCountedPtr<grpc_call_credentials>& get_mutable(size_t i) {
GPR_DEBUG_ASSERT(i < num_creds_);
return creds_array_[i];
}
size_t size() const { return num_creds_; }
private:
grpc_core::RefCountedPtr<grpc_call_credentials>* creds_array_ = nullptr;
size_t num_creds_ = 0;
size_t capacity_ = 0;
};
/* -- Composite channel credentials. -- */
class grpc_composite_channel_credentials : public grpc_channel_credentials {
@ -97,6 +64,10 @@ class grpc_composite_channel_credentials : public grpc_channel_credentials {
class grpc_composite_call_credentials : public grpc_call_credentials {
public:
using CallCredentialsList =
grpc_core::InlinedVector<grpc_core::RefCountedPtr<grpc_call_credentials>,
2>;
grpc_composite_call_credentials(
grpc_core::RefCountedPtr<grpc_call_credentials> creds1,
grpc_core::RefCountedPtr<grpc_call_credentials> creds2);
@ -111,13 +82,13 @@ class grpc_composite_call_credentials : public grpc_call_credentials {
void cancel_get_request_metadata(grpc_credentials_mdelem_array* md_array,
grpc_error* error) override;
const grpc_call_credentials_array& inner() const { return inner_; }
const CallCredentialsList& inner() const { return inner_; }
private:
void push_to_inner(grpc_core::RefCountedPtr<grpc_call_credentials> creds,
bool is_composite);
grpc_call_credentials_array inner_;
CallCredentialsList inner_;
};
#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_COMPOSITE_COMPOSITE_CREDENTIALS_H \

@ -31,7 +31,9 @@
#include <grpc/support/sync.h>
extern "C" {
#include <openssl/bn.h>
#include <openssl/pem.h>
#include <openssl/rsa.h>
}
#include "src/core/lib/gpr/string.h"

@ -156,9 +156,13 @@ static unsigned long openssl_thread_id_cb(void) {
#endif
static void init_openssl(void) {
#if OPENSSL_API_COMPAT >= 0x10100000L
OPENSSL_init_ssl(0, NULL);
#else
SSL_library_init();
SSL_load_error_strings();
OpenSSL_add_all_algorithms();
#endif
#if OPENSSL_VERSION_NUMBER < 0x10100000
if (!CRYPTO_get_locking_callback()) {
int num_locks = CRYPTO_num_locks();
@ -1649,7 +1653,11 @@ tsi_result tsi_create_ssl_client_handshaker_factory_with_options(
return TSI_INVALID_ARGUMENT;
}
#if defined(OPENSSL_NO_TLS1_2_METHOD) || OPENSSL_API_COMPAT >= 0x10100000L
ssl_context = SSL_CTX_new(TLS_method());
#else
ssl_context = SSL_CTX_new(TLSv1_2_method());
#endif
if (ssl_context == nullptr) {
gpr_log(GPR_ERROR, "Could not create ssl context.");
return TSI_INVALID_ARGUMENT;
@ -1806,7 +1814,11 @@ tsi_result tsi_create_ssl_server_handshaker_factory_with_options(
for (i = 0; i < options->num_key_cert_pairs; i++) {
do {
#if defined(OPENSSL_NO_TLS1_2_METHOD) || OPENSSL_API_COMPAT >= 0x10100000L
impl->ssl_contexts[i] = SSL_CTX_new(TLS_method());
#else
impl->ssl_contexts[i] = SSL_CTX_new(TLSv1_2_method());
#endif
if (impl->ssl_contexts[i] == nullptr) {
gpr_log(GPR_ERROR, "Could not create ssl context.");
result = TSI_OUT_OF_RESOURCES;

@ -106,7 +106,9 @@ void ChannelArguments::SetSocketMutator(grpc_socket_mutator* mutator) {
}
if (!replaced) {
strings_.push_back(grpc::string(mutator_arg.key));
args_.push_back(mutator_arg);
args_.back().key = const_cast<char*>(strings_.back().c_str());
}
}

@ -28,6 +28,9 @@ using ::opencensus::trace::SpanContext;
void GenerateServerContext(absl::string_view tracing, absl::string_view stats,
absl::string_view primary_role,
absl::string_view method, CensusContext* context) {
// Destruct the current CensusContext to free the Span memory before
// overwriting it below.
context->~CensusContext();
GrpcTraceContext trace_ctxt;
if (TraceContextEncoding::Decode(tracing, &trace_ctxt) !=
TraceContextEncoding::kEncodeDecodeFailure) {
@ -42,6 +45,9 @@ void GenerateServerContext(absl::string_view tracing, absl::string_view stats,
void GenerateClientContext(absl::string_view method, CensusContext* ctxt,
CensusContext* parent_ctxt) {
// Destruct the current CensusContext to free the Span memory before
// overwriting it below.
ctxt->~CensusContext();
if (parent_ctxt != nullptr) {
SpanContext span_ctxt = parent_ctxt->Context();
Span span = parent_ctxt->Span();

@ -45,7 +45,7 @@ namespace Grpc.IntegrationTesting
[Option("server_host", Default = "localhost")]
public string ServerHost { get; set; }
[Option("server_host_override", Default = TestCredentials.DefaultHostOverride)]
[Option("server_host_override")]
public string ServerHostOverride { get; set; }
[Option("server_port", Required = true)]

@ -22,9 +22,8 @@
<Target Name="gRPC_ResolvePluginFullPath" AfterTargets="Protobuf_ResolvePlatform">
<PropertyGroup>
<!-- TODO(kkm): Do not use Protobuf_PackagedToolsPath, roll gRPC's own. -->
<!-- TODO(kkm): Do not package windows x64 builds (#13098). -->
<gRPC_PluginFullPath Condition=" '$(gRPC_PluginFullPath)' == '' and '$(Protobuf_ToolsOs)' == 'windows' "
>$(Protobuf_PackagedToolsPath)\$(Protobuf_ToolsOs)_x86\$(gRPC_PluginFileName).exe</gRPC_PluginFullPath>
>$(Protobuf_PackagedToolsPath)\$(Protobuf_ToolsOs)_$(Protobuf_ToolsCpu)\$(gRPC_PluginFileName).exe</gRPC_PluginFullPath>
<gRPC_PluginFullPath Condition=" '$(gRPC_PluginFullPath)' == '' "
>$(Protobuf_PackagedToolsPath)/$(Protobuf_ToolsOs)_$(Protobuf_ToolsCpu)/$(gRPC_PluginFileName)</gRPC_PluginFullPath>
</PropertyGroup>

@ -74,9 +74,8 @@
<!-- Next try OS and CPU resolved by ProtoToolsPlatform. -->
<Protobuf_ToolsOs Condition=" '$(Protobuf_ToolsOs)' == '' ">$(_Protobuf_ToolsOs)</Protobuf_ToolsOs>
<Protobuf_ToolsCpu Condition=" '$(Protobuf_ToolsCpu)' == '' ">$(_Protobuf_ToolsCpu)</Protobuf_ToolsCpu>
<!-- TODO(kkm): Do not package windows x64 builds (#13098). -->
<Protobuf_ProtocFullPath Condition=" '$(Protobuf_ProtocFullPath)' == '' and '$(Protobuf_ToolsOs)' == 'windows' "
>$(Protobuf_PackagedToolsPath)\$(Protobuf_ToolsOs)_x86\protoc.exe</Protobuf_ProtocFullPath>
>$(Protobuf_PackagedToolsPath)\$(Protobuf_ToolsOs)_$(Protobuf_ToolsCpu)\protoc.exe</Protobuf_ProtocFullPath>
<Protobuf_ProtocFullPath Condition=" '$(Protobuf_ProtocFullPath)' == '' "
>$(Protobuf_PackagedToolsPath)/$(Protobuf_ToolsOs)_$(Protobuf_ToolsCpu)/protoc</Protobuf_ProtocFullPath>
</PropertyGroup>

@ -242,3 +242,12 @@ pod `gRPC-Core`, :podspec => "." # assuming gRPC-Core.podspec is in the same dir
These steps should allow gRPC to use OpenSSL and drop the BoringSSL dependency. If you run into
any problems, please file an issue.
## Upgrade issue with BoringSSL
If you were using an old version of gRPC (<= v1.14) that depended on pod `BoringSSL` rather than
`BoringSSL-GRPC` and you hit a linker error such as:
```
ld: framework not found openssl
```
updating `-framework openssl` to `-framework openssl_grpc` in your project's Other Linker Flags
may resolve the issue (see [#16821](https://github.com/grpc/grpc/issues/16821)).

@ -530,7 +530,7 @@ function _makeStub($args)
throw new Exception('Missing argument: --test_case is required');
}
if ($args['server_port'] === 443) {
if ($args['server_port'] === '443') {
$server_address = $args['server_host'];
} else {
$server_address = $args['server_host'].':'.$args['server_port'];
@ -538,7 +538,7 @@ function _makeStub($args)
$test_case = $args['test_case'];
$host_override = 'foo.test.google.fr';
$host_override = '';
if (array_key_exists('server_host_override', $args)) {
$host_override = $args['server_host_override'];
}
@ -565,7 +565,9 @@ function _makeStub($args)
$ssl_credentials = Grpc\ChannelCredentials::createSsl();
}
$opts['credentials'] = $ssl_credentials;
$opts['grpc.ssl_target_name_override'] = $host_override;
if (!empty($host_override)) {
$opts['grpc.ssl_target_name_override'] = $host_override;
}
} else {
$opts['credentials'] = Grpc\ChannelCredentials::createInsecure();
}

@ -46,7 +46,7 @@ class GoogleCallCredentials(grpc.AuthMetadataPlugin):
# Hack to determine if these are JWT creds and we need to pass
# additional_claims when getting a token
self._is_jwt = 'additional_claims' in inspect.getargspec(
self._is_jwt = 'additional_claims' in inspect.getargspec( # pylint: disable=deprecated-method
credentials.get_access_token).args
def __call__(self, context, callback):
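A side note on the suppression above: `inspect.getargspec` has been deprecated since Python 3.0 (and is removed in newer releases); where Python 2 support is not required, `inspect.getfullargspec` is the drop-in replacement. A small sketch (the helper name is hypothetical):

```python
import inspect

def takes_additional_claims(fn):
    # getfullargspec replaces the deprecated getargspec on Python 3.
    return 'additional_claims' in inspect.getfullargspec(fn).args
```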

@ -526,7 +526,7 @@ class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
state, operations, deadline, rendezvous = self._prepare(
request, timeout, metadata, wait_for_ready)
if state is None:
raise rendezvous
raise rendezvous # pylint: disable-msg=raising-bad-type
else:
call = self._channel.segregated_call(
cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
@ -537,7 +537,7 @@ class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
),), self._context)
event = call.next_event()
_handle_event(event, state, self._response_deserializer)
return state, call,
return state, call
def __call__(self,
request,
@ -568,7 +568,7 @@ class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
state, operations, deadline, rendezvous = self._prepare(
request, timeout, metadata, wait_for_ready)
if state is None:
raise rendezvous
raise rendezvous # pylint: disable-msg=raising-bad-type
else:
event_handler = _event_handler(state, self._response_deserializer)
call = self._managed_call(
@ -603,7 +603,7 @@ class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
wait_for_ready)
if serialized_request is None:
raise rendezvous
raise rendezvous # pylint: disable-msg=raising-bad-type
else:
state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
operationses = (
@ -660,7 +660,7 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
state.condition.notify_all()
if not state.due:
break
return state, call,
return state, call
def __call__(self,
request_iterator,
@ -755,10 +755,10 @@ class _InitialMetadataFlags(int):
def with_wait_for_ready(self, wait_for_ready):
if wait_for_ready is not None:
if wait_for_ready:
self = self.__class__(self | cygrpc.InitialMetadataFlags.wait_for_ready | \
return self.__class__(self | cygrpc.InitialMetadataFlags.wait_for_ready | \
cygrpc.InitialMetadataFlags.wait_for_ready_explicitly_set)
elif not wait_for_ready:
self = self.__class__(self & ~cygrpc.InitialMetadataFlags.wait_for_ready | \
return self.__class__(self & ~cygrpc.InitialMetadataFlags.wait_for_ready | \
cygrpc.InitialMetadataFlags.wait_for_ready_explicitly_set)
return self
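The `with_wait_for_ready` fix deserves a note: `_InitialMetadataFlags` subclasses the immutable `int`, so `self = self.__class__(...)` only rebinds the local name inside the method and the caller never sees the updated flags; the corrected code returns the new instance instead. A minimal reproduction of the bug pattern:

```python
class Flags(int):
    WAIT_FOR_READY = 0x1

    def broken_with_flag(self):
        # Rebinding the local name 'self' does not affect the caller.
        self = Flags(self | Flags.WAIT_FOR_READY)

    def with_flag(self):
        # int is immutable, so the new value must be returned.
        return Flags(self | Flags.WAIT_FOR_READY)

f = Flags(0)
f.broken_with_flag()
assert f == 0                # unchanged: the bug
assert f.with_flag() == 0x1  # the fix
```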

@ -49,7 +49,7 @@ cdef grpc_event _next(grpc_completion_queue *c_completion_queue, deadline):
cdef _interpret_event(grpc_event c_event):
cdef _Tag tag
if c_event.type == GRPC_QUEUE_TIMEOUT:
# NOTE(nathaniel): For now we coopt ConnectivityEvent here.
# TODO(ericgribkoff) Do not coopt ConnectivityEvent here.
return None, ConnectivityEvent(GRPC_QUEUE_TIMEOUT, False, None)
elif c_event.type == GRPC_QUEUE_SHUTDOWN:
# NOTE(nathaniel): For now we coopt ConnectivityEvent here.

@ -128,7 +128,10 @@ cdef class Server:
with nogil:
grpc_server_cancel_all_calls(self.c_server)
def __dealloc__(self):
# TODO(https://github.com/grpc/grpc/issues/17515) Determine what, if any,
# portion of this is safe to call from __dealloc__, and potentially remove
# backup_shutdown_queue.
def destroy(self):
if self.c_server != NULL:
if not self.is_started:
pass
@ -146,4 +149,8 @@ cdef class Server:
while not self.is_shutdown:
time.sleep(0)
grpc_server_destroy(self.c_server)
grpc_shutdown()
self.c_server = NULL
def __dealloc__(self):
if self.c_server == NULL:
grpc_shutdown()

@ -48,7 +48,7 @@ _CANCELLED = 'cancelled'
_EMPTY_FLAGS = 0
_UNEXPECTED_EXIT_SERVER_GRACE = 1.0
_DEALLOCATED_SERVER_CHECK_PERIOD_S = 1.0
def _serialized_request(request_event):
@ -676,6 +676,9 @@ class _ServerState(object):
self.rpc_states = set()
self.due = set()
# A "volatile" flag to interrupt the daemon serving thread
self.server_deallocated = False
def _add_generic_handlers(state, generic_handlers):
with state.lock:
@ -702,6 +705,7 @@ def _request_call(state):
# TODO(https://github.com/grpc/grpc/issues/6597): delete this function.
def _stop_serving(state):
if not state.rpc_states and not state.due:
state.server.destroy()
for shutdown_event in state.shutdown_events:
shutdown_event.set()
state.stage = _ServerStage.STOPPED
@ -715,49 +719,69 @@ def _on_call_completed(state):
state.active_rpc_count -= 1
def _serve(state):
while True:
event = state.completion_queue.poll()
if event.tag is _SHUTDOWN_TAG:
def _process_event_and_continue(state, event):
should_continue = True
if event.tag is _SHUTDOWN_TAG:
with state.lock:
state.due.remove(_SHUTDOWN_TAG)
if _stop_serving(state):
should_continue = False
elif event.tag is _REQUEST_CALL_TAG:
with state.lock:
state.due.remove(_REQUEST_CALL_TAG)
concurrency_exceeded = (
state.maximum_concurrent_rpcs is not None and
state.active_rpc_count >= state.maximum_concurrent_rpcs)
rpc_state, rpc_future = _handle_call(
event, state.generic_handlers, state.interceptor_pipeline,
state.thread_pool, concurrency_exceeded)
if rpc_state is not None:
state.rpc_states.add(rpc_state)
if rpc_future is not None:
state.active_rpc_count += 1
rpc_future.add_done_callback(
lambda unused_future: _on_call_completed(state))
if state.stage is _ServerStage.STARTED:
_request_call(state)
elif _stop_serving(state):
should_continue = False
else:
rpc_state, callbacks = event.tag(event)
for callback in callbacks:
callable_util.call_logging_exceptions(callback,
'Exception calling callback!')
if rpc_state is not None:
with state.lock:
state.due.remove(_SHUTDOWN_TAG)
state.rpc_states.remove(rpc_state)
if _stop_serving(state):
return
elif event.tag is _REQUEST_CALL_TAG:
with state.lock:
state.due.remove(_REQUEST_CALL_TAG)
concurrency_exceeded = (
state.maximum_concurrent_rpcs is not None and
state.active_rpc_count >= state.maximum_concurrent_rpcs)
rpc_state, rpc_future = _handle_call(
event, state.generic_handlers, state.interceptor_pipeline,
state.thread_pool, concurrency_exceeded)
if rpc_state is not None:
state.rpc_states.add(rpc_state)
if rpc_future is not None:
state.active_rpc_count += 1
rpc_future.add_done_callback(
lambda unused_future: _on_call_completed(state))
if state.stage is _ServerStage.STARTED:
_request_call(state)
elif _stop_serving(state):
return
else:
rpc_state, callbacks = event.tag(event)
for callback in callbacks:
callable_util.call_logging_exceptions(
callback, 'Exception calling callback!')
if rpc_state is not None:
with state.lock:
state.rpc_states.remove(rpc_state)
if _stop_serving(state):
return
should_continue = False
return should_continue
def _serve(state):
while True:
timeout = time.time() + _DEALLOCATED_SERVER_CHECK_PERIOD_S
event = state.completion_queue.poll(timeout)
if state.server_deallocated:
_begin_shutdown_once(state)
if event.completion_type != cygrpc.CompletionType.queue_timeout:
if not _process_event_and_continue(state, event):
return
# We want to force the deletion of the previous event
# ~before~ we poll again; if the event has a reference
# to a shutdown Call object, this can induce a spinlock.
event = None
def _begin_shutdown_once(state):
with state.lock:
if state.stage is _ServerStage.STARTED:
state.server.shutdown(state.completion_queue, _SHUTDOWN_TAG)
state.stage = _ServerStage.GRACE
state.shutdown_events = []
state.due.add(_SHUTDOWN_TAG)
def _stop(state, grace):
with state.lock:
if state.stage is _ServerStage.STOPPED:
@ -765,11 +789,7 @@ def _stop(state, grace):
shutdown_event.set()
return shutdown_event
else:
if state.stage is _ServerStage.STARTED:
state.server.shutdown(state.completion_queue, _SHUTDOWN_TAG)
state.stage = _ServerStage.GRACE
state.shutdown_events = []
state.due.add(_SHUTDOWN_TAG)
_begin_shutdown_once(state)
shutdown_event = threading.Event()
state.shutdown_events.append(shutdown_event)
if grace is None:
@ -840,7 +860,9 @@ class _Server(grpc.Server):
return _stop(self._state, grace)
def __del__(self):
_stop(self._state, None)
# We can not grab a lock in __del__(), so set a flag to signal the
# serving daemon thread (if it exists) to initiate shutdown.
self._state.server_deallocated = True
def create_server(thread_pool, generic_rpc_handlers, interceptors, options,
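Taken together, these `_server.py` hunks implement shutdown-on-deallocation: `__del__` must not take locks (it can run on an arbitrary thread, even during interpreter teardown), so it only sets `server_deallocated`, and the serving thread observes the flag because `poll()` now uses a periodic deadline instead of blocking indefinitely. A self-contained sketch of that cooperation, with hypothetical names and a plain queue standing in for the completion queue:

```python
import queue

_CHECK_PERIOD_S = 1.0  # mirrors _DEALLOCATED_SERVER_CHECK_PERIOD_S


class _State:
    def __init__(self):
        self.deallocated = False   # "volatile" flag, written from __del__
        self.shutting_down = False
        self.events = queue.Queue()


def _serve(state):
    while True:
        try:
            # Poll with a timeout so the deallocation flag is re-checked
            # about once a second even when no events arrive.
            event = state.events.get(timeout=_CHECK_PERIOD_S)
        except queue.Empty:
            event = None
        if state.deallocated and not state.shutting_down:
            state.shutting_down = True  # stand-in for _begin_shutdown_once
        if event == 'shutdown-complete':
            return
        # Drop the reference before polling again so a completed event
        # cannot keep a shut-down call alive across iterations.
        event = None
```

A `__del__` that does nothing but flip `state.deallocated` is then safe no matter which thread runs it.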

@ -132,15 +132,12 @@ class _ChannelReadyFuture(grpc.Future):
def result(self, timeout=None):
self._block(timeout)
return None
def exception(self, timeout=None):
self._block(timeout)
return None
def traceback(self, timeout=None):
self._block(timeout)
return None
def add_done_callback(self, fn):
with self._condition:

@ -326,6 +326,7 @@ CORE_SOURCE_FILES = [
'src/core/ext/filters/client_channel/parse_address.cc',
'src/core/ext/filters/client_channel/proxy_mapper.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/request_routing.cc',
'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',

@ -24,7 +24,7 @@ import grpc
import google.protobuf # pylint: disable=unused-import
from google.rpc import status_pb2
_CODE_TO_GRPC_CODE_MAPPING = dict([(x.value[0], x) for x in grpc.StatusCode])
_CODE_TO_GRPC_CODE_MAPPING = {x.value[0]: x for x in grpc.StatusCode}
_GRPC_DETAILS_METADATA_KEY = 'grpc-status-details-bin'

@ -185,7 +185,7 @@ class _Handler(Handler):
elif self._code is None:
self._condition.wait()
else:
return self._trailing_metadata, self._code, self._details,
return self._trailing_metadata, self._code, self._details
def expire(self):
with self._condition:
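The hunk above is one of several in this change that drop a trailing comma after a bare `return a, b`. The comma after the last element does not change the resulting tuple, so the edit is purely cosmetic; a quick demonstration:

```python
def with_comma():
    return 1, 2,

def without_comma():
    return 1, 2

assert with_comma() == without_comma() == (1, 2)
```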

@ -22,7 +22,6 @@ import re
import shutil
import subprocess
import sys
import traceback
import setuptools
from setuptools.command import build_ext
@ -134,6 +133,7 @@ class TestGevent(setuptools.Command):
# This test gets stuck when running under newer versions of gevent
'unit._auth_context_test.AuthContextTest.testSessionResumption',
# TODO(https://github.com/grpc/grpc/issues/15411) enable these tests
'unit._metadata_flags_test',
'unit._exit_test.ExitTest.test_in_flight_unary_unary_call',
'unit._exit_test.ExitTest.test_in_flight_unary_stream_call',
'unit._exit_test.ExitTest.test_in_flight_stream_unary_call',

@ -37,13 +37,19 @@ PACKAGE_DIRECTORIES = {
}
INSTALL_REQUIRES = (
'coverage>=4.0', 'enum34>=1.0.4',
'coverage>=4.0',
'enum34>=1.0.4',
'grpcio>={version}'.format(version=grpc_version.VERSION),
'grpcio-channelz>={version}'.format(version=grpc_version.VERSION),
# TODO(https://github.com/pypa/warehouse/issues/5196)
# Re-enable it once we get the name back
# 'grpcio-channelz>={version}'.format(version=grpc_version.VERSION),
'grpcio-status>={version}'.format(version=grpc_version.VERSION),
'grpcio-tools>={version}'.format(version=grpc_version.VERSION),
'grpcio-health-checking>={version}'.format(version=grpc_version.VERSION),
'oauth2client>=1.4.7', 'protobuf>=3.6.0', 'six>=1.10', 'google-auth>=1.0.0',
'oauth2client>=1.4.7',
'protobuf>=3.6.0',
'six>=1.10',
'google-auth>=1.0.0',
'requests>=2.14.2')
if not PY3:

@ -203,7 +203,7 @@ class Runner(object):
check_kill_self()
time.sleep(0)
case_thread.join()
except:
except: # pylint: disable=try-except-raise
# re-raise the exception after forcing the with-block to end
raise
result.set_output(augmented_case.case, stdout_pipe.output(),

@ -88,11 +88,10 @@ def _generate_channel_server_pairs(n):
def _close_channel_server_pairs(pairs):
for pair in pairs:
pair.server.stop(None)
# TODO(ericgribkoff) This del should not be required
del pair.server
pair.channel.close()
@unittest.skip('https://github.com/pypa/warehouse/issues/5196')
class ChannelzServicerTest(unittest.TestCase):
def _send_successful_unary_unary(self, idx):

@ -39,8 +39,12 @@ class HealthServicerTest(unittest.TestCase):
health_pb2_grpc.add_HealthServicer_to_server(servicer, self._server)
self._server.start()
channel = grpc.insecure_channel('localhost:%d' % port)
self._stub = health_pb2_grpc.HealthStub(channel)
self._channel = grpc.insecure_channel('localhost:%d' % port)
self._stub = health_pb2_grpc.HealthStub(self._channel)
def tearDown(self):
self._server.stop(None)
self._channel.close()
def test_empty_service(self):
request = health_pb2.HealthCheckRequest()

@ -54,7 +54,6 @@ def _args():
help='replace platform root CAs with ca.pem')
parser.add_argument(
'--server_host_override',
default="foo.test.google.fr",
type=str,
help='the server host to which to claim to connect')
parser.add_argument(
@ -100,10 +99,13 @@ def _stub(args):
channel_credentials = grpc.composite_channel_credentials(
channel_credentials, call_credentials)
channel = grpc.secure_channel(target, channel_credentials, ((
'grpc.ssl_target_name_override',
args.server_host_override,
),))
channel_opts = None
if args.server_host_override:
channel_opts = ((
'grpc.ssl_target_name_override',
args.server_host_override,
),)
channel = grpc.secure_channel(target, channel_credentials, channel_opts)
else:
channel = grpc.insecure_channel(target)
if args.test_case == "unimplemented_service":

@ -144,7 +144,7 @@ class _ProtoBeforeGrpcProtocStyle(object):
absolute_proto_file_names)
pb2_grpc_protoc_exit_code = _protoc(
proto_path, None, 'grpc_2_0', python_out, absolute_proto_file_names)
return pb2_protoc_exit_code, pb2_grpc_protoc_exit_code,
return pb2_protoc_exit_code, pb2_grpc_protoc_exit_code
class _GrpcBeforeProtoProtocStyle(object):
@ -160,7 +160,7 @@ class _GrpcBeforeProtoProtocStyle(object):
proto_path, None, 'grpc_2_0', python_out, absolute_proto_file_names)
pb2_protoc_exit_code = _protoc(proto_path, python_out, None, None,
absolute_proto_file_names)
return pb2_grpc_protoc_exit_code, pb2_protoc_exit_code,
return pb2_grpc_protoc_exit_code, pb2_protoc_exit_code
_PROTOC_STYLES = (
@ -243,9 +243,9 @@ class _Test(six.with_metaclass(abc.ABCMeta, unittest.TestCase)):
def _services_modules(self):
if self.PROTOC_STYLE.grpc_in_pb2_expected():
return self._services_pb2, self._services_pb2_grpc,
return self._services_pb2, self._services_pb2_grpc
else:
return self._services_pb2_grpc,
return (self._services_pb2_grpc,)
def test_imported_attributes(self):
self._protoc()

@ -223,7 +223,7 @@ def _CreateService(payload_pb2, responses_pb2, service_pb2):
server.start()
channel = implementations.insecure_channel('localhost', port)
stub = getattr(service_pb2, STUB_FACTORY_IDENTIFIER)(channel)
yield servicer_methods, stub,
yield servicer_methods, stub
server.stop(0)

@ -180,7 +180,7 @@ class StreamingSyncBenchmarkClient(BenchmarkClient):
self._streams = [
_SyncStream(self._stub, self._generic, self._request,
self._handle_response)
for _ in xrange(config.outstanding_rpcs_per_channel)
for _ in range(config.outstanding_rpcs_per_channel)
]
self._curr_stream = 0

@ -77,7 +77,7 @@ class ClosedLoopClientRunner(ClientRunner):
def start(self):
self._is_running = True
self._client.start()
for _ in xrange(self._request_count):
for _ in range(self._request_count):
self._client.send_request()
def stop(self):

@ -109,7 +109,7 @@ class WorkerServer(worker_service_pb2_grpc.WorkerServiceServicer):
start_time = time.time()
# Create a client for each channel
for i in xrange(config.client_channels):
for i in range(config.client_channels):
server = config.server_targets[i % len(config.server_targets)]
runner = self._create_client_runner(server, config, qps_data)
client_runners.append(runner)
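The `xrange` → `range` substitutions across these benchmark files are Python 3 compatibility fixes: `xrange` does not exist on Python 3, where `range` is already a lazy sequence. When both interpreters must be supported, one common pattern (assuming the `six` dependency already used elsewhere in these tests) is:

```python
from six.moves import range  # maps to xrange on Python 2

for _ in range(3):
    pass  # iterates lazily on both interpreters
```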

@ -56,8 +56,12 @@ class ReflectionServicerTest(unittest.TestCase):
port = self._server.add_insecure_port('[::]:0')
self._server.start()
channel = grpc.insecure_channel('localhost:%d' % port)
self._stub = reflection_pb2_grpc.ServerReflectionStub(channel)
self._channel = grpc.insecure_channel('localhost:%d' % port)
self._stub = reflection_pb2_grpc.ServerReflectionStub(self._channel)
def tearDown(self):
self._server.stop(None)
self._channel.close()
def testFileByName(self):
requests = (

@ -71,7 +71,6 @@ def _args():
'--use_tls', help='Whether to use TLS', default=False, type=bool)
parser.add_argument(
'--server_host_override',
default="foo.test.google.fr",
help='the server host to which to claim to connect',
type=str)
return parser.parse_args()
@ -132,9 +131,9 @@ def run_test(args):
server.start()
for test_server_target in test_server_targets:
for _ in xrange(args.num_channels_per_server):
for _ in range(args.num_channels_per_server):
channel = _get_channel(test_server_target, args)
for _ in xrange(args.num_stubs_per_channel):
for _ in range(args.num_stubs_per_channel):
stub = test_pb2_grpc.TestServiceStub(channel)
runner = test_runner.TestRunner(stub, test_cases, hist,
exception_queue, stop_event)

@ -130,9 +130,9 @@ def _run_stream_stream(stub):
request_pipe = _Pipe()
response_iterator = stub.StreStre(iter(request_pipe))
request_pipe.add(_application_common.STREAM_STREAM_REQUEST)
first_responses = next(response_iterator), next(response_iterator),
first_responses = next(response_iterator), next(response_iterator)
request_pipe.add(_application_common.STREAM_STREAM_REQUEST)
second_responses = next(response_iterator), next(response_iterator),
second_responses = next(response_iterator), next(response_iterator)
request_pipe.close()
try:
next(response_iterator)

@ -57,6 +57,7 @@
"unit._reconnect_test.ReconnectTest",
"unit._resource_exhausted_test.ResourceExhaustedTest",
"unit._rpc_test.RPCTest",
"unit._server_shutdown_test.ServerShutdown",
"unit._server_ssl_cert_config_test.ServerSSLCertConfigFetcherParamsChecks",
"unit._server_ssl_cert_config_test.ServerSSLCertReloadTestCertConfigReuse",
"unit._server_ssl_cert_config_test.ServerSSLCertReloadTestWithClientAuth",

@ -28,6 +28,7 @@ GRPCIO_TESTS_UNIT = [
# TODO(ghostwriternr): To be added later.
# "_server_ssl_cert_config_test.py",
"_server_test.py",
"_server_shutdown_test.py",
"_session_cache_test.py",
]
@ -49,6 +50,11 @@ py_library(
srcs = ["_exit_scenarios.py"],
)
py_library(
name = "_server_shutdown_scenarios",
srcs = ["_server_shutdown_scenarios.py"],
)
py_library(
name = "_thread_pool",
srcs = ["_thread_pool.py"],
@ -70,6 +76,7 @@ py_library(
":resources",
":test_common",
":_exit_scenarios",
":_server_shutdown_scenarios",
":_thread_pool",
":_from_grpc_import_star",
"//src/python/grpcio_tests/tests/unit/framework/common",

@ -101,6 +101,7 @@ class ChannelTest(unittest.TestCase):
def test_secure_channel(self):
channel_credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel('google.com:443', channel_credentials)
channel.close()
if __name__ == '__main__':

@ -71,8 +71,8 @@ class AuthContextTest(unittest.TestCase):
port = server.add_insecure_port('[::]:0')
server.start()
channel = grpc.insecure_channel('localhost:%d' % port)
response = channel.unary_unary(_UNARY_UNARY)(_REQUEST)
with grpc.insecure_channel('localhost:%d' % port) as channel:
response = channel.unary_unary(_UNARY_UNARY)(_REQUEST)
server.stop(None)
auth_data = pickle.loads(response)
@ -98,6 +98,7 @@ class AuthContextTest(unittest.TestCase):
channel_creds,
options=_PROPERTY_OPTIONS)
response = channel.unary_unary(_UNARY_UNARY)(_REQUEST)
channel.close()
server.stop(None)
auth_data = pickle.loads(response)
@ -132,6 +133,7 @@ class AuthContextTest(unittest.TestCase):
options=_PROPERTY_OPTIONS)
response = channel.unary_unary(_UNARY_UNARY)(_REQUEST)
channel.close()
server.stop(None)
auth_data = pickle.loads(response)
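Many of the test hunks that follow share one shape: every `grpc.insecure_channel`/`grpc.secure_channel` now gets a matching `close()`, either explicitly in `tearDown` or via the channel's context-manager support, instead of relying on garbage collection for teardown. The two equivalent forms, as a small sketch:

```python
import grpc

# Explicit close, typically paired with setUp/tearDown in tests.
channel = grpc.insecure_channel('localhost:50051')
try:
    multi_callable = channel.unary_unary('/test/Method')
finally:
    channel.close()

# Context-manager form; __exit__ calls close() for us.
with grpc.insecure_channel('localhost:50051') as channel:
    multi_callable = channel.unary_unary('/test/Method')
```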

@ -75,6 +75,8 @@ class ChannelConnectivityTest(unittest.TestCase):
channel.unsubscribe(callback.update)
fifth_connectivities = callback.connectivities()
channel.close()
self.assertSequenceEqual((grpc.ChannelConnectivity.IDLE,),
first_connectivities)
self.assertNotIn(grpc.ChannelConnectivity.READY, second_connectivities)
@ -108,7 +110,8 @@ class ChannelConnectivityTest(unittest.TestCase):
_ready_in_connectivities)
second_callback.block_until_connectivities_satisfy(
_ready_in_connectivities)
del channel
channel.close()
server.stop(None)
self.assertSequenceEqual((grpc.ChannelConnectivity.IDLE,),
first_connectivities)
@ -139,6 +142,7 @@ class ChannelConnectivityTest(unittest.TestCase):
callback.block_until_connectivities_satisfy(
_last_connectivity_is_not_ready)
channel.unsubscribe(callback.update)
channel.close()
self.assertFalse(thread_pool.was_used())

@ -60,6 +60,8 @@ class ChannelReadyFutureTest(unittest.TestCase):
self.assertTrue(ready_future.done())
self.assertFalse(ready_future.running())
channel.close()
def test_immediately_connectable_channel_connectivity(self):
thread_pool = _thread_pool.RecordingThreadPool(max_workers=None)
server = grpc.server(thread_pool, options=(('grpc.so_reuseport', 0),))
@ -84,6 +86,9 @@ class ChannelReadyFutureTest(unittest.TestCase):
self.assertFalse(ready_future.running())
self.assertFalse(thread_pool.was_used())
channel.close()
server.stop(None)
if __name__ == '__main__':
logging.basicConfig()

@ -77,6 +77,9 @@ class CompressionTest(unittest.TestCase):
self._port = self._server.add_insecure_port('[::]:0')
self._server.start()
def tearDown(self):
self._server.stop(None)
def testUnary(self):
request = b'\x00' * 100
@ -102,6 +105,7 @@ class CompressionTest(unittest.TestCase):
response = multi_callable(
request, metadata=[('grpc-internal-encoding-request', 'gzip')])
self.assertEqual(request, response)
compressed_channel.close()
def testStreaming(self):
request = b'\x00' * 100
@ -115,6 +119,7 @@ class CompressionTest(unittest.TestCase):
call = multi_callable(iter([request] * test_constants.STREAM_LENGTH))
for response in call:
self.assertEqual(request, response)
compressed_channel.close()
if __name__ == '__main__':

@ -96,6 +96,7 @@ class EmptyMessageTest(unittest.TestCase):
def tearDown(self):
self._server.stop(0)
self._channel.close()
def testUnaryUnary(self):
response = self._channel.unary_unary(_UNARY_UNARY)(_REQUEST)

@ -71,6 +71,7 @@ class ErrorMessageEncodingTest(unittest.TestCase):
def tearDown(self):
self._server.stop(0)
self._channel.close()
def testMessageEncoding(self):
for message in _UNICODE_ERROR_MESSAGES:

@ -14,7 +14,7 @@
_BEFORE_IMPORT = tuple(globals())
from grpc import * # pylint: disable=wildcard-import
from grpc import * # pylint: disable=wildcard-import,unused-wildcard-import
_AFTER_IMPORT = tuple(globals())

@ -337,6 +337,7 @@ class InterceptorTest(unittest.TestCase):
def tearDown(self):
self._server.stop(None)
self._server_pool.shutdown(wait=True)
self._channel.close()
def testTripleRequestMessagesClientInterceptor(self):

@ -62,6 +62,9 @@ class InvalidMetadataTest(unittest.TestCase):
self._stream_unary = _stream_unary_multi_callable(self._channel)
self._stream_stream = _stream_stream_multi_callable(self._channel)
def tearDown(self):
self._channel.close()
def testUnaryRequestBlockingUnaryResponse(self):
request = b'\x07\x08'
metadata = (('InVaLiD', 'UnaryRequestBlockingUnaryResponse'),)

@ -215,6 +215,7 @@ class InvocationDefectsTest(unittest.TestCase):
def tearDown(self):
self._server.stop(0)
self._channel.close()
def testIterableStreamRequestBlockingUnaryResponse(self):
requests = [b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)]

@ -198,8 +198,8 @@ class MetadataCodeDetailsTest(unittest.TestCase):
port = self._server.add_insecure_port('[::]:0')
self._server.start()
channel = grpc.insecure_channel('localhost:{}'.format(port))
self._unary_unary = channel.unary_unary(
self._channel = grpc.insecure_channel('localhost:{}'.format(port))
self._unary_unary = self._channel.unary_unary(
'/'.join((
'',
_SERVICE,
@ -208,17 +208,17 @@ class MetadataCodeDetailsTest(unittest.TestCase):
request_serializer=_REQUEST_SERIALIZER,
response_deserializer=_RESPONSE_DESERIALIZER,
)
self._unary_stream = channel.unary_stream('/'.join((
self._unary_stream = self._channel.unary_stream('/'.join((
'',
_SERVICE,
_UNARY_STREAM,
)),)
self._stream_unary = channel.stream_unary('/'.join((
self._stream_unary = self._channel.stream_unary('/'.join((
'',
_SERVICE,
_STREAM_UNARY,
)),)
self._stream_stream = channel.stream_stream(
self._stream_stream = self._channel.stream_stream(
'/'.join((
'',
_SERVICE,
@ -228,6 +228,10 @@ class MetadataCodeDetailsTest(unittest.TestCase):
response_deserializer=_RESPONSE_DESERIALIZER,
)
def tearDown(self):
self._server.stop(None)
self._channel.close()
def testSuccessfulUnaryUnary(self):
self._servicer.set_details(_DETAILS)

@ -187,13 +187,14 @@ class MetadataFlagsTest(unittest.TestCase):
def test_call_wait_for_ready_default(self):
for perform_call in _ALL_CALL_CASES:
self.check_connection_does_failfast(perform_call,
create_dummy_channel())
with create_dummy_channel() as channel:
self.check_connection_does_failfast(perform_call, channel)
def test_call_wait_for_ready_disabled(self):
for perform_call in _ALL_CALL_CASES:
self.check_connection_does_failfast(
perform_call, create_dummy_channel(), wait_for_ready=False)
with create_dummy_channel() as channel:
self.check_connection_does_failfast(
perform_call, channel, wait_for_ready=False)
def test_call_wait_for_ready_enabled(self):
# To test the wait mechanism, Python thread is required to make
@ -210,16 +211,16 @@ class MetadataFlagsTest(unittest.TestCase):
wg.done()
def test_call(perform_call):
try:
channel = grpc.insecure_channel(addr)
channel.subscribe(wait_for_transient_failure)
perform_call(channel, wait_for_ready=True)
except BaseException as e: # pylint: disable=broad-except
# If the call failed, the thread would be destroyed. The channel
# object can be collected before calling the callback, which
# will result in a deadlock.
wg.done()
unhandled_exceptions.put(e, True)
with grpc.insecure_channel(addr) as channel:
try:
channel.subscribe(wait_for_transient_failure)
perform_call(channel, wait_for_ready=True)
except BaseException as e: # pylint: disable=broad-except
# If the call failed, the thread would be destroyed. The
# channel object can be collected before calling the
# callback, which will result in a deadlock.
wg.done()
unhandled_exceptions.put(e, True)
test_threads = []
for perform_call in _ALL_CALL_CASES:

@ -186,6 +186,7 @@ class MetadataTest(unittest.TestCase):
def tearDown(self):
self._server.stop(0)
self._channel.close()
def testUnaryUnary(self):
multi_callable = self._channel.unary_unary(_UNARY_UNARY)

@ -98,6 +98,8 @@ class ReconnectTest(unittest.TestCase):
server.add_insecure_port('[::]:{}'.format(port))
server.start()
self.assertEqual(_RESPONSE, multi_callable(_REQUEST))
server.stop(None)
channel.close()
if __name__ == '__main__':

@ -148,6 +148,7 @@ class ResourceExhaustedTest(unittest.TestCase):
def tearDown(self):
self._server.stop(0)
self._channel.close()
def testUnaryUnary(self):
multi_callable = self._channel.unary_unary(_UNARY_UNARY)

@ -193,6 +193,7 @@ class RPCTest(unittest.TestCase):
def tearDown(self):
self._server.stop(None)
self._channel.close()
def testUnrecognizedMethod(self):
request = b'abc'

@ -0,0 +1,97 @@
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines a number of module-scope gRPC scenarios to test server shutdown."""
import argparse
import os
import threading
import time
import logging
import grpc
from tests.unit import test_common
from concurrent import futures
from six.moves import queue
WAIT_TIME = 1000
REQUEST = b'request'
RESPONSE = b'response'
SERVER_RAISES_EXCEPTION = 'server_raises_exception'
SERVER_DEALLOCATED = 'server_deallocated'
SERVER_FORK_CAN_EXIT = 'server_fork_can_exit'
FORK_EXIT = '/test/ForkExit'
def fork_and_exit(request, servicer_context):
pid = os.fork()
if pid == 0:
os._exit(0)
return RESPONSE
class GenericHandler(grpc.GenericRpcHandler):
def service(self, handler_call_details):
if handler_call_details.method == FORK_EXIT:
return grpc.unary_unary_rpc_method_handler(fork_and_exit)
else:
return None
def run_server(port_queue):
server = test_common.test_server()
port = server.add_insecure_port('[::]:0')
port_queue.put(port)
server.add_generic_rpc_handlers((GenericHandler(),))
server.start()
# threading.Event.wait() does not exhibit the bug identified in
# https://github.com/grpc/grpc/issues/17093, sleep instead
time.sleep(WAIT_TIME)
def run_test(args):
if args.scenario == SERVER_RAISES_EXCEPTION:
server = test_common.test_server()
server.start()
raise Exception()
elif args.scenario == SERVER_DEALLOCATED:
server = test_common.test_server()
server.start()
server.__del__()
while server._state.stage != grpc._server._ServerStage.STOPPED:
pass
elif args.scenario == SERVER_FORK_CAN_EXIT:
port_queue = queue.Queue()
thread = threading.Thread(target=run_server, args=(port_queue,))
thread.daemon = True
thread.start()
port = port_queue.get()
channel = grpc.insecure_channel('localhost:%d' % port)
multi_callable = channel.unary_unary(FORK_EXIT)
result, call = multi_callable.with_call(REQUEST, wait_for_ready=True)
os.wait()
else:
raise ValueError('unknown test scenario')
if __name__ == '__main__':
logging.basicConfig()
parser = argparse.ArgumentParser()
parser.add_argument('scenario', type=str)
args = parser.parse_args()
run_test(args)

@ -0,0 +1,90 @@
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests clean shutdown of server on various interpreter exit conditions.
The tests in this module spawn a subprocess for each test case, the
test is considered successful if it doesn't hang/timeout.
"""
import atexit
import os
import subprocess
import sys
import threading
import unittest
import logging
from tests.unit import _server_shutdown_scenarios
SCENARIO_FILE = os.path.abspath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'_server_shutdown_scenarios.py'))
INTERPRETER = sys.executable
BASE_COMMAND = [INTERPRETER, SCENARIO_FILE]
processes = []
process_lock = threading.Lock()
# Make sure we attempt to clean up any
# processes we may have left running
def cleanup_processes():
with process_lock:
for process in processes:
try:
process.kill()
except Exception: # pylint: disable=broad-except
pass
atexit.register(cleanup_processes)
def wait(process):
with process_lock:
processes.append(process)
process.wait()
class ServerShutdown(unittest.TestCase):
# Currently we shut down a server (if possible) after the Python server
# instance is garbage collected. This behavior may change in the future.
def test_deallocated_server_stops(self):
process = subprocess.Popen(
BASE_COMMAND + [_server_shutdown_scenarios.SERVER_DEALLOCATED],
stdout=sys.stdout,
stderr=sys.stderr)
wait(process)
def test_server_exception_exits(self):
process = subprocess.Popen(
BASE_COMMAND + [_server_shutdown_scenarios.SERVER_RAISES_EXCEPTION],
stdout=sys.stdout,
stderr=sys.stderr)
wait(process)
@unittest.skipIf(os.name == 'nt', 'fork not supported on windows')
def test_server_fork_can_exit(self):
process = subprocess.Popen(
BASE_COMMAND + [_server_shutdown_scenarios.SERVER_FORK_CAN_EXIT],
stdout=sys.stdout,
stderr=sys.stderr)
wait(process)
if __name__ == '__main__':
logging.basicConfig()
unittest.main(verbosity=2)

@ -0,0 +1,61 @@
#!/usr/bin/env ruby
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require_relative './end2end_common'
# Test client. Sends RPCs as normal, but the process also has signal handlers
class SigHandlingClientController < ClientControl::ClientController::Service
def initialize(stub)
@stub = stub
end
def do_echo_rpc(req, _)
response = @stub.echo(Echo::EchoRequest.new(request: req.request))
fail 'bad response' unless response.response == req.request
ClientControl::Void.new
end
end
def main
client_control_port = ''
server_port = ''
OptionParser.new do |opts|
opts.on('--client_control_port=P', String) do |p|
client_control_port = p
end
opts.on('--server_port=P', String) do |p|
server_port = p
end
end.parse!
# Allow a few seconds to be safe.
srv = new_rpc_server_for_testing
srv.add_http2_port("0.0.0.0:#{client_control_port}",
:this_port_is_insecure)
stub = Echo::EchoServer::Stub.new("localhost:#{server_port}",
:this_channel_is_insecure)
control_service = SigHandlingClientController.new(stub)
srv.handle(control_service)
server_thread = Thread.new do
srv.run_till_terminated_or_interrupted(['int'])
end
srv.wait_till_running
# send a first RPC to notify the parent process that we've started
stub.echo(Echo::EchoRequest.new(request: 'client/child started'))
server_thread.join
end
main

@ -0,0 +1,83 @@
#!/usr/bin/env ruby
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# smoke test for a grpc-using app that receives and
# handles process-ending signals
require_relative './end2end_common'
# A service that calls back its received_rpc_callback
# upon receiving an RPC. Used for synchronization/waiting
# for the child process to start.
class ClientStartedService < Echo::EchoServer::Service
def initialize(received_rpc_callback)
@received_rpc_callback = received_rpc_callback
end
def echo(echo_req, _)
@received_rpc_callback.call unless @received_rpc_callback.nil?
@received_rpc_callback = nil
Echo::EchoReply.new(response: echo_req.request)
end
end
def main
STDERR.puts 'start server'
client_started = false
client_started_mu = Mutex.new
client_started_cv = ConditionVariable.new
received_rpc_callback = proc do
client_started_mu.synchronize do
client_started = true
client_started_cv.signal
end
end
client_started_service = ClientStartedService.new(received_rpc_callback)
server_runner = ServerRunner.new(client_started_service)
server_port = server_runner.run
STDERR.puts 'start client'
control_stub, client_pid = start_client('graceful_sig_handling_client.rb', server_port)
client_started_mu.synchronize do
client_started_cv.wait(client_started_mu) until client_started
end
control_stub.do_echo_rpc(
ClientControl::DoEchoRpcRequest.new(request: 'hello'))
STDERR.puts 'killing client'
Process.kill('SIGINT', client_pid)
Process.wait(client_pid)
client_exit_status = $CHILD_STATUS
if client_exit_status.exited?
if client_exit_status.exitstatus != 0
STDERR.puts 'Client did not close gracefully'
exit(1)
end
else
STDERR.puts 'Client did not close gracefully'
exit(1)
end
STDERR.puts 'Client ended gracefully'
# no need to call cleanup, client should already be dead
server_runner.stop
end
main
