diff --git a/.gitignore b/.gitignore
index bd9a5139fe2..383fc25b360 100644
--- a/.gitignore
+++ b/.gitignore
@@ -136,7 +136,7 @@ bm_diff_old/
bm_*.json
# cmake build files
-/cmake/build
+**/cmake/build/
# Visual Studio Code artifacts
.vscode/*
diff --git a/BUILD b/BUILD
index 81de8203a03..98622fd7ecc 100644
--- a/BUILD
+++ b/BUILD
@@ -724,6 +724,7 @@ grpc_cc_library(
"src/core/lib/iomgr/endpoint_pair_windows.cc",
"src/core/lib/iomgr/error.cc",
"src/core/lib/iomgr/error_cfstream.cc",
+ "src/core/lib/iomgr/ev_apple.cc",
"src/core/lib/iomgr/ev_epoll1_linux.cc",
"src/core/lib/iomgr/ev_epollex_linux.cc",
"src/core/lib/iomgr/ev_poll_posix.cc",
@@ -885,6 +886,7 @@ grpc_cc_library(
"src/core/lib/iomgr/error.h",
"src/core/lib/iomgr/error_cfstream.h",
"src/core/lib/iomgr/error_internal.h",
+ "src/core/lib/iomgr/ev_apple.h",
"src/core/lib/iomgr/ev_epoll1_linux.h",
"src/core/lib/iomgr/ev_epollex_linux.h",
"src/core/lib/iomgr/ev_poll_posix.h",
@@ -989,7 +991,6 @@ grpc_cc_library(
],
language = "c++",
public_hdrs = GRPC_PUBLIC_HDRS,
- use_cfstream = True,
deps = [
"eventmanager_libuv",
"gpr_base",
diff --git a/BUILD.gn b/BUILD.gn
index 129ad2b2639..bc56c6e571f 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -604,6 +604,8 @@ config("grpc_config") {
"src/core/lib/iomgr/error_cfstream.cc",
"src/core/lib/iomgr/error_cfstream.h",
"src/core/lib/iomgr/error_internal.h",
+ "src/core/lib/iomgr/ev_apple.cc",
+ "src/core/lib/iomgr/ev_apple.h",
"src/core/lib/iomgr/ev_epoll1_linux.cc",
"src/core/lib/iomgr/ev_epoll1_linux.h",
"src/core/lib/iomgr/ev_epollex_linux.cc",
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 27bf340b65a..6c9fed70d72 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1512,6 +1512,7 @@ add_library(grpc
src/core/lib/iomgr/endpoint_pair_windows.cc
src/core/lib/iomgr/error.cc
src/core/lib/iomgr/error_cfstream.cc
+ src/core/lib/iomgr/ev_apple.cc
src/core/lib/iomgr/ev_epoll1_linux.cc
src/core/lib/iomgr/ev_epollex_linux.cc
src/core/lib/iomgr/ev_poll_posix.cc
@@ -2165,6 +2166,7 @@ add_library(grpc_unsecure
src/core/lib/iomgr/endpoint_pair_windows.cc
src/core/lib/iomgr/error.cc
src/core/lib/iomgr/error_cfstream.cc
+ src/core/lib/iomgr/ev_apple.cc
src/core/lib/iomgr/ev_epoll1_linux.cc
src/core/lib/iomgr/ev_epollex_linux.cc
src/core/lib/iomgr/ev_poll_posix.cc
diff --git a/Makefile b/Makefile
index d6c8cd19efa..c5bf49173e7 100644
--- a/Makefile
+++ b/Makefile
@@ -3837,6 +3837,7 @@ LIBGRPC_SRC = \
src/core/lib/iomgr/endpoint_pair_windows.cc \
src/core/lib/iomgr/error.cc \
src/core/lib/iomgr/error_cfstream.cc \
+ src/core/lib/iomgr/ev_apple.cc \
src/core/lib/iomgr/ev_epoll1_linux.cc \
src/core/lib/iomgr/ev_epollex_linux.cc \
src/core/lib/iomgr/ev_poll_posix.cc \
@@ -4464,6 +4465,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/iomgr/endpoint_pair_windows.cc \
src/core/lib/iomgr/error.cc \
src/core/lib/iomgr/error_cfstream.cc \
+ src/core/lib/iomgr/ev_apple.cc \
src/core/lib/iomgr/ev_epoll1_linux.cc \
src/core/lib/iomgr/ev_epollex_linux.cc \
src/core/lib/iomgr/ev_poll_posix.cc \
diff --git a/build_autogenerated.yaml b/build_autogenerated.yaml
index 30dba593d4f..fb63a25696c 100644
--- a/build_autogenerated.yaml
+++ b/build_autogenerated.yaml
@@ -566,6 +566,7 @@ libs:
- src/core/lib/iomgr/error.h
- src/core/lib/iomgr/error_cfstream.h
- src/core/lib/iomgr/error_internal.h
+ - src/core/lib/iomgr/ev_apple.h
- src/core/lib/iomgr/ev_epoll1_linux.h
- src/core/lib/iomgr/ev_epollex_linux.h
- src/core/lib/iomgr/ev_poll_posix.h
@@ -938,6 +939,7 @@ libs:
- src/core/lib/iomgr/endpoint_pair_windows.cc
- src/core/lib/iomgr/error.cc
- src/core/lib/iomgr/error_cfstream.cc
+ - src/core/lib/iomgr/ev_apple.cc
- src/core/lib/iomgr/ev_epoll1_linux.cc
- src/core/lib/iomgr/ev_epollex_linux.cc
- src/core/lib/iomgr/ev_poll_posix.cc
@@ -1465,6 +1467,7 @@ libs:
- src/core/lib/iomgr/error.h
- src/core/lib/iomgr/error_cfstream.h
- src/core/lib/iomgr/error_internal.h
+ - src/core/lib/iomgr/ev_apple.h
- src/core/lib/iomgr/ev_epoll1_linux.h
- src/core/lib/iomgr/ev_epollex_linux.h
- src/core/lib/iomgr/ev_poll_posix.h
@@ -1769,6 +1772,7 @@ libs:
- src/core/lib/iomgr/endpoint_pair_windows.cc
- src/core/lib/iomgr/error.cc
- src/core/lib/iomgr/error_cfstream.cc
+ - src/core/lib/iomgr/ev_apple.cc
- src/core/lib/iomgr/ev_epoll1_linux.cc
- src/core/lib/iomgr/ev_epollex_linux.cc
- src/core/lib/iomgr/ev_poll_posix.cc
diff --git a/config.m4 b/config.m4
index 6b450315f4f..b4013a02fbd 100644
--- a/config.m4
+++ b/config.m4
@@ -286,6 +286,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/iomgr/endpoint_pair_windows.cc \
src/core/lib/iomgr/error.cc \
src/core/lib/iomgr/error_cfstream.cc \
+ src/core/lib/iomgr/ev_apple.cc \
src/core/lib/iomgr/ev_epoll1_linux.cc \
src/core/lib/iomgr/ev_epollex_linux.cc \
src/core/lib/iomgr/ev_poll_posix.cc \
diff --git a/config.w32 b/config.w32
index 59cdbdb1ce4..c0b062aafea 100644
--- a/config.w32
+++ b/config.w32
@@ -255,6 +255,7 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\iomgr\\endpoint_pair_windows.cc " +
"src\\core\\lib\\iomgr\\error.cc " +
"src\\core\\lib\\iomgr\\error_cfstream.cc " +
+ "src\\core\\lib\\iomgr\\ev_apple.cc " +
"src\\core\\lib\\iomgr\\ev_epoll1_linux.cc " +
"src\\core\\lib\\iomgr\\ev_epollex_linux.cc " +
"src\\core\\lib\\iomgr\\ev_poll_posix.cc " +
diff --git a/examples/cpp/README.md b/examples/cpp/README.md
index 0e358bf9a22..a0ff629b1a0 100644
--- a/examples/cpp/README.md
+++ b/examples/cpp/README.md
@@ -1,44 +1,13 @@
-# gRPC in 3 minutes (C++)
+# gRPC C++ Examples
-## Installation
+- **[Hello World][]!** Eager to run your first gRPC example? You'll find
+ instructions for building gRPC and running a simple "Hello World" app in [Quick Start][].
+- **[Route Guide][].** For a basic tutorial on gRPC see [gRPC Basics][].
-To install gRPC on your system, follow the instructions to build from source
-[here](../../BUILDING.md). This also installs the protocol buffer compiler
-`protoc` (if you don't have it already), and the C++ gRPC plugin for `protoc`.
+For information about the other examples in this directory, see their respective
+README files.
-## Hello C++ gRPC!
-
-Here's how to build and run the C++ implementation of the [Hello
-World](../protos/helloworld.proto) example used in [Getting started](..).
-
-### Client and server implementations
-
-The client implementation is at [greeter_client.cc](helloworld/greeter_client.cc).
-
-The server implementation is at [greeter_server.cc](helloworld/greeter_server.cc).
-
-### Try it!
-Build client and server:
-
-```sh
-$ make
-```
-
-Run the server, which will listen on port 50051:
-
-```sh
-$ ./greeter_server
-```
-
-Run the client (in a different terminal):
-
-```sh
-$ ./greeter_client
-```
-
-If things go smoothly, you will see the "Greeter received: Hello world" in the
-client side output.
-
-## Tutorial
-
-You can find a more detailed tutorial in [gRPC Basics: C++](cpptutorial.md)
+[gRPC Basics]: https://grpc.io/docs/tutorials/basic/cpp
+[Hello World]: helloworld
+[Quick Start]: https://grpc.io/docs/quickstart/cpp
+[Route Guide]: route_guide
diff --git a/examples/cpp/cpptutorial.md b/examples/cpp/cpptutorial.md
deleted file mode 100644
index c40676de134..00000000000
--- a/examples/cpp/cpptutorial.md
+++ /dev/null
@@ -1,488 +0,0 @@
-# gRPC Basics: C++
-
-This tutorial provides a basic C++ programmer's introduction to working with
-gRPC. By walking through this example you'll learn how to:
-
-- Define a service in a `.proto` file.
-- Generate server and client code using the protocol buffer compiler.
-- Use the C++ gRPC API to write a simple client and server for your service.
-
-It assumes that you are familiar with
-[protocol buffers](https://developers.google.com/protocol-buffers/docs/overview).
-Note that the example in this tutorial uses the proto3 version of the protocol
-buffers language, which is currently in alpha release: you can find out more in
-the [proto3 language guide](https://developers.google.com/protocol-buffers/docs/proto3)
-and see the [release notes](https://github.com/google/protobuf/releases) for the
-new version in the protocol buffers Github repository.
-
-## Why use gRPC?
-
-Our example is a simple route mapping application that lets clients get
-information about features on their route, create a summary of their route, and
-exchange route information such as traffic updates with the server and other
-clients.
-
-With gRPC we can define our service once in a `.proto` file and implement clients
-and servers in any of gRPC's supported languages, which in turn can be run in
-environments ranging from servers inside Google to your own tablet - all the
-complexity of communication between different languages and environments is
-handled for you by gRPC. We also get all the advantages of working with protocol
-buffers, including efficient serialization, a simple IDL, and easy interface
-updating.
-
-## Example code and setup
-
-The example code for our tutorial is in [examples/cpp/route_guide](route_guide).
-You also should have the relevant tools installed to generate the server and
-client interface code - if you don't already, follow the setup instructions in
-[BUILDING.md](../../BUILDING.md).
-
-## Defining the service
-
-Our first step is to define the gRPC *service* and the method *request* and
-*response* types using
-[protocol buffers](https://developers.google.com/protocol-buffers/docs/overview).
-You can see the complete `.proto` file in
-[`examples/protos/route_guide.proto`](../protos/route_guide.proto).
-
-To define a service, you specify a named `service` in your `.proto` file:
-
-```protobuf
-service RouteGuide {
- ...
-}
-```
-
-Then you define `rpc` methods inside your service definition, specifying their
-request and response types. gRPC lets you define four kinds of service method,
-all of which are used in the `RouteGuide` service:
-
-- A *simple RPC* where the client sends a request to the server using the stub
- and waits for a response to come back, just like a normal function call.
-
-```protobuf
- // Obtains the feature at a given position.
- rpc GetFeature(Point) returns (Feature) {}
-```
-
-- A *server-side streaming RPC* where the client sends a request to the server
- and gets a stream to read a sequence of messages back. The client reads from
- the returned stream until there are no more messages. As you can see in our
- example, you specify a server-side streaming method by placing the `stream`
- keyword before the *response* type.
-
-```protobuf
- // Obtains the Features available within the given Rectangle. Results are
- // streamed rather than returned at once (e.g. in a response message with a
- // repeated field), as the rectangle may cover a large area and contain a
- // huge number of features.
- rpc ListFeatures(Rectangle) returns (stream Feature) {}
-```
-
-- A *client-side streaming RPC* where the client writes a sequence of messages
- and sends them to the server, again using a provided stream. Once the client
- has finished writing the messages, it waits for the server to read them all
- and return its response. You specify a client-side streaming method by placing
- the `stream` keyword before the *request* type.
-
-```protobuf
- // Accepts a stream of Points on a route being traversed, returning a
- // RouteSummary when traversal is completed.
- rpc RecordRoute(stream Point) returns (RouteSummary) {}
-```
-
-- A *bidirectional streaming RPC* where both sides send a sequence of messages
- using a read-write stream. The two streams operate independently, so clients
- and servers can read and write in whatever order they like: for example, the
- server could wait to receive all the client messages before writing its
- responses, or it could alternately read a message then write a message, or
- some other combination of reads and writes. The order of messages in each
- stream is preserved. You specify this type of method by placing the `stream`
- keyword before both the request and the response.
-
-```protobuf
- // Accepts a stream of RouteNotes sent while a route is being traversed,
- // while receiving other RouteNotes (e.g. from other users).
- rpc RouteChat(stream RouteNote) returns (stream RouteNote) {}
-```
-
-Our `.proto` file also contains protocol buffer message type definitions for all
-the request and response types used in our service methods - for example, here's
-the `Point` message type:
-
-```protobuf
-// Points are represented as latitude-longitude pairs in the E7 representation
-// (degrees multiplied by 10**7 and rounded to the nearest integer).
-// Latitudes should be in the range +/- 90 degrees and longitude should be in
-// the range +/- 180 degrees (inclusive).
-message Point {
- int32 latitude = 1;
- int32 longitude = 2;
-}
-```
-
-## Generating client and server code
-
-Next we need to generate the gRPC client and server interfaces from our `.proto`
-service definition. We do this using the protocol buffer compiler `protoc` with
-a special gRPC C++ plugin.
-
-For simplicity, we've provided a [Makefile](route_guide/Makefile) that runs
-`protoc` for you with the appropriate plugin, input, and output (if you want to
-run this yourself, make sure you've installed protoc and followed the gRPC code
-[installation instructions](../../BUILDING.md) first):
-
-```shell
-$ make route_guide.grpc.pb.cc route_guide.pb.cc
-```
-
-which actually runs:
-
-```shell
-$ protoc -I ../../protos --grpc_out=. --plugin=protoc-gen-grpc=`which grpc_cpp_plugin` ../../protos/route_guide.proto
-$ protoc -I ../../protos --cpp_out=. ../../protos/route_guide.proto
-```
-
-Running this command generates the following files in your current directory:
-- `route_guide.pb.h`, the header which declares your generated message classes
-- `route_guide.pb.cc`, which contains the implementation of your message classes
-- `route_guide.grpc.pb.h`, the header which declares your generated service
- classes
-- `route_guide.grpc.pb.cc`, which contains the implementation of your service
- classes
-
-These contain:
-- All the protocol buffer code to populate, serialize, and retrieve our request
- and response message types
-- A class called `RouteGuide` that contains
- - a remote interface type (or *stub*) for clients to call with the methods
- defined in the `RouteGuide` service.
- - two abstract interfaces for servers to implement, also with the methods
- defined in the `RouteGuide` service.
-
-
-
-## Creating the server
-
-First let's look at how we create a `RouteGuide` server. If you're only
-interested in creating gRPC clients, you can skip this section and go straight
-to [Creating the client](#client) (though you might find it interesting
-anyway!).
-
-There are two parts to making our `RouteGuide` service do its job:
-- Implementing the service interface generated from our service definition:
- doing the actual "work" of our service.
-- Running a gRPC server to listen for requests from clients and return the
- service responses.
-
-You can find our example `RouteGuide` server in
-[route_guide/route_guide_server.cc](route_guide/route_guide_server.cc). Let's
-take a closer look at how it works.
-
-### Implementing RouteGuide
-
-As you can see, our server has a `RouteGuideImpl` class that implements the
-generated `RouteGuide::Service` interface:
-
-```cpp
-class RouteGuideImpl final : public RouteGuide::Service {
-...
-}
-```
-In this case we're implementing the *synchronous* version of `RouteGuide`, which
-provides our default gRPC server behaviour. It's also possible to implement an
-asynchronous interface, `RouteGuide::AsyncService`, which allows you to further
-customize your server's threading behaviour, though we won't look at this in
-this tutorial.
-
-`RouteGuideImpl` implements all our service methods. Let's look at the simplest
-type first, `GetFeature`, which just gets a `Point` from the client and returns
-the corresponding feature information from its database in a `Feature`.
-
-```cpp
- Status GetFeature(ServerContext* context, const Point* point,
- Feature* feature) override {
- feature->set_name(GetFeatureName(*point, feature_list_));
- feature->mutable_location()->CopyFrom(*point);
- return Status::OK;
- }
-```
-
-The method is passed a context object for the RPC, the client's `Point` protocol
-buffer request, and a `Feature` protocol buffer to fill in with the response
-information. In the method we populate the `Feature` with the appropriate
-information, and then `return` with an `OK` status to tell gRPC that we've
-finished dealing with the RPC and that the `Feature` can be returned to the
-client.
-
-Now let's look at something a bit more complicated - a streaming RPC.
-`ListFeatures` is a server-side streaming RPC, so we need to send back multiple
-`Feature`s to our client.
-
-```cpp
-Status ListFeatures(ServerContext* context, const Rectangle* rectangle,
- ServerWriter<Feature>* writer) override {
- auto lo = rectangle->lo();
- auto hi = rectangle->hi();
- long left = std::min(lo.longitude(), hi.longitude());
- long right = std::max(lo.longitude(), hi.longitude());
- long top = std::max(lo.latitude(), hi.latitude());
- long bottom = std::min(lo.latitude(), hi.latitude());
- for (const Feature& f : feature_list_) {
- if (f.location().longitude() >= left &&
- f.location().longitude() <= right &&
- f.location().latitude() >= bottom &&
- f.location().latitude() <= top) {
- writer->Write(f);
- }
- }
- return Status::OK;
-}
-```
-
-As you can see, instead of getting simple request and response objects in our
-method parameters, this time we get a request object (the `Rectangle` in which
-our client wants to find `Feature`s) and a special `ServerWriter` object. In the
-method, we populate as many `Feature` objects as we need to return, writing them
-to the `ServerWriter` using its `Write()` method. Finally, as in our simple RPC,
-we `return Status::OK` to tell gRPC that we've finished writing responses.
-
-If you look at the client-side streaming method `RecordRoute` you'll see it's
-quite similar, except this time we get a `ServerReader` instead of a request
-object and a single response. We use the `ServerReader`'s `Read()` method to
-repeatedly read in our client's requests to a request object (in this case a
-`Point`) until there are no more messages: the server needs to check the return
-value of `Read()` after each call. If `true`, the stream is still good and it
-can continue reading; if `false` the message stream has ended.
-
-```cpp
-while (stream->Read(&point)) {
- ...//process client input
-}
-```
-Finally, let's look at our bidirectional streaming RPC `RouteChat()`.
-
-```cpp
- Status RouteChat(ServerContext* context,
- ServerReaderWriter<RouteNote, RouteNote>* stream) override {
- std::vector<RouteNote> received_notes;
- RouteNote note;
- while (stream->Read(&note)) {
- for (const RouteNote& n : received_notes) {
- if (n.location().latitude() == note.location().latitude() &&
- n.location().longitude() == note.location().longitude()) {
- stream->Write(n);
- }
- }
- received_notes.push_back(note);
- }
-
- return Status::OK;
- }
-```
-
-This time we get a `ServerReaderWriter` that can be used to read *and* write
-messages. The syntax for reading and writing here is exactly the same as for our
-client-streaming and server-streaming methods. Although each side will always
-get the other's messages in the order they were written, both the client and
-server can read and write in any order — the streams operate completely
-independently.
-
-### Starting the server
-
-Once we've implemented all our methods, we also need to start up a gRPC server
-so that clients can actually use our service. The following snippet shows how we
-do this for our `RouteGuide` service:
-
-```cpp
-void RunServer(const std::string& db_path) {
- std::string server_address("0.0.0.0:50051");
- RouteGuideImpl service(db_path);
-
- ServerBuilder builder;
- builder.AddListeningPort(server_address, grpc::InsecureServerCredentials());
- builder.RegisterService(&service);
- std::unique_ptr<Server> server(builder.BuildAndStart());
- std::cout << "Server listening on " << server_address << std::endl;
- server->Wait();
-}
-```
-As you can see, we build and start our server using a `ServerBuilder`. To do this, we:
-
-1. Create an instance of our service implementation class `RouteGuideImpl`.
-1. Create an instance of the factory `ServerBuilder` class.
-1. Specify the address and port we want to use to listen for client requests
- using the builder's `AddListeningPort()` method.
-1. Register our service implementation with the builder.
-1. Call `BuildAndStart()` on the builder to create and start an RPC server for
- our service.
-1. Call `Wait()` on the server to do a blocking wait until process is killed or
- `Shutdown()` is called.
-
-
-## Creating the client
-
-In this section, we'll look at creating a C++ client for our `RouteGuide`
-service. You can see our complete example client code in
-[route_guide/route_guide_client.cc](route_guide/route_guide_client.cc).
-
-### Creating a stub
-
-To call service methods, we first need to create a *stub*.
-
-First we need to create a gRPC *channel* for our stub, specifying the server
-address and port we want to connect to without SSL:
-
-```cpp
-grpc::CreateChannel("localhost:50051", grpc::InsecureChannelCredentials());
-```
-
-Now we can use the channel to create our stub using the `NewStub` method
-provided in the `RouteGuide` class we generated from our `.proto`.
-
-```cpp
-public:
- RouteGuideClient(std::shared_ptr<Channel> channel, const std::string& db)
- : stub_(RouteGuide::NewStub(channel)) {
- ...
- }
-```
-
-### Calling service methods
-
-Now let's look at how we call our service methods. Note that in this tutorial
-we're calling the *blocking/synchronous* versions of each method: this means
-that the RPC call waits for the server to respond, and will either return a
-response or raise an exception.
-
-#### Simple RPC
-
-Calling the simple RPC `GetFeature` is nearly as straightforward as calling a
-local method.
-
-```cpp
- Point point;
- Feature feature;
- point = MakePoint(409146138, -746188906);
- GetOneFeature(point, &feature);
-
-...
-
- bool GetOneFeature(const Point& point, Feature* feature) {
- ClientContext context;
- Status status = stub_->GetFeature(&context, point, feature);
- ...
- }
-```
-
-As you can see, we create and populate a request protocol buffer object (in our
-case `Point`), and create a response protocol buffer object for the server to
-fill in. We also create a `ClientContext` object for our call - you can
-optionally set RPC configuration values on this object, such as deadlines,
-though for now we'll use the default settings. Note that you cannot reuse this
-object between calls. Finally, we call the method on the stub, passing it the
-context, request, and response. If the method returns `OK`, then we can read the
-response information from the server from our response object.
-
-```cpp
-std::cout << "Found feature called " << feature->name() << " at "
- << feature->location().latitude()/kCoordFactor_ << ", "
- << feature->location().longitude()/kCoordFactor_ << std::endl;
-```
-
-#### Streaming RPCs
-
-Now let's look at our streaming methods. If you've already read [Creating the
-server](#server) some of this may look very familiar - streaming RPCs are
-implemented in a similar way on both sides. Here's where we call the server-side
-streaming method `ListFeatures`, which returns a stream of geographical
-`Feature`s:
-
-```cpp
-std::unique_ptr<ClientReader<Feature> > reader(
- stub_->ListFeatures(&context, rect));
-while (reader->Read(&feature)) {
- std::cout << "Found feature called "
- << feature.name() << " at "
- << feature.location().latitude()/kCoordFactor_ << ", "
- << feature.location().longitude()/kCoordFactor_ << std::endl;
-}
-Status status = reader->Finish();
-```
-
-Instead of passing the method a context, request, and response, we pass it a
-context and request and get a `ClientReader` object back. The client can use the
-`ClientReader` to read the server's responses. We use the `ClientReader`'s
-`Read()` method to repeatedly read in the server's responses to a response
-protocol buffer object (in this case a `Feature`) until there are no more
-messages: the client needs to check the return value of `Read()` after each
-call. If `true`, the stream is still good and it can continue reading; if
-`false` the message stream has ended. Finally, we call `Finish()` on the stream
-to complete the call and get our RPC status.
-
-The client-side streaming method `RecordRoute` is similar, except there we pass
-the method a context and response object and get back a `ClientWriter`.
-
-```cpp
- std::unique_ptr<ClientWriter<Point> > writer(
- stub_->RecordRoute(&context, &stats));
- for (int i = 0; i < kPoints; i++) {
- const Feature& f = feature_list_[feature_distribution(generator)];
- std::cout << "Visiting point "
- << f.location().latitude()/kCoordFactor_ << ", "
- << f.location().longitude()/kCoordFactor_ << std::endl;
- if (!writer->Write(f.location())) {
- // Broken stream.
- break;
- }
- std::this_thread::sleep_for(std::chrono::milliseconds(
- delay_distribution(generator)));
- }
- writer->WritesDone();
- Status status = writer->Finish();
- if (status.IsOk()) {
- std::cout << "Finished trip with " << stats.point_count() << " points\n"
- << "Passed " << stats.feature_count() << " features\n"
- << "Travelled " << stats.distance() << " meters\n"
- << "It took " << stats.elapsed_time() << " seconds"
- << std::endl;
- } else {
- std::cout << "RecordRoute rpc failed." << std::endl;
- }
-```
-
-Once we've finished writing our client's requests to the stream using `Write()`,
-we need to call `WritesDone()` on the stream to let gRPC know that we've
-finished writing, then `Finish()` to complete the call and get our RPC status.
-If the status is `OK`, our response object that we initially passed to
-`RecordRoute()` will be populated with the server's response.
-
-Finally, let's look at our bidirectional streaming RPC `RouteChat()`. In this
-case, we just pass a context to the method and get back a `ClientReaderWriter`,
-which we can use to both write and read messages.
-
-```cpp
-std::shared_ptr<ClientReaderWriter<RouteNote, RouteNote> > stream(
- stub_->RouteChat(&context));
-```
-
-The syntax for reading and writing here is exactly the same as for our
-client-streaming and server-streaming methods. Although each side will always
-get the other's messages in the order they were written, both the client and
-server can read and write in any order — the streams operate completely
-independently.
-
-## Try it out!
-
-Build client and server:
-```shell
-$ make
-```
-Run the server, which will listen on port 50051:
-```shell
-$ ./route_guide_server
-```
-Run the client (in a different terminal):
-```shell
-$ ./route_guide_client
-```
diff --git a/examples/cpp/helloworld/README.md b/examples/cpp/helloworld/README.md
index 71718645645..e13c83281a7 100644
--- a/examples/cpp/helloworld/README.md
+++ b/examples/cpp/helloworld/README.md
@@ -1,264 +1,6 @@
-# gRPC C++ Hello World Tutorial
+# gRPC C++ Hello World Example
-### Install gRPC
-Make sure you have installed gRPC on your system. Follow the
-[BUILDING.md](../../../BUILDING.md) instructions.
+You can find a complete set of instructions for building gRPC and running the
+Hello World app in the [C++ Quick Start][].
-### Get the tutorial source code
-
-The example code for this and our other examples lives in the `examples`
-directory. Clone this repository at the [latest stable release tag](https://github.com/grpc/grpc/releases)
-to your local machine by running the following command:
-
-
-```sh
-$ git clone -b RELEASE_TAG_HERE https://github.com/grpc/grpc
-```
-
-Change your current directory to examples/cpp/helloworld
-
-```sh
-$ cd examples/cpp/helloworld/
-```
-
-### Defining a service
-
-The first step in creating our example is to define a *service*: an RPC
-service specifies the methods that can be called remotely with their parameters
-and return types. As you saw in the
-[overview](#protocolbuffers) above, gRPC does this using [protocol
-buffers](https://developers.google.com/protocol-buffers/docs/overview). We
-use the protocol buffers interface definition language (IDL) to define our
-service methods, and define the parameters and return
-types as protocol buffer message types. Both the client and the
-server use interface code generated from the service definition.
-
-Here's our example service definition, defined using protocol buffers IDL in
-[helloworld.proto](../../protos/helloworld.proto). The `Greeting`
-service has one method, `hello`, that lets the server receive a single
-`HelloRequest`
-message from the remote client containing the user's name, then send back
-a greeting in a single `HelloReply`. This is the simplest type of RPC you
-can specify in gRPC - we'll look at some other types later in this document.
-
-```protobuf
-syntax = "proto3";
-
-option java_package = "ex.grpc";
-
-package helloworld;
-
-// The greeting service definition.
-service Greeter {
- // Sends a greeting
- rpc SayHello (HelloRequest) returns (HelloReply) {}
-}
-
-// The request message containing the user's name.
-message HelloRequest {
- string name = 1;
-}
-
-// The response message containing the greetings
-message HelloReply {
- string message = 1;
-}
-
-```
-
-
-### Generating gRPC code
-
-Once we've defined our service, we use the protocol buffer compiler
-`protoc` to generate the special client and server code we need to create
-our application. The generated code contains both stub code for clients to
-use and an abstract interface for servers to implement, both with the method
-defined in our `Greeting` service.
-
-To generate the client and server side interfaces:
-
-```sh
-$ make helloworld.grpc.pb.cc helloworld.pb.cc
-```
-Which internally invokes the proto-compiler as:
-
-```sh
-$ protoc -I ../../protos/ --grpc_out=. --plugin=protoc-gen-grpc=grpc_cpp_plugin ../../protos/helloworld.proto
-$ protoc -I ../../protos/ --cpp_out=. ../../protos/helloworld.proto
-```
-
-### Writing a client
-
-- Create a channel. A channel is a logical connection to an endpoint. A gRPC
- channel can be created with the target address, credentials to use and
- arguments as follows
-
- ```cpp
- auto channel = CreateChannel("localhost:50051", InsecureChannelCredentials());
- ```
-
-- Create a stub. A stub implements the rpc methods of a service and in the
- generated code, a method is provided to create a stub with a channel:
-
- ```cpp
- auto stub = helloworld::Greeter::NewStub(channel);
- ```
-
-- Make a unary rpc, with `ClientContext` and request/response proto messages.
-
- ```cpp
- ClientContext context;
- HelloRequest request;
- request.set_name("hello");
- HelloReply reply;
- Status status = stub->SayHello(&context, request, &reply);
- ```
-
-- Check returned status and response.
-
- ```cpp
- if (status.ok()) {
- // check reply.message()
- } else {
- // rpc failed.
- }
- ```
-
-For a working example, refer to [greeter_client.cc](greeter_client.cc).
-
-### Writing a server
-
-- Implement the service interface
-
- ```cpp
- class GreeterServiceImpl final : public Greeter::Service {
- Status SayHello(ServerContext* context, const HelloRequest* request,
- HelloReply* reply) override {
- std::string prefix("Hello ");
- reply->set_message(prefix + request->name());
- return Status::OK;
- }
- };
-
- ```
-
-- Build a server exporting the service
-
- ```cpp
- GreeterServiceImpl service;
- ServerBuilder builder;
- builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
- builder.RegisterService(&service);
- std::unique_ptr<Server> server(builder.BuildAndStart());
- ```
-
-For a working example, refer to [greeter_server.cc](greeter_server.cc).
-
-### Writing asynchronous client and server
-
-gRPC uses `CompletionQueue` API for asynchronous operations. The basic work flow
-is
-- bind a `CompletionQueue` to a rpc call
-- do something like a read or write, present with a unique `void*` tag
-- call `CompletionQueue::Next` to wait for operations to complete. If a tag
- appears, it indicates that the corresponding operation is complete.
-
-#### Async client
-
-The channel and stub creation code is the same as the sync client.
-
-- Initiate the rpc and create a handle for the rpc. Bind the rpc to a
- `CompletionQueue`.
-
- ```cpp
- CompletionQueue cq;
- auto rpc = stub->AsyncSayHello(&context, request, &cq);
- ```
-
-- Ask for reply and final status, with a unique tag
-
- ```cpp
- Status status;
- rpc->Finish(&reply, &status, (void*)1);
- ```
-
-- Wait for the completion queue to return the next tag. The reply and status are
- ready once the tag passed into the corresponding `Finish()` call is returned.
-
- ```cpp
- void* got_tag;
- bool ok = false;
- cq.Next(&got_tag, &ok);
- if (ok && got_tag == (void*)1) {
- // check reply and status
- }
- ```
-
-For a working example, refer to [greeter_async_client.cc](greeter_async_client.cc).
-
-#### Async server
-
-The server implementation requests a rpc call with a tag and then wait for the
-completion queue to return the tag. The basic flow is
-
-- Build a server exporting the async service
-
- ```cpp
- helloworld::Greeter::AsyncService service;
- ServerBuilder builder;
- builder.AddListeningPort("0.0.0.0:50051", InsecureServerCredentials());
- builder.RegisterService(&service);
- auto cq = builder.AddCompletionQueue();
- auto server = builder.BuildAndStart();
- ```
-
-- Request one rpc
-
- ```cpp
- ServerContext context;
- HelloRequest request;
- ServerAsyncResponseWriter<HelloReply> responder;
- service.RequestSayHello(&context, &request, &responder, &cq, &cq, (void*)1);
- ```
-
-- Wait for the completion queue to return the tag. The context, request and
- responder are ready once the tag is retrieved.
-
- ```cpp
- HelloReply reply;
- Status status;
- void* got_tag;
- bool ok = false;
- cq.Next(&got_tag, &ok);
- if (ok && got_tag == (void*)1) {
- // set reply and status
- responder.Finish(reply, status, (void*)2);
- }
- ```
-
-- Wait for the completion queue to return the tag. The rpc is finished when the
- tag is back.
-
- ```cpp
- void* got_tag;
- bool ok = false;
- cq.Next(&got_tag, &ok);
- if (ok && got_tag == (void*)2) {
- // clean up
- }
- ```
-
-To handle multiple rpcs, the async server creates an object `CallData` to
-maintain the state of each rpc and use the address of it as the unique tag. For
-simplicity the server only uses one completion queue for all events, and runs a
-main loop in `HandleRpcs` to query the queue.
-
-For a working example, refer to [greeter_async_server.cc](greeter_async_server.cc).
-
-#### Flags for the client
-
-```sh
-./greeter_client --target="a target string used to create a GRPC client channel"
-```
-
-The Default value for --target is "localhost:50051".
+[C++ Quick Start]: https://grpc.io/docs/quickstart/cpp
diff --git a/gRPC-C++.podspec b/gRPC-C++.podspec
index 77980999b59..7d546034f95 100644
--- a/gRPC-C++.podspec
+++ b/gRPC-C++.podspec
@@ -445,6 +445,7 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/error.h',
'src/core/lib/iomgr/error_cfstream.h',
'src/core/lib/iomgr/error_internal.h',
+ 'src/core/lib/iomgr/ev_apple.h',
'src/core/lib/iomgr/ev_epoll1_linux.h',
'src/core/lib/iomgr/ev_epollex_linux.h',
'src/core/lib/iomgr/ev_poll_posix.h',
@@ -896,6 +897,7 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/error.h',
'src/core/lib/iomgr/error_cfstream.h',
'src/core/lib/iomgr/error_internal.h',
+ 'src/core/lib/iomgr/ev_apple.h',
'src/core/lib/iomgr/ev_epoll1_linux.h',
'src/core/lib/iomgr/ev_epollex_linux.h',
'src/core/lib/iomgr/ev_poll_posix.h',
diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec
index 749e77b3fa7..056c7f8a6fd 100644
--- a/gRPC-Core.podspec
+++ b/gRPC-Core.podspec
@@ -654,6 +654,8 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/error_cfstream.cc',
'src/core/lib/iomgr/error_cfstream.h',
'src/core/lib/iomgr/error_internal.h',
+ 'src/core/lib/iomgr/ev_apple.cc',
+ 'src/core/lib/iomgr/ev_apple.h',
'src/core/lib/iomgr/ev_epoll1_linux.cc',
'src/core/lib/iomgr/ev_epoll1_linux.h',
'src/core/lib/iomgr/ev_epollex_linux.cc',
@@ -1249,6 +1251,7 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/error.h',
'src/core/lib/iomgr/error_cfstream.h',
'src/core/lib/iomgr/error_internal.h',
+ 'src/core/lib/iomgr/ev_apple.h',
'src/core/lib/iomgr/ev_epoll1_linux.h',
'src/core/lib/iomgr/ev_epollex_linux.h',
'src/core/lib/iomgr/ev_poll_posix.h',
diff --git a/grpc.gemspec b/grpc.gemspec
index 29e5d3d436b..3dcf652466f 100644
--- a/grpc.gemspec
+++ b/grpc.gemspec
@@ -576,6 +576,8 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/error_cfstream.cc )
s.files += %w( src/core/lib/iomgr/error_cfstream.h )
s.files += %w( src/core/lib/iomgr/error_internal.h )
+ s.files += %w( src/core/lib/iomgr/ev_apple.cc )
+ s.files += %w( src/core/lib/iomgr/ev_apple.h )
s.files += %w( src/core/lib/iomgr/ev_epoll1_linux.cc )
s.files += %w( src/core/lib/iomgr/ev_epoll1_linux.h )
s.files += %w( src/core/lib/iomgr/ev_epollex_linux.cc )
diff --git a/grpc.gyp b/grpc.gyp
index a62c034d65a..ce39a8ed214 100644
--- a/grpc.gyp
+++ b/grpc.gyp
@@ -640,6 +640,7 @@
'src/core/lib/iomgr/endpoint_pair_windows.cc',
'src/core/lib/iomgr/error.cc',
'src/core/lib/iomgr/error_cfstream.cc',
+ 'src/core/lib/iomgr/ev_apple.cc',
'src/core/lib/iomgr/ev_epoll1_linux.cc',
'src/core/lib/iomgr/ev_epollex_linux.cc',
'src/core/lib/iomgr/ev_poll_posix.cc',
@@ -1129,6 +1130,7 @@
'src/core/lib/iomgr/endpoint_pair_windows.cc',
'src/core/lib/iomgr/error.cc',
'src/core/lib/iomgr/error_cfstream.cc',
+ 'src/core/lib/iomgr/ev_apple.cc',
'src/core/lib/iomgr/ev_epoll1_linux.cc',
'src/core/lib/iomgr/ev_epollex_linux.cc',
'src/core/lib/iomgr/ev_poll_posix.cc',
diff --git a/include/grpc/impl/codegen/grpc_types.h b/include/grpc/impl/codegen/grpc_types.h
index ff45450f3a3..c7dd23caa49 100644
--- a/include/grpc/impl/codegen/grpc_types.h
+++ b/include/grpc/impl/codegen/grpc_types.h
@@ -344,10 +344,6 @@ typedef struct {
balancer before using fallback backend addresses from the resolver.
If 0, enter fallback mode immediately. Default value is 10000. */
#define GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS "grpc.grpclb_fallback_timeout_ms"
-/* Timeout in milliseconds to wait for the serverlist from the xDS load
- balancer before using fallback backend addresses from the resolver.
- If 0, enter fallback mode immediately. Default value is 10000. */
-#define GRPC_ARG_XDS_FALLBACK_TIMEOUT_MS "grpc.xds_fallback_timeout_ms"
/* Timeout in milliseconds to wait for the child of a specific priority to
complete its initial connection attempt before the priority LB policy fails
over to the next priority. Default value is 10 seconds. */
diff --git a/package.xml b/package.xml
index 06d1669ec48..a8430464cf7 100644
--- a/package.xml
+++ b/package.xml
@@ -556,6 +556,8 @@
+    <file baseinstalldir="/" name="src/core/lib/iomgr/ev_apple.cc" role="src" />
+    <file baseinstalldir="/" name="src/core/lib/iomgr/ev_apple.h" role="src" />
diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/eds.cc b/src/core/ext/filters/client_channel/lb_policy/xds/eds.cc
index da60dc5998b..c4235a6305f 100644
--- a/src/core/ext/filters/client_channel/lb_policy/xds/eds.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/xds/eds.cc
@@ -49,27 +49,22 @@ TraceFlag grpc_lb_eds_trace(false, "eds_lb");
namespace {
-constexpr char kXds[] = "xds_experimental";
constexpr char kEds[] = "eds_experimental";
// Config for EDS LB policy.
class EdsLbConfig : public LoadBalancingPolicy::Config {
public:
- EdsLbConfig(const char* name, std::string cluster_name,
- std::string eds_service_name,
+ EdsLbConfig(std::string cluster_name, std::string eds_service_name,
absl::optional<std::string> lrs_load_reporting_server_name,
- Json locality_picking_policy, Json endpoint_picking_policy,
- RefCountedPtr<LoadBalancingPolicy::Config> fallback_policy)
- : name_(name),
- cluster_name_(std::move(cluster_name)),
+ Json locality_picking_policy, Json endpoint_picking_policy)
+ : cluster_name_(std::move(cluster_name)),
eds_service_name_(std::move(eds_service_name)),
lrs_load_reporting_server_name_(
std::move(lrs_load_reporting_server_name)),
locality_picking_policy_(std::move(locality_picking_policy)),
- endpoint_picking_policy_(std::move(endpoint_picking_policy)),
- fallback_policy_(std::move(fallback_policy)) {}
+ endpoint_picking_policy_(std::move(endpoint_picking_policy)) {}
- const char* name() const override { return name_; }
+ const char* name() const override { return kEds; }
const std::string& cluster_name() const { return cluster_name_; }
const std::string& eds_service_name() const { return eds_service_name_; }
@@ -82,26 +77,21 @@ class EdsLbConfig : public LoadBalancingPolicy::Config {
const Json& endpoint_picking_policy() const {
return endpoint_picking_policy_;
}
- RefCountedPtr<LoadBalancingPolicy::Config> fallback_policy() const {
- return fallback_policy_;
- }
private:
- const char* name_;
std::string cluster_name_;
std::string eds_service_name_;
absl::optional<std::string> lrs_load_reporting_server_name_;
Json locality_picking_policy_;
Json endpoint_picking_policy_;
- RefCountedPtr<LoadBalancingPolicy::Config> fallback_policy_;
};
// EDS LB policy.
class EdsLb : public LoadBalancingPolicy {
public:
- EdsLb(const char* name, Args args);
+ explicit EdsLb(Args args);
- const char* name() const override { return name_; }
+ const char* name() const override { return kEds; }
void UpdateLocked(UpdateArgs args) override;
void ResetBackoffLocked() override;
@@ -153,24 +143,6 @@ class EdsLb : public LoadBalancingPolicy {
RefCountedPtr<EdsLb> eds_policy_;
};
- class FallbackHelper : public ChannelControlHelper {
- public:
- explicit FallbackHelper(RefCountedPtr<EdsLb> parent)
- : parent_(std::move(parent)) {}
-
- ~FallbackHelper() { parent_.reset(DEBUG_LOCATION, "FallbackHelper"); }
-
- RefCountedPtr<SubchannelInterface> CreateSubchannel(
- const grpc_channel_args& args) override;
- void UpdateState(grpc_connectivity_state state,
- std::unique_ptr<SubchannelPicker> picker) override;
- void RequestReresolution() override;
- void AddTraceEvent(TraceSeverity severity, StringView message) override;
-
- private:
- RefCountedPtr<EdsLb> parent_;
- };
-
~EdsLb();
void ShutdownLocked() override;
@@ -185,15 +157,6 @@ class EdsLb : public LoadBalancingPolicy {
const grpc_channel_args* args_in);
void MaybeUpdateDropPickerLocked();
- // Methods for dealing with fallback state.
- void MaybeCancelFallbackAtStartupChecks();
- static void OnFallbackTimer(void* arg, grpc_error* error);
- void OnFallbackTimerLocked(grpc_error* error);
- void UpdateFallbackPolicyLocked();
- OrphanablePtr<LoadBalancingPolicy> CreateFallbackPolicyLocked(
- const grpc_channel_args* args);
- void MaybeExitFallbackMode();
-
// Caller must ensure that config_ is set before calling.
const StringView GetEdsResourceName() const {
if (xds_client_from_channel_ == nullptr) return server_name_;
@@ -216,9 +179,6 @@ class EdsLb : public LoadBalancingPolicy {
: xds_client_.get();
}
- // Policy name (kXds or kEds).
- const char* name_;
-
// Server name from target URI.
std::string server_name_;
@@ -251,26 +211,6 @@ class EdsLb : public LoadBalancingPolicy {
// The latest state and picker returned from the child policy.
grpc_connectivity_state child_state_;
RefCountedPtr<ChildPickerWrapper> child_picker_;
-
- // Non-null iff we are in fallback mode.
- OrphanablePtr<LoadBalancingPolicy> fallback_policy_;
-
- // Whether the checks for fallback at startup are ALL pending. There are
- // several cases where this can be reset:
- // 1. The fallback timer fires, we enter fallback mode.
- // 2. Before the fallback timer fires, the endpoint watcher reports an
- // error, we enter fallback mode.
- // 3. Before the fallback timer fires, if any child policy in the locality map
- // becomes READY, we cancel the fallback timer.
- bool fallback_at_startup_checks_pending_ = false;
- // Timeout in milliseconds for before using fallback backend addresses.
- // 0 means not using fallback.
- const grpc_millis lb_fallback_timeout_ms_;
- // The backend addresses from the resolver.
- ServerAddressList fallback_backend_addresses_;
- // Fallback timer.
- grpc_timer lb_fallback_timer_;
- grpc_closure lb_on_fallback_;
};
//
@@ -331,15 +271,6 @@ void EdsLb::Helper::UpdateState(grpc_connectivity_state state,
eds_policy_->child_state_ = state;
eds_policy_->child_picker_ =
MakeRefCounted<ChildPickerWrapper>(std::move(picker));
- // If the new state is READY, cancel the fallback-at-startup checks.
- if (state == GRPC_CHANNEL_READY) {
- eds_policy_->MaybeCancelFallbackAtStartupChecks();
- eds_policy_->MaybeExitFallbackMode();
- }
- // TODO(roth): If the child reports TRANSIENT_FAILURE and the
- // fallback-at-startup checks are pending, we should probably go into
- // fallback mode immediately (cancelling the fallback-at-startup timer
- // if needed).
// Wrap the picker in a DropPicker and pass it up.
eds_policy_->MaybeUpdateDropPickerLocked();
}
@@ -349,33 +280,6 @@ void EdsLb::Helper::AddTraceEvent(TraceSeverity severity, StringView message) {
eds_policy_->channel_control_helper()->AddTraceEvent(severity, message);
}
-//
-// EdsLb::FallbackHelper
-//
-
-RefCountedPtr<SubchannelInterface> EdsLb::FallbackHelper::CreateSubchannel(
- const grpc_channel_args& args) {
- if (parent_->shutting_down_) return nullptr;
- return parent_->channel_control_helper()->CreateSubchannel(args);
-}
-
-void EdsLb::FallbackHelper::UpdateState(
- grpc_connectivity_state state, std::unique_ptr<SubchannelPicker> picker) {
- if (parent_->shutting_down_) return;
- parent_->channel_control_helper()->UpdateState(state, std::move(picker));
-}
-
-void EdsLb::FallbackHelper::RequestReresolution() {
- if (parent_->shutting_down_) return;
- parent_->channel_control_helper()->RequestReresolution();
-}
-
-void EdsLb::FallbackHelper::AddTraceEvent(TraceSeverity severity,
- StringView message) {
- if (parent_->shutting_down_) return;
- parent_->channel_control_helper()->AddTraceEvent(severity, message);
-}
-
//
// EdsLb::EndpointWatcher
//
@@ -392,9 +296,6 @@ class EdsLb::EndpointWatcher : public XdsClient::EndpointWatcherInterface {
gpr_log(GPR_INFO, "[edslb %p] Received EDS update from xds client",
eds_policy_.get());
}
- // If the balancer tells us to drop all the calls, we should exit fallback
- // mode immediately.
- if (update.drop_config->drop_all()) eds_policy_->MaybeExitFallbackMode();
// Update the drop config.
const bool drop_config_changed =
eds_policy_->drop_config_ == nullptr ||
@@ -424,34 +325,18 @@ class EdsLb::EndpointWatcher : public XdsClient::EndpointWatcherInterface {
}
void OnError(grpc_error* error) override {
- // If the fallback-at-startup checks are pending, go into fallback mode
- // immediately. This short-circuits the timeout for the
- // fallback-at-startup case.
- if (eds_policy_->fallback_at_startup_checks_pending_) {
- gpr_log(GPR_ERROR,
- "[edslb %p] xds watcher reported error; entering fallback "
- "mode: %s",
- eds_policy_.get(), grpc_error_string(error));
- eds_policy_->fallback_at_startup_checks_pending_ = false;
- grpc_timer_cancel(&eds_policy_->lb_fallback_timer_);
- eds_policy_->UpdateFallbackPolicyLocked();
- // If the xds call failed, request re-resolution.
- // TODO(roth): We check the error string contents here to
- // differentiate between the xds call failing and the xds channel
- // going into TRANSIENT_FAILURE. This is a pretty ugly hack,
- // but it's okay for now, since we're not yet sure whether we will
- // continue to support the current fallback functionality. If we
- // decide to keep the fallback approach, then we should either
- // find a cleaner way to expose the difference between these two
- // cases or decide that we're okay re-resolving in both cases.
- // Note that even if we do keep the current fallback functionality,
- // this re-resolution will only be necessary if we are going to be
- // using this LB policy with resolvers other than the xds resolver.
- if (strstr(grpc_error_string(error), "xds call failed")) {
- eds_policy_->channel_control_helper()->RequestReresolution();
- }
+ gpr_log(GPR_ERROR, "[edslb %p] xds watcher reported error: %s",
+ eds_policy_.get(), grpc_error_string(error));
+ // Go into TRANSIENT_FAILURE if we have not yet created the child
+ // policy (i.e., we have not yet received data from xds). Otherwise,
+ // we keep running with the data we had previously.
+ if (eds_policy_->child_policy_ == nullptr) {
+ eds_policy_->channel_control_helper()->UpdateState(
+ GRPC_CHANNEL_TRANSIENT_FAILURE,
+ absl::make_unique<TransientFailurePicker>(error));
+ } else {
+ GRPC_ERROR_UNREF(error);
}
- GRPC_ERROR_UNREF(error);
}
private:
@@ -462,13 +347,9 @@ class EdsLb::EndpointWatcher : public XdsClient::EndpointWatcherInterface {
// EdsLb public methods
//
-EdsLb::EdsLb(const char* name, Args args)
+EdsLb::EdsLb(Args args)
: LoadBalancingPolicy(std::move(args)),
- name_(name),
- xds_client_from_channel_(XdsClient::GetFromChannelArgs(*args.args)),
- lb_fallback_timeout_ms_(grpc_channel_args_find_integer(
- args.args, GRPC_ARG_XDS_FALLBACK_TIMEOUT_MS,
- {GRPC_EDS_DEFAULT_FALLBACK_TIMEOUT, 0, INT_MAX})) {
+ xds_client_from_channel_(XdsClient::GetFromChannelArgs(*args.args)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) {
gpr_log(GPR_INFO, "[edslb %p] created -- xds client from channel: %p", this,
xds_client_from_channel_.get());
@@ -499,7 +380,6 @@ void EdsLb::ShutdownLocked() {
gpr_log(GPR_INFO, "[edslb %p] shutting down", this);
}
shutting_down_ = true;
- MaybeCancelFallbackAtStartupChecks();
// Drop our ref to the child's picker, in case it's holding a ref to
// the child.
child_picker_.reset();
@@ -508,11 +388,6 @@ void EdsLb::ShutdownLocked() {
interested_parties());
child_policy_.reset();
}
- if (fallback_policy_ != nullptr) {
- grpc_pollset_set_del_pollset_set(fallback_policy_->interested_parties(),
- interested_parties());
- fallback_policy_.reset();
- }
drop_stats_.reset();
// Cancel the endpoint watch here instead of in our dtor if we are using the
// xds resolver, because the watcher holds a ref to us and we might not be
@@ -540,15 +415,10 @@ void EdsLb::UpdateLocked(UpdateArgs args) {
// Update config.
auto old_config = std::move(config_);
config_ = std::move(args.config);
- // Update fallback address list.
- fallback_backend_addresses_ = std::move(args.addresses);
// Update args.
grpc_channel_args_destroy(args_);
args_ = args.args;
args.args = nullptr;
- // Update the existing fallback policy. The fallback policy config and/or the
- // fallback addresses may be new.
- if (fallback_policy_ != nullptr) UpdateFallbackPolicyLocked();
if (is_initial_update) {
// Initialize XdsClient.
if (xds_client_from_channel_ == nullptr) {
@@ -556,7 +426,7 @@ void EdsLb::UpdateLocked(UpdateArgs args) {
xds_client_ = MakeOrphanable<XdsClient>(
work_serializer(), interested_parties(), GetEdsResourceName(),
nullptr /* service config watcher */, *args_, &error);
- // TODO(roth): If we decide that we care about fallback mode, add
+ // TODO(roth): If we decide that we care about EDS-only mode, add
// proper error handling here.
GPR_ASSERT(error == GRPC_ERROR_NONE);
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) {
@@ -564,13 +434,6 @@ void EdsLb::UpdateLocked(UpdateArgs args) {
xds_client_.get());
}
}
- // Start fallback-at-startup checks.
- grpc_millis deadline = ExecCtx::Get()->Now() + lb_fallback_timeout_ms_;
- Ref(DEBUG_LOCATION, "on_fallback_timer").release(); // Held by closure
- GRPC_CLOSURE_INIT(&lb_on_fallback_, &EdsLb::OnFallbackTimer, this,
- grpc_schedule_on_exec_ctx);
- fallback_at_startup_checks_pending_ = true;
- grpc_timer_init(&lb_fallback_timer_, deadline, &lb_on_fallback_);
}
// Update drop stats for load reporting if needed.
if (is_initial_update || config_->lrs_load_reporting_server_name() !=
@@ -609,9 +472,6 @@ void EdsLb::ResetBackoffLocked() {
if (child_policy_ != nullptr) {
child_policy_->ResetBackoffLocked();
}
- if (fallback_policy_ != nullptr) {
- fallback_policy_->ResetBackoffLocked();
- }
}
//
@@ -875,8 +735,6 @@ OrphanablePtr<LoadBalancingPolicy> EdsLb::CreateChildPolicyLocked(
}
void EdsLb::MaybeUpdateDropPickerLocked() {
- // If we are in fallback mode, don't override the picker.
- if (fallback_policy_ != nullptr) return;
// If we're dropping all calls, report READY, regardless of what (or
// whether) the child has reported.
if (drop_config_ != nullptr && drop_config_->drop_all()) {
@@ -891,111 +749,24 @@ void EdsLb::MaybeUpdateDropPickerLocked() {
}
}
-//
-// fallback-related methods
-//
-
-void EdsLb::MaybeCancelFallbackAtStartupChecks() {
- if (!fallback_at_startup_checks_pending_) return;
- if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) {
- gpr_log(GPR_INFO, "[edslb %p] Cancelling fallback timer", this);
- }
- grpc_timer_cancel(&lb_fallback_timer_);
- fallback_at_startup_checks_pending_ = false;
-}
-
-void EdsLb::OnFallbackTimer(void* arg, grpc_error* error) {
- EdsLb* edslb_policy = static_cast<EdsLb*>(arg);
- GRPC_ERROR_REF(error); // ref owned by lambda
- edslb_policy->work_serializer()->Run(
- [edslb_policy, error]() { edslb_policy->OnFallbackTimerLocked(error); },
- DEBUG_LOCATION);
-}
-
-void EdsLb::OnFallbackTimerLocked(grpc_error* error) {
- // If some fallback-at-startup check is done after the timer fires but before
- // this callback actually runs, don't fall back.
- if (fallback_at_startup_checks_pending_ && !shutting_down_ &&
- error == GRPC_ERROR_NONE) {
- gpr_log(GPR_INFO,
- "[edslb %p] Child policy not ready after fallback timeout; "
- "entering fallback mode",
- this);
- fallback_at_startup_checks_pending_ = false;
- UpdateFallbackPolicyLocked();
- }
- Unref(DEBUG_LOCATION, "on_fallback_timer");
- GRPC_ERROR_UNREF(error);
-}
-
-void EdsLb::UpdateFallbackPolicyLocked() {
- if (shutting_down_) return;
- // Create policy if needed.
- if (fallback_policy_ == nullptr) {
- fallback_policy_ = CreateFallbackPolicyLocked(args_);
- }
- // Construct update args.
- UpdateArgs update_args;
- update_args.addresses = fallback_backend_addresses_;
- update_args.config = config_->fallback_policy();
- update_args.args = grpc_channel_args_copy(args_);
- // Update the policy.
- if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) {
- gpr_log(GPR_INFO, "[edslb %p] Updating fallback child policy handler %p",
- this, fallback_policy_.get());
- }
- fallback_policy_->UpdateLocked(std::move(update_args));
-}
-
-OrphanablePtr EdsLb::CreateFallbackPolicyLocked(
- const grpc_channel_args* args) {
- LoadBalancingPolicy::Args lb_policy_args;
- lb_policy_args.work_serializer = work_serializer();
- lb_policy_args.args = args;
- lb_policy_args.channel_control_helper =
- absl::make_unique<FallbackHelper>(Ref(DEBUG_LOCATION, "FallbackHelper"));
- OrphanablePtr<LoadBalancingPolicy> lb_policy =
- MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
- &grpc_lb_eds_trace);
- if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) {
- gpr_log(GPR_INFO, "[edslb %p] Created new fallback child policy handler %p",
- this, lb_policy.get());
- }
- // Add our interested_parties pollset_set to that of the newly created
- // child policy. This will make the child policy progress upon activity on
- // this policy, which in turn is tied to the application's call.
- grpc_pollset_set_add_pollset_set(lb_policy->interested_parties(),
- interested_parties());
- return lb_policy;
-}
-
-void EdsLb::MaybeExitFallbackMode() {
- if (fallback_policy_ == nullptr) return;
- gpr_log(GPR_INFO, "[edslb %p] Exiting fallback mode", this);
- fallback_policy_.reset();
-}
-
//
// factory
//
class EdsLbFactory : public LoadBalancingPolicyFactory {
public:
- explicit EdsLbFactory(const char* name) : name_(name) {}
-
OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
LoadBalancingPolicy::Args args) const override {
- return MakeOrphanable<EdsChildHandler>(std::move(args), &grpc_lb_eds_trace,
- name_);
+ return MakeOrphanable<EdsChildHandler>(std::move(args), &grpc_lb_eds_trace);
}
- const char* name() const override { return name_; }
+ const char* name() const override { return kEds; }
RefCountedPtr<LoadBalancingPolicy::Config> ParseLoadBalancingConfig(
const Json& json, grpc_error** error) const override {
GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE);
if (json.type() == Json::Type::JSON_NULL) {
- // xds was mentioned as a policy in the deprecated loadBalancingPolicy
+ // eds was mentioned as a policy in the deprecated loadBalancingPolicy
// field or in the client API.
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:loadBalancingPolicy error:eds policy requires configuration. "
@@ -1016,21 +787,15 @@ class EdsLbFactory : public LoadBalancingPolicyFactory {
}
// Cluster name.
std::string cluster_name;
- if (name_ == kEds) {
- it = json.object_value().find("clusterName");
- if (it == json.object_value().end()) {
- error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "field:clusterName error:required field missing"));
- } else if (it->second.type() != Json::Type::STRING) {
- error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "field:clusterName error:type should be string"));
- } else {
- cluster_name = it->second.string_value();
- }
+ it = json.object_value().find("clusterName");
+ if (it == json.object_value().end()) {
+ error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "field:clusterName error:required field missing"));
+ } else if (it->second.type() != Json::Type::STRING) {
+ error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "field:clusterName error:type should be string"));
} else {
- // For xds policy, this field does not exist in the config, so it
- // will always be set to the same value as edsServiceName.
- cluster_name = eds_service_name;
+ cluster_name = it->second.string_value();
}
// LRS load reporting server name.
    absl::optional<std::string> lrs_load_reporting_server_name;
@@ -1043,20 +808,20 @@ class EdsLbFactory : public LoadBalancingPolicyFactory {
lrs_load_reporting_server_name.emplace(it->second.string_value());
}
}
- // Locality-picking policy. Not supported for xds policy.
- Json locality_picking_policy = Json::Array{
- Json::Object{
- {"weighted_target_experimental",
- Json::Object{
- {"targets", Json::Object()},
- }},
- },
- };
- if (name_ == kEds) {
- it = json.object_value().find("localityPickingPolicy");
- if (it != json.object_value().end()) {
- locality_picking_policy = it->second;
- }
+ // Locality-picking policy.
+ Json locality_picking_policy;
+ it = json.object_value().find("localityPickingPolicy");
+ if (it == json.object_value().end()) {
+ locality_picking_policy = Json::Array{
+ Json::Object{
+ {"weighted_target_experimental",
+ Json::Object{
+ {"targets", Json::Object()},
+ }},
+ },
+ };
+ } else {
+ locality_picking_policy = it->second;
}
grpc_error* parse_error = GRPC_ERROR_NONE;
if (LoadBalancingPolicyRegistry::ParseLoadBalancingConfig(
@@ -1067,10 +832,8 @@ class EdsLbFactory : public LoadBalancingPolicyFactory {
GRPC_ERROR_UNREF(parse_error);
}
// Endpoint-picking policy. Called "childPolicy" for xds policy.
- const char* field_name =
- name_ == kEds ? "endpointPickingPolicy" : "childPolicy";
Json endpoint_picking_policy;
- it = json.object_value().find(field_name);
+ it = json.object_value().find("endpointPickingPolicy");
if (it == json.object_value().end()) {
endpoint_picking_policy = Json::Array{
Json::Object{
@@ -1085,36 +848,16 @@ class EdsLbFactory : public LoadBalancingPolicyFactory {
endpoint_picking_policy, &parse_error) == nullptr) {
GPR_DEBUG_ASSERT(parse_error != GRPC_ERROR_NONE);
error_list.push_back(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- field_name, &parse_error, 1));
- GRPC_ERROR_UNREF(parse_error);
- }
- // Fallback policy.
- Json fallback_policy_config;
- it = json.object_value().find("fallbackPolicy");
- if (it == json.object_value().end()) {
- fallback_policy_config = Json::Array{Json::Object{
- {"round_robin", Json::Object()},
- }};
- } else {
- fallback_policy_config = it->second;
- }
- parse_error = GRPC_ERROR_NONE;
-  RefCountedPtr<LoadBalancingPolicy::Config> fallback_policy =
- LoadBalancingPolicyRegistry::ParseLoadBalancingConfig(
- fallback_policy_config, &parse_error);
- if (fallback_policy == nullptr) {
- GPR_DEBUG_ASSERT(parse_error != GRPC_ERROR_NONE);
- error_list.push_back(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "fallbackPolicy", &parse_error, 1));
+ "endpointPickingPolicy", &parse_error, 1));
GRPC_ERROR_UNREF(parse_error);
- error_list.push_back(parse_error);
}
+ // Construct config.
if (error_list.empty()) {
      return MakeRefCounted<EdsLbConfig>(
- name_, std::move(cluster_name), std::move(eds_service_name),
+ std::move(cluster_name), std::move(eds_service_name),
std::move(lrs_load_reporting_server_name),
std::move(locality_picking_policy),
- std::move(endpoint_picking_policy), std::move(fallback_policy));
+ std::move(endpoint_picking_policy));
} else {
*error = GRPC_ERROR_CREATE_FROM_VECTOR(
"eds_experimental LB policy config", &error_list);
@@ -1125,14 +868,14 @@ class EdsLbFactory : public LoadBalancingPolicyFactory {
private:
class EdsChildHandler : public ChildPolicyHandler {
public:
- EdsChildHandler(Args args, TraceFlag* tracer, const char* name)
- : ChildPolicyHandler(std::move(args), tracer), name_(name) {}
+ EdsChildHandler(Args args, TraceFlag* tracer)
+ : ChildPolicyHandler(std::move(args), tracer) {}
bool ConfigChangeRequiresNewPolicyInstance(
LoadBalancingPolicy::Config* old_config,
LoadBalancingPolicy::Config* new_config) const override {
- GPR_ASSERT(old_config->name() == name_);
- GPR_ASSERT(new_config->name() == name_);
+ GPR_ASSERT(old_config->name() == kEds);
+ GPR_ASSERT(new_config->name() == kEds);
      EdsLbConfig* old_eds_config = static_cast<EdsLbConfig*>(old_config);
      EdsLbConfig* new_eds_config = static_cast<EdsLbConfig*>(new_config);
return old_eds_config->cluster_name() != new_eds_config->cluster_name() ||
@@ -1142,14 +885,9 @@ class EdsLbFactory : public LoadBalancingPolicyFactory {
    OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
        const char* name, LoadBalancingPolicy::Args args) const override {
-      return MakeOrphanable<EdsLb>(name_, std::move(args));
+      return MakeOrphanable<EdsLb>(std::move(args));
}
-
- private:
- const char* name_;
};
-
- const char* name_;
};
} // namespace
@@ -1163,13 +901,7 @@ class EdsLbFactory : public LoadBalancingPolicyFactory {
void grpc_lb_policy_eds_init() {
grpc_core::LoadBalancingPolicyRegistry::Builder::
RegisterLoadBalancingPolicyFactory(
-          absl::make_unique<EdsLbFactory>(grpc_core::kEds));
- // TODO(roth): This is here just for backward compatibility with some
- // old tests we have internally. Remove this once they are upgraded
- // to use the new policy name and config.
- grpc_core::LoadBalancingPolicyRegistry::Builder::
- RegisterLoadBalancingPolicyFactory(
-          absl::make_unique<EdsLbFactory>(grpc_core::kXds));
+          absl::make_unique<EdsLbFactory>());
}
void grpc_lb_policy_eds_shutdown() {}
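For reference, a minimal sketch of a service config that the simplified eds_experimental parser above would accept, assembled from the fields the factory reads (clusterName, lrsLoadReportingServerName) and from the test configs further down in this patch; the constant name and exact values are illustrative, not part of the change.

// Illustrative only: a service config fragment accepted by the parser above.
// Compare the configs embedded in xds_end2end_test.cc later in this patch.
static const char* kExampleEdsServiceConfig =
    "{\n"
    "  \"loadBalancingConfig\": [\n"
    "    { \"eds_experimental\": {\n"
    "      \"clusterName\": \"application_target_name\",\n"
    "      \"lrsLoadReportingServerName\": \"\"\n"
    "    } }\n"
    "  ]\n"
    "}";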
diff --git a/src/core/ext/filters/client_channel/resolver.cc b/src/core/ext/filters/client_channel/resolver.cc
index 8d762433786..4a76a7d7bdc 100644
--- a/src/core/ext/filters/client_channel/resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver.cc
@@ -32,8 +32,8 @@ namespace grpc_core {
 Resolver::Resolver(std::shared_ptr<WorkSerializer> work_serializer,
                    std::unique_ptr<ResultHandler> result_handler)
: InternallyRefCounted(&grpc_trace_resolver_refcount),
- result_handler_(std::move(result_handler)),
- work_serializer_(std::move(work_serializer)) {}
+ work_serializer_(std::move(work_serializer)),
+ result_handler_(std::move(result_handler)) {}
//
// Resolver::Result
diff --git a/src/core/lib/iomgr/cfstream_handle.cc b/src/core/lib/iomgr/cfstream_handle.cc
index 40fb3779651..7a78ccd8769 100644
--- a/src/core/lib/iomgr/cfstream_handle.cc
+++ b/src/core/lib/iomgr/cfstream_handle.cc
@@ -32,6 +32,7 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/error_cfstream.h"
+#include "src/core/lib/iomgr/ev_apple.h"
#include "src/core/lib/iomgr/exec_ctx.h"
extern grpc_core::TraceFlag grpc_tcp_trace;
@@ -147,8 +148,8 @@ CFStreamHandle::CFStreamHandle(CFReadStreamRef read_stream,
kCFStreamEventOpenCompleted | kCFStreamEventCanAcceptBytes |
kCFStreamEventErrorOccurred | kCFStreamEventEndEncountered,
CFStreamHandle::WriteCallback, &ctx);
- CFReadStreamSetDispatchQueue(read_stream, dispatch_queue_);
- CFWriteStreamSetDispatchQueue(write_stream, dispatch_queue_);
+ grpc_apple_register_read_stream(read_stream, dispatch_queue_);
+ grpc_apple_register_write_stream(write_stream, dispatch_queue_);
}
CFStreamHandle::~CFStreamHandle() {
diff --git a/src/core/lib/iomgr/ev_apple.cc b/src/core/lib/iomgr/ev_apple.cc
new file mode 100644
index 00000000000..d1525828865
--- /dev/null
+++ b/src/core/lib/iomgr/ev_apple.cc
@@ -0,0 +1,356 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/// Event engine based on Apple's CFRunLoop API family. If the CFRunLoop engine
+/// is enabled (see iomgr_posix_cfstream.cc), a global thread is started to
+/// handle and trigger all the CFStream events. The CFStream streams register
+/// themselves with the run loop via grpc_apple_register_read_stream and
+/// grpc_apple_register_write_stream. Pollsets are no-ops for I/O; they simply
+/// block on a condition variable in pollset_work().
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_APPLE_EV
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#include <list>
+
+#include "src/core/lib/gprpp/thd.h"
+#include "src/core/lib/iomgr/ev_apple.h"
+
+grpc_core::DebugOnlyTraceFlag grpc_apple_polling_trace(false, "apple_polling");
+
+#ifndef NDEBUG
+#define GRPC_POLLING_TRACE(format, ...) \
+ if (GRPC_TRACE_FLAG_ENABLED(grpc_apple_polling_trace)) { \
+ gpr_log(GPR_DEBUG, "(polling) " format, __VA_ARGS__); \
+ }
+#else
+#define GRPC_POLLING_TRACE(...)
+#endif // NDEBUG
+
+#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker*)1)
+
+struct GlobalRunLoopContext {
+ grpc_core::CondVar init_cv;
+ grpc_core::CondVar input_source_cv;
+
+ grpc_core::Mutex mu;
+
+ // Whether an input source registration is pending. Protected by mu.
+ bool input_source_registered = false;
+
+ // The reference to the global run loop object. Protected by mu.
+ CFRunLoopRef run_loop;
+
+ // Whether the pollset has been globally shut down. Protected by mu.
+ bool is_shutdown = false;
+};
+
+struct GrpcAppleWorker {
+  // The condition variable to kick the worker. Works with the pollset's lock
+ // (GrpcApplePollset.mu).
+ grpc_core::CondVar cv;
+
+ // Whether the worker is kicked. Protected by the pollset's lock
+ // (GrpcApplePollset.mu).
+ bool kicked = false;
+};
+
+struct GrpcApplePollset {
+ grpc_core::Mutex mu;
+
+ // Tracks the current workers in the pollset. Protected by mu.
+  std::list<GrpcAppleWorker*> workers;
+
+ // Whether the pollset is shut down. Protected by mu.
+ bool is_shutdown = false;
+
+ // Closure to call when shutdown is done. Protected by mu.
+ grpc_closure* shutdown_closure;
+
+ // Whether there's an outstanding kick that was not processed. Protected by
+ // mu.
+ bool kicked_without_poller = false;
+};
+
+static GlobalRunLoopContext* gGlobalRunLoopContext = nullptr;
+static grpc_core::Thread* gGlobalRunLoopThread = nullptr;
+
+/// Register the stream with the dispatch queue. Callbacks of the stream will be
+/// issued to the dispatch queue when a network event happens and will be
+/// managed by Grand Central Dispatch.
+static void grpc_apple_register_read_stream_queue(
+ CFReadStreamRef read_stream, dispatch_queue_t dispatch_queue) {
+ CFReadStreamSetDispatchQueue(read_stream, dispatch_queue);
+}
+
+/// Register the stream with the dispatch queue. Callbacks of the stream will be
+/// issued to the dispatch queue when a network event happens and will be
+/// managed by Grand Central Dispatch.
+static void grpc_apple_register_write_stream_queue(
+ CFWriteStreamRef write_stream, dispatch_queue_t dispatch_queue) {
+ CFWriteStreamSetDispatchQueue(write_stream, dispatch_queue);
+}
+
+/// Register the stream with the global run loop. Callbacks of the stream will
+/// be issued to the run loop when a network event happens and will be driven by
+/// the global run loop thread gGlobalRunLoopThread.
+static void grpc_apple_register_read_stream_run_loop(
+ CFReadStreamRef read_stream, dispatch_queue_t dispatch_queue) {
+ GRPC_POLLING_TRACE("Register read stream: %p", read_stream);
+ grpc_core::MutexLock lock(&gGlobalRunLoopContext->mu);
+ CFReadStreamScheduleWithRunLoop(read_stream, gGlobalRunLoopContext->run_loop,
+ kCFRunLoopDefaultMode);
+ gGlobalRunLoopContext->input_source_registered = true;
+ gGlobalRunLoopContext->input_source_cv.Signal();
+}
+
+/// Register the stream with the global run loop. Callbacks of the stream will
+/// be issued to the run loop when a network event happens, and will be driven
+/// by the global run loop thread gGlobalRunLoopThread.
+static void grpc_apple_register_write_stream_run_loop(
+ CFWriteStreamRef write_stream, dispatch_queue_t dispatch_queue) {
+ GRPC_POLLING_TRACE("Register write stream: %p", write_stream);
+ grpc_core::MutexLock lock(&gGlobalRunLoopContext->mu);
+ CFWriteStreamScheduleWithRunLoop(
+ write_stream, gGlobalRunLoopContext->run_loop, kCFRunLoopDefaultMode);
+ gGlobalRunLoopContext->input_source_registered = true;
+ gGlobalRunLoopContext->input_source_cv.Signal();
+}
+
+/// The default implementation of stream registration is to register the
+/// stream with a dispatch queue. However, if the CFRunLoop-based pollset is
+/// enabled (by macro and environment variable, see the docs in
+/// iomgr_posix_cfstream.cc), the CFStream streams are registered with the
+/// global run loop instead (see pollset_global_init below).
+static void (*grpc_apple_register_read_stream_impl)(
+ CFReadStreamRef, dispatch_queue_t) = grpc_apple_register_read_stream_queue;
+static void (*grpc_apple_register_write_stream_impl)(CFWriteStreamRef,
+ dispatch_queue_t) =
+ grpc_apple_register_write_stream_queue;
+
+void grpc_apple_register_read_stream(CFReadStreamRef read_stream,
+ dispatch_queue_t dispatch_queue) {
+ grpc_apple_register_read_stream_impl(read_stream, dispatch_queue);
+}
+
+void grpc_apple_register_write_stream(CFWriteStreamRef write_stream,
+ dispatch_queue_t dispatch_queue) {
+ grpc_apple_register_write_stream_impl(write_stream, dispatch_queue);
+}
+
+/// Drive the run loop in a global singleton thread until the global run loop
+/// is shut down.
+static void GlobalRunLoopFunc(void* arg) {
+ grpc_core::ReleasableMutexLock lock(&gGlobalRunLoopContext->mu);
+ gGlobalRunLoopContext->run_loop = CFRunLoopGetCurrent();
+ gGlobalRunLoopContext->init_cv.Signal();
+
+ while (!gGlobalRunLoopContext->is_shutdown) {
+    // CFRunLoopRun() returns immediately if no stream is registered on it, so
+    // wait on a condition variable until a stream is registered; otherwise
+    // this thread would just spin.
+ while (!gGlobalRunLoopContext->input_source_registered) {
+ gGlobalRunLoopContext->input_source_cv.Wait(&gGlobalRunLoopContext->mu);
+ }
+ gGlobalRunLoopContext->input_source_registered = false;
+ lock.Unlock();
+ CFRunLoopRun();
+ lock.Lock();
+ }
+ lock.Unlock();
+}
+
+// pollset implementation
+
+static void pollset_global_init(void) {
+ gGlobalRunLoopContext = new GlobalRunLoopContext;
+
+ grpc_apple_register_read_stream_impl =
+ grpc_apple_register_read_stream_run_loop;
+ grpc_apple_register_write_stream_impl =
+ grpc_apple_register_write_stream_run_loop;
+
+ grpc_core::MutexLock lock(&gGlobalRunLoopContext->mu);
+ gGlobalRunLoopThread =
+ new grpc_core::Thread("apple_ev", GlobalRunLoopFunc, nullptr);
+ gGlobalRunLoopThread->Start();
+ while (gGlobalRunLoopContext->run_loop == NULL)
+ gGlobalRunLoopContext->init_cv.Wait(&gGlobalRunLoopContext->mu);
+}
+
+static void pollset_global_shutdown(void) {
+ {
+ grpc_core::MutexLock lock(&gGlobalRunLoopContext->mu);
+ gGlobalRunLoopContext->is_shutdown = true;
+ CFRunLoopStop(gGlobalRunLoopContext->run_loop);
+ }
+ gGlobalRunLoopThread->Join();
+ delete gGlobalRunLoopThread;
+ delete gGlobalRunLoopContext;
+}
+
+/// The caller must acquire the lock GrpcApplePollset.mu before calling this
+/// function. The lock may be temporarily released when waiting on the condition
+/// variable but will be re-acquired before the function returns.
+///
+/// The Apple pollset simply waits on a condition variable until it is kicked.
+/// The network events are handled in the global run loop thread. Processing of
+/// these events will eventually trigger the kick.
+static grpc_error* pollset_work(grpc_pollset* pollset,
+ grpc_pollset_worker** worker,
+ grpc_millis deadline) {
+ GRPC_POLLING_TRACE("pollset work: %p, worker: %p, deadline: %" PRIu64,
+ pollset, worker, deadline);
+  GrpcApplePollset* apple_pollset =
+      reinterpret_cast<GrpcApplePollset*>(pollset);
+  GrpcAppleWorker actual_worker;
+  if (worker) {
+    *worker = reinterpret_cast<grpc_pollset_worker*>(&actual_worker);
+ }
+
+ if (apple_pollset->kicked_without_poller) {
+ // Process the outstanding kick and reset the flag. Do not block.
+ apple_pollset->kicked_without_poller = false;
+ } else {
+ // Block until kicked, timed out, or the pollset shuts down.
+ apple_pollset->workers.push_front(&actual_worker);
+ auto it = apple_pollset->workers.begin();
+
+ while (!actual_worker.kicked && !apple_pollset->is_shutdown) {
+ if (actual_worker.cv.Wait(
+ &apple_pollset->mu,
+ grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) {
+ // timed out
+ break;
+ }
+ }
+
+ apple_pollset->workers.erase(it);
+
+ // If the pollset is shut down asynchronously and this is the last pending
+ // worker, the shutdown process is complete at this moment and the shutdown
+ // callback will be called.
+ if (apple_pollset->is_shutdown && apple_pollset->workers.empty()) {
+ grpc_core::ExecCtx::Run(DEBUG_LOCATION, apple_pollset->shutdown_closure,
+ GRPC_ERROR_NONE);
+ }
+ }
+
+ return GRPC_ERROR_NONE;
+}
+
+/// Kick a specific worker. The caller must acquire the lock GrpcApplePollset.mu
+/// before calling this function.
+static void kick_worker(GrpcAppleWorker* worker) {
+ worker->kicked = true;
+ worker->cv.Signal();
+}
+
+/// The caller must acquire the lock GrpcApplePollset.mu before calling this
+/// function. The kick action simply signals the condition variable of the
+/// worker.
+static grpc_error* pollset_kick(grpc_pollset* pollset,
+ grpc_pollset_worker* specific_worker) {
+ GrpcApplePollset* apple_pollset =
+      reinterpret_cast<GrpcApplePollset*>(pollset);
+
+ GRPC_POLLING_TRACE("pollset kick: %p, worker:%p", pollset, specific_worker);
+
+ if (specific_worker == nullptr) {
+ if (apple_pollset->workers.empty()) {
+ apple_pollset->kicked_without_poller = true;
+ } else {
+ GrpcAppleWorker* actual_worker = apple_pollset->workers.front();
+ kick_worker(actual_worker);
+ }
+ } else if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
+ for (auto& actual_worker : apple_pollset->workers) {
+ kick_worker(actual_worker);
+ }
+ } else {
+    GrpcAppleWorker* actual_worker =
+        reinterpret_cast<GrpcAppleWorker*>(specific_worker);
+ kick_worker(actual_worker);
+ }
+
+ return GRPC_ERROR_NONE;
+}
+
+static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
+ GRPC_POLLING_TRACE("pollset init: %p", pollset);
+ GrpcApplePollset* apple_pollset = new (pollset) GrpcApplePollset();
+ *mu = apple_pollset->mu.get();
+}
+
+/// The caller must acquire the lock GrpcApplePollset.mu before calling this
+/// function.
+static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
+ GRPC_POLLING_TRACE("pollset shutdown: %p", pollset);
+
+ GrpcApplePollset* apple_pollset =
+      reinterpret_cast<GrpcApplePollset*>(pollset);
+ apple_pollset->is_shutdown = true;
+ pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
+
+ // If there is any worker blocked, shutdown will be done asynchronously.
+ if (apple_pollset->workers.empty()) {
+ grpc_core::ExecCtx::Run(DEBUG_LOCATION, closure, GRPC_ERROR_NONE);
+ } else {
+ apple_pollset->shutdown_closure = closure;
+ }
+}
+
+static void pollset_destroy(grpc_pollset* pollset) {
+ GRPC_POLLING_TRACE("pollset destroy: %p", pollset);
+ GrpcApplePollset* apple_pollset =
+      reinterpret_cast<GrpcApplePollset*>(pollset);
+ apple_pollset->~GrpcApplePollset();
+}
+
+size_t pollset_size(void) { return sizeof(GrpcApplePollset); }
+
+grpc_pollset_vtable grpc_apple_pollset_vtable = {
+ pollset_global_init, pollset_global_shutdown,
+ pollset_init, pollset_shutdown,
+ pollset_destroy, pollset_work,
+ pollset_kick, pollset_size};
+
+// pollset_set implementation
+
+grpc_pollset_set* pollset_set_create(void) { return nullptr; }
+void pollset_set_destroy(grpc_pollset_set* pollset_set) {}
+void pollset_set_add_pollset(grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset) {}
+void pollset_set_del_pollset(grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset) {}
+void pollset_set_add_pollset_set(grpc_pollset_set* bag,
+ grpc_pollset_set* item) {}
+void pollset_set_del_pollset_set(grpc_pollset_set* bag,
+ grpc_pollset_set* item) {}
+
+grpc_pollset_set_vtable grpc_apple_pollset_set_vtable = {
+ pollset_set_create, pollset_set_destroy,
+ pollset_set_add_pollset, pollset_set_del_pollset,
+ pollset_set_add_pollset_set, pollset_set_del_pollset_set};
+
+#endif
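A minimal sketch of how a caller is expected to drive this pollset, assuming the standard iomgr pollset API and that the apple vtable above has been installed; the function name and the gpr_zalloc allocation are illustrative, not part of the patch.

#include <grpc/support/alloc.h>
#include <grpc/support/sync.h>

#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/pollset.h"

// Hypothetical driver illustrating the locking contract documented above:
// pollset_work() and pollset_kick() both run under GrpcApplePollset.mu, and a
// kick from another thread signals the worker's condition variable.
static void ExamplePollOnce(grpc_millis deadline) {
  grpc_core::ExecCtx exec_ctx;
  gpr_mu* mu = nullptr;
  grpc_pollset* ps =
      static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
  grpc_pollset_init(ps, &mu);  // placement-constructs a GrpcApplePollset
  gpr_mu_lock(mu);
  grpc_pollset_worker* worker = nullptr;
  GRPC_LOG_IF_ERROR("pollset_work", grpc_pollset_work(ps, &worker, deadline));
  gpr_mu_unlock(mu);
  // Shutdown and destruction (grpc_pollset_shutdown/destroy) omitted here.
}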
diff --git a/src/core/lib/iomgr/ev_apple.h b/src/core/lib/iomgr/ev_apple.h
new file mode 100644
index 00000000000..a05f91ce15f
--- /dev/null
+++ b/src/core/lib/iomgr/ev_apple.h
@@ -0,0 +1,43 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_EV_APPLE_H
+#define GRPC_CORE_LIB_IOMGR_EV_APPLE_H
+
+#include <grpc/support/port_platform.h>
+
+#ifdef GRPC_APPLE_EV
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#include "src/core/lib/iomgr/pollset.h"
+#include "src/core/lib/iomgr/pollset_set.h"
+
+void grpc_apple_register_read_stream(CFReadStreamRef read_stream,
+ dispatch_queue_t dispatch_queue);
+
+void grpc_apple_register_write_stream(CFWriteStreamRef write_stream,
+ dispatch_queue_t dispatch_queue);
+
+extern grpc_pollset_vtable grpc_apple_pollset_vtable;
+
+extern grpc_pollset_set_vtable grpc_apple_pollset_set_vtable;
+
+#endif
+
+#endif
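As a usage sketch, this is how the cfstream_handle.cc hunk earlier in the patch consumes these entry points; the wrapper function here is hypothetical and only restates that call pattern.

#include <CoreFoundation/CoreFoundation.h>
#include <dispatch/dispatch.h>

#include "src/core/lib/iomgr/ev_apple.h"

// With GRPC_CFSTREAM_RUN_LOOP unset these calls forward to
// CFReadStreamSetDispatchQueue/CFWriteStreamSetDispatchQueue; with it set to 1
// they schedule the streams on the global CFRunLoop thread instead.
static void RegisterStreams(CFReadStreamRef read_stream,
                            CFWriteStreamRef write_stream,
                            dispatch_queue_t queue) {
  grpc_apple_register_read_stream(read_stream, queue);
  grpc_apple_register_write_stream(write_stream, queue);
}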
diff --git a/src/core/lib/iomgr/iomgr_posix_cfstream.cc b/src/core/lib/iomgr/iomgr_posix_cfstream.cc
index 72b2ae5eb00..24f9f4101b2 100644
--- a/src/core/lib/iomgr/iomgr_posix_cfstream.cc
+++ b/src/core/lib/iomgr/iomgr_posix_cfstream.cc
@@ -16,6 +16,20 @@
*
*/
+/// CFStream is enabled in the build by default on iOS and disabled by default
+/// on other platforms (see port_platform.h). To build CFStream support on
+/// another platform, define the macro "GRPC_CFSTREAM=1" when building gRPC.
+///
+/// When CFStream support is built (by default on iOS, or via the macro on
+/// other platforms), it can be disabled at runtime with the environment
+/// variable "grpc_cfstream=0", in which case gRPC falls back to POSIX sockets.
+/// In addition, an alternative CFRunLoop-based pollset, "ev_apple", can be
+/// selected by setting the environment variable "GRPC_CFSTREAM_RUN_LOOP=1".
+/// That pollset works around an Apple bug triggered when CFStream streams
+/// dispatch events to dispatch queues. Its caveat is that a gRPC server may
+/// not work in the same process.
+
#include
#include "src/core/lib/iomgr/port.h"
@@ -23,6 +37,7 @@
#ifdef GRPC_CFSTREAM_IOMGR
#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/iomgr/ev_apple.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/iomgr_posix.h"
@@ -33,6 +48,7 @@
#include "src/core/lib/iomgr/timer.h"
static const char* grpc_cfstream_env_var = "grpc_cfstream";
+static const char* grpc_cfstream_run_loop_env_var = "GRPC_CFSTREAM_RUN_LOOP";
extern grpc_tcp_server_vtable grpc_posix_tcp_server_vtable;
extern grpc_tcp_client_vtable grpc_posix_tcp_client_vtable;
@@ -42,6 +58,33 @@ extern grpc_pollset_vtable grpc_posix_pollset_vtable;
extern grpc_pollset_set_vtable grpc_posix_pollset_set_vtable;
extern grpc_address_resolver_vtable grpc_posix_resolver_vtable;
+static void apple_iomgr_platform_init(void) { grpc_pollset_global_init(); }
+
+static void apple_iomgr_platform_flush(void) {}
+
+static void apple_iomgr_platform_shutdown(void) {
+ grpc_pollset_global_shutdown();
+}
+
+static void apple_iomgr_platform_shutdown_background_closure(void) {}
+
+static bool apple_iomgr_platform_is_any_background_poller_thread(void) {
+ return false;
+}
+
+static bool apple_iomgr_platform_add_closure_to_background_poller(
+ grpc_closure* closure, grpc_error* error) {
+ return false;
+}
+
+static grpc_iomgr_platform_vtable apple_vtable = {
+ apple_iomgr_platform_init,
+ apple_iomgr_platform_flush,
+ apple_iomgr_platform_shutdown,
+ apple_iomgr_platform_shutdown_background_closure,
+ apple_iomgr_platform_is_any_background_poller_thread,
+ apple_iomgr_platform_add_closure_to_background_poller};
+
static void iomgr_platform_init(void) {
grpc_wakeup_fd_global_init();
grpc_event_engine_init();
@@ -76,32 +119,53 @@ static grpc_iomgr_platform_vtable vtable = {
iomgr_platform_add_closure_to_background_poller};
void grpc_set_default_iomgr_platform() {
- char* enable_cfstream = getenv(grpc_cfstream_env_var);
- grpc_tcp_client_vtable* client_vtable = &grpc_posix_tcp_client_vtable;
- // CFStream is enabled by default on iOS, and disabled by default on other
- // platforms. Defaults can be overriden by setting the grpc_cfstream
- // environment variable.
-#if TARGET_OS_IPHONE
- if (enable_cfstream == nullptr || enable_cfstream[0] == '1') {
- client_vtable = &grpc_cfstream_client_vtable;
+ char* enable_cfstream_str = getenv(grpc_cfstream_env_var);
+ bool enable_cfstream =
+ enable_cfstream_str == nullptr || enable_cfstream_str[0] != '0';
+ char* enable_cfstream_run_loop_str = getenv(grpc_cfstream_run_loop_env_var);
+  // The CFStream run loop is disabled by default; the user has to enable it
+  // explicitly with the environment variable.
+ bool enable_cfstream_run_loop = enable_cfstream_run_loop_str != nullptr &&
+ enable_cfstream_run_loop_str[0] == '1';
+ if (!enable_cfstream) {
+ // Use POSIX sockets for both client and server
+ grpc_set_tcp_client_impl(&grpc_posix_tcp_client_vtable);
+ grpc_set_tcp_server_impl(&grpc_posix_tcp_server_vtable);
+ grpc_set_pollset_vtable(&grpc_posix_pollset_vtable);
+ grpc_set_pollset_set_vtable(&grpc_posix_pollset_set_vtable);
+ grpc_set_iomgr_platform_vtable(&vtable);
+ } else if (enable_cfstream && !enable_cfstream_run_loop) {
+ // Use CFStream with dispatch queue for client; use POSIX sockets for server
+ grpc_set_tcp_client_impl(&grpc_cfstream_client_vtable);
+ grpc_set_tcp_server_impl(&grpc_posix_tcp_server_vtable);
+ grpc_set_pollset_vtable(&grpc_posix_pollset_vtable);
+ grpc_set_pollset_set_vtable(&grpc_posix_pollset_set_vtable);
+ grpc_set_iomgr_platform_vtable(&vtable);
+ } else {
+ // Use CFStream with CFRunLoop for client; server not supported
+ grpc_set_tcp_client_impl(&grpc_cfstream_client_vtable);
+ grpc_set_pollset_vtable(&grpc_apple_pollset_vtable);
+ grpc_set_pollset_set_vtable(&grpc_apple_pollset_set_vtable);
+ grpc_set_iomgr_platform_vtable(&apple_vtable);
}
-#else
- if (enable_cfstream != nullptr && enable_cfstream[0] == '1') {
- client_vtable = &grpc_cfstream_client_vtable;
- }
-#endif
-
- grpc_set_tcp_client_impl(client_vtable);
- grpc_set_tcp_server_impl(&grpc_posix_tcp_server_vtable);
grpc_set_timer_impl(&grpc_generic_timer_vtable);
- grpc_set_pollset_vtable(&grpc_posix_pollset_vtable);
- grpc_set_pollset_set_vtable(&grpc_posix_pollset_set_vtable);
grpc_set_resolver_impl(&grpc_posix_resolver_vtable);
- grpc_set_iomgr_platform_vtable(&vtable);
}
bool grpc_iomgr_run_in_background() {
- return grpc_event_engine_run_in_background();
+ char* enable_cfstream_str = getenv(grpc_cfstream_env_var);
+ bool enable_cfstream =
+ enable_cfstream_str == nullptr || enable_cfstream_str[0] != '0';
+ char* enable_cfstream_run_loop_str = getenv(grpc_cfstream_run_loop_env_var);
+  // The CFStream run loop is disabled by default; the user has to enable it
+  // explicitly with the environment variable.
+ bool enable_cfstream_run_loop = enable_cfstream_run_loop_str != nullptr &&
+ enable_cfstream_run_loop_str[0] == '1';
+ if (enable_cfstream && enable_cfstream_run_loop) {
+ return false;
+ } else {
+ return grpc_event_engine_run_in_background();
+ }
}
#endif /* GRPC_CFSTREAM_IOMGR */
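An application-side sketch of opting into the run-loop pollset described above: both environment variables are read by grpc_set_default_iomgr_platform() and grpc_iomgr_run_in_background(), so they must be set before grpc_init(). The helper name is illustrative, not part of the patch.

#include <stdlib.h>

#include <grpc/grpc.h>

// Hypothetical helper; the variable names mirror grpc_cfstream_env_var and
// grpc_cfstream_run_loop_env_var above.
static void InitWithAppleRunLoopPollset() {
  setenv("grpc_cfstream", "1", 1 /* overwrite */);
  setenv("GRPC_CFSTREAM_RUN_LOOP", "1", 1 /* overwrite */);
  grpc_init();  // picks the apple pollset/pollset_set and iomgr vtables
}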
diff --git a/src/core/lib/iomgr/pollset_set_custom.cc b/src/core/lib/iomgr/pollset_set_custom.cc
index 099388f7c5b..2c1df608197 100644
--- a/src/core/lib/iomgr/pollset_set_custom.cc
+++ b/src/core/lib/iomgr/pollset_set_custom.cc
@@ -22,23 +22,23 @@
#include "src/core/lib/iomgr/pollset_set.h"
-grpc_pollset_set* pollset_set_create(void) {
+static grpc_pollset_set* pollset_set_create(void) {
return (grpc_pollset_set*)((intptr_t)0xdeafbeef);
}
-void pollset_set_destroy(grpc_pollset_set* /*pollset_set*/) {}
+static void pollset_set_destroy(grpc_pollset_set* /*pollset_set*/) {}
-void pollset_set_add_pollset(grpc_pollset_set* /*pollset_set*/,
- grpc_pollset* /*pollset*/) {}
+static void pollset_set_add_pollset(grpc_pollset_set* /*pollset_set*/,
+ grpc_pollset* /*pollset*/) {}
-void pollset_set_del_pollset(grpc_pollset_set* /*pollset_set*/,
- grpc_pollset* /*pollset*/) {}
+static void pollset_set_del_pollset(grpc_pollset_set* /*pollset_set*/,
+ grpc_pollset* /*pollset*/) {}
-void pollset_set_add_pollset_set(grpc_pollset_set* /*bag*/,
- grpc_pollset_set* /*item*/) {}
+static void pollset_set_add_pollset_set(grpc_pollset_set* /*bag*/,
+ grpc_pollset_set* /*item*/) {}
-void pollset_set_del_pollset_set(grpc_pollset_set* /*bag*/,
- grpc_pollset_set* /*item*/) {}
+static void pollset_set_del_pollset_set(grpc_pollset_set* /*bag*/,
+ grpc_pollset_set* /*item*/) {}
static grpc_pollset_set_vtable vtable = {
pollset_set_create, pollset_set_destroy,
diff --git a/src/core/lib/iomgr/port.h b/src/core/lib/iomgr/port.h
index 1d2e9506451..211423e643d 100644
--- a/src/core/lib/iomgr/port.h
+++ b/src/core/lib/iomgr/port.h
@@ -129,6 +129,7 @@
#define GRPC_CFSTREAM_IOMGR 1
#define GRPC_CFSTREAM_CLIENT 1
#define GRPC_CFSTREAM_ENDPOINT 1
+#define GRPC_APPLE_EV 1
#define GRPC_POSIX_SOCKET_ARES_EV_DRIVER 1
#define GRPC_POSIX_SOCKET_EV 1
#define GRPC_POSIX_SOCKET_EV_EPOLL1 1
diff --git a/src/core/lib/surface/server.cc b/src/core/lib/surface/server.cc
index e6c81d9d460..08b15ac001a 100644
--- a/src/core/lib/surface/server.cc
+++ b/src/core/lib/surface/server.cc
@@ -577,7 +577,6 @@ static void publish_new_rpc(void* arg, grpc_error* error) {
rm->pending_tail->pending_next = calld;
rm->pending_tail = calld;
}
- calld->pending_next = nullptr;
gpr_mu_unlock(&server->mu_call);
}
diff --git a/src/objective-c/tests/Tests.xcodeproj/xcshareddata/xcschemes/InteropTests.xcscheme b/src/objective-c/tests/Tests.xcodeproj/xcshareddata/xcschemes/InteropTests.xcscheme
index cbde360a338..d4557937965 100644
--- a/src/objective-c/tests/Tests.xcodeproj/xcshareddata/xcschemes/InteropTests.xcscheme
+++ b/src/objective-c/tests/Tests.xcodeproj/xcshareddata/xcschemes/InteropTests.xcscheme
@@ -66,8 +66,13 @@
ReferencedContainer = "container:Tests.xcodeproj">
-
-
+
+
+
+
-
-
+
+
+
+
-
-
+
+
+
+
-
-
+
+
+
+
-
-
+
+
+
+
-#include
-
#include
#include
+#include
+#include
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gpr/tmpfile.h"
#include "src/core/lib/gprpp/host_port.h"
+#include "src/core/lib/iomgr/load_file.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/security_connector/ssl_utils_config.h"
-#include "test/core/end2end/data/ssl_test_data.h"
+#include "test/core/end2end/end2end_tests.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
+#define CA_CERT_PATH "src/core/tsi/test_creds/ca.pem"
+#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem"
+#define SERVER_KEY_PATH "src/core/tsi/test_creds/server1.key"
+
struct fullstack_secure_fixture_data {
  grpc_core::UniquePtr<char> localaddr;
bool server_credential_reloaded = false;
@@ -48,10 +50,25 @@ ssl_server_certificate_config_callback(
fullstack_secure_fixture_data* ffd =
      static_cast<fullstack_secure_fixture_data*>(user_data);
if (!ffd->server_credential_reloaded) {
- grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {test_server1_key,
- test_server1_cert};
- *config = grpc_ssl_server_certificate_config_create(test_root_cert,
+ grpc_slice ca_slice, cert_slice, key_slice;
+ GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
+ grpc_load_file(CA_CERT_PATH, 1, &ca_slice)));
+ GPR_ASSERT(GRPC_LOG_IF_ERROR(
+ "load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice)));
+ GPR_ASSERT(GRPC_LOG_IF_ERROR(
+ "load_file", grpc_load_file(SERVER_KEY_PATH, 1, &key_slice)));
+    const char* ca_cert =
+        reinterpret_cast<const char*> GRPC_SLICE_START_PTR(ca_slice);
+    const char* server_cert =
+        reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice);
+    const char* server_key =
+        reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice);
+ grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {server_key, server_cert};
+ *config = grpc_ssl_server_certificate_config_create(ca_cert,
&pem_key_cert_pair, 1);
+ grpc_slice_unref(cert_slice);
+ grpc_slice_unref(key_slice);
+ grpc_slice_unref(ca_slice);
ffd->server_credential_reloaded = true;
return GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_NEW;
} else {
@@ -175,20 +192,10 @@ static grpc_end2end_test_config configs[] = {
int main(int argc, char** argv) {
size_t i;
- FILE* roots_file;
- size_t roots_size = strlen(test_root_cert);
- char* roots_filename;
grpc::testing::TestEnvironment env(argc, argv);
grpc_end2end_tests_pre_init();
-
- /* Set the SSL roots env var. */
- roots_file = gpr_tmpfile("chttp2_simple_ssl_fullstack_test", &roots_filename);
- GPR_ASSERT(roots_filename != nullptr);
- GPR_ASSERT(roots_file != nullptr);
- GPR_ASSERT(fwrite(test_root_cert, 1, roots_size, roots_file) == roots_size);
- fclose(roots_file);
- GPR_GLOBAL_CONFIG_SET(grpc_default_ssl_roots_file_path, roots_filename);
+ GPR_GLOBAL_CONFIG_SET(grpc_default_ssl_roots_file_path, CA_CERT_PATH);
grpc_init();
@@ -198,9 +205,5 @@ int main(int argc, char** argv) {
grpc_shutdown();
- /* Cleanup. */
- remove(roots_filename);
- gpr_free(roots_filename);
-
return 0;
}
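The same load-certificates-from-disk pattern recurs in the fixtures below; a small hypothetical helper (not part of the patch) makes the shape explicit. The caller must keep the slice alive while the returned pointer is in use and unref it afterwards, exactly as the fixtures do inline.

#include <grpc/slice.h>
#include <grpc/support/log.h>

#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/load_file.h"

// Hypothetical helper: read a PEM file into a NUL-terminated slice and expose
// its contents as a const char*.
static const char* LoadPemOrDie(const char* path, grpc_slice* slice) {
  GPR_ASSERT(GRPC_LOG_IF_ERROR(
      "load_file", grpc_load_file(path, 1 /* add_null_terminator */, slice)));
  return reinterpret_cast<const char*>(GRPC_SLICE_START_PTR(*slice));
}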
diff --git a/test/core/end2end/fixtures/h2_ssl_proxy.cc b/test/core/end2end/fixtures/h2_ssl_proxy.cc
index aea19e54403..ecccda35838 100644
--- a/test/core/end2end/fixtures/h2_ssl_proxy.cc
+++ b/test/core/end2end/fixtures/h2_ssl_proxy.cc
@@ -16,24 +16,26 @@
*
*/
-#include "test/core/end2end/end2end_tests.h"
-
-#include
-#include
-
#include
#include
+#include
+#include
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gpr/tmpfile.h"
+#include "src/core/lib/iomgr/load_file.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/security_connector/ssl_utils_config.h"
-#include "test/core/end2end/data/ssl_test_data.h"
+#include "test/core/end2end/end2end_tests.h"
#include "test/core/end2end/fixtures/proxy.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
+#define CA_CERT_PATH "src/core/tsi/test_creds/ca.pem"
+#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem"
+#define SERVER_KEY_PATH "src/core/tsi/test_creds/server1.key"
+
typedef struct fullstack_secure_fixture_data {
grpc_end2end_proxy* proxy;
} fullstack_secure_fixture_data;
@@ -41,10 +43,20 @@ typedef struct fullstack_secure_fixture_data {
static grpc_server* create_proxy_server(const char* port,
grpc_channel_args* server_args) {
grpc_server* s = grpc_server_create(server_args, nullptr);
- grpc_ssl_pem_key_cert_pair pem_cert_key_pair = {test_server1_key,
- test_server1_cert};
+ grpc_slice cert_slice, key_slice;
+ GPR_ASSERT(GRPC_LOG_IF_ERROR(
+ "load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice)));
+ GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
+ grpc_load_file(SERVER_KEY_PATH, 1, &key_slice)));
+  const char* server_cert =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice);
+  const char* server_key =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice);
+ grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {server_key, server_cert};
grpc_server_credentials* ssl_creds = grpc_ssl_server_credentials_create(
- nullptr, &pem_cert_key_pair, 1, 0, nullptr);
+ nullptr, &pem_key_cert_pair, 1, 0, nullptr);
+ grpc_slice_unref(cert_slice);
+ grpc_slice_unref(key_slice);
GPR_ASSERT(grpc_server_add_secure_http2_port(s, port, ssl_creds));
grpc_server_credentials_release(ssl_creds);
return s;
@@ -166,10 +178,20 @@ static int fail_server_auth_check(grpc_channel_args* server_args) {
static void chttp2_init_server_simple_ssl_secure_fullstack(
grpc_end2end_test_fixture* f, grpc_channel_args* server_args) {
- grpc_ssl_pem_key_cert_pair pem_cert_key_pair = {test_server1_key,
- test_server1_cert};
+ grpc_slice cert_slice, key_slice;
+ GPR_ASSERT(GRPC_LOG_IF_ERROR(
+ "load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice)));
+ GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
+ grpc_load_file(SERVER_KEY_PATH, 1, &key_slice)));
+  const char* server_cert =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice);
+  const char* server_key =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice);
+ grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {server_key, server_cert};
grpc_server_credentials* ssl_creds = grpc_ssl_server_credentials_create(
- nullptr, &pem_cert_key_pair, 1, 0, nullptr);
+ nullptr, &pem_key_cert_pair, 1, 0, nullptr);
+ grpc_slice_unref(cert_slice);
+ grpc_slice_unref(key_slice);
if (fail_server_auth_check(server_args)) {
grpc_auth_metadata_processor processor = {process_auth_failure, nullptr,
nullptr};
@@ -195,20 +217,10 @@ static grpc_end2end_test_config configs[] = {
int main(int argc, char** argv) {
size_t i;
- FILE* roots_file;
- size_t roots_size = strlen(test_root_cert);
- char* roots_filename;
grpc::testing::TestEnvironment env(argc, argv);
grpc_end2end_tests_pre_init();
-
- /* Set the SSL roots env var. */
- roots_file = gpr_tmpfile("chttp2_simple_ssl_fullstack_test", &roots_filename);
- GPR_ASSERT(roots_filename != nullptr);
- GPR_ASSERT(roots_file != nullptr);
- GPR_ASSERT(fwrite(test_root_cert, 1, roots_size, roots_file) == roots_size);
- fclose(roots_file);
- GPR_GLOBAL_CONFIG_SET(grpc_default_ssl_roots_file_path, roots_filename);
+ GPR_GLOBAL_CONFIG_SET(grpc_default_ssl_roots_file_path, CA_CERT_PATH);
grpc_init();
@@ -218,9 +230,5 @@ int main(int argc, char** argv) {
grpc_shutdown();
- /* Cleanup. */
- remove(roots_filename);
- gpr_free(roots_filename);
-
return 0;
}
diff --git a/test/core/end2end/fixtures/h2_tls.cc b/test/core/end2end/fixtures/h2_tls.cc
index dfba0f12530..95cff3a2b00 100644
--- a/test/core/end2end/fixtures/h2_tls.cc
+++ b/test/core/end2end/fixtures/h2_tls.cc
@@ -30,14 +30,18 @@
#include "src/core/lib/gprpp/host_port.h"
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/thd.h"
+#include "src/core/lib/iomgr/load_file.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h"
#include "src/core/lib/security/security_connector/ssl_utils_config.h"
-#include "test/core/end2end/data/ssl_test_data.h"
#include "test/core/end2end/end2end_tests.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
+#define CA_CERT_PATH "src/core/tsi/test_creds/ca.pem"
+#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem"
+#define SERVER_KEY_PATH "src/core/tsi/test_creds/server1.key"
+
typedef grpc_core::InlinedVector ThreadList;
struct fullstack_secure_fixture_data {
@@ -140,17 +144,30 @@ static int client_cred_reload_sync(void* /*config_user_data*/,
arg->status = GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_UNCHANGED;
return 0;
}
- const grpc_ssl_pem_key_cert_pair pem_key_pair = {
- test_server1_key,
- test_server1_cert,
- };
+ grpc_slice ca_slice, cert_slice, key_slice;
+ GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
+ grpc_load_file(CA_CERT_PATH, 1, &ca_slice)));
+ GPR_ASSERT(GRPC_LOG_IF_ERROR(
+ "load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice)));
+ GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
+ grpc_load_file(SERVER_KEY_PATH, 1, &key_slice)));
+  const char* ca_cert =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(ca_slice);
+  const char* server_cert =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice);
+  const char* server_key =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice);
+ grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {server_key, server_cert};
if (arg->key_materials_config->pem_key_cert_pair_list().empty()) {
- const auto* pem_key_pair_ptr = &pem_key_pair;
+ const auto* pem_key_cert_pair_ptr = &pem_key_cert_pair;
grpc_tls_key_materials_config_set_key_materials(
- arg->key_materials_config, test_root_cert, &pem_key_pair_ptr, 1);
+ arg->key_materials_config, ca_cert, &pem_key_cert_pair_ptr, 1);
}
// new credential has been reloaded.
arg->status = GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_NEW;
+ grpc_slice_unref(cert_slice);
+ grpc_slice_unref(key_slice);
+ grpc_slice_unref(ca_slice);
return 0;
}
@@ -163,21 +180,34 @@ static int server_cred_reload_sync(void* /*config_user_data*/,
arg->status = GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_UNCHANGED;
return 0;
}
- const grpc_ssl_pem_key_cert_pair pem_key_pair = {
- test_server1_key,
- test_server1_cert,
- };
+ grpc_slice ca_slice, cert_slice, key_slice;
+ GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
+ grpc_load_file(CA_CERT_PATH, 1, &ca_slice)));
+ GPR_ASSERT(GRPC_LOG_IF_ERROR(
+ "load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice)));
+ GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
+ grpc_load_file(SERVER_KEY_PATH, 1, &key_slice)));
+  const char* ca_cert =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(ca_slice);
+  const char* server_cert =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice);
+  const char* server_key =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice);
+ grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {server_key, server_cert};
GPR_ASSERT(arg != nullptr);
GPR_ASSERT(arg->key_materials_config != nullptr);
GPR_ASSERT(arg->key_materials_config->pem_key_cert_pair_list().data() !=
nullptr);
if (arg->key_materials_config->pem_key_cert_pair_list().empty()) {
- const auto* pem_key_pair_ptr = &pem_key_pair;
+ const auto* pem_key_cert_pair_ptr = &pem_key_cert_pair;
grpc_tls_key_materials_config_set_key_materials(
- arg->key_materials_config, test_root_cert, &pem_key_pair_ptr, 1);
+ arg->key_materials_config, ca_cert, &pem_key_cert_pair_ptr, 1);
}
// new credential has been reloaded.
arg->status = GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_NEW;
+ grpc_slice_unref(cert_slice);
+ grpc_slice_unref(key_slice);
+ grpc_slice_unref(ca_slice);
return 0;
}
@@ -268,25 +298,13 @@ static grpc_end2end_test_config configs[] = {
};
int main(int argc, char** argv) {
- FILE* roots_file;
- size_t roots_size = strlen(test_root_cert);
- char* roots_filename;
grpc::testing::TestEnvironment env(argc, argv);
grpc_end2end_tests_pre_init();
- /* Set the SSL roots env var. */
- roots_file = gpr_tmpfile("chttp2_simple_ssl_fullstack_test", &roots_filename);
- GPR_ASSERT(roots_filename != nullptr);
- GPR_ASSERT(roots_file != nullptr);
- GPR_ASSERT(fwrite(test_root_cert, 1, roots_size, roots_file) == roots_size);
- fclose(roots_file);
- GPR_GLOBAL_CONFIG_SET(grpc_default_ssl_roots_file_path, roots_filename);
+ GPR_GLOBAL_CONFIG_SET(grpc_default_ssl_roots_file_path, CA_CERT_PATH);
grpc_init();
for (size_t ind = 0; ind < sizeof(configs) / sizeof(*configs); ind++) {
grpc_end2end_tests(argc, argv, configs[ind]);
}
grpc_shutdown();
- /* Cleanup. */
- remove(roots_filename);
- gpr_free(roots_filename);
return 0;
}
diff --git a/test/core/end2end/generate_tests.bzl b/test/core/end2end/generate_tests.bzl
index 3c1817ff82b..9e93745c7cf 100755
--- a/test/core/end2end/generate_tests.bzl
+++ b/test/core/end2end/generate_tests.bzl
@@ -434,8 +434,6 @@ def grpc_end2end_tests():
language = "C++",
data = [
"//src/core/tsi/test_creds:ca.pem",
- "//src/core/tsi/test_creds:client.key",
- "//src/core/tsi/test_creds:client.pem",
"//src/core/tsi/test_creds:server1.key",
"//src/core/tsi/test_creds:server1.pem",
],
@@ -511,8 +509,6 @@ def grpc_end2end_nosec_tests():
language = "C++",
data = [
"//src/core/tsi/test_creds:ca.pem",
- "//src/core/tsi/test_creds:client.key",
- "//src/core/tsi/test_creds:client.pem",
"//src/core/tsi/test_creds:server1.key",
"//src/core/tsi/test_creds:server1.pem",
],
diff --git a/test/core/security/BUILD b/test/core/security/BUILD
index d4ffb2b3a32..ad698aa68ad 100644
--- a/test/core/security/BUILD
+++ b/test/core/security/BUILD
@@ -37,6 +37,11 @@ grpc_fuzzer(
name = "ssl_server_fuzzer",
srcs = ["ssl_server_fuzzer.cc"],
corpus = "corpus/ssl_server_corpus",
+ data = [
+ "//src/core/tsi/test_creds:ca.pem",
+ "//src/core/tsi/test_creds:server1.key",
+ "//src/core/tsi/test_creds:server1.pem",
+ ],
language = "C++",
tags = ["no_windows"],
deps = [
@@ -248,6 +253,11 @@ grpc_cc_test(
grpc_cc_test(
name = "tls_security_connector_test",
srcs = ["tls_security_connector_test.cc"],
+ data = [
+ "//src/core/tsi/test_creds:ca.pem",
+ "//src/core/tsi/test_creds:server1.key",
+ "//src/core/tsi/test_creds:server1.pem",
+ ],
external_deps = [
"gtest",
],
@@ -266,6 +276,11 @@ grpc_cc_test(
grpc_cc_test(
name = "grpc_tls_credentials_options_test",
srcs = ["grpc_tls_credentials_options_test.cc"],
+ data = [
+ "//src/core/tsi/test_creds:ca.pem",
+ "//src/core/tsi/test_creds:server1.key",
+ "//src/core/tsi/test_creds:server1.pem",
+ ],
external_deps = ["gtest"],
language = "C++",
deps = [
diff --git a/test/core/security/grpc_tls_credentials_options_test.cc b/test/core/security/grpc_tls_credentials_options_test.cc
index 142aabf5858..ff8bf1dac04 100644
--- a/test/core/security/grpc_tls_credentials_options_test.cc
+++ b/test/core/security/grpc_tls_credentials_options_test.cc
@@ -17,7 +17,6 @@
*/
#include "src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h"
-#include "test/core/end2end/data/ssl_test_data.h"
#include
#include
@@ -25,28 +24,61 @@
#include
#include
+#include "src/core/lib/iomgr/load_file.h"
+
+#define CA_CERT_PATH "src/core/tsi/test_creds/ca.pem"
+#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem"
+#define SERVER_KEY_PATH "src/core/tsi/test_creds/server1.key"
+
namespace testing {
static void SetKeyMaterials(grpc_tls_key_materials_config* config) {
- const grpc_ssl_pem_key_cert_pair pem_key_pair = {
- test_server1_key,
- test_server1_cert,
- };
- const auto* pem_key_pair_ptr = &pem_key_pair;
- grpc_tls_key_materials_config_set_key_materials(config, test_root_cert,
- &pem_key_pair_ptr, 1);
+ grpc_slice ca_slice, cert_slice, key_slice;
+ GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
+ grpc_load_file(CA_CERT_PATH, 1, &ca_slice)));
+ GPR_ASSERT(GRPC_LOG_IF_ERROR(
+ "load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice)));
+ GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
+ grpc_load_file(SERVER_KEY_PATH, 1, &key_slice)));
+  const char* ca_cert =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(ca_slice);
+  const char* server_cert =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice);
+  const char* server_key =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice);
+ grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {server_key, server_cert};
+ const auto* pem_key_cert_pair_ptr = &pem_key_cert_pair;
+ grpc_tls_key_materials_config_set_key_materials(config, ca_cert,
+ &pem_key_cert_pair_ptr, 1);
+ grpc_slice_unref(cert_slice);
+ grpc_slice_unref(key_slice);
+ grpc_slice_unref(ca_slice);
}
TEST(GrpcTlsCredentialsOptionsTest, SetKeyMaterials) {
grpc_tls_key_materials_config* config =
grpc_tls_key_materials_config_create();
SetKeyMaterials(config);
- EXPECT_STREQ(config->pem_root_certs(), test_root_cert);
+ grpc_slice ca_slice, cert_slice, key_slice;
+ GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
+ grpc_load_file(CA_CERT_PATH, 1, &ca_slice)));
+ GPR_ASSERT(GRPC_LOG_IF_ERROR(
+ "load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice)));
+ GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
+ grpc_load_file(SERVER_KEY_PATH, 1, &key_slice)));
+  const char* ca_cert =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(ca_slice);
+  const char* server_cert =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice);
+  const char* server_key =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice);
+ EXPECT_STREQ(config->pem_root_certs(), ca_cert);
EXPECT_EQ(config->pem_key_cert_pair_list().size(), 1);
- EXPECT_STREQ(config->pem_key_cert_pair_list()[0].private_key(),
- test_server1_key);
- EXPECT_STREQ(config->pem_key_cert_pair_list()[0].cert_chain(),
- test_server1_cert);
+ EXPECT_STREQ(config->pem_key_cert_pair_list()[0].private_key(), server_key);
+ EXPECT_STREQ(config->pem_key_cert_pair_list()[0].cert_chain(), server_cert);
+ grpc_slice_unref(cert_slice);
+ grpc_slice_unref(key_slice);
+ grpc_slice_unref(ca_slice);
delete config;
}
diff --git a/test/core/security/ssl_server_fuzzer.cc b/test/core/security/ssl_server_fuzzer.cc
index 18f1dd7814b..538e43964c1 100644
--- a/test/core/security/ssl_server_fuzzer.cc
+++ b/test/core/security/ssl_server_fuzzer.cc
@@ -23,9 +23,12 @@
#include "src/core/lib/iomgr/load_file.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/security_connector/security_connector.h"
-#include "test/core/end2end/data/ssl_test_data.h"
#include "test/core/util/mock_endpoint.h"
+#define CA_CERT_PATH "src/core/tsi/test_creds/ca.pem"
+#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem"
+#define SERVER_KEY_PATH "src/core/tsi/test_creds/server1.key"
+
bool squelch = true;
// ssl has an array of global gpr_mu's that are never released.
// Turning this on will fail the leak check.
@@ -66,18 +69,25 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
mock_endpoint, grpc_slice_from_copied_buffer((const char*)data, size));
// Load key pair and establish server SSL credentials.
- grpc_ssl_pem_key_cert_pair pem_key_cert_pair;
grpc_slice ca_slice, cert_slice, key_slice;
- ca_slice = grpc_slice_from_static_string(test_root_cert);
- cert_slice = grpc_slice_from_static_string(test_server1_cert);
- key_slice = grpc_slice_from_static_string(test_server1_key);
- const char* ca_cert = (const char*)GRPC_SLICE_START_PTR(ca_slice);
- pem_key_cert_pair.private_key =
- (const char*)GRPC_SLICE_START_PTR(key_slice);
- pem_key_cert_pair.cert_chain =
- (const char*)GRPC_SLICE_START_PTR(cert_slice);
+ GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
+ grpc_load_file(CA_CERT_PATH, 1, &ca_slice)));
+ GPR_ASSERT(GRPC_LOG_IF_ERROR(
+ "load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice)));
+ GPR_ASSERT(GRPC_LOG_IF_ERROR(
+ "load_file", grpc_load_file(SERVER_KEY_PATH, 1, &key_slice)));
+  const char* ca_cert =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(ca_slice);
+  const char* server_cert =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice);
+  const char* server_key =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice);
+ grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {server_key, server_cert};
grpc_server_credentials* creds = grpc_ssl_server_credentials_create(
ca_cert, &pem_key_cert_pair, 1, 0, nullptr);
+ grpc_slice_unref(cert_slice);
+ grpc_slice_unref(key_slice);
+ grpc_slice_unref(ca_slice);
// Create security connector
  grpc_core::RefCountedPtr<grpc_server_security_connector> sc =
@@ -109,9 +119,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
sc.reset(DEBUG_LOCATION, "test");
grpc_server_credentials_release(creds);
- grpc_slice_unref(cert_slice);
- grpc_slice_unref(key_slice);
- grpc_slice_unref(ca_slice);
grpc_core::ExecCtx::Get()->Flush();
}
diff --git a/test/core/security/tls_security_connector_test.cc b/test/core/security/tls_security_connector_test.cc
index 3f81f025a80..ef2f94a80b8 100644
--- a/test/core/security/tls_security_connector_test.cc
+++ b/test/core/security/tls_security_connector_test.cc
@@ -26,22 +26,39 @@
#include
#include
+#include "src/core/lib/iomgr/load_file.h"
#include "src/core/tsi/transport_security.h"
-#include "test/core/end2end/data/ssl_test_data.h"
#include "test/core/util/test_config.h"
+#define CA_CERT_PATH "src/core/tsi/test_creds/ca.pem"
+#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem"
+#define SERVER_KEY_PATH "src/core/tsi/test_creds/server1.key"
+
namespace {
enum CredReloadResult { FAIL, SUCCESS, UNCHANGED, ASYNC };
void SetKeyMaterials(grpc_tls_key_materials_config* config) {
- const grpc_ssl_pem_key_cert_pair pem_key_pair = {
- test_server1_key,
- test_server1_cert,
- };
- const auto* pem_key_pair_ptr = &pem_key_pair;
- grpc_tls_key_materials_config_set_key_materials(config, test_root_cert,
- &pem_key_pair_ptr, 1);
+ grpc_slice ca_slice, cert_slice, key_slice;
+ GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
+ grpc_load_file(CA_CERT_PATH, 1, &ca_slice)));
+ GPR_ASSERT(GRPC_LOG_IF_ERROR(
+ "load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice)));
+ GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
+ grpc_load_file(SERVER_KEY_PATH, 1, &key_slice)));
+  const char* ca_cert =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(ca_slice);
+  const char* server_cert =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice);
+  const char* server_key =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice);
+ grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {server_key, server_cert};
+ const auto* pem_key_cert_pair_ptr = &pem_key_cert_pair;
+ grpc_tls_key_materials_config_set_key_materials(config, ca_cert,
+ &pem_key_cert_pair_ptr, 1);
+ grpc_slice_unref(cert_slice);
+ grpc_slice_unref(key_slice);
+ grpc_slice_unref(ca_slice);
}
int CredReloadSuccess(void* /*config_user_data*/,
diff --git a/test/core/surface/BUILD b/test/core/surface/BUILD
index 37a371dc4e8..b7ea065f020 100644
--- a/test/core/surface/BUILD
+++ b/test/core/surface/BUILD
@@ -136,6 +136,11 @@ grpc_cc_test(
grpc_cc_test(
name = "sequential_connectivity_test",
srcs = ["sequential_connectivity_test.cc"],
+ data = [
+ "//src/core/tsi/test_creds:ca.pem",
+ "//src/core/tsi/test_creds:server1.key",
+ "//src/core/tsi/test_creds:server1.pem",
+ ],
flaky = True, # TODO(b/151696318)
language = "C++",
deps = [
diff --git a/test/core/surface/sequential_connectivity_test.cc b/test/core/surface/sequential_connectivity_test.cc
index c4a8667331c..8f7a46bfe38 100644
--- a/test/core/surface/sequential_connectivity_test.cc
+++ b/test/core/surface/sequential_connectivity_test.cc
@@ -25,10 +25,14 @@
#include "src/core/lib/gprpp/host_port.h"
#include "src/core/lib/gprpp/thd.h"
#include "src/core/lib/iomgr/exec_ctx.h"
-#include "test/core/end2end/data/ssl_test_data.h"
+#include "src/core/lib/iomgr/load_file.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
+#define CA_CERT_PATH "src/core/tsi/test_creds/ca.pem"
+#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem"
+#define SERVER_KEY_PATH "src/core/tsi/test_creds/server1.key"
+
typedef struct test_fixture {
const char* name;
void (*add_server_port)(grpc_server* server, const char* addr);
@@ -139,17 +143,33 @@ static const test_fixture insecure_test = {
};
static void secure_test_add_port(grpc_server* server, const char* addr) {
- grpc_ssl_pem_key_cert_pair pem_cert_key_pair = {test_server1_key,
- test_server1_cert};
+ grpc_slice cert_slice, key_slice;
+ GPR_ASSERT(GRPC_LOG_IF_ERROR(
+ "load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice)));
+ GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
+ grpc_load_file(SERVER_KEY_PATH, 1, &key_slice)));
+  const char* server_cert =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice);
+  const char* server_key =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice);
+ grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {server_key, server_cert};
grpc_server_credentials* ssl_creds = grpc_ssl_server_credentials_create(
- nullptr, &pem_cert_key_pair, 1, 0, nullptr);
+ nullptr, &pem_key_cert_pair, 1, 0, nullptr);
+ grpc_slice_unref(cert_slice);
+ grpc_slice_unref(key_slice);
grpc_server_add_secure_http2_port(server, addr, ssl_creds);
grpc_server_credentials_release(ssl_creds);
}
static grpc_channel* secure_test_create_channel(const char* addr) {
+ grpc_slice ca_slice;
+ GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
+ grpc_load_file(CA_CERT_PATH, 1, &ca_slice)));
+  const char* test_root_cert =
+      reinterpret_cast<const char*> GRPC_SLICE_START_PTR(ca_slice);
grpc_channel_credentials* ssl_creds =
grpc_ssl_credentials_create(test_root_cert, nullptr, nullptr, nullptr);
+ grpc_slice_unref(ca_slice);
grpc_arg ssl_name_override = {
GRPC_ARG_STRING,
const_cast(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG),
diff --git a/test/core/util/grpc_fuzzer.bzl b/test/core/util/grpc_fuzzer.bzl
index e0a9b78df2f..99594b29e1d 100644
--- a/test/core/util/grpc_fuzzer.bzl
+++ b/test/core/util/grpc_fuzzer.bzl
@@ -14,12 +14,12 @@
load("//bazel:grpc_build_system.bzl", "grpc_cc_test")
-def grpc_fuzzer(name, corpus, srcs = [], deps = [], size = "large", **kwargs):
+def grpc_fuzzer(name, corpus, srcs = [], deps = [], data = [], size = "large", **kwargs):
grpc_cc_test(
name = name,
srcs = srcs,
deps = deps + ["//test/core/util:fuzzer_corpus_test"],
- data = native.glob([corpus + "/**"]),
+ data = data + native.glob([corpus + "/**"]),
external_deps = [
"gtest",
],
diff --git a/test/cpp/client/BUILD b/test/cpp/client/BUILD
index 18e9ce4973f..ce0f6d39f9f 100644
--- a/test/cpp/client/BUILD
+++ b/test/cpp/client/BUILD
@@ -35,18 +35,16 @@ grpc_cc_test(
grpc_cc_test(
name = "client_channel_stress_test",
srcs = ["client_channel_stress_test.cc"],
- flaky = True, # TODO(b/153136407)
# TODO(jtattermusch): test fails frequently on Win RBE, but passes locally
# reenable the tests once it works reliably on Win RBE.
- # TODO(roth): Test disabled on msan and tsan due to variable
- # duration problem triggered by https://github.com/grpc/grpc/pull/22481.
- # Re-enable once the problem is diagnosed and fixed. Tracked
- # internally in b/153136407.
+ # TODO(roth): Test marked as manual for now due to variable duration
+ # problem triggered by https://github.com/grpc/grpc/pull/22481.
+ # Once we figure out the problem, either re-enable or just decide to
+ # remove this test. Tracked internally in b/153136407.
tags = [
+ "manual",
"no_test_android", # fails on android due to "Too many open files".
"no_windows",
- "nomsan",
- "notsan",
],
deps = [
"//:gpr",
diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc
index 3a814aa75a7..ba8714deba5 100644
--- a/test/cpp/end2end/xds_end2end_test.cc
+++ b/test/cpp/end2end/xds_end2end_test.cc
@@ -1141,14 +1141,10 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
void ShutdownBackend(size_t index) { backends_[index]->Shutdown(); }
- void ResetStub(int fallback_timeout = 0, int failover_timeout = 0,
+ void ResetStub(int failover_timeout = 0,
const grpc::string& expected_targets = "",
int xds_resource_does_not_exist_timeout = 0) {
ChannelArguments args;
- // TODO(juanlishen): Add setter to ChannelArguments.
- if (fallback_timeout > 0) {
- args.SetInt(GRPC_ARG_XDS_FALLBACK_TIMEOUT_MS, fallback_timeout);
- }
if (failover_timeout > 0) {
args.SetInt(GRPC_ARG_PRIORITY_FAILOVER_TIMEOUT_MS, failover_timeout);
}
@@ -1282,8 +1278,8 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
: kDefaultServiceConfigWithoutLoadReporting_;
result.service_config =
grpc_core::ServiceConfig::Create(service_config_json, &error);
- ASSERT_NE(result.service_config.get(), nullptr);
ASSERT_EQ(error, GRPC_ERROR_NONE) << grpc_error_string(error);
+ ASSERT_NE(result.service_config.get(), nullptr);
grpc_arg arg = grpc_core::FakeResolverResponseGenerator::MakeChannelArg(
lb_channel_response_generator == nullptr
? lb_channel_response_generator_.get()
@@ -1516,7 +1512,8 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
"{\n"
" \"loadBalancingConfig\":[\n"
" { \"does_not_exist\":{} },\n"
- " { \"xds_experimental\":{\n"
+ " { \"eds_experimental\":{\n"
+ " \"clusterName\": \"application_target_name\",\n"
" \"lrsLoadReportingServerName\": \"\"\n"
" } }\n"
" ]\n"
@@ -1525,7 +1522,8 @@ class XdsEnd2endTest : public ::testing::TestWithParam {
"{\n"
" \"loadBalancingConfig\":[\n"
" { \"does_not_exist\":{} },\n"
- " { \"xds_experimental\":{\n"
+ " { \"eds_experimental\":{\n"
+ " \"clusterName\": \"application_target_name\"\n"
" } }\n"
" ]\n"
"}";
@@ -1560,7 +1558,7 @@ TEST_P(BasicTest, Vanilla) {
}
// Check LB policy name for the channel.
EXPECT_EQ(
- (GetParam().use_xds_resolver() ? "cds_experimental" : "xds_experimental"),
+ (GetParam().use_xds_resolver() ? "cds_experimental" : "eds_experimental"),
channel_->GetLoadBalancingPolicyName());
}
@@ -1938,7 +1936,7 @@ using SecureNamingTest = BasicTest;
// Tests that secure naming check passes if target name is expected.
TEST_P(SecureNamingTest, TargetNameIsExpected) {
// TODO(juanlishen): Use separate fake creds for the balancer channel.
- ResetStub(0, 0, kApplicationTargetName_ + ";lb");
+ ResetStub(0, kApplicationTargetName_ + ";lb");
SetNextResolution({});
SetNextResolutionForLbChannel({balancers_[0]->port()});
const size_t kNumRpcsPerAddress = 100;
@@ -1968,7 +1966,7 @@ TEST_P(SecureNamingTest, TargetNameIsUnexpected) {
// the name from the balancer doesn't match expectations.
ASSERT_DEATH_IF_SUPPORTED(
{
- ResetStub(0, 0, kApplicationTargetName_ + ";lb");
+ ResetStub(0, kApplicationTargetName_ + ";lb");
SetNextResolution({});
SetNextResolutionForLbChannel({balancers_[0]->port()});
channel_->WaitForConnected(grpc_timeout_seconds_to_deadline(1));
@@ -2127,7 +2125,7 @@ TEST_P(LdsTest, RouteActionHasNoCluster) {
// Tests that LDS client times out when no response received.
TEST_P(LdsTest, Timeout) {
- ResetStub(0, 0, "", 500);
+ ResetStub(0, "", 500);
balancers_[0]->ads_service()->SetResourceIgnore(kLdsTypeUrl);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
@@ -2262,7 +2260,7 @@ TEST_P(RdsTest, RouteActionHasNoCluster) {
// Tests that RDS client times out when no response received.
TEST_P(RdsTest, Timeout) {
- ResetStub(0, 0, "", 500);
+ ResetStub(0, "", 500);
balancers_[0]->ads_service()->SetResourceIgnore(kRdsTypeUrl);
balancers_[0]->ads_service()->SetLdsToUseDynamicRds();
SetNextResolution({});
@@ -2335,7 +2333,7 @@ TEST_P(CdsTest, WrongLrsServer) {
// Tests that CDS client times out when no response received.
TEST_P(CdsTest, Timeout) {
- ResetStub(0, 0, "", 500);
+ ResetStub(0, "", 500);
balancers_[0]->ads_service()->SetResourceIgnore(kCdsTypeUrl);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
@@ -2345,7 +2343,7 @@ TEST_P(CdsTest, Timeout) {
using EdsTest = BasicTest;
TEST_P(EdsTest, Timeout) {
- ResetStub(0, 0, "", 500);
+ ResetStub(0, "", 500);
balancers_[0]->ads_service()->SetResourceIgnore(kEdsTypeUrl);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
@@ -2605,7 +2603,7 @@ class FailoverTest : public BasicTest {
public:
void SetUp() override {
BasicTest::SetUp();
- ResetStub(0, 100, "");
+ ResetStub(100, "");
}
};
@@ -3042,241 +3040,6 @@ TEST_P(DropTest, DropAll) {
}
}
-using FallbackTest = BasicTest;
-
-// Tests that RPCs are handled by the fallback backends before the serverlist is
-// received, but will be handled by the serverlist after it's received.
-TEST_P(FallbackTest, Vanilla) {
- const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor();
- const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
- const size_t kNumBackendsInResolution = backends_.size() / 2;
- ResetStub(kFallbackTimeoutMs);
- SetNextResolution(GetBackendPorts(0, kNumBackendsInResolution));
- SetNextResolutionForLbChannelAllBalancers();
- // Send non-empty serverlist only after kServerlistDelayMs.
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(kNumBackendsInResolution)},
- });
- std::thread delayed_resource_setter(
- std::bind(&BasicTest::SetEdsResourceWithDelay, this, 0,
- AdsServiceImpl::BuildEdsResource(args), kServerlistDelayMs,
- kDefaultResourceName));
- // Wait until all the fallback backends are reachable.
- WaitForAllBackends(0 /* start_index */,
- kNumBackendsInResolution /* stop_index */);
- gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
- CheckRpcSendOk(kNumBackendsInResolution);
- gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
- // Fallback is used: each backend returned by the resolver should have
- // gotten one request.
- for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
- EXPECT_EQ(1U, backends_[i]->backend_service()->request_count());
- }
- for (size_t i = kNumBackendsInResolution; i < backends_.size(); ++i) {
- EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
- }
- // Wait until the serverlist reception has been processed and all backends
- // in the serverlist are reachable.
- WaitForAllBackends(kNumBackendsInResolution /* start_index */);
- gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
- CheckRpcSendOk(backends_.size() - kNumBackendsInResolution);
- gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
- // Serverlist is used: each backend returned by the balancer should
- // have gotten one request.
- for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
- EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
- }
- for (size_t i = kNumBackendsInResolution; i < backends_.size(); ++i) {
- EXPECT_EQ(1U, backends_[i]->backend_service()->request_count());
- }
- delayed_resource_setter.join();
-}
-
-// Tests that RPCs are handled by the updated fallback backends before
-// serverlist is received,
-TEST_P(FallbackTest, Update) {
- const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor();
- const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
- const size_t kNumBackendsInResolution = backends_.size() / 3;
- const size_t kNumBackendsInResolutionUpdate = backends_.size() / 3;
- ResetStub(kFallbackTimeoutMs);
- SetNextResolution(GetBackendPorts(0, kNumBackendsInResolution));
- SetNextResolutionForLbChannelAllBalancers();
- // Send non-empty serverlist only after kServerlistDelayMs.
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(kNumBackendsInResolution +
- kNumBackendsInResolutionUpdate)},
- });
- std::thread delayed_resource_setter(
- std::bind(&BasicTest::SetEdsResourceWithDelay, this, 0,
- AdsServiceImpl::BuildEdsResource(args), kServerlistDelayMs,
- kDefaultResourceName));
- // Wait until all the fallback backends are reachable.
- WaitForAllBackends(0 /* start_index */,
- kNumBackendsInResolution /* stop_index */);
- gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
- CheckRpcSendOk(kNumBackendsInResolution);
- gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
- // Fallback is used: each backend returned by the resolver should have
- // gotten one request.
- for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
- EXPECT_EQ(1U, backends_[i]->backend_service()->request_count());
- }
- for (size_t i = kNumBackendsInResolution; i < backends_.size(); ++i) {
- EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
- }
- SetNextResolution(GetBackendPorts(
- kNumBackendsInResolution,
- kNumBackendsInResolution + kNumBackendsInResolutionUpdate));
- // Wait until the resolution update has been processed and all the new
- // fallback backends are reachable.
- WaitForAllBackends(kNumBackendsInResolution /* start_index */,
- kNumBackendsInResolution +
- kNumBackendsInResolutionUpdate /* stop_index */);
- gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
- CheckRpcSendOk(kNumBackendsInResolutionUpdate);
- gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
- // The resolution update is used: each backend in the resolution update should
- // have gotten one request.
- for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
- EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
- }
- for (size_t i = kNumBackendsInResolution;
- i < kNumBackendsInResolution + kNumBackendsInResolutionUpdate; ++i) {
- EXPECT_EQ(1U, backends_[i]->backend_service()->request_count());
- }
- for (size_t i = kNumBackendsInResolution + kNumBackendsInResolutionUpdate;
- i < backends_.size(); ++i) {
- EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
- }
- // Wait until the serverlist reception has been processed and all backends
- // in the serverlist are reachable.
- WaitForAllBackends(kNumBackendsInResolution +
- kNumBackendsInResolutionUpdate /* start_index */);
- gpr_log(GPR_INFO, "========= BEFORE THIRD BATCH ==========");
- CheckRpcSendOk(backends_.size() - kNumBackendsInResolution -
- kNumBackendsInResolutionUpdate);
- gpr_log(GPR_INFO, "========= DONE WITH THIRD BATCH ==========");
- // Serverlist is used: each backend returned by the balancer should
- // have gotten one request.
- for (size_t i = 0;
- i < kNumBackendsInResolution + kNumBackendsInResolutionUpdate; ++i) {
- EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
- }
- for (size_t i = kNumBackendsInResolution + kNumBackendsInResolutionUpdate;
- i < backends_.size(); ++i) {
- EXPECT_EQ(1U, backends_[i]->backend_service()->request_count());
- }
- delayed_resource_setter.join();
-}
-
-// Tests that fallback will kick in immediately if the balancer channel fails.
-TEST_P(FallbackTest, FallbackEarlyWhenBalancerChannelFails) {
- const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor();
- ResetStub(kFallbackTimeoutMs);
- // Return an unreachable balancer and one fallback backend.
- SetNextResolution({backends_[0]->port()});
- SetNextResolutionForLbChannel({g_port_saver->GetPort()});
- // Send RPC with deadline less than the fallback timeout and make sure it
- // succeeds.
- CheckRpcSendOk(/* times */ 1, /* timeout_ms */ 1000,
- /* wait_for_ready */ false);
-}
-
-// Tests that fallback will kick in immediately if the balancer call fails.
-TEST_P(FallbackTest, FallbackEarlyWhenBalancerCallFails) {
- const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor();
- ResetStub(kFallbackTimeoutMs);
- // Return one balancer and one fallback backend.
- SetNextResolution({backends_[0]->port()});
- SetNextResolutionForLbChannelAllBalancers();
- // Balancer drops call without sending a serverlist.
- balancers_[0]->ads_service()->NotifyDoneWithAdsCall();
- // Send RPC with deadline less than the fallback timeout and make sure it
- // succeeds.
- CheckRpcSendOk(/* times */ 1, /* timeout_ms */ 1000,
- /* wait_for_ready */ false);
-}
-
-// Tests that fallback mode is entered if balancer response is received but the
-// backends can't be reached.
-TEST_P(FallbackTest, FallbackIfResponseReceivedButChildNotReady) {
- const int kFallbackTimeoutMs = 500 * grpc_test_slowdown_factor();
- ResetStub(kFallbackTimeoutMs);
- SetNextResolution({backends_[0]->port()});
- SetNextResolutionForLbChannelAllBalancers();
- // Send a serverlist that only contains an unreachable backend before fallback
- // timeout.
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", {g_port_saver->GetPort()}},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args), kDefaultResourceName);
- // Because no child policy is ready before fallback timeout, we enter fallback
- // mode.
- WaitForBackend(0);
-}
-
-// Tests that fallback mode is exited if the balancer tells the client to drop
-// all the calls.
-TEST_P(FallbackTest, FallbackModeIsExitedWhenBalancerSaysToDropAllCalls) {
- // Return an unreachable balancer and one fallback backend.
- SetNextResolution({backends_[0]->port()});
- SetNextResolutionForLbChannel({g_port_saver->GetPort()});
- // Enter fallback mode because the LB channel fails to connect.
- WaitForBackend(0);
- // Return a new balancer that sends a response to drop all calls.
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts()},
- });
- args.drop_categories = {{kLbDropType, 1000000}};
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args), kDefaultResourceName);
- SetNextResolutionForLbChannelAllBalancers();
- // Send RPCs until failure.
- gpr_timespec deadline = gpr_time_add(
- gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(5000, GPR_TIMESPAN));
- do {
- auto status = SendRpc();
- if (!status.ok()) break;
- } while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
- CheckRpcSendFailure();
-}
-
-// Tests that fallback mode is exited if the child policy becomes ready.
-TEST_P(FallbackTest, FallbackModeIsExitedAfterChildReady) {
- // Return an unreachable balancer and one fallback backend.
- SetNextResolution({backends_[0]->port()});
- SetNextResolutionForLbChannel({g_port_saver->GetPort()});
- // Enter fallback mode because the LB channel fails to connect.
- WaitForBackend(0);
- // Return a new balancer that sends a dead backend.
- ShutdownBackend(1);
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", {backends_[1]->port()}},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args), kDefaultResourceName);
- SetNextResolutionForLbChannelAllBalancers();
- // The state (TRANSIENT_FAILURE) update from the child policy will be ignored
- // because we are still in fallback mode.
- gpr_timespec deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_millis(500, GPR_TIMESPAN));
- // Send 0.5 second worth of RPCs.
- do {
- CheckRpcSendOk();
- } while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
- // After the backend is restarted, the child policy will eventually be READY,
- // and we will exit fallback mode.
- StartBackend(1);
- WaitForBackend(1);
- // We have exited fallback mode, so calls will go to the child policy
- // exclusively.
- CheckRpcSendOk(100);
- EXPECT_EQ(0U, backends_[0]->backend_service()->request_count());
- EXPECT_EQ(100U, backends_[1]->backend_service()->request_count());
-}
-
class BalancerUpdateTest : public XdsEnd2endTest {
public:
BalancerUpdateTest() : XdsEnd2endTest(4, 3) {}
@@ -3779,12 +3542,6 @@ INSTANTIATE_TEST_SUITE_P(XdsTest, DropTest,
TestType(true, true)),
&TestTypeName);
-// Fallback does not work with xds resolver.
-INSTANTIATE_TEST_SUITE_P(XdsTest, FallbackTest,
- ::testing::Values(TestType(false, true),
- TestType(false, false)),
- &TestTypeName);
-
INSTANTIATE_TEST_SUITE_P(XdsTest, BalancerUpdateTest,
::testing::Values(TestType(false, true),
TestType(false, false),
diff --git a/test/cpp/ios/Podfile b/test/cpp/ios/Podfile
index f49b8ba0d09..0806fa24a5a 100644
--- a/test/cpp/ios/Podfile
+++ b/test/cpp/ios/Podfile
@@ -72,7 +72,7 @@ post_install do |installer|
# GPR_UNREACHABLE_CODE causes "Control may reach end of non-void
# function" warning
config.build_settings['GCC_WARN_ABOUT_RETURN_TYPE'] = 'NO'
- config.build_settings['GCC_PREPROCESSOR_DEFINITIONS'] = '$(inherited) COCOAPODS=1 GRPC_CRONET_WITH_PACKET_COALESCING=1 GRPC_CFSTREAM=1'
+ config.build_settings['GCC_PREPROCESSOR_DEFINITIONS'] = '$(inherited) COCOAPODS=1 GRPC_CRONET_WITH_PACKET_COALESCING=1'
end
end
diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal
index 60e99d7b5a0..1fe2866bdc8 100644
--- a/tools/doxygen/Doxyfile.c++.internal
+++ b/tools/doxygen/Doxyfile.c++.internal
@@ -1539,6 +1539,8 @@ src/core/lib/iomgr/error.h \
src/core/lib/iomgr/error_cfstream.cc \
src/core/lib/iomgr/error_cfstream.h \
src/core/lib/iomgr/error_internal.h \
+src/core/lib/iomgr/ev_apple.cc \
+src/core/lib/iomgr/ev_apple.h \
src/core/lib/iomgr/ev_epoll1_linux.cc \
src/core/lib/iomgr/ev_epoll1_linux.h \
src/core/lib/iomgr/ev_epollex_linux.cc \
diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal
index fef1fd11d4a..3c6ca1042ac 100644
--- a/tools/doxygen/Doxyfile.core.internal
+++ b/tools/doxygen/Doxyfile.core.internal
@@ -1351,6 +1351,8 @@ src/core/lib/iomgr/error.h \
src/core/lib/iomgr/error_cfstream.cc \
src/core/lib/iomgr/error_cfstream.h \
src/core/lib/iomgr/error_internal.h \
+src/core/lib/iomgr/ev_apple.cc \
+src/core/lib/iomgr/ev_apple.h \
src/core/lib/iomgr/ev_epoll1_linux.cc \
src/core/lib/iomgr/ev_epoll1_linux.h \
src/core/lib/iomgr/ev_epollex_linux.cc \
diff --git a/tools/run_tests/run_xds_tests.py b/tools/run_tests/run_xds_tests.py
index 762743e0993..c6247c15fb8 100755
--- a/tools/run_tests/run_xds_tests.py
+++ b/tools/run_tests/run_xds_tests.py
@@ -198,6 +198,7 @@ _INSTANCE_GROUP_SIZE = args.instance_group_size
_NUM_TEST_RPCS = 10 * args.qps
_WAIT_FOR_STATS_SEC = 180
_WAIT_FOR_URL_MAP_PATCH_SEC = 300
+_GCP_API_RETRIES = 5
_BOOTSTRAP_TEMPLATE = """
{{
"node": {{
@@ -549,8 +550,8 @@ def create_instance_template(gcp, name, network, source_image, machine_type,
}
logger.debug('Sending GCP request with body=%s', config)
- result = gcp.compute.instanceTemplates().insert(project=gcp.project,
- body=config).execute()
+ result = gcp.compute.instanceTemplates().insert(
+ project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
gcp.instance_template = GcpResource(config['name'], result['targetLink'])
@@ -567,13 +568,14 @@ def add_instance_group(gcp, zone, name, size):
}
logger.debug('Sending GCP request with body=%s', config)
- result = gcp.compute.instanceGroupManagers().insert(project=gcp.project,
- zone=zone,
- body=config).execute()
+ result = gcp.compute.instanceGroupManagers().insert(
+ project=gcp.project, zone=zone,
+ body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_zone_operation(gcp, zone, result['name'])
result = gcp.compute.instanceGroupManagers().get(
project=gcp.project, zone=zone,
- instanceGroupManager=config['name']).execute()
+ instanceGroupManager=config['name']).execute(
+ num_retries=_GCP_API_RETRIES)
instance_group = InstanceGroup(config['name'], result['instanceGroup'],
zone)
gcp.instance_groups.append(instance_group)
@@ -600,8 +602,8 @@ def create_health_check(gcp, name):
}
compute_to_use = gcp.compute
logger.debug('Sending GCP request with body=%s', config)
- result = compute_to_use.healthChecks().insert(project=gcp.project,
- body=config).execute()
+ result = compute_to_use.healthChecks().insert(
+ project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
gcp.health_check = GcpResource(config['name'], result['targetLink'])
@@ -617,8 +619,8 @@ def create_health_check_firewall_rule(gcp, name):
'targetTags': ['allow-health-checks'],
}
logger.debug('Sending GCP request with body=%s', config)
- result = gcp.compute.firewalls().insert(project=gcp.project,
- body=config).execute()
+ result = gcp.compute.firewalls().insert(
+ project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
gcp.health_check_firewall_rule = GcpResource(config['name'],
result['targetLink'])
@@ -639,8 +641,8 @@ def add_backend_service(gcp, name):
'protocol': protocol
}
logger.debug('Sending GCP request with body=%s', config)
- result = compute_to_use.backendServices().insert(project=gcp.project,
- body=config).execute()
+ result = compute_to_use.backendServices().insert(
+ project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
backend_service = GcpResource(config['name'], result['targetLink'])
gcp.backend_services.append(backend_service)
@@ -661,8 +663,8 @@ def create_url_map(gcp, name, backend_service, host_name):
}]
}
logger.debug('Sending GCP request with body=%s', config)
- result = gcp.compute.urlMaps().insert(project=gcp.project,
- body=config).execute()
+ result = gcp.compute.urlMaps().insert(
+ project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
gcp.url_map = GcpResource(config['name'], result['targetLink'])
@@ -675,9 +677,9 @@ def patch_url_map_host_rule_with_port(gcp, name, backend_service, host_name):
}]
}
logger.debug('Sending GCP request with body=%s', config)
- result = gcp.compute.urlMaps().patch(project=gcp.project,
- urlMap=name,
- body=config).execute()
+ result = gcp.compute.urlMaps().patch(
+ project=gcp.project, urlMap=name,
+ body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
@@ -690,15 +692,17 @@ def create_target_proxy(gcp, name):
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.alpha_compute.targetGrpcProxies().insert(
- project=gcp.project, body=config).execute()
+ project=gcp.project,
+ body=config).execute(num_retries=_GCP_API_RETRIES)
else:
config = {
'name': name,
'url_map': gcp.url_map.url,
}
logger.debug('Sending GCP request with body=%s', config)
- result = gcp.compute.targetHttpProxies().insert(project=gcp.project,
- body=config).execute()
+ result = gcp.compute.targetHttpProxies().insert(
+ project=gcp.project,
+ body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
gcp.target_proxy = GcpResource(config['name'], result['targetLink'])
@@ -720,7 +724,8 @@ def create_global_forwarding_rule(gcp, name, potential_ports):
}
logger.debug('Sending GCP request with body=%s', config)
result = compute_to_use.globalForwardingRules().insert(
- project=gcp.project, body=config).execute()
+ project=gcp.project,
+ body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
gcp.global_forwarding_rule = GcpResource(config['name'],
result['targetLink'])
@@ -736,7 +741,8 @@ def delete_global_forwarding_rule(gcp):
try:
result = gcp.compute.globalForwardingRules().delete(
project=gcp.project,
- forwardingRule=gcp.global_forwarding_rule.name).execute()
+ forwardingRule=gcp.global_forwarding_rule.name).execute(
+ num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
except googleapiclient.errors.HttpError as http_error:
logger.info('Delete failed: %s', http_error)
@@ -747,11 +753,13 @@ def delete_target_proxy(gcp):
if gcp.alpha_compute:
result = gcp.alpha_compute.targetGrpcProxies().delete(
project=gcp.project,
- targetGrpcProxy=gcp.target_proxy.name).execute()
+ targetGrpcProxy=gcp.target_proxy.name).execute(
+ num_retries=_GCP_API_RETRIES)
else:
result = gcp.compute.targetHttpProxies().delete(
project=gcp.project,
- targetHttpProxy=gcp.target_proxy.name).execute()
+ targetHttpProxy=gcp.target_proxy.name).execute(
+ num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
except googleapiclient.errors.HttpError as http_error:
logger.info('Delete failed: %s', http_error)
@@ -760,7 +768,8 @@ def delete_target_proxy(gcp):
def delete_url_map(gcp):
try:
result = gcp.compute.urlMaps().delete(
- project=gcp.project, urlMap=gcp.url_map.name).execute()
+ project=gcp.project,
+ urlMap=gcp.url_map.name).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
except googleapiclient.errors.HttpError as http_error:
logger.info('Delete failed: %s', http_error)
@@ -771,7 +780,8 @@ def delete_backend_services(gcp):
try:
result = gcp.compute.backendServices().delete(
project=gcp.project,
- backendService=backend_service.name).execute()
+ backendService=backend_service.name).execute(
+ num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
except googleapiclient.errors.HttpError as http_error:
logger.info('Delete failed: %s', http_error)
@@ -781,7 +791,8 @@ def delete_firewall(gcp):
try:
result = gcp.compute.firewalls().delete(
project=gcp.project,
- firewall=gcp.health_check_firewall_rule.name).execute()
+ firewall=gcp.health_check_firewall_rule.name).execute(
+ num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
except googleapiclient.errors.HttpError as http_error:
logger.info('Delete failed: %s', http_error)
@@ -790,7 +801,8 @@ def delete_firewall(gcp):
def delete_health_check(gcp):
try:
result = gcp.compute.healthChecks().delete(
- project=gcp.project, healthCheck=gcp.health_check.name).execute()
+ project=gcp.project, healthCheck=gcp.health_check.name).execute(
+ num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
except googleapiclient.errors.HttpError as http_error:
logger.info('Delete failed: %s', http_error)
@@ -802,7 +814,8 @@ def delete_instance_groups(gcp):
result = gcp.compute.instanceGroupManagers().delete(
project=gcp.project,
zone=instance_group.zone,
- instanceGroupManager=instance_group.name).execute()
+ instanceGroupManager=instance_group.name).execute(
+ num_retries=_GCP_API_RETRIES)
wait_for_zone_operation(gcp,
instance_group.zone,
result['name'],
@@ -815,7 +828,8 @@ def delete_instance_template(gcp):
try:
result = gcp.compute.instanceTemplates().delete(
project=gcp.project,
- instanceTemplate=gcp.instance_template.name).execute()
+ instanceTemplate=gcp.instance_template.name).execute(
+ num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
except googleapiclient.errors.HttpError as http_error:
logger.info('Delete failed: %s', http_error)
@@ -839,7 +853,7 @@ def patch_backend_instances(gcp,
logger.debug('Sending GCP request with body=%s', config)
result = compute_to_use.backendServices().patch(
project=gcp.project, backendService=backend_service.name,
- body=config).execute()
+ body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp,
result['name'],
timeout_sec=_WAIT_FOR_BACKEND_SEC)
@@ -853,7 +867,7 @@ def resize_instance_group(gcp,
project=gcp.project,
zone=instance_group.zone,
instanceGroupManager=instance_group.name,
- size=new_size).execute()
+ size=new_size).execute(num_retries=_GCP_API_RETRIES)
wait_for_zone_operation(gcp,
instance_group.zone,
result['name'],
@@ -865,7 +879,7 @@ def resize_instance_group(gcp,
break
if time.time() - start_time > timeout_sec:
raise Exception('Failed to resize primary instance group')
- time.sleep(1)
+ time.sleep(2)
def patch_url_map_backend_service(gcp, backend_service):
@@ -878,9 +892,9 @@ def patch_url_map_backend_service(gcp, backend_service):
}]
}
logger.debug('Sending GCP request with body=%s', config)
- result = gcp.compute.urlMaps().patch(project=gcp.project,
- urlMap=gcp.url_map.name,
- body=config).execute()
+ result = gcp.compute.urlMaps().patch(
+ project=gcp.project, urlMap=gcp.url_map.name,
+ body=config).execute(num_retries=_GCP_API_RETRIES)
wait_for_global_operation(gcp, result['name'])
@@ -890,12 +904,13 @@ def wait_for_global_operation(gcp,
start_time = time.time()
while time.time() - start_time <= timeout_sec:
result = gcp.compute.globalOperations().get(
- project=gcp.project, operation=operation).execute()
+ project=gcp.project,
+ operation=operation).execute(num_retries=_GCP_API_RETRIES)
if result['status'] == 'DONE':
if 'error' in result:
raise Exception(result['error'])
return
- time.sleep(1)
+ time.sleep(2)
raise Exception('Operation %s did not complete within %d', operation,
timeout_sec)
@@ -907,12 +922,13 @@ def wait_for_zone_operation(gcp,
start_time = time.time()
while time.time() - start_time <= timeout_sec:
result = gcp.compute.zoneOperations().get(
- project=gcp.project, zone=zone, operation=operation).execute()
+ project=gcp.project, zone=zone,
+ operation=operation).execute(num_retries=_GCP_API_RETRIES)
if result['status'] == 'DONE':
if 'error' in result:
raise Exception(result['error'])
return
- time.sleep(1)
+ time.sleep(2)
raise Exception('Operation %s did not complete within %d', operation,
timeout_sec)
@@ -927,7 +943,7 @@ def wait_for_healthy_backends(gcp,
result = gcp.compute.backendServices().getHealth(
project=gcp.project,
backendService=backend_service.name,
- body=config).execute()
+ body=config).execute(num_retries=_GCP_API_RETRIES)
if 'healthStatus' in result:
healthy = True
for instance in result['healthStatus']:
@@ -936,7 +952,7 @@ def wait_for_healthy_backends(gcp,
break
if healthy:
return
- time.sleep(1)
+ time.sleep(2)
raise Exception('Not all backends became healthy within %d seconds: %s' %
(timeout_sec, result))
@@ -949,7 +965,7 @@ def get_instance_names(gcp, instance_group):
instanceGroup=instance_group.name,
body={
'instanceState': 'ALL'
- }).execute()
+ }).execute(num_retries=_GCP_API_RETRIES)
if 'items' not in result:
return []
for item in result['items']:
@@ -1081,19 +1097,22 @@ try:
if not gcp.instance_template:
result = compute.instanceTemplates().get(
project=args.project_id,
- instanceTemplate=template_name).execute()
+ instanceTemplate=template_name).execute(
+ num_retries=_GCP_API_RETRIES)
gcp.instance_template = GcpResource(template_name,
result['selfLink'])
if not gcp.backend_services:
result = compute.backendServices().get(
project=args.project_id,
- backendService=backend_service_name).execute()
+ backendService=backend_service_name).execute(
+ num_retries=_GCP_API_RETRIES)
backend_service = GcpResource(backend_service_name,
result['selfLink'])
gcp.backend_services.append(backend_service)
result = compute.backendServices().get(
project=args.project_id,
- backendService=alternate_backend_service_name).execute()
+ backendService=alternate_backend_service_name).execute(
+ num_retries=_GCP_API_RETRIES)
alternate_backend_service = GcpResource(
alternate_backend_service_name, result['selfLink'])
gcp.backend_services.append(alternate_backend_service)
@@ -1101,14 +1120,16 @@ try:
result = compute.instanceGroups().get(
project=args.project_id,
zone=args.zone,
- instanceGroup=instance_group_name).execute()
+ instanceGroup=instance_group_name).execute(
+ num_retries=_GCP_API_RETRIES)
instance_group = InstanceGroup(instance_group_name,
result['selfLink'], args.zone)
gcp.instance_groups.append(instance_group)
result = compute.instanceGroups().get(
project=args.project_id,
zone=args.zone,
- instanceGroup=same_zone_instance_group_name).execute()
+ instanceGroup=same_zone_instance_group_name).execute(
+ num_retries=_GCP_API_RETRIES)
same_zone_instance_group = InstanceGroup(
same_zone_instance_group_name, result['selfLink'],
args.zone)
@@ -1118,7 +1139,7 @@ try:
project=args.project_id,
zone=args.secondary_zone,
instanceGroup=secondary_zone_instance_group_name
- ).execute()
+ ).execute(num_retries=_GCP_API_RETRIES)
secondary_zone_instance_group = InstanceGroup(
secondary_zone_instance_group_name, result['selfLink'],
args.secondary_zone)
@@ -1126,12 +1147,14 @@ try:
if not gcp.health_check:
result = compute.healthChecks().get(
project=args.project_id,
- healthCheck=health_check_name).execute()
+ healthCheck=health_check_name).execute(
+ num_retries=_GCP_API_RETRIES)
gcp.health_check = GcpResource(health_check_name,
result['selfLink'])
if not gcp.url_map:
- result = compute.urlMaps().get(project=args.project_id,
- urlMap=url_map_name).execute()
+ result = compute.urlMaps().get(
+ project=args.project_id,
+ urlMap=url_map_name).execute(num_retries=_GCP_API_RETRIES)
gcp.url_map = GcpResource(url_map_name, result['selfLink'])
if not gcp.service_port:
gcp.service_port = args.service_port_range[0]