diff --git a/CMakeLists.txt b/CMakeLists.txt index 1bab5e6cba2..bccead24f28 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -720,6 +720,7 @@ add_dependencies(buildtests_cxx transport_security_common_api_test) if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_cxx writes_per_rpc_test) endif() +add_dependencies(buildtests_cxx xds_end2end_test) add_dependencies(buildtests_cxx resolver_component_test_unsecure) add_dependencies(buildtests_cxx resolver_component_test) if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) @@ -5616,6 +5617,7 @@ add_library(end2end_tests test/core/end2end/tests/empty_batch.cc test/core/end2end/tests/filter_call_init_fails.cc test/core/end2end/tests/filter_causes_close.cc + test/core/end2end/tests/filter_context.cc test/core/end2end/tests/filter_latency.cc test/core/end2end/tests/filter_status_code.cc test/core/end2end/tests/graceful_server_shutdown.cc @@ -5739,6 +5741,7 @@ add_library(end2end_nosec_tests test/core/end2end/tests/empty_batch.cc test/core/end2end/tests/filter_call_init_fails.cc test/core/end2end/tests/filter_causes_close.cc + test/core/end2end/tests/filter_context.cc test/core/end2end/tests/filter_latency.cc test/core/end2end/tests/filter_status_code.cc test/core/end2end/tests/graceful_server_shutdown.cc @@ -16230,6 +16233,53 @@ endif() endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) +add_executable(xds_end2end_test + ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v1/load_balancer.pb.cc + ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v1/load_balancer.grpc.pb.cc + ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v1/load_balancer.pb.h + ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v1/load_balancer.grpc.pb.h + test/cpp/end2end/xds_end2end_test.cc + third_party/googletest/googletest/src/gtest-all.cc + third_party/googletest/googlemock/src/gmock-all.cc +) + +protobuf_generate_grpc_cpp( + src/proto/grpc/lb/v1/load_balancer.proto +) + +target_include_directories(xds_end2end_test + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include + PRIVATE ${_gRPC_SSL_INCLUDE_DIR} + PRIVATE ${_gRPC_PROTOBUF_INCLUDE_DIR} + PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR} + PRIVATE ${_gRPC_BENCHMARK_INCLUDE_DIR} + PRIVATE ${_gRPC_CARES_INCLUDE_DIR} + PRIVATE ${_gRPC_GFLAGS_INCLUDE_DIR} + PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + PRIVATE ${_gRPC_NANOPB_INCLUDE_DIR} + PRIVATE third_party/googletest/googletest/include + PRIVATE third_party/googletest/googletest + PRIVATE third_party/googletest/googlemock/include + PRIVATE third_party/googletest/googlemock + PRIVATE ${_gRPC_PROTO_GENS_DIR} +) + +target_link_libraries(xds_end2end_test + ${_gRPC_PROTOBUF_LIBRARIES} + ${_gRPC_ALLTARGETS_LIBRARIES} + grpc++_test_util + grpc_test_util + grpc++ + grpc + gpr + ${_gRPC_GFLAGS_LIBRARIES} +) + + +endif (gRPC_BUILD_TESTS) +if (gRPC_BUILD_TESTS) + add_executable(public_headers_must_be_c89 test/core/surface/public_headers_must_be_c89.c ) diff --git a/Makefile b/Makefile index 2822623a221..94944265d61 100644 --- a/Makefile +++ b/Makefile @@ -404,6 +404,28 @@ LIBS = m pthread ws2_32 LDFLAGS += -pthread endif +# If we are installing into a non-default prefix, both +# the libraries we build, and the apps users build, +# need to know how to find the libraries they depend on. +# There is much gnashing of teeth about this subject. +# It's tricky to do that without editing images during install, +# as you don't want tests during build to find previously installed and +# now stale libraries, etc. 
+ifeq ($(SYSTEM),Linux) +ifneq ($(prefix),/usr) +# Linux best practice for rpath on installed files is probably: +# 1) .pc file provides -Wl,-rpath,$(prefix)/lib +# 2) binaries we install into $(prefix)/bin use -Wl,-rpath,$ORIGIN/../lib +# 3) libraries we install into $(prefix)/lib use -Wl,-rpath,$ORIGIN +# cf. https://www.akkadia.org/drepper/dsohowto.pdf +# Doing all of that right is hard, but using -Wl,-rpath,$ORIGIN is always +# safe, and solves problems seen in the wild. Note that $ORIGIN +# is a literal string interpreted much later by ld.so. Escape it +# here with a dollar sign so Make doesn't expand $O. +LDFLAGS += '-Wl,-rpath,$$ORIGIN' +endif +endif + # # The steps for cross-compiling are as follows: # First, clone and make install of grpc using the native compilers for the host. @@ -1254,6 +1276,7 @@ time_change_test: $(BINDIR)/$(CONFIG)/time_change_test transport_pid_controller_test: $(BINDIR)/$(CONFIG)/transport_pid_controller_test transport_security_common_api_test: $(BINDIR)/$(CONFIG)/transport_security_common_api_test writes_per_rpc_test: $(BINDIR)/$(CONFIG)/writes_per_rpc_test +xds_end2end_test: $(BINDIR)/$(CONFIG)/xds_end2end_test public_headers_must_be_c89: $(BINDIR)/$(CONFIG)/public_headers_must_be_c89 gen_hpack_tables: $(BINDIR)/$(CONFIG)/gen_hpack_tables gen_legal_metadata_characters: $(BINDIR)/$(CONFIG)/gen_legal_metadata_characters @@ -1765,6 +1788,7 @@ buildtests_cxx: privatelibs_cxx \ $(BINDIR)/$(CONFIG)/transport_pid_controller_test \ $(BINDIR)/$(CONFIG)/transport_security_common_api_test \ $(BINDIR)/$(CONFIG)/writes_per_rpc_test \ + $(BINDIR)/$(CONFIG)/xds_end2end_test \ $(BINDIR)/$(CONFIG)/boringssl_crypto_test_data \ $(BINDIR)/$(CONFIG)/boringssl_asn1_test \ $(BINDIR)/$(CONFIG)/boringssl_base64_test \ @@ -1954,6 +1978,7 @@ buildtests_cxx: privatelibs_cxx \ $(BINDIR)/$(CONFIG)/transport_pid_controller_test \ $(BINDIR)/$(CONFIG)/transport_security_common_api_test \ $(BINDIR)/$(CONFIG)/writes_per_rpc_test \ + $(BINDIR)/$(CONFIG)/xds_end2end_test \ $(BINDIR)/$(CONFIG)/resolver_component_test_unsecure \ $(BINDIR)/$(CONFIG)/resolver_component_test \ $(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker_unsecure \ @@ -2478,6 +2503,8 @@ test_cxx: buildtests_cxx $(Q) $(BINDIR)/$(CONFIG)/transport_security_common_api_test || ( echo test transport_security_common_api_test failed ; exit 1 ) $(E) "[RUN] Testing writes_per_rpc_test" $(Q) $(BINDIR)/$(CONFIG)/writes_per_rpc_test || ( echo test writes_per_rpc_test failed ; exit 1 ) + $(E) "[RUN] Testing xds_end2end_test" + $(Q) $(BINDIR)/$(CONFIG)/xds_end2end_test || ( echo test xds_end2end_test failed ; exit 1 ) $(E) "[RUN] Testing resolver_component_tests_runner_invoker_unsecure" $(Q) $(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker_unsecure || ( echo test resolver_component_tests_runner_invoker_unsecure failed ; exit 1 ) $(E) "[RUN] Testing resolver_component_tests_runner_invoker" @@ -10388,6 +10415,7 @@ LIBEND2END_TESTS_SRC = \ test/core/end2end/tests/empty_batch.cc \ test/core/end2end/tests/filter_call_init_fails.cc \ test/core/end2end/tests/filter_causes_close.cc \ + test/core/end2end/tests/filter_context.cc \ test/core/end2end/tests/filter_latency.cc \ test/core/end2end/tests/filter_status_code.cc \ test/core/end2end/tests/graceful_server_shutdown.cc \ @@ -10504,6 +10532,7 @@ LIBEND2END_NOSEC_TESTS_SRC = \ test/core/end2end/tests/empty_batch.cc \ test/core/end2end/tests/filter_call_init_fails.cc \ test/core/end2end/tests/filter_causes_close.cc \ + test/core/end2end/tests/filter_context.cc \ 
test/core/end2end/tests/filter_latency.cc \ test/core/end2end/tests/filter_status_code.cc \ test/core/end2end/tests/graceful_server_shutdown.cc \ @@ -21284,6 +21313,53 @@ endif endif +XDS_END2END_TEST_SRC = \ + $(GENDIR)/src/proto/grpc/lb/v1/load_balancer.pb.cc $(GENDIR)/src/proto/grpc/lb/v1/load_balancer.grpc.pb.cc \ + test/cpp/end2end/xds_end2end_test.cc \ + +XDS_END2END_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(XDS_END2END_TEST_SRC)))) +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL. + +$(BINDIR)/$(CONFIG)/xds_end2end_test: openssl_dep_error + +else + + + + +ifeq ($(NO_PROTOBUF),true) + +# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.5.0+. + +$(BINDIR)/$(CONFIG)/xds_end2end_test: protobuf_dep_error + +else + +$(BINDIR)/$(CONFIG)/xds_end2end_test: $(PROTOBUF_DEP) $(XDS_END2END_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LDXX) $(LDFLAGS) $(XDS_END2END_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/xds_end2end_test + +endif + +endif + +$(OBJDIR)/$(CONFIG)/src/proto/grpc/lb/v1/load_balancer.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a + +$(OBJDIR)/$(CONFIG)/test/cpp/end2end/xds_end2end_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a + +deps_xds_end2end_test: $(XDS_END2END_TEST_OBJS:.o=.dep) + +ifneq ($(NO_SECURE),true) +ifneq ($(NO_DEPS),true) +-include $(XDS_END2END_TEST_OBJS:.o=.dep) +endif +endif +$(OBJDIR)/$(CONFIG)/test/cpp/end2end/xds_end2end_test.o: $(GENDIR)/src/proto/grpc/lb/v1/load_balancer.pb.cc $(GENDIR)/src/proto/grpc/lb/v1/load_balancer.grpc.pb.cc + + PUBLIC_HEADERS_MUST_BE_C89_SRC = \ test/core/surface/public_headers_must_be_c89.c \ diff --git a/build.yaml b/build.yaml index c18630ecdd3..621b9a4de2f 100644 --- a/build.yaml +++ b/build.yaml @@ -5644,6 +5644,19 @@ targets: - mac - linux - posix +- name: xds_end2end_test + gtest: true + build: test + language: c++ + src: + - src/proto/grpc/lb/v1/load_balancer.proto + - test/cpp/end2end/xds_end2end_test.cc + deps: + - grpc++_test_util + - grpc_test_util + - grpc++ + - grpc + - gpr - name: public_headers_must_be_c89 build: test language: c89 diff --git a/doc/service_config.md b/doc/service_config.md index dd1cbc56300..4cef4567d19 100644 --- a/doc/service_config.md +++ b/doc/service_config.md @@ -12,11 +12,13 @@ The service config is a JSON string of the following form: ``` { - // Load balancing policy name (case insensitive). + // [deprecated] Load balancing policy name (case insensitive). // Currently, the only selectable client-side policy provided with gRPC // is 'round_robin', but third parties may add their own policies. // This field is optional; if unset, the default behavior is to pick - // the first available backend. + // the first available backend. 
If set, the load balancing policy should be + // supported by the client, otherwise the service config is considered + // invalid. // If the policy name is set via the client API, that value overrides // the value specified here. // @@ -61,10 +63,11 @@ The service config is a JSON string of the following form: } ], - // Whether RPCs sent to this method should wait until the connection is - // ready by default. If false, the RPC will abort immediately if there - // is a transient failure connecting to the server. Otherwise, gRPC will - // attempt to connect until the deadline is exceeded. + // Optional. Whether RPCs sent to this method should wait until the + // connection is ready by default. If false, the RPC will abort + // immediately if there is a transient failure connecting to the server. + // Otherwise, gRPC will attempt to connect until the deadline is + // exceeded. // // The value specified via the gRPC client API will override the value // set here. However, note that setting the value in the client API will @@ -73,10 +76,10 @@ The service config is a JSON string of the following form: // is obtained by the gRPC client via name resolution. 'waitForReady': bool, - // The default timeout in seconds for RPCs sent to this method. This can - // be overridden in code. If no reply is received in the specified amount - // of time, the request is aborted and a deadline-exceeded error status - // is returned to the caller. + // Optional. The default timeout in seconds for RPCs sent to this method. + // This can be overridden in code. If no reply is received in the + // specified amount of time, the request is aborted and a + // deadline-exceeded error status is returned to the caller. // // The actual deadline used will be the minimum of the value specified // here and the value set by the application via the gRPC client API. @@ -87,10 +90,10 @@ The service config is a JSON string of the following form: // https://developers.google.com/protocol-buffers/docs/proto3#json 'timeout': string, - // The maximum allowed payload size for an individual request or object - // in a stream (client->server) in bytes. The size which is measured is - // the serialized, uncompressed payload in bytes. This applies both - // to streaming and non-streaming requests. + // Optional. The maximum allowed payload size for an individual request + // or object in a stream (client->server) in bytes. The size which is + // measured is the serialized, uncompressed payload in bytes. This + // applies both to streaming and non-streaming requests. // // The actual value used is the minimum of the value specified here and // the value set by the application via the gRPC client API. @@ -103,10 +106,10 @@ The service config is a JSON string of the following form: // be empty. 'maxRequestMessageBytes': number, - // The maximum allowed payload size for an individual response or object - // in a stream (server->client) in bytes. The size which is measured is - // the serialized, uncompressed payload in bytes. This applies both - // to streaming and non-streaming requests. + // Optional. The maximum allowed payload size for an individual response + // or object in a stream (server->client) in bytes. The size which is + // measured is the serialized, uncompressed payload in bytes. This + // applies both to streaming and non-streaming requests. // // The actual value used is the minimum of the value specified here and // the value set by the application via the gRPC client API. 
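(Editorial aside, not part of the patch: for readers of the service_config.md changes above, a minimal concrete instance of the documented schema might look like the following. The service name, method name, and all values are illustrative only, and the sketch covers just the fields discussed in this diff.)

```json
{
  "loadBalancingPolicy": "round_robin",
  "methodConfig": [
    {
      "name": [
        { "service": "example.Greeter", "method": "SayHello" }
      ],
      "waitForReady": true,
      "timeout": "1.5s",
      "maxRequestMessageBytes": 1048576,
      "maxResponseMessageBytes": 1048576
    }
  ]
}
```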
diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec index 318092f758b..2e54b9d7847 100644 --- a/gRPC-Core.podspec +++ b/gRPC-Core.podspec @@ -1293,6 +1293,7 @@ Pod::Spec.new do |s| 'test/core/end2end/tests/empty_batch.cc', 'test/core/end2end/tests/filter_call_init_fails.cc', 'test/core/end2end/tests/filter_causes_close.cc', + 'test/core/end2end/tests/filter_context.cc', 'test/core/end2end/tests/filter_latency.cc', 'test/core/end2end/tests/filter_status_code.cc', 'test/core/end2end/tests/graceful_server_shutdown.cc', diff --git a/grpc.def b/grpc.def index 59e29e0d168..e0a08d22c19 100644 --- a/grpc.def +++ b/grpc.def @@ -16,6 +16,7 @@ EXPORTS grpc_init grpc_shutdown grpc_is_initialized + grpc_shutdown_blocking grpc_version_string grpc_g_stands_for grpc_completion_queue_factory_lookup diff --git a/grpc.gyp b/grpc.gyp index ca9d017dbbe..53e891b28dc 100644 --- a/grpc.gyp +++ b/grpc.gyp @@ -2710,6 +2710,7 @@ 'test/core/end2end/tests/empty_batch.cc', 'test/core/end2end/tests/filter_call_init_fails.cc', 'test/core/end2end/tests/filter_causes_close.cc', + 'test/core/end2end/tests/filter_context.cc', 'test/core/end2end/tests/filter_latency.cc', 'test/core/end2end/tests/filter_status_code.cc', 'test/core/end2end/tests/graceful_server_shutdown.cc', @@ -2799,6 +2800,7 @@ 'test/core/end2end/tests/empty_batch.cc', 'test/core/end2end/tests/filter_call_init_fails.cc', 'test/core/end2end/tests/filter_causes_close.cc', + 'test/core/end2end/tests/filter_context.cc', 'test/core/end2end/tests/filter_latency.cc', 'test/core/end2end/tests/filter_status_code.cc', 'test/core/end2end/tests/graceful_server_shutdown.cc', diff --git a/include/grpc/grpc.h b/include/grpc/grpc.h index fec7f5269e1..9a99e016a93 100644 --- a/include/grpc/grpc.h +++ b/include/grpc/grpc.h @@ -73,10 +73,11 @@ GRPCAPI void grpc_init(void); Before it's called, there should haven been a matching invocation to grpc_init(). - No memory is used by grpc after this call returns, nor are any instructions - executing within the grpc library. - Prior to calling, all application owned grpc objects must have been - destroyed. */ + The last call to grpc_shutdown will initiate cleaning up of grpc library + internals, which can happen in another thread. Once the clean-up is done, + no memory is used by grpc, nor are any instructions executing within the + grpc library. Prior to calling, all application owned grpc objects must + have been destroyed. */ GRPCAPI void grpc_shutdown(void); /** EXPERIMENTAL. Returns 1 if the grpc library has been initialized. @@ -85,6 +86,10 @@ GRPCAPI void grpc_shutdown(void); https://github.com/grpc/grpc/issues/15334 */ GRPCAPI int grpc_is_initialized(void); +/** EXPERIMENTAL. Blocking shut down grpc library. + This is only for wrapped language to use now. */ +GRPCAPI void grpc_shutdown_blocking(void); + /** Return a string representing the current version of grpc */ GRPCAPI const char* grpc_version_string(void); @@ -318,14 +323,14 @@ GRPCAPI void grpc_channel_destroy(grpc_channel* channel); If a grpc_call fails, it's guaranteed that no change to the call state has been made. */ -/** Called by clients to cancel an RPC on the server. +/** Cancel an RPC. Can be called multiple times, from any thread. THREAD-SAFETY grpc_call_cancel and grpc_call_cancel_with_status are thread-safe, and can be called at any point before grpc_call_unref is called.*/ GRPCAPI grpc_call_error grpc_call_cancel(grpc_call* call, void* reserved); -/** Called by clients to cancel an RPC on the server. +/** Cancel an RPC. 
Can be called multiple times, from any thread. If a status has not been received for the call, set it to the status code and description passed in. diff --git a/include/grpc/impl/codegen/grpc_types.h b/include/grpc/impl/codegen/grpc_types.h index 79b182c4515..078db2b90a8 100644 --- a/include/grpc/impl/codegen/grpc_types.h +++ b/include/grpc/impl/codegen/grpc_types.h @@ -317,6 +317,10 @@ typedef struct { balancer before using fallback backend addresses from the resolver. If 0, fallback will never be used. Default value is 10000. */ #define GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS "grpc.grpclb_fallback_timeout_ms" +/* Timeout in milliseconds to wait for the serverlist from the xDS load + balancer before using fallback backend addresses from the resolver. + If 0, fallback will never be used. Default value is 10000. */ +#define GRPC_ARG_XDS_FALLBACK_TIMEOUT_MS "grpc.xds_fallback_timeout_ms" /** If non-zero, grpc server's cronet compression workaround will be enabled */ #define GRPC_ARG_WORKAROUND_CRONET_COMPRESSION \ "grpc.workaround.cronet_compression" diff --git a/src/core/ext/filters/client_channel/README.md b/src/core/ext/filters/client_channel/README.md index 9676a4535b2..ffb09fd34e7 100644 --- a/src/core/ext/filters/client_channel/README.md +++ b/src/core/ext/filters/client_channel/README.md @@ -4,7 +4,7 @@ Client Configuration Support for GRPC This library provides high level configuration machinery to construct client channels and load balance between them. -Each grpc_channel is created with a grpc_resolver. It is the resolver's duty +Each `grpc_channel` is created with a `Resolver`. It is the resolver's duty to resolve a name into a set of arguments for the channel. Such arguments might include: @@ -12,7 +12,7 @@ might include: - a load balancing policy to decide which server to send a request to - a set of filters to mutate outgoing requests (say, by adding metadata) -The resolver provides this data as a stream of grpc_channel_args objects to +The resolver provides this data as a stream of `grpc_channel_args` objects to the channel. We represent arguments as a stream so that they can be changed by the resolver during execution, by reacting to external events (such as new service configuration data being pushed to some store). @@ -21,11 +21,11 @@ new service configuration data being pushed to some store). Load Balancing -------------- -Load balancing configuration is provided by a grpc_lb_policy object. +Load balancing configuration is provided by a `LoadBalancingPolicy` object. The primary job of the load balancing policies is to pick a target server given only the initial metadata for a request. It does this by providing -a grpc_subchannel object to the owning channel. +a `ConnectedSubchannel` object to the owning channel. Sub-Channels @@ -38,9 +38,9 @@ decisions (for example, by avoiding disconnected backends). Configured sub-channels are fully setup to participate in the grpc data plane. Their behavior is specified by a set of grpc channel filters defined at their -construction. To customize this behavior, resolvers build -grpc_client_channel_factory objects, which use the decorator pattern to customize -construction arguments for concrete grpc_subchannel instances. +construction. To customize this behavior, transports build +`ClientChannelFactory` objects, which customize construction arguments for +concrete subchannel instances. 
Naming for GRPC diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc index 3566ef8fb35..3f87438b13b 100644 --- a/src/core/ext/filters/client_channel/client_channel.cc +++ b/src/core/ext/filters/client_channel/client_channel.cc @@ -107,8 +107,8 @@ typedef struct client_channel_channel_data { grpc_channel_stack* owning_stack; /** interested parties (owned) */ grpc_pollset_set* interested_parties; - // Client channel factory. Holds a ref. - grpc_client_channel_factory* client_channel_factory; + // Client channel factory. + grpc_core::ClientChannelFactory* client_channel_factory; // Subchannel pool. grpc_core::RefCountedPtr subchannel_pool; @@ -205,16 +205,15 @@ class ClientChannelControlHelper chand_->subchannel_pool.get()); grpc_channel_args* new_args = grpc_channel_args_copy_and_add(&args, &arg, 1); - Subchannel* subchannel = grpc_client_channel_factory_create_subchannel( - chand_->client_channel_factory, new_args); + Subchannel* subchannel = + chand_->client_channel_factory->CreateSubchannel(new_args); grpc_channel_args_destroy(new_args); return subchannel; } - grpc_channel* CreateChannel(const char* target, grpc_client_channel_type type, + grpc_channel* CreateChannel(const char* target, const grpc_channel_args& args) override { - return grpc_client_channel_factory_create_channel( - chand_->client_channel_factory, target, type, &args); + return chand_->client_channel_factory->CreateChannel(target, &args); } void UpdateState( @@ -420,19 +419,12 @@ static grpc_error* cc_init_channel_elem(grpc_channel_element* elem, arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_ENABLE_RETRIES); chand->enable_retries = grpc_channel_arg_get_bool(arg, true); // Record client channel factory. - arg = grpc_channel_args_find(args->channel_args, - GRPC_ARG_CLIENT_CHANNEL_FACTORY); - if (arg == nullptr) { + chand->client_channel_factory = + grpc_core::ClientChannelFactory::GetFromChannelArgs(args->channel_args); + if (chand->client_channel_factory == nullptr) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Missing client channel factory in args for client channel filter"); } - if (arg->type != GRPC_ARG_POINTER) { - return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "client channel factory arg must be a pointer"); - } - chand->client_channel_factory = - static_cast(arg->value.pointer.p); - grpc_client_channel_factory_ref(chand->client_channel_factory); // Get server name to resolve, using proxy mapper if needed. arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI); if (arg == nullptr) { @@ -509,9 +501,6 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) { // longer be any need to explicitly reset these smart pointer data members. 
chand->picker.reset(); chand->subchannel_pool.reset(); - if (chand->client_channel_factory != nullptr) { - grpc_client_channel_factory_unref(chand->client_channel_factory); - } chand->info_lb_policy_name.reset(); chand->info_service_config_json.reset(); chand->retry_throttle_data.reset(); @@ -705,6 +694,7 @@ struct call_data { arena(args.arena), owning_call(args.call_stack), call_combiner(args.call_combiner), + call_context(args.context), pending_send_initial_metadata(false), pending_send_message(false), pending_send_trailing_metadata(false), @@ -718,12 +708,6 @@ struct call_data { for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches); ++i) { GPR_ASSERT(pending_batches[i].batch == nullptr); } - for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) { - if (pick.pick.subchannel_call_context[i].destroy != nullptr) { - pick.pick.subchannel_call_context[i].destroy( - pick.pick.subchannel_call_context[i].value); - } - } } // State for handling deadlines. @@ -740,6 +724,7 @@ struct call_data { gpr_arena* arena; grpc_call_stack* owning_call; grpc_call_combiner* call_combiner; + grpc_call_context_element* call_context; grpc_core::RefCountedPtr retry_throttle_data; grpc_core::RefCountedPtr method_params; @@ -2440,14 +2425,16 @@ static void create_subchannel_call(grpc_call_element* elem) { const size_t parent_data_size = calld->enable_retries ? sizeof(subchannel_call_retry_state) : 0; const grpc_core::ConnectedSubchannel::CallArgs call_args = { - calld->pollent, // pollent - calld->path, // path - calld->call_start_time, // start_time - calld->deadline, // deadline - calld->arena, // arena - calld->pick.pick.subchannel_call_context, // context - calld->call_combiner, // call_combiner - parent_data_size // parent_data_size + calld->pollent, // pollent + calld->path, // path + calld->call_start_time, // start_time + calld->deadline, // deadline + calld->arena, // arena + // TODO(roth): When we implement hedging support, we will probably + // need to use a separate call context for each subchannel call. + calld->call_context, // context + calld->call_combiner, // call_combiner + parent_data_size // parent_data_size }; grpc_error* error = GRPC_ERROR_NONE; calld->subchannel_call = @@ -2462,7 +2449,7 @@ static void create_subchannel_call(grpc_call_element* elem) { } else { if (parent_data_size > 0) { new (calld->subchannel_call->GetParentData()) - subchannel_call_retry_state(calld->pick.pick.subchannel_call_context); + subchannel_call_retry_state(calld->call_context); } pending_batches_resume(elem); } diff --git a/src/core/ext/filters/client_channel/client_channel_channelz.h b/src/core/ext/filters/client_channel/client_channel_channelz.h index 1dc1bf595be..9272116882e 100644 --- a/src/core/ext/filters/client_channel/client_channel_channelz.h +++ b/src/core/ext/filters/client_channel/client_channel_channelz.h @@ -71,11 +71,11 @@ class SubchannelNode : public BaseNode { grpc_json* RenderJson() override; // proxy methods to composed classes. 
- void AddTraceEvent(ChannelTrace::Severity severity, grpc_slice data) { + void AddTraceEvent(ChannelTrace::Severity severity, const grpc_slice& data) { trace_.AddTraceEvent(severity, data); } void AddTraceEventWithReference(ChannelTrace::Severity severity, - grpc_slice data, + const grpc_slice& data, RefCountedPtr referenced_channel) { trace_.AddTraceEventWithReference(severity, data, std::move(referenced_channel)); diff --git a/src/core/ext/filters/client_channel/client_channel_factory.cc b/src/core/ext/filters/client_channel/client_channel_factory.cc index 8c558382fdf..671a38430ef 100644 --- a/src/core/ext/filters/client_channel/client_channel_factory.cc +++ b/src/core/ext/filters/client_channel/client_channel_factory.cc @@ -21,47 +21,35 @@ #include "src/core/ext/filters/client_channel/client_channel_factory.h" #include "src/core/lib/channel/channel_args.h" -void grpc_client_channel_factory_ref(grpc_client_channel_factory* factory) { - factory->vtable->ref(factory); -} +// Channel arg key for client channel factory. +#define GRPC_ARG_CLIENT_CHANNEL_FACTORY "grpc.client_channel_factory" -void grpc_client_channel_factory_unref(grpc_client_channel_factory* factory) { - factory->vtable->unref(factory); -} +namespace grpc_core { -grpc_core::Subchannel* grpc_client_channel_factory_create_subchannel( - grpc_client_channel_factory* factory, const grpc_channel_args* args) { - return factory->vtable->create_subchannel(factory, args); -} +namespace { -grpc_channel* grpc_client_channel_factory_create_channel( - grpc_client_channel_factory* factory, const char* target, - grpc_client_channel_type type, const grpc_channel_args* args) { - return factory->vtable->create_client_channel(factory, target, type, args); +void* factory_arg_copy(void* f) { return f; } +void factory_arg_destroy(void* f) {} +int factory_arg_cmp(void* factory1, void* factory2) { + return GPR_ICMP(factory1, factory2); } +const grpc_arg_pointer_vtable factory_arg_vtable = { + factory_arg_copy, factory_arg_destroy, factory_arg_cmp}; -static void* factory_arg_copy(void* factory) { - grpc_client_channel_factory_ref( - static_cast(factory)); - return factory; -} +} // namespace -static void factory_arg_destroy(void* factory) { - grpc_client_channel_factory_unref( - static_cast(factory)); +grpc_arg ClientChannelFactory::CreateChannelArg(ClientChannelFactory* factory) { + return grpc_channel_arg_pointer_create( + const_cast(GRPC_ARG_CLIENT_CHANNEL_FACTORY), factory, + &factory_arg_vtable); } -static int factory_arg_cmp(void* factory1, void* factory2) { - if (factory1 < factory2) return -1; - if (factory1 > factory2) return 1; - return 0; +ClientChannelFactory* ClientChannelFactory::GetFromChannelArgs( + const grpc_channel_args* args) { + const grpc_arg* arg = + grpc_channel_args_find(args, GRPC_ARG_CLIENT_CHANNEL_FACTORY); + if (arg == nullptr || arg->type != GRPC_ARG_POINTER) return nullptr; + return static_cast(arg->value.pointer.p); } -static const grpc_arg_pointer_vtable factory_arg_vtable = { - factory_arg_copy, factory_arg_destroy, factory_arg_cmp}; - -grpc_arg grpc_client_channel_factory_create_channel_arg( - grpc_client_channel_factory* factory) { - return grpc_channel_arg_pointer_create((char*)GRPC_ARG_CLIENT_CHANNEL_FACTORY, - factory, &factory_arg_vtable); -} +} // namespace grpc_core diff --git a/src/core/ext/filters/client_channel/client_channel_factory.h b/src/core/ext/filters/client_channel/client_channel_factory.h index 4b72aa46499..21f78a833df 100644 --- a/src/core/ext/filters/client_channel/client_channel_factory.h +++ 
b/src/core/ext/filters/client_channel/client_channel_factory.h @@ -24,51 +24,32 @@ #include #include "src/core/ext/filters/client_channel/subchannel.h" -#include "src/core/lib/channel/channel_stack.h" +#include "src/core/lib/gprpp/abstract.h" -// Channel arg key for client channel factory. -#define GRPC_ARG_CLIENT_CHANNEL_FACTORY "grpc.client_channel_factory" +namespace grpc_core { -typedef struct grpc_client_channel_factory grpc_client_channel_factory; -typedef struct grpc_client_channel_factory_vtable - grpc_client_channel_factory_vtable; +class ClientChannelFactory { + public: + virtual ~ClientChannelFactory() = default; -typedef enum { - GRPC_CLIENT_CHANNEL_TYPE_REGULAR, /** for the user-level regular calls */ - GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, /** for communication with a load - balancing service */ -} grpc_client_channel_type; + // Creates a subchannel with the specified args. + virtual Subchannel* CreateSubchannel(const grpc_channel_args* args) + GRPC_ABSTRACT; -/** Constructor for new configured channels. - Creating decorators around this type is encouraged to adapt behavior. */ -struct grpc_client_channel_factory { - const grpc_client_channel_factory_vtable* vtable; -}; - -struct grpc_client_channel_factory_vtable { - void (*ref)(grpc_client_channel_factory* factory); - void (*unref)(grpc_client_channel_factory* factory); - grpc_core::Subchannel* (*create_subchannel)( - grpc_client_channel_factory* factory, const grpc_channel_args* args); - grpc_channel* (*create_client_channel)(grpc_client_channel_factory* factory, - const char* target, - grpc_client_channel_type type, - const grpc_channel_args* args); -}; + // Creates a channel for the specified target with the specified args. + virtual grpc_channel* CreateChannel( + const char* target, const grpc_channel_args* args) GRPC_ABSTRACT; -void grpc_client_channel_factory_ref(grpc_client_channel_factory* factory); -void grpc_client_channel_factory_unref(grpc_client_channel_factory* factory); + // Returns a channel arg containing the specified factory. + static grpc_arg CreateChannelArg(ClientChannelFactory* factory); -/** Create a new grpc_subchannel */ -grpc_core::Subchannel* grpc_client_channel_factory_create_subchannel( - grpc_client_channel_factory* factory, const grpc_channel_args* args); + // Returns the factory from args, or null if not found. 
+ static ClientChannelFactory* GetFromChannelArgs( + const grpc_channel_args* args); -/** Create a new grpc_channel */ -grpc_channel* grpc_client_channel_factory_create_channel( - grpc_client_channel_factory* factory, const char* target, - grpc_client_channel_type type, const grpc_channel_args* args); + GRPC_ABSTRACT_BASE_CLASS +}; -grpc_arg grpc_client_channel_factory_create_channel_arg( - grpc_client_channel_factory* factory); +} // namespace grpc_core #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_FACTORY_H */ diff --git a/src/core/ext/filters/client_channel/lb_policy.h b/src/core/ext/filters/client_channel/lb_policy.h index 5040ddc5047..75dca52a615 100644 --- a/src/core/ext/filters/client_channel/lb_policy.h +++ b/src/core/ext/filters/client_channel/lb_policy.h @@ -22,7 +22,6 @@ #include #include "src/core/ext/filters/client_channel/client_channel_channelz.h" -#include "src/core/ext/filters/client_channel/client_channel_factory.h" #include "src/core/ext/filters/client_channel/subchannel.h" #include "src/core/lib/gprpp/abstract.h" #include "src/core/lib/gprpp/orphanable.h" @@ -74,11 +73,6 @@ class LoadBalancingPolicy : public InternallyRefCounted { /// Will be set to the selected subchannel, or nullptr on failure or when /// the LB policy decides to drop the call. RefCountedPtr connected_subchannel; - /// Will be populated with context to pass to the subchannel call, if - /// needed. - // TODO(roth): Remove this from the API, especially since it's not - // working properly anyway (see https://github.com/grpc/grpc/issues/15927). - grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT] = {}; }; /// A picker is the object used to actual perform picks. @@ -193,21 +187,15 @@ class LoadBalancingPolicy : public InternallyRefCounted { virtual Subchannel* CreateSubchannel(const grpc_channel_args& args) GRPC_ABSTRACT; - /// Creates a channel with the specified target, type, and channel args. + /// Creates a channel with the specified target and channel args. virtual grpc_channel* CreateChannel( - const char* target, grpc_client_channel_type type, - const grpc_channel_args& args) GRPC_ABSTRACT; + const char* target, const grpc_channel_args& args) GRPC_ABSTRACT; /// Sets the connectivity state and returns a new picker to be used /// by the client channel. virtual void UpdateState(grpc_connectivity_state state, grpc_error* state_error, - UniquePtr picker) { - std::move(picker); // Suppress clang-tidy complaint. - // The rest of this is copied from the GRPC_ABSTRACT macro. - gpr_log(GPR_ERROR, "Function marked GRPC_ABSTRACT was not implemented"); - GPR_ASSERT(false); - } + UniquePtr) GRPC_ABSTRACT; /// Requests that the resolver re-resolve. virtual void RequestReresolution() GRPC_ABSTRACT; @@ -261,10 +249,8 @@ class LoadBalancingPolicy : public InternallyRefCounted { /// Note that the LB policy gets the set of addresses from the /// GRPC_ARG_SERVER_ADDRESS_LIST channel arg. virtual void UpdateLocked(const grpc_channel_args& args, - RefCountedPtr lb_config) { - std::move(lb_config); // Suppress clang-tidy complaint. - GRPC_ABSTRACT; - } + RefCountedPtr) // NOLINT + GRPC_ABSTRACT; /// Tries to enter a READY connectivity state. /// This is a no-op by default, since most LB policies never go into @@ -311,8 +297,8 @@ class LoadBalancingPolicy : public InternallyRefCounted { grpc_combiner* combiner() const { return combiner_; } - // Note: LB policies MUST NOT call any method on the helper from - // their constructor. 
+ // Note: LB policies MUST NOT call any method on the helper from their + // constructor. // Note: This will return null after ShutdownLocked() has been called. ChannelControlHelper* channel_control_helper() const { return channel_control_helper_.get(); diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc index 399bb452f45..3bb31fe3b08 100644 --- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc @@ -37,17 +37,6 @@ static void destroy_channel_elem(grpc_channel_element* elem) {} namespace { struct call_data { - call_data(const grpc_call_element_args& args) { - if (args.context[GRPC_GRPCLB_CLIENT_STATS].value != nullptr) { - // Get stats object from context and take a ref. - client_stats = static_cast( - args.context[GRPC_GRPCLB_CLIENT_STATS].value) - ->Ref(); - // Record call started. - client_stats->AddCallStarted(); - } - } - // Stats object to update. grpc_core::RefCountedPtr client_stats; // State for intercepting send_initial_metadata. @@ -82,7 +71,7 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) { static grpc_error* init_call_elem(grpc_call_element* elem, const grpc_call_element_args* args) { GPR_ASSERT(args->context != nullptr); - new (elem->call_data) call_data(*args); + new (elem->call_data) call_data(); return GRPC_ERROR_NONE; } @@ -96,9 +85,6 @@ static void destroy_call_elem(grpc_call_element* elem, calld->client_stats->AddCallFinished( !calld->send_initial_metadata_succeeded /* client_failed_to_send */, calld->recv_initial_metadata_succeeded /* known_received */); - // All done, so unref the stats object. - // TODO(roth): Eliminate this once filter stack is converted to C++. - calld->client_stats.reset(); } calld->~call_data(); } @@ -107,25 +93,36 @@ static void start_transport_stream_op_batch( grpc_call_element* elem, grpc_transport_stream_op_batch* batch) { call_data* calld = static_cast(elem->call_data); GPR_TIMER_SCOPE("clr_start_transport_stream_op_batch", 0); - if (calld->client_stats != nullptr) { - // Intercept send_initial_metadata. - if (batch->send_initial_metadata) { - calld->original_on_complete_for_send = batch->on_complete; - GRPC_CLOSURE_INIT(&calld->on_complete_for_send, on_complete_for_send, - calld, grpc_schedule_on_exec_ctx); - batch->on_complete = &calld->on_complete_for_send; - } - // Intercept recv_initial_metadata. - if (batch->recv_initial_metadata) { - calld->original_recv_initial_metadata_ready = - batch->payload->recv_initial_metadata.recv_initial_metadata_ready; - GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready, - recv_initial_metadata_ready, calld, - grpc_schedule_on_exec_ctx); - batch->payload->recv_initial_metadata.recv_initial_metadata_ready = - &calld->recv_initial_metadata_ready; + // Handle send_initial_metadata. + if (batch->send_initial_metadata) { + // Grab client stats object from user_data for LB token metadata. + grpc_linked_mdelem* lb_token = + batch->payload->send_initial_metadata.send_initial_metadata->idx.named + .lb_token; + if (lb_token != nullptr) { + grpc_core::GrpcLbClientStats* client_stats = + static_cast(grpc_mdelem_get_user_data( + lb_token->md, grpc_core::GrpcLbClientStats::Destroy)); + if (client_stats != nullptr) { + calld->client_stats = client_stats->Ref(); + // Intercept completion. 
+ calld->original_on_complete_for_send = batch->on_complete; + GRPC_CLOSURE_INIT(&calld->on_complete_for_send, on_complete_for_send, + calld, grpc_schedule_on_exec_ctx); + batch->on_complete = &calld->on_complete_for_send; + } } } + // Intercept completion of recv_initial_metadata. + if (batch->recv_initial_metadata) { + calld->original_recv_initial_metadata_ready = + batch->payload->recv_initial_metadata.recv_initial_metadata_ready; + GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready, + recv_initial_metadata_ready, calld, + grpc_schedule_on_exec_ctx); + batch->payload->recv_initial_metadata.recv_initial_metadata_ready = + &calld->recv_initial_metadata_ready; + } // Chain to next filter. grpc_call_next_op(elem, batch); } diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc index 90398aac7f4..e21b1789172 100644 --- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc @@ -39,15 +39,14 @@ /// the balancer, we update the round_robin policy with the new list of /// addresses. If we cannot communicate with the balancer on startup, /// however, we may enter fallback mode, in which case we will populate -/// the RR policy's addresses from the backend addresses returned by the +/// the child policy's addresses from the backend addresses returned by the /// resolver. /// -/// Once an RR policy instance is in place (and getting updated as described), +/// Once a child policy instance is in place (and getting updated as described), /// calls for a pick, a ping, or a cancellation will be serviced right -/// away by forwarding them to the RR instance. Any time there's no RR -/// policy available (i.e., right after the creation of the gRPCLB policy), -/// pick and ping requests are added to a list of pending picks and pings -/// to be flushed and serviced when the RR policy instance becomes available. +/// away by forwarding them to the child policy instance. Any time there's no +/// child policy available (i.e., right after the creation of the gRPCLB +/// policy), pick requests are queued. /// /// \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the /// high level design and details. @@ -225,7 +224,8 @@ class GrpcLb : public LoadBalancingPolicy { UniquePtr AsText() const; // Extracts all non-drop entries into a ServerAddressList. - ServerAddressList GetServerAddressList() const; + ServerAddressList GetServerAddressList( + GrpcLbClientStats* client_stats) const; // Returns true if the serverlist contains at least one drop entry and // no backend address entries. @@ -273,35 +273,40 @@ class GrpcLb : public LoadBalancingPolicy { Subchannel* CreateSubchannel(const grpc_channel_args& args) override; grpc_channel* CreateChannel(const char* target, - grpc_client_channel_type type, const grpc_channel_args& args) override; void UpdateState(grpc_connectivity_state state, grpc_error* state_error, UniquePtr picker) override; void RequestReresolution() override; + void set_child(LoadBalancingPolicy* child) { child_ = child; } + private: + bool CalledByPendingChild() const; + bool CalledByCurrentChild() const; + RefCountedPtr parent_; + LoadBalancingPolicy* child_ = nullptr; }; ~GrpcLb(); void ShutdownLocked() override; - // Helper function used in UpdateLocked(). + // Helper functions used in UpdateLocked(). 
void ProcessChannelArgsLocked(const grpc_channel_args& args); + void ParseLbConfig(Config* grpclb_config); // Methods for dealing with the balancer channel and call. void StartBalancerCallLocked(); static void OnFallbackTimerLocked(void* arg, grpc_error* error); void StartBalancerCallRetryTimerLocked(); static void OnBalancerCallRetryTimerLocked(void* arg, grpc_error* error); - static void OnBalancerChannelConnectivityChangedLocked(void* arg, - grpc_error* error); - // Methods for dealing with the RR policy. - grpc_channel_args* CreateRoundRobinPolicyArgsLocked(); - void CreateRoundRobinPolicyLocked(Args args); - void CreateOrUpdateRoundRobinPolicyLocked(); + // Methods for dealing with the child policy. + grpc_channel_args* CreateChildPolicyArgsLocked(); + OrphanablePtr CreateChildPolicyLocked( + const char* name, grpc_channel_args* args); + void CreateOrUpdateChildPolicyLocked(); // Who the client is trying to communicate with. const char* server_name_ = nullptr; @@ -316,10 +321,6 @@ class GrpcLb : public LoadBalancingPolicy { grpc_channel* lb_channel_ = nullptr; // Uuid of the lb channel. Used for channelz. gpr_atm lb_channel_uuid_ = 0; - grpc_connectivity_state lb_channel_connectivity_; - grpc_closure lb_channel_on_connectivity_changed_; - // Are we already watching the LB channel's connectivity? - bool watching_lb_channel_ = false; // Response generator to inject address updates into lb_channel_. RefCountedPtr response_generator_; @@ -351,8 +352,17 @@ class GrpcLb : public LoadBalancingPolicy { grpc_timer lb_fallback_timer_; grpc_closure lb_on_fallback_; - // The RR policy to use for the backends. - OrphanablePtr rr_policy_; + // Lock held when modifying the value of child_policy_ or + // pending_child_policy_. + gpr_mu child_policy_mu_; + // The child policy to use for the backends. + OrphanablePtr child_policy_; + // When switching child policies, the new policy will be stored here + // until it reports READY, at which point it will be moved to child_policy_. + OrphanablePtr pending_child_policy_; + // The child policy name and config. + UniquePtr child_policy_name_; + RefCountedPtr child_policy_config_; }; // @@ -453,7 +463,8 @@ bool IsServerValid(const grpc_grpclb_server* server, size_t idx, bool log) { } // Returns addresses extracted from the serverlist. -ServerAddressList GrpcLb::Serverlist::GetServerAddressList() const { +ServerAddressList GrpcLb::Serverlist::GetServerAddressList( + GrpcLbClientStats* client_stats) const { ServerAddressList addresses; for (size_t i = 0; i < serverlist_->num_servers; ++i) { const grpc_grpclb_server* server = serverlist_->servers[i]; @@ -471,6 +482,11 @@ ServerAddressList GrpcLb::Serverlist::GetServerAddressList() const { grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer( server->load_balance_token, lb_token_length); lb_token = grpc_mdelem_from_slices(GRPC_MDSTR_LB_TOKEN, lb_token_mdstr); + if (client_stats != nullptr) { + GPR_ASSERT(grpc_mdelem_set_user_data( + lb_token, GrpcLbClientStats::Destroy, + client_stats->Ref().release()) == client_stats); + } } else { char* uri = grpc_sockaddr_to_uri(&addr); gpr_log(GPR_INFO, @@ -511,22 +527,6 @@ const char* GrpcLb::Serverlist::ShouldDrop() { // GrpcLb::Picker // -// Adds lb_token of selected subchannel (address) to the call's initial -// metadata. 
-grpc_error* AddLbTokenToInitialMetadata( - grpc_mdelem lb_token, grpc_linked_mdelem* lb_token_mdelem_storage, - grpc_metadata_batch* initial_metadata) { - GPR_ASSERT(lb_token_mdelem_storage != nullptr); - GPR_ASSERT(!GRPC_MDISNULL(lb_token)); - return grpc_metadata_batch_add_tail(initial_metadata, lb_token_mdelem_storage, - lb_token); -} - -// Destroy function used when embedding client stats in call context. -void DestroyClientStats(void* arg) { - static_cast(arg)->Unref(); -} - GrpcLb::Picker::PickResult GrpcLb::Picker::Pick(PickState* pick, grpc_error** error) { // Check if we should drop the call. @@ -557,15 +557,14 @@ GrpcLb::Picker::PickResult GrpcLb::Picker::Pick(PickState* pick, abort(); } grpc_mdelem lb_token = {reinterpret_cast(arg->value.pointer.p)}; - AddLbTokenToInitialMetadata(GRPC_MDELEM_REF(lb_token), - &pick->lb_token_mdelem_storage, - pick->initial_metadata); - // Pass on client stats via context. Passes ownership of the reference. - if (client_stats_ != nullptr) { - pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].value = - client_stats_->Ref().release(); - pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].destroy = - DestroyClientStats; + GPR_ASSERT(!GRPC_MDISNULL(lb_token)); + GPR_ASSERT(grpc_metadata_batch_add_tail( + pick->initial_metadata, &pick->lb_token_mdelem_storage, + GRPC_MDELEM_REF(lb_token)) == GRPC_ERROR_NONE); + GrpcLbClientStats* client_stats = static_cast( + grpc_mdelem_get_user_data(lb_token, GrpcLbClientStats::Destroy)); + if (client_stats != nullptr) { + client_stats->AddCallStarted(); } } return result; @@ -575,16 +574,31 @@ GrpcLb::Picker::PickResult GrpcLb::Picker::Pick(PickState* pick, // GrpcLb::Helper // +bool GrpcLb::Helper::CalledByPendingChild() const { + GPR_ASSERT(child_ != nullptr); + return child_ == parent_->pending_child_policy_.get(); +} + +bool GrpcLb::Helper::CalledByCurrentChild() const { + GPR_ASSERT(child_ != nullptr); + return child_ == parent_->child_policy_.get(); +} + Subchannel* GrpcLb::Helper::CreateSubchannel(const grpc_channel_args& args) { - if (parent_->shutting_down_) return nullptr; + if (parent_->shutting_down_ || + (!CalledByPendingChild() && !CalledByCurrentChild())) { + return nullptr; + } return parent_->channel_control_helper()->CreateSubchannel(args); } grpc_channel* GrpcLb::Helper::CreateChannel(const char* target, - grpc_client_channel_type type, const grpc_channel_args& args) { - if (parent_->shutting_down_) return nullptr; - return parent_->channel_control_helper()->CreateChannel(target, type, args); + if (parent_->shutting_down_ || + (!CalledByPendingChild() && !CalledByCurrentChild())) { + return nullptr; + } + return parent_->channel_control_helper()->CreateChannel(target, args); } void GrpcLb::Helper::UpdateState(grpc_connectivity_state state, @@ -594,31 +608,51 @@ void GrpcLb::Helper::UpdateState(grpc_connectivity_state state, GRPC_ERROR_UNREF(state_error); return; } + // If this request is from the pending child policy, ignore it until + // it reports READY, at which point we swap it into place. 
+ if (CalledByPendingChild()) { + if (grpc_lb_glb_trace.enabled()) { + gpr_log(GPR_INFO, + "[grpclb %p helper %p] pending child policy %p reports state=%s", + parent_.get(), this, parent_->pending_child_policy_.get(), + grpc_connectivity_state_name(state)); + } + if (state != GRPC_CHANNEL_READY) { + GRPC_ERROR_UNREF(state_error); + return; + } + MutexLock lock(&parent_->child_policy_mu_); + parent_->child_policy_ = std::move(parent_->pending_child_policy_); + } else if (!CalledByCurrentChild()) { + // This request is from an outdated child, so ignore it. + GRPC_ERROR_UNREF(state_error); + return; + } // There are three cases to consider here: // 1. We're in fallback mode. In this case, we're always going to use - // RR's result, so we pass its picker through as-is. + // the child policy's result, so we pass its picker through as-is. // 2. The serverlist contains only drop entries. In this case, we // want to use our own picker so that we can return the drops. // 3. Not in fallback mode and serverlist is not all drops (i.e., it // may be empty or contain at least one backend address). There are // two sub-cases: - // a. RR is reporting state READY. In this case, we wrap RR's - // picker in our own, so that we can handle drops and LB token - // metadata for each pick. - // b. RR is reporting a state other than READY. In this case, we - // don't want to use our own picker, because we don't want to - // process drops for picks that yield a QUEUE result; this would + // a. The child policy is reporting state READY. In this case, we wrap + // the child's picker in our own, so that we can handle drops and LB + // token metadata for each pick. + // b. The child policy is reporting a state other than READY. In this + // case, we don't want to use our own picker, because we don't want + // to process drops for picks that yield a QUEUE result; this would // result in dropping too many calls, since we will see the // queued picks multiple times, and we'd consider each one a // separate call for the drop calculation. // - // Cases 1 and 3b: return picker from RR as-is. + // Cases 1 and 3b: return picker from the child policy as-is. if (parent_->serverlist_ == nullptr || (!parent_->serverlist_->ContainsAllDropEntries() && state != GRPC_CHANNEL_READY)) { if (grpc_lb_glb_trace.enabled()) { gpr_log(GPR_INFO, - "[grpclb %p helper %p] state=%s passing RR picker %p as-is", + "[grpclb %p helper %p] state=%s passing child picker %p as-is", parent_.get(), this, grpc_connectivity_state_name(state), picker.get()); } @@ -626,9 +660,9 @@ void GrpcLb::Helper::UpdateState(grpc_connectivity_state state, std::move(picker)); return; } - // Cases 2 and 3a: wrap picker from RR in our own picker. + // Cases 2 and 3a: wrap picker from the child in our own picker. if (grpc_lb_glb_trace.enabled()) { - gpr_log(GPR_INFO, "[grpclb %p helper %p] state=%s wrapping RR picker %p", + gpr_log(GPR_INFO, "[grpclb %p helper %p] state=%s wrapping child picker %p", parent_.get(), this, grpc_connectivity_state_name(state), picker.get()); } @@ -646,15 +680,19 @@ void GrpcLb::Helper::UpdateState(grpc_connectivity_state state, void GrpcLb::Helper::RequestReresolution() { if (parent_->shutting_down_) return; + // If there is a pending child policy, ignore re-resolution requests + // from the current child policy (or any outdated pending child). 
+ if (parent_->pending_child_policy_ != nullptr && !CalledByPendingChild()) { + return; + } if (grpc_lb_glb_trace.enabled()) { gpr_log(GPR_INFO, - "[grpclb %p] Re-resolution requested from the internal RR policy " - "(%p).", - parent_.get(), parent_->rr_policy_.get()); + "[grpclb %p] Re-resolution requested from child policy (%p).", + parent_.get(), child_); } // If we are talking to a balancer, we expect to get updated addresses // from the balancer, so we can ignore the re-resolution request from - // the RR policy. Otherwise, pass the re-resolution request up to the + // the child policy. Otherwise, pass the re-resolution request up to the // channel. if (parent_->lb_calld_ == nullptr || !parent_->lb_calld_->seen_initial_response()) { @@ -1002,7 +1040,7 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked( // instance will be destroyed either upon the next update or when the // GrpcLb instance is destroyed. grpclb_policy->serverlist_ = std::move(serverlist_wrapper); - grpclb_policy->CreateOrUpdateRoundRobinPolicyLocked(); + grpclb_policy->CreateOrUpdateChildPolicyLocked(); } } else { // No valid initial response or serverlist found. @@ -1182,10 +1220,7 @@ GrpcLb::GrpcLb(Args args) .set_jitter(GRPC_GRPCLB_RECONNECT_JITTER) .set_max_backoff(GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000)) { - // Initialization. - GRPC_CLOSURE_INIT(&lb_channel_on_connectivity_changed_, - &GrpcLb::OnBalancerChannelConnectivityChangedLocked, this, - grpc_combiner_scheduler(args.combiner)); + gpr_mu_init(&child_policy_mu_); // Record server name. const grpc_arg* arg = grpc_channel_args_find(args.args, GRPC_ARG_SERVER_URI); const char* server_uri = grpc_channel_arg_get_string(arg); @@ -1211,6 +1246,7 @@ GrpcLb::GrpcLb(Args args) GrpcLb::~GrpcLb() { gpr_free((void*)server_name_); grpc_channel_args_destroy(args_); + gpr_mu_destroy(&child_policy_mu_); } void GrpcLb::ShutdownLocked() { @@ -1222,7 +1258,11 @@ void GrpcLb::ShutdownLocked() { if (fallback_timer_callback_pending_) { grpc_timer_cancel(&lb_fallback_timer_); } - rr_policy_.reset(); + { + MutexLock lock(&child_policy_mu_); + child_policy_.reset(); + pending_child_policy_.reset(); + } // We destroy the LB channel here instead of in our destructor because // destroying the channel triggers a last callback to // OnBalancerChannelConnectivityChangedLocked(), and we need to be @@ -1242,17 +1282,30 @@ void GrpcLb::ResetBackoffLocked() { if (lb_channel_ != nullptr) { grpc_channel_reset_connect_backoff(lb_channel_); } - if (rr_policy_ != nullptr) { - rr_policy_->ResetBackoffLocked(); + if (child_policy_ != nullptr) { + child_policy_->ResetBackoffLocked(); + } + if (pending_child_policy_ != nullptr) { + pending_child_policy_->ResetBackoffLocked(); } } void GrpcLb::FillChildRefsForChannelz( channelz::ChildRefsList* child_subchannels, channelz::ChildRefsList* child_channels) { - // delegate to the RoundRobin to fill the children subchannels. - if (rr_policy_ != nullptr) { - rr_policy_->FillChildRefsForChannelz(child_subchannels, child_channels); + { + // Delegate to the child policy to fill the children subchannels. + // This must be done holding child_policy_mu_, since this method + // does not run in the combiner. 
+ MutexLock lock(&child_policy_mu_); + if (child_policy_ != nullptr) { + child_policy_->FillChildRefsForChannelz(child_subchannels, + child_channels); + } + if (pending_child_policy_ != nullptr) { + pending_child_policy_->FillChildRefsForChannelz(child_subchannels, + child_channels); + } } gpr_atm uuid = gpr_atm_no_barrier_load(&lb_channel_uuid_); if (uuid != 0) { @@ -1260,6 +1313,32 @@ void GrpcLb::FillChildRefsForChannelz( } } +void GrpcLb::UpdateLocked(const grpc_channel_args& args, + RefCountedPtr lb_config) { + const bool is_initial_update = lb_channel_ == nullptr; + ParseLbConfig(lb_config.get()); + ProcessChannelArgsLocked(args); + // Update the existing child policy. + if (child_policy_ != nullptr) CreateOrUpdateChildPolicyLocked(); + // If this is the initial update, start the fallback timer. + if (is_initial_update) { + if (lb_fallback_timeout_ms_ > 0 && serverlist_ == nullptr && + !fallback_timer_callback_pending_) { + grpc_millis deadline = ExecCtx::Get()->Now() + lb_fallback_timeout_ms_; + Ref(DEBUG_LOCATION, "on_fallback_timer").release(); // Ref for callback + GRPC_CLOSURE_INIT(&lb_on_fallback_, &GrpcLb::OnFallbackTimerLocked, this, + grpc_combiner_scheduler(combiner())); + fallback_timer_callback_pending_ = true; + grpc_timer_init(&lb_fallback_timer_, deadline, &lb_on_fallback_); + } + StartBalancerCallLocked(); + } +} + +// +// helpers for UpdateLocked() +// + // Returns the backend addresses extracted from the given addresses. UniquePtr ExtractBackendAddresses( const ServerAddressList& addresses) { @@ -1305,8 +1384,8 @@ void GrpcLb::ProcessChannelArgsLocked(const grpc_channel_args& args) { if (lb_channel_ == nullptr) { char* uri_str; gpr_asprintf(&uri_str, "fake:///%s", server_name_); - lb_channel_ = channel_control_helper()->CreateChannel( - uri_str, GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, *lb_channel_args); + lb_channel_ = + channel_control_helper()->CreateChannel(uri_str, *lb_channel_args); GPR_ASSERT(lb_channel_ != nullptr); grpc_core::channelz::ChannelNode* channel_node = grpc_channel_get_channelz_node(lb_channel_); @@ -1321,44 +1400,26 @@ void GrpcLb::ProcessChannelArgsLocked(const grpc_channel_args& args) { grpc_channel_args_destroy(lb_channel_args); } -void GrpcLb::UpdateLocked(const grpc_channel_args& args, - RefCountedPtr lb_config) { - const bool is_initial_update = lb_channel_ == nullptr; - ProcessChannelArgsLocked(args); - // Update the existing RR policy. - if (rr_policy_ != nullptr) CreateOrUpdateRoundRobinPolicyLocked(); - // If this is the initial update, start the fallback timer. - if (is_initial_update) { - if (lb_fallback_timeout_ms_ > 0 && serverlist_ == nullptr && - !fallback_timer_callback_pending_) { - grpc_millis deadline = ExecCtx::Get()->Now() + lb_fallback_timeout_ms_; - Ref(DEBUG_LOCATION, "on_fallback_timer").release(); // Ref for callback - GRPC_CLOSURE_INIT(&lb_on_fallback_, &GrpcLb::OnFallbackTimerLocked, this, - grpc_combiner_scheduler(combiner())); - fallback_timer_callback_pending_ = true; - grpc_timer_init(&lb_fallback_timer_, deadline, &lb_on_fallback_); +void GrpcLb::ParseLbConfig(Config* grpclb_config) { + const grpc_json* child_policy = nullptr; + if (grpclb_config != nullptr) { + const grpc_json* grpclb_config_json = grpclb_config->json(); + for (const grpc_json* field = grpclb_config_json; field != nullptr; + field = field->next) { + if (field->key == nullptr) return; + if (strcmp(field->key, "childPolicy") == 0) { + if (child_policy != nullptr) return; // Duplicate. 
+ child_policy = ParseLoadBalancingConfig(field); + } } - StartBalancerCallLocked(); - } else if (!watching_lb_channel_) { - // If this is not the initial update and we're not already watching - // the LB channel's connectivity state, start a watch now. This - // ensures that we'll know when to switch to a new balancer call. - lb_channel_connectivity_ = grpc_channel_check_connectivity_state( - lb_channel_, true /* try to connect */); - grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element( - grpc_channel_get_channel_stack(lb_channel_)); - GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter); - watching_lb_channel_ = true; - // TODO(roth): We currently track this ref manually. Once the - // ClosureRef API is ready, we should pass the RefCountedPtr<> along - // with the callback. - auto self = Ref(DEBUG_LOCATION, "watch_lb_channel_connectivity"); - self.release(); - grpc_client_channel_watch_connectivity_state( - client_channel_elem, - grpc_polling_entity_create_from_pollset_set(interested_parties()), - &lb_channel_connectivity_, &lb_channel_on_connectivity_changed_, - nullptr); + } + if (child_policy != nullptr) { + child_policy_name_ = UniquePtr(gpr_strdup(child_policy->key)); + child_policy_config_ = MakeRefCounted( + child_policy->child, grpclb_config->service_config()); + } else { + child_policy_name_.reset(); + child_policy_config_.reset(); } } @@ -1393,7 +1454,7 @@ void GrpcLb::OnFallbackTimerLocked(void* arg, grpc_error* error) { grpclb_policy); } GPR_ASSERT(grpclb_policy->fallback_backend_addresses_ != nullptr); - grpclb_policy->CreateOrUpdateRoundRobinPolicyLocked(); + grpclb_policy->CreateOrUpdateChildPolicyLocked(); } grpclb_policy->Unref(DEBUG_LOCATION, "on_fallback_timer"); } @@ -1436,64 +1497,20 @@ void GrpcLb::OnBalancerCallRetryTimerLocked(void* arg, grpc_error* error) { grpclb_policy->Unref(DEBUG_LOCATION, "on_balancer_call_retry_timer"); } -// Invoked as part of the update process. It continues watching the LB channel -// until it shuts down or becomes READY. It's invoked even if the LB channel -// stayed READY throughout the update (for example if the update is identical). -void GrpcLb::OnBalancerChannelConnectivityChangedLocked(void* arg, - grpc_error* error) { - GrpcLb* grpclb_policy = static_cast(arg); - if (grpclb_policy->shutting_down_) goto done; - // Re-initialize the lb_call. This should also take care of updating the - // embedded RR policy. Note that the current RR policy, if any, will stay in - // effect until an update from the new lb_call is received. - switch (grpclb_policy->lb_channel_connectivity_) { - case GRPC_CHANNEL_CONNECTING: - case GRPC_CHANNEL_TRANSIENT_FAILURE: { - // Keep watching the LB channel. - grpc_channel_element* client_channel_elem = - grpc_channel_stack_last_element( - grpc_channel_get_channel_stack(grpclb_policy->lb_channel_)); - GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter); - grpc_client_channel_watch_connectivity_state( - client_channel_elem, - grpc_polling_entity_create_from_pollset_set( - grpclb_policy->interested_parties()), - &grpclb_policy->lb_channel_connectivity_, - &grpclb_policy->lb_channel_on_connectivity_changed_, nullptr); - break; - } - // The LB channel may be IDLE because it's shut down before the update. - // Restart the LB call to kick the LB channel into gear. 
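ParseLbConfig above walks the grpclb policy's JSON config looking for a single "childPolicy" entry, bailing out on a key-less field or on a duplicate. A rough standalone analogue of that selection logic, using a hand-rolled node type instead of grpc_json (all names here are illustrative):

    #include <cstring>
    #include <string>

    // A very small stand-in for grpc_json: a keyed node with an optional child.
    struct JsonNode {
      const char* key;        // may be null for malformed input
      const JsonNode* child;  // for "childPolicy": a node whose key is the
                              // policy name and whose child is its config
      const JsonNode* next;
    };

    struct ParsedLbConfig {
      std::string child_policy_name;  // empty => use the default policy
      const JsonNode* child_policy_config = nullptr;
    };

    // Mirrors the shape of the loop above: stop on a malformed (key-less)
    // field, treat a second "childPolicy" entry as invalid, otherwise remember
    // the first usable one.
    ParsedLbConfig ParseLbConfig(const JsonNode* fields) {
      const JsonNode* child_policy = nullptr;
      for (const JsonNode* f = fields; f != nullptr; f = f->next) {
        if (f->key == nullptr) return {};
        if (std::strcmp(f->key, "childPolicy") == 0) {
          if (child_policy != nullptr) return {};  // duplicate
          child_policy = f->child;                 // first listed policy
        }
      }
      ParsedLbConfig result;
      if (child_policy != nullptr && child_policy->key != nullptr) {
        result.child_policy_name = child_policy->key;
        result.child_policy_config = child_policy->child;
      }
      return result;
    }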
- case GRPC_CHANNEL_IDLE: - case GRPC_CHANNEL_READY: - grpclb_policy->lb_calld_.reset(); - if (grpclb_policy->retry_timer_callback_pending_) { - grpc_timer_cancel(&grpclb_policy->lb_call_retry_timer_); - } - grpclb_policy->lb_call_backoff_.Reset(); - grpclb_policy->StartBalancerCallLocked(); - // fallthrough - case GRPC_CHANNEL_SHUTDOWN: - done: - grpclb_policy->watching_lb_channel_ = false; - grpclb_policy->Unref(DEBUG_LOCATION, - "watch_lb_channel_connectivity_cb_shutdown"); - } -} - // -// code for interacting with the RR policy +// code for interacting with the child policy // -grpc_channel_args* GrpcLb::CreateRoundRobinPolicyArgsLocked() { +grpc_channel_args* GrpcLb::CreateChildPolicyArgsLocked() { ServerAddressList tmp_addresses; ServerAddressList* addresses = &tmp_addresses; bool is_backend_from_grpclb_load_balancer = false; if (serverlist_ != nullptr) { - tmp_addresses = serverlist_->GetServerAddressList(); + tmp_addresses = serverlist_->GetServerAddressList( + lb_calld_ == nullptr ? nullptr : lb_calld_->client_stats()); is_backend_from_grpclb_load_balancer = true; } else { - // If CreateOrUpdateRoundRobinPolicyLocked() is invoked when we haven't + // If CreateOrUpdateChildPolicyLocked() is invoked when we haven't // received any serverlist from the balancer, we use the fallback backends // returned by the resolver. Note that the fallback backend list may be // empty, in which case the new round_robin policy will keep the requested @@ -1520,49 +1537,139 @@ grpc_channel_args* GrpcLb::CreateRoundRobinPolicyArgsLocked() { const_cast(GRPC_ARG_INHIBIT_HEALTH_CHECKING), 1); ++num_args_to_add; } - grpc_channel_args* args = grpc_channel_args_copy_and_add_and_remove( + return grpc_channel_args_copy_and_add_and_remove( args_, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), args_to_add, num_args_to_add); - return args; } -void GrpcLb::CreateRoundRobinPolicyLocked(Args args) { - GPR_ASSERT(rr_policy_ == nullptr); - rr_policy_ = LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy( - "round_robin", std::move(args)); - if (GPR_UNLIKELY(rr_policy_ == nullptr)) { - gpr_log(GPR_ERROR, "[grpclb %p] Failure creating a RoundRobin policy", - this); - return; +OrphanablePtr GrpcLb::CreateChildPolicyLocked( + const char* name, grpc_channel_args* args) { + Helper* helper = New(Ref()); + LoadBalancingPolicy::Args lb_policy_args; + lb_policy_args.combiner = combiner(); + lb_policy_args.args = args; + lb_policy_args.channel_control_helper = + UniquePtr(helper); + OrphanablePtr lb_policy = + LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy( + name, std::move(lb_policy_args)); + if (GPR_UNLIKELY(lb_policy == nullptr)) { + gpr_log(GPR_ERROR, "[grpclb %p] Failure creating child policy %s", this, + name); + return nullptr; } + helper->set_child(lb_policy.get()); if (grpc_lb_glb_trace.enabled()) { - gpr_log(GPR_INFO, "[grpclb %p] Created new RR policy %p", this, - rr_policy_.get()); + gpr_log(GPR_INFO, "[grpclb %p] Created new child policy %s (%p)", this, + name, lb_policy.get()); } // Add the gRPC LB's interested_parties pollset_set to that of the newly - // created RR policy. This will make the RR policy progress upon activity on - // gRPC LB, which in turn is tied to the application's call. - grpc_pollset_set_add_pollset_set(rr_policy_->interested_parties(), + // created child policy. This will make the child policy progress upon + // activity on gRPC LB, which in turn is tied to the application's call. 
+ grpc_pollset_set_add_pollset_set(lb_policy->interested_parties(), interested_parties()); + return lb_policy; } -void GrpcLb::CreateOrUpdateRoundRobinPolicyLocked() { +void GrpcLb::CreateOrUpdateChildPolicyLocked() { if (shutting_down_) return; - grpc_channel_args* args = CreateRoundRobinPolicyArgsLocked(); + grpc_channel_args* args = CreateChildPolicyArgsLocked(); GPR_ASSERT(args != nullptr); - if (rr_policy_ == nullptr) { - LoadBalancingPolicy::Args lb_policy_args; - lb_policy_args.combiner = combiner(); - lb_policy_args.args = args; - lb_policy_args.channel_control_helper = - UniquePtr(New(Ref())); - CreateRoundRobinPolicyLocked(std::move(lb_policy_args)); + // If the child policy name changes, we need to create a new child + // policy. When this happens, we leave child_policy_ as-is and store + // the new child policy in pending_child_policy_. Once the new child + // policy transitions into state READY, we swap it into child_policy_, + // replacing the original child policy. So pending_child_policy_ is + // non-null only between when we apply an update that changes the child + // policy name and when the new child reports state READY. + // + // Updates can arrive at any point during this transition. We always + // apply updates relative to the most recently created child policy, + // even if the most recent one is still in pending_child_policy_. This + // is true both when applying the updates to an existing child policy + // and when determining whether we need to create a new policy. + // + // As a result of this, there are several cases to consider here: + // + // 1. We have no existing child policy (i.e., we have started up but + // have not yet received a serverlist from the balancer or gone + // into fallback mode; in this case, both child_policy_ and + // pending_child_policy_ are null). In this case, we create a + // new child policy and store it in child_policy_. + // + // 2. We have an existing child policy and have no pending child policy + // from a previous update (i.e., either there has not been a + // previous update that changed the policy name, or we have already + // finished swapping in the new policy; in this case, child_policy_ + // is non-null but pending_child_policy_ is null). In this case: + // a. If child_policy_->name() equals child_policy_name, then we + // update the existing child policy. + // b. If child_policy_->name() does not equal child_policy_name, + // we create a new policy. The policy will be stored in + // pending_child_policy_ and will later be swapped into + // child_policy_ by the helper when the new child transitions + // into state READY. + // + // 3. We have an existing child policy and have a pending child policy + // from a previous update (i.e., a previous update set + // pending_child_policy_ as per case 2b above and that policy has + // not yet transitioned into state READY and been swapped into + // child_policy_; in this case, both child_policy_ and + // pending_child_policy_ are non-null). In this case: + // a. If pending_child_policy_->name() equals child_policy_name, + // then we update the existing pending child policy. + // b. If pending_child_policy->name() does not equal + // child_policy_name, then we create a new policy. The new + // policy is stored in pending_child_policy_ (replacing the one + // that was there before, which will be immediately shut down) + // and will later be swapped into child_policy_ by the helper + // when the new child transitions into state READY. 
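The comment block below enumerates when CreateOrUpdateChildPolicyLocked has to create a new policy versus update an existing one. Condensed into a standalone predicate (illustrative types, not the gRPC code), the rule is: create if there is no child at all, or if the most recently created child, pending or not, has a different name than the one requested.

    #include <cstring>

    struct Policy {
      const char* name;
    };

    // Cases from the accompanying comment:
    //   1     -> no child at all: create.
    //   2b    -> no pending child and the current child's name differs: create.
    //   3b    -> a pending child exists and *its* name differs: create.
    //   2a/3a -> otherwise: update the most recent child (pending if present).
    bool ShouldCreateNewChild(const Policy* child, const Policy* pending_child,
                              const char* requested_name) {
      if (child == nullptr) return true;                                  // 1
      const Policy* most_recent =
          pending_child != nullptr ? pending_child : child;
      return std::strcmp(most_recent->name, requested_name) != 0;         // 2b/3b
    }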
+ const char* child_policy_name = + child_policy_name_ == nullptr ? "round_robin" : child_policy_name_.get(); + const bool create_policy = + // case 1 + child_policy_ == nullptr || + // case 2b + (pending_child_policy_ == nullptr && + strcmp(child_policy_->name(), child_policy_name) != 0) || + // case 3b + (pending_child_policy_ != nullptr && + strcmp(pending_child_policy_->name(), child_policy_name) != 0); + LoadBalancingPolicy* policy_to_update = nullptr; + if (create_policy) { + // Cases 1, 2b, and 3b: create a new child policy. + // If child_policy_ is null, we set it (case 1), else we set + // pending_child_policy_ (cases 2b and 3b). + if (grpc_lb_glb_trace.enabled()) { + gpr_log(GPR_INFO, "[grpclb %p] Creating new %schild policy %s", this, + child_policy_ == nullptr ? "" : "pending ", child_policy_name); + } + auto new_policy = CreateChildPolicyLocked(child_policy_name, args); + // Swap the policy into place. + auto& lb_policy = + child_policy_ == nullptr ? child_policy_ : pending_child_policy_; + { + MutexLock lock(&child_policy_mu_); + lb_policy = std::move(new_policy); + } + policy_to_update = lb_policy.get(); + } else { + // Cases 2a and 3a: update an existing policy. + // If we have a pending child policy, send the update to the pending + // policy (case 3a), else send it to the current policy (case 2a). + policy_to_update = pending_child_policy_ != nullptr + ? pending_child_policy_.get() + : child_policy_.get(); } + GPR_ASSERT(policy_to_update != nullptr); + // Update the policy. if (grpc_lb_glb_trace.enabled()) { - gpr_log(GPR_INFO, "[grpclb %p] Updating RR policy %p", this, - rr_policy_.get()); + gpr_log(GPR_INFO, "[grpclb %p] Updating %schild policy %p", this, + policy_to_update == pending_child_policy_.get() ? "pending " : "", + policy_to_update); } - rr_policy_->UpdateLocked(*args, nullptr); + policy_to_update->UpdateLocked(*args, child_policy_config_); + // Clean up. grpc_channel_args_destroy(args); } diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h index 45ca40942ca..cb261ee16c7 100644 --- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h @@ -56,6 +56,12 @@ class GrpcLbClientStats : public RefCounted { int64_t* num_calls_finished_known_received, UniquePtr* drop_token_counts); + // A destruction function to use as the user_data key when attaching + // client stats to a grpc_mdelem. + static void Destroy(void* arg) { + static_cast(arg)->Unref(); + } + private: // This field must only be accessed via *_locked() methods. 
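The GrpcLbClientStats::Destroy helper added above exists so a ref-counted C++ object can ride in a C-style user_data slot (such as one attached to a grpc_mdelem) that only knows how to call a void (*)(void*) cleanup function. A self-contained sketch of that trampoline pattern, with a toy refcount standing in for gRPC's RefCounted<>:

    #include <atomic>
    #include <cstdio>

    class ClientStats {
     public:
      ClientStats* Ref() {
        refs_.fetch_add(1, std::memory_order_relaxed);
        return this;
      }
      void Unref() {
        if (refs_.fetch_sub(1, std::memory_order_acq_rel) == 1) delete this;
      }
      // Trampoline usable wherever a plain `void (*)(void*)` destroyer is
      // expected by a C API.
      static void Destroy(void* arg) { static_cast<ClientStats*>(arg)->Unref(); }

     private:
      std::atomic<int> refs_{1};
    };

    // A C-style slot that owns one reference and knows only a function pointer.
    struct UserDataSlot {
      void* data = nullptr;
      void (*destroy)(void*) = nullptr;
    };

    int main() {
      ClientStats* stats = new ClientStats();                   // refcount 1
      UserDataSlot slot{stats->Ref(), &ClientStats::Destroy};   // refcount 2
      // ... the slot's owner eventually cleans up:
      slot.destroy(slot.data);                                  // refcount 1
      stats->Unref();                                           // 0 -> deleted
      std::printf("done\n");
    }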
UniquePtr drop_token_counts_; diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc index f24281a5bfb..594c8cf6e94 100644 --- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc @@ -161,10 +161,10 @@ void grpc_grpclb_request_destroy(grpc_grpclb_request* request) { typedef grpc_lb_v1_LoadBalanceResponse grpc_grpclb_response; grpc_grpclb_initial_response* grpc_grpclb_initial_response_parse( - grpc_slice encoded_grpc_grpclb_response) { - pb_istream_t stream = - pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response), - GRPC_SLICE_LENGTH(encoded_grpc_grpclb_response)); + const grpc_slice& encoded_grpc_grpclb_response) { + pb_istream_t stream = pb_istream_from_buffer( + const_cast(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response)), + GRPC_SLICE_LENGTH(encoded_grpc_grpclb_response)); grpc_grpclb_response res; memset(&res, 0, sizeof(grpc_grpclb_response)); if (GPR_UNLIKELY( @@ -185,10 +185,10 @@ grpc_grpclb_initial_response* grpc_grpclb_initial_response_parse( } grpc_grpclb_serverlist* grpc_grpclb_response_parse_serverlist( - grpc_slice encoded_grpc_grpclb_response) { - pb_istream_t stream = - pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response), - GRPC_SLICE_LENGTH(encoded_grpc_grpclb_response)); + const grpc_slice& encoded_grpc_grpclb_response) { + pb_istream_t stream = pb_istream_from_buffer( + const_cast(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response)), + GRPC_SLICE_LENGTH(encoded_grpc_grpclb_response)); pb_istream_t stream_at_start = stream; grpc_grpclb_serverlist* sl = static_cast( gpr_zalloc(sizeof(grpc_grpclb_serverlist))); diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h index 71d371c880a..3c1d41a01b1 100644 --- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h @@ -55,11 +55,11 @@ void grpc_grpclb_request_destroy(grpc_grpclb_request* request); /** Parse (ie, decode) the bytes in \a encoded_grpc_grpclb_response as a \a * grpc_grpclb_initial_response */ grpc_grpclb_initial_response* grpc_grpclb_initial_response_parse( - grpc_slice encoded_grpc_grpclb_response); + const grpc_slice& encoded_grpc_grpclb_response); /** Parse the list of servers from an encoded \a grpc_grpclb_response */ grpc_grpclb_serverlist* grpc_grpclb_response_parse_serverlist( - grpc_slice encoded_grpc_grpclb_response); + const grpc_slice& encoded_grpc_grpclb_response); /** Return a copy of \a sl. The caller is responsible for calling \a * grpc_grpclb_destroy_serverlist on the returned copy. */ diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc index e1291da50af..5153330a84e 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc +++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc @@ -129,78 +129,128 @@ class XdsLb : public LoadBalancingPolicy { channelz::ChildRefsList* child_channels) override; private: - /// Contains a call to the LB server and all the data related to the call. - class BalancerCallState : public InternallyRefCounted { + /// Contains a channel to the LB server and all the data related to the + /// channel. 
+ class BalancerChannelState + : public InternallyRefCounted { public: - explicit BalancerCallState( - RefCountedPtr parent_xdslb_policy); + /// Contains a call to the LB server and all the data related to the call. + class BalancerCallState : public InternallyRefCounted { + public: + explicit BalancerCallState(RefCountedPtr lb_chand); - // It's the caller's responsibility to ensure that Orphan() is called from - // inside the combiner. - void Orphan() override; - - void StartQuery(); - - XdsLbClientStats* client_stats() const { return client_stats_.get(); } + // It's the caller's responsibility to ensure that Orphan() is called from + // inside the combiner. + void Orphan() override; - bool seen_initial_response() const { return seen_initial_response_; } - - private: - // So Delete() can access our private dtor. - template - friend void grpc_core::Delete(T*); + void StartQuery(); - ~BalancerCallState(); + RefCountedPtr client_stats() const { + return client_stats_; + } - XdsLb* xdslb_policy() const { - return static_cast(xdslb_policy_.get()); - } + bool seen_initial_response() const { return seen_initial_response_; } - void ScheduleNextClientLoadReportLocked(); - void SendClientLoadReportLocked(); + private: + // So Delete() can access our private dtor. + template + friend void grpc_core::Delete(T*); - static bool LoadReportCountersAreZero(xds_grpclb_request* request); + ~BalancerCallState(); - static void MaybeSendClientLoadReportLocked(void* arg, grpc_error* error); - static void OnInitialRequestSentLocked(void* arg, grpc_error* error); - static void OnBalancerMessageReceivedLocked(void* arg, grpc_error* error); - static void OnBalancerStatusReceivedLocked(void* arg, grpc_error* error); + XdsLb* xdslb_policy() const { return lb_chand_->xdslb_policy_.get(); } - // The owning LB policy. - RefCountedPtr xdslb_policy_; + bool IsCurrentCallOnChannel() const { + return this == lb_chand_->lb_calld_.get(); + } - // The streaming call to the LB server. Always non-NULL. - grpc_call* lb_call_ = nullptr; + void ScheduleNextClientLoadReportLocked(); + void SendClientLoadReportLocked(); + + static bool LoadReportCountersAreZero(xds_grpclb_request* request); + + static void MaybeSendClientLoadReportLocked(void* arg, grpc_error* error); + static void OnInitialRequestSentLocked(void* arg, grpc_error* error); + static void OnBalancerMessageReceivedLocked(void* arg, grpc_error* error); + static void OnBalancerStatusReceivedLocked(void* arg, grpc_error* error); + + // The owning LB channel. + RefCountedPtr lb_chand_; + + // The streaming call to the LB server. Always non-NULL. + grpc_call* lb_call_ = nullptr; + + // recv_initial_metadata + grpc_metadata_array lb_initial_metadata_recv_; + + // send_message + grpc_byte_buffer* send_message_payload_ = nullptr; + grpc_closure lb_on_initial_request_sent_; + + // recv_message + grpc_byte_buffer* recv_message_payload_ = nullptr; + grpc_closure lb_on_balancer_message_received_; + bool seen_initial_response_ = false; + + // recv_trailing_metadata + grpc_closure lb_on_balancer_status_received_; + grpc_metadata_array lb_trailing_metadata_recv_; + grpc_status_code lb_call_status_; + grpc_slice lb_call_status_details_; + + // The stats for client-side load reporting associated with this LB call. + // Created after the first serverlist is received. 
+ RefCountedPtr client_stats_; + grpc_millis client_stats_report_interval_ = 0; + grpc_timer client_load_report_timer_; + bool client_load_report_timer_callback_pending_ = false; + bool last_client_load_report_counters_were_zero_ = false; + bool client_load_report_is_due_ = false; + // The closure used for either the load report timer or the callback for + // completion of sending the load report. + grpc_closure client_load_report_closure_; + }; + + BalancerChannelState(const char* balancer_name, + const grpc_channel_args& args, + RefCountedPtr parent_xdslb_policy); + ~BalancerChannelState(); - // recv_initial_metadata - grpc_metadata_array lb_initial_metadata_recv_; + void Orphan() override; - // send_message - grpc_byte_buffer* send_message_payload_ = nullptr; - grpc_closure lb_on_initial_request_sent_; + grpc_channel* channel() const { return channel_; } + BalancerCallState* lb_calld() const { return lb_calld_.get(); } - // recv_message - grpc_byte_buffer* recv_message_payload_ = nullptr; - grpc_closure lb_on_balancer_message_received_; - bool seen_initial_response_ = false; + bool IsCurrentChannel() const { + return this == xdslb_policy_->lb_chand_.get(); + } + bool IsPendingChannel() const { + return this == xdslb_policy_->pending_lb_chand_.get(); + } + bool HasActiveCall() const { return lb_calld_ != nullptr; } - // recv_trailing_metadata - grpc_closure lb_on_balancer_status_received_; - grpc_metadata_array lb_trailing_metadata_recv_; - grpc_status_code lb_call_status_; - grpc_slice lb_call_status_details_; + void StartCallRetryTimerLocked(); + static void OnCallRetryTimerLocked(void* arg, grpc_error* error); + void StartCallLocked(); - // The stats for client-side load reporting associated with this LB call. - // Created after the first serverlist is received. - RefCountedPtr client_stats_; - grpc_millis client_stats_report_interval_ = 0; - grpc_timer client_load_report_timer_; - bool client_load_report_timer_callback_pending_ = false; - bool last_client_load_report_counters_were_zero_ = false; - bool client_load_report_is_due_ = false; - // The closure used for either the load report timer or the callback for - // completion of sending the load report. - grpc_closure client_load_report_closure_; + private: + // The owning LB policy. + RefCountedPtr xdslb_policy_; + + // The channel and its status. + grpc_channel* channel_; + bool shutting_down_ = false; + + // The data associated with the current LB call. It holds a ref to this LB + // channel. It's instantiated every time we query for backends. It's reset + // whenever the current LB call is no longer needed (e.g., the LB policy is + // shutting down, or the LB call has ended). A non-NULL lb_calld_ always + // contains a non-NULL lb_call_. + OrphanablePtr lb_calld_; + BackOff lb_call_backoff_; + grpc_timer lb_call_retry_timer_; + grpc_closure lb_on_call_retry_; + bool retry_timer_callback_pending_ = false; }; class Picker : public SubchannelPicker { @@ -223,7 +273,6 @@ class XdsLb : public LoadBalancingPolicy { Subchannel* CreateSubchannel(const grpc_channel_args& args) override; grpc_channel* CreateChannel(const char* target, - grpc_client_channel_type type, const grpc_channel_args& args) override; void UpdateState(grpc_connectivity_state state, grpc_error* state_error, UniquePtr picker) override; @@ -246,13 +295,13 @@ class XdsLb : public LoadBalancingPolicy { // found. Does nothing upon failure. void ParseLbConfig(Config* xds_config); - // Methods for dealing with the balancer channel and call. 
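The class restructuring above nests BalancerCallState inside BalancerChannelState: the policy owns at most two channels (current and pending), each channel owns at most one in-flight call, and the call holds a ref back to its channel so stale callbacks can detect that they are no longer the channel's current call. A rough ownership model with standard smart pointers in place of Orphanable/RefCounted (illustrative only):

    #include <memory>

    struct BalancerChannel;

    struct BalancerCall {
      // Keeps the channel alive for as long as the call exists; the channel
      // breaks the cycle by dropping its call when it shuts down.
      std::shared_ptr<BalancerChannel> chand;
      // Callbacks from stale calls use this to bail out early.
      bool IsCurrentCallOnChannel() const;
    };

    struct BalancerChannel {
      std::unique_ptr<BalancerCall> calld;  // at most one active call
      bool HasActiveCall() const { return calld != nullptr; }
      void Shutdown() { calld.reset(); }    // breaks the call->channel cycle
    };

    bool BalancerCall::IsCurrentCallOnChannel() const {
      return this == chand->calld.get();
    }

    struct XdsPolicy {
      std::shared_ptr<BalancerChannel> lb_chand;          // serving channel
      std::shared_ptr<BalancerChannel> pending_lb_chand;  // created by an
                                                          // update, not yet
                                                          // promoted
      BalancerChannel* LatestLbChannel() const {
        return pending_lb_chand != nullptr ? pending_lb_chand.get()
                                           : lb_chand.get();
      }
    };

The call-to-channel reference and the channel-to-call ownership form a cycle on purpose; as in the real code, it is only broken explicitly when the channel is shut down and drops its call.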
- void StartBalancerCallLocked(); + BalancerChannelState* LatestLbChannel() const { + return pending_lb_chand_ != nullptr ? pending_lb_chand_.get() + : lb_chand_.get(); + } + + // Callback to enter fallback mode. static void OnFallbackTimerLocked(void* arg, grpc_error* error); - void StartBalancerCallRetryTimerLocked(); - static void OnBalancerCallRetryTimerLocked(void* arg, grpc_error* error); - static void OnBalancerChannelConnectivityChangedLocked(void* arg, - grpc_error* error); // Methods for dealing with the child policy. void CreateOrUpdateChildPolicyLocked(); @@ -272,30 +321,15 @@ class XdsLb : public LoadBalancingPolicy { bool shutting_down_ = false; // The channel for communicating with the LB server. - grpc_channel* lb_channel_ = nullptr; + OrphanablePtr lb_chand_; + OrphanablePtr pending_lb_chand_; // Mutex to protect the channel to the LB server. This is used when // processing a channelz request. - gpr_mu lb_channel_mu_; - grpc_connectivity_state lb_channel_connectivity_; - grpc_closure lb_channel_on_connectivity_changed_; - // Are we already watching the LB channel's connectivity? - bool watching_lb_channel_ = false; - // Response generator to inject address updates into lb_channel_. - RefCountedPtr response_generator_; - - // The data associated with the current LB call. It holds a ref to this LB - // policy. It's initialized every time we query for backends. It's reset to - // NULL whenever the current LB call is no longer needed (e.g., the LB policy - // is shutting down, or the LB call has ended). A non-NULL lb_calld_ always - // contains a non-NULL lb_call_. - OrphanablePtr lb_calld_; + // TODO(juanlishen): Replace this with atomic. + gpr_mu lb_chand_mu_; + // Timeout in milliseconds for the LB call. 0 means no deadline. int lb_call_timeout_ms_ = 0; - // Balancer call retry state. - BackOff lb_call_backoff_; - bool retry_timer_callback_pending_ = false; - grpc_timer lb_call_retry_timer_; - grpc_closure lb_on_call_retry_; // The deserialized response from the balancer. May be nullptr until one // such response has arrived. @@ -323,11 +357,6 @@ class XdsLb : public LoadBalancingPolicy { // XdsLb::Picker // -// Destroy function used when embedding client stats in call context. -void DestroyClientStats(void* arg) { - static_cast(arg)->Unref(); -} - XdsLb::Picker::PickResult XdsLb::Picker::Pick(PickState* pick, grpc_error** error) { // TODO(roth): Add support for drop handling. @@ -336,10 +365,7 @@ XdsLb::Picker::PickResult XdsLb::Picker::Pick(PickState* pick, // If pick succeeded, add client stats. if (result == PickResult::PICK_COMPLETE && pick->connected_subchannel != nullptr && client_stats_ != nullptr) { - pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].value = - client_stats_->Ref().release(); - pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].destroy = - DestroyClientStats; + // TODO(roth): Add support for client stats. 
} return result; } @@ -354,10 +380,9 @@ Subchannel* XdsLb::Helper::CreateSubchannel(const grpc_channel_args& args) { } grpc_channel* XdsLb::Helper::CreateChannel(const char* target, - grpc_client_channel_type type, const grpc_channel_args& args) { if (parent_->shutting_down_) return nullptr; - return parent_->channel_control_helper()->CreateChannel(target, type, args); + return parent_->channel_control_helper()->CreateChannel(target, args); } void XdsLb::Helper::UpdateState(grpc_connectivity_state state, @@ -370,11 +395,11 @@ void XdsLb::Helper::UpdateState(grpc_connectivity_state state, // TODO(juanlishen): When in fallback mode, pass the child picker // through without wrapping it. (Or maybe use a different helper for // the fallback policy?) - RefCountedPtr client_stats; - if (parent_->lb_calld_ != nullptr && - parent_->lb_calld_->client_stats() != nullptr) { - client_stats = parent_->lb_calld_->client_stats()->Ref(); - } + GPR_ASSERT(parent_->lb_chand_ != nullptr); + RefCountedPtr client_stats = + parent_->lb_chand_->lb_calld() == nullptr + ? nullptr + : parent_->lb_chand_->lb_calld()->client_stats(); parent_->channel_control_helper()->UpdateState( state, state_error, UniquePtr( @@ -389,12 +414,13 @@ void XdsLb::Helper::RequestReresolution() { "(%p).", parent_.get(), parent_->child_policy_.get()); } + GPR_ASSERT(parent_->lb_chand_ != nullptr); // If we are talking to a balancer, we expect to get updated addresses // from the balancer, so we can ignore the re-resolution request from - // the RR policy. Otherwise, pass the re-resolution request up to the + // the child policy. Otherwise, pass the re-resolution request up to the // channel. - if (parent_->lb_calld_ == nullptr || - !parent_->lb_calld_->seen_initial_response()) { + if (parent_->lb_chand_->lb_calld() == nullptr || + !parent_->lb_chand_->lb_calld()->seen_initial_response()) { parent_->channel_control_helper()->RequestReresolution(); } } @@ -475,14 +501,98 @@ UniquePtr ProcessServerlist( } // -// XdsLb::BalancerCallState +// XdsLb::BalancerChannelState +// + +XdsLb::BalancerChannelState::BalancerChannelState( + const char* balancer_name, const grpc_channel_args& args, + grpc_core::RefCountedPtr parent_xdslb_policy) + : InternallyRefCounted(&grpc_lb_xds_trace), + xdslb_policy_(std::move(parent_xdslb_policy)), + lb_call_backoff_( + BackOff::Options() + .set_initial_backoff(GRPC_XDS_INITIAL_CONNECT_BACKOFF_SECONDS * + 1000) + .set_multiplier(GRPC_XDS_RECONNECT_BACKOFF_MULTIPLIER) + .set_jitter(GRPC_XDS_RECONNECT_JITTER) + .set_max_backoff(GRPC_XDS_RECONNECT_MAX_BACKOFF_SECONDS * 1000)) { + channel_ = xdslb_policy_->channel_control_helper()->CreateChannel( + balancer_name, args); + GPR_ASSERT(channel_ != nullptr); + StartCallLocked(); +} + +XdsLb::BalancerChannelState::~BalancerChannelState() { + grpc_channel_destroy(channel_); +} + +void XdsLb::BalancerChannelState::Orphan() { + shutting_down_ = true; + lb_calld_.reset(); + if (retry_timer_callback_pending_) grpc_timer_cancel(&lb_call_retry_timer_); + Unref(DEBUG_LOCATION, "lb_channel_orphaned"); +} + +void XdsLb::BalancerChannelState::StartCallRetryTimerLocked() { + grpc_millis next_try = lb_call_backoff_.NextAttemptTime(); + if (grpc_lb_xds_trace.enabled()) { + gpr_log(GPR_INFO, + "[xdslb %p] Failed to connect to LB server (lb_chand: %p)...", + xdslb_policy_.get(), this); + grpc_millis timeout = next_try - ExecCtx::Get()->Now(); + if (timeout > 0) { + gpr_log(GPR_INFO, "[xdslb %p] ... 
retry_timer_active in %" PRId64 "ms.", + xdslb_policy_.get(), timeout); + } else { + gpr_log(GPR_INFO, "[xdslb %p] ... retry_timer_active immediately.", + xdslb_policy_.get()); + } + } + Ref(DEBUG_LOCATION, "on_balancer_call_retry_timer").release(); + GRPC_CLOSURE_INIT(&lb_on_call_retry_, &OnCallRetryTimerLocked, this, + grpc_combiner_scheduler(xdslb_policy_->combiner())); + grpc_timer_init(&lb_call_retry_timer_, next_try, &lb_on_call_retry_); + retry_timer_callback_pending_ = true; +} + +void XdsLb::BalancerChannelState::OnCallRetryTimerLocked(void* arg, + grpc_error* error) { + BalancerChannelState* lb_chand = static_cast(arg); + lb_chand->retry_timer_callback_pending_ = false; + if (!lb_chand->shutting_down_ && error == GRPC_ERROR_NONE && + lb_chand->lb_calld_ == nullptr) { + if (grpc_lb_xds_trace.enabled()) { + gpr_log(GPR_INFO, + "[xdslb %p] Restarting call to LB server (lb_chand: %p)", + lb_chand->xdslb_policy_.get(), lb_chand); + } + lb_chand->StartCallLocked(); + } + lb_chand->Unref(DEBUG_LOCATION, "on_balancer_call_retry_timer"); +} + +void XdsLb::BalancerChannelState::StartCallLocked() { + if (shutting_down_) return; + GPR_ASSERT(channel_ != nullptr); + GPR_ASSERT(lb_calld_ == nullptr); + lb_calld_ = MakeOrphanable(Ref()); + if (grpc_lb_xds_trace.enabled()) { + gpr_log(GPR_INFO, + "[xdslb %p] Query for backends (lb_chand: %p, lb_calld: %p)", + xdslb_policy_.get(), this, lb_calld_.get()); + } + lb_calld_->StartQuery(); +} + +// +// XdsLb::BalancerChannelState::BalancerCallState // -XdsLb::BalancerCallState::BalancerCallState( - RefCountedPtr parent_xdslb_policy) +XdsLb::BalancerChannelState::BalancerCallState::BalancerCallState( + RefCountedPtr lb_chand) : InternallyRefCounted(&grpc_lb_xds_trace), - xdslb_policy_(std::move(parent_xdslb_policy)) { - GPR_ASSERT(xdslb_policy_ != nullptr); + lb_chand_(std::move(lb_chand)) { + GPR_ASSERT(xdslb_policy() != nullptr); GPR_ASSERT(!xdslb_policy()->shutting_down_); // Init the LB call. Note that the LB call will progress every time there's // activity in xdslb_policy_->interested_parties(), which is comprised of @@ -494,8 +604,8 @@ XdsLb::BalancerCallState::BalancerCallState( ? GRPC_MILLIS_INF_FUTURE : ExecCtx::Get()->Now() + xdslb_policy()->lb_call_timeout_ms_; lb_call_ = grpc_channel_create_pollset_set_call( - xdslb_policy()->lb_channel_, nullptr, GRPC_PROPAGATE_DEFAULTS, - xdslb_policy_->interested_parties(), + lb_chand_->channel_, nullptr, GRPC_PROPAGATE_DEFAULTS, + xdslb_policy()->interested_parties(), GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD, nullptr, deadline, nullptr); // Init the LB call request payload. @@ -519,7 +629,7 @@ XdsLb::BalancerCallState::BalancerCallState( grpc_combiner_scheduler(xdslb_policy()->combiner())); } -XdsLb::BalancerCallState::~BalancerCallState() { +XdsLb::BalancerChannelState::BalancerCallState::~BalancerCallState() { GPR_ASSERT(lb_call_ != nullptr); grpc_call_unref(lb_call_); grpc_metadata_array_destroy(&lb_initial_metadata_recv_); @@ -529,7 +639,7 @@ XdsLb::BalancerCallState::~BalancerCallState() { grpc_slice_unref_internal(lb_call_status_details_); } -void XdsLb::BalancerCallState::Orphan() { +void XdsLb::BalancerChannelState::BalancerCallState::Orphan() { GPR_ASSERT(lb_call_ != nullptr); // If we are here because xdslb_policy wants to cancel the call, // lb_on_balancer_status_received_ will complete the cancellation and clean @@ -544,11 +654,11 @@ void XdsLb::BalancerCallState::Orphan() { // in lb_on_balancer_status_received_ instead of here. 
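StartCallRetryTimerLocked above arms a retry timer from the channel's BackOff state, and OnCallRetryTimerLocked restarts the call only if the channel is still alive, the timer was not cancelled, and no newer call has been started in the meantime. A minimal standalone backoff calculator with the same shape (the constants below are illustrative, not gRPC's GRPC_XDS_* defaults):

    #include <algorithm>
    #include <cstdint>
    #include <random>

    class BackOff {
     public:
      BackOff(int64_t initial_ms, double multiplier, double jitter,
              int64_t max_ms)
          : initial_ms_(initial_ms), multiplier_(multiplier), jitter_(jitter),
            max_ms_(max_ms), current_ms_(initial_ms) {}

      // Delay to wait before the next attempt, with +/- jitter applied.
      int64_t NextAttemptDelayMs() {
        std::uniform_real_distribution<double> dist(1.0 - jitter_,
                                                    1.0 + jitter_);
        const int64_t delay = static_cast<int64_t>(current_ms_ * dist(rng_));
        current_ms_ = std::min<int64_t>(
            static_cast<int64_t>(current_ms_ * multiplier_), max_ms_);
        return delay;
      }

      // Called once a connection succeeds (e.g., after an initial LB
      // response), so the next failure retries quickly again.
      void Reset() { current_ms_ = initial_ms_; }

     private:
      const int64_t initial_ms_;
      const double multiplier_;
      const double jitter_;
      const int64_t max_ms_;
      int64_t current_ms_;
      std::mt19937 rng_{std::random_device{}()};
    };

    // Usage sketch:
    //   BackOff backoff(1000 /* 1s */, 1.6, 0.2, 120000 /* 2min */);
    //   schedule_retry_in(backoff.NextAttemptDelayMs());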
} -void XdsLb::BalancerCallState::StartQuery() { +void XdsLb::BalancerChannelState::BalancerCallState::StartQuery() { GPR_ASSERT(lb_call_ != nullptr); if (grpc_lb_xds_trace.enabled()) { gpr_log(GPR_INFO, "[xdslb %p] Starting LB call (lb_calld: %p, lb_call: %p)", - xdslb_policy_.get(), this, lb_call_); + xdslb_policy(), this, lb_call_); } // Create the ops. grpc_call_error call_error; @@ -616,7 +726,8 @@ void XdsLb::BalancerCallState::StartQuery() { GPR_ASSERT(GRPC_CALL_OK == call_error); } -void XdsLb::BalancerCallState::ScheduleNextClientLoadReportLocked() { +void XdsLb::BalancerChannelState::BalancerCallState:: + ScheduleNextClientLoadReportLocked() { const grpc_millis next_client_load_report_time = ExecCtx::Get()->Now() + client_stats_report_interval_; GRPC_CLOSURE_INIT(&client_load_report_closure_, @@ -627,12 +738,11 @@ void XdsLb::BalancerCallState::ScheduleNextClientLoadReportLocked() { client_load_report_timer_callback_pending_ = true; } -void XdsLb::BalancerCallState::MaybeSendClientLoadReportLocked( - void* arg, grpc_error* error) { +void XdsLb::BalancerChannelState::BalancerCallState:: + MaybeSendClientLoadReportLocked(void* arg, grpc_error* error) { BalancerCallState* lb_calld = static_cast(arg); - XdsLb* xdslb_policy = lb_calld->xdslb_policy(); lb_calld->client_load_report_timer_callback_pending_ = false; - if (error != GRPC_ERROR_NONE || lb_calld != xdslb_policy->lb_calld_.get()) { + if (error != GRPC_ERROR_NONE || !lb_calld->IsCurrentCallOnChannel()) { lb_calld->Unref(DEBUG_LOCATION, "client_load_report"); return; } @@ -646,7 +756,7 @@ void XdsLb::BalancerCallState::MaybeSendClientLoadReportLocked( } } -bool XdsLb::BalancerCallState::LoadReportCountersAreZero( +bool XdsLb::BalancerChannelState::BalancerCallState::LoadReportCountersAreZero( xds_grpclb_request* request) { XdsLbClientStats::DroppedCallCounts* drop_entries = static_cast( @@ -660,7 +770,8 @@ bool XdsLb::BalancerCallState::LoadReportCountersAreZero( } // TODO(vpowar): Use LRS to send the client Load Report. -void XdsLb::BalancerCallState::SendClientLoadReportLocked() { +void XdsLb::BalancerChannelState::BalancerCallState:: + SendClientLoadReportLocked() { // Construct message payload. GPR_ASSERT(send_message_payload_ == nullptr); xds_grpclb_request* request = @@ -681,27 +792,27 @@ void XdsLb::BalancerCallState::SendClientLoadReportLocked() { xds_grpclb_request_destroy(request); } -void XdsLb::BalancerCallState::OnInitialRequestSentLocked(void* arg, - grpc_error* error) { +void XdsLb::BalancerChannelState::BalancerCallState::OnInitialRequestSentLocked( + void* arg, grpc_error* error) { BalancerCallState* lb_calld = static_cast(arg); grpc_byte_buffer_destroy(lb_calld->send_message_payload_); lb_calld->send_message_payload_ = nullptr; // If we attempted to send a client load report before the initial request was // sent (and this lb_calld is still in use), send the load report now. 
if (lb_calld->client_load_report_is_due_ && - lb_calld == lb_calld->xdslb_policy()->lb_calld_.get()) { + lb_calld->IsCurrentCallOnChannel()) { lb_calld->SendClientLoadReportLocked(); lb_calld->client_load_report_is_due_ = false; } lb_calld->Unref(DEBUG_LOCATION, "on_initial_request_sent"); } -void XdsLb::BalancerCallState::OnBalancerMessageReceivedLocked( - void* arg, grpc_error* error) { +void XdsLb::BalancerChannelState::BalancerCallState:: + OnBalancerMessageReceivedLocked(void* arg, grpc_error* error) { BalancerCallState* lb_calld = static_cast(arg); XdsLb* xdslb_policy = lb_calld->xdslb_policy(); // Empty payload means the LB call was cancelled. - if (lb_calld != xdslb_policy->lb_calld_.get() || + if (!lb_calld->IsCurrentCallOnChannel() || lb_calld->recv_message_payload_ == nullptr) { lb_calld->Unref(DEBUG_LOCATION, "on_message_received"); return; @@ -719,20 +830,25 @@ void XdsLb::BalancerCallState::OnBalancerMessageReceivedLocked( nullptr) { // Have NOT seen initial response, look for initial response. if (initial_response->has_client_stats_report_interval) { - lb_calld->client_stats_report_interval_ = GPR_MAX( - GPR_MS_PER_SEC, xds_grpclb_duration_to_millis( - &initial_response->client_stats_report_interval)); - if (grpc_lb_xds_trace.enabled()) { + const grpc_millis interval = xds_grpclb_duration_to_millis( + &initial_response->client_stats_report_interval); + if (interval > 0) { + lb_calld->client_stats_report_interval_ = + GPR_MAX(GPR_MS_PER_SEC, interval); + } + } + if (grpc_lb_xds_trace.enabled()) { + if (lb_calld->client_stats_report_interval_ != 0) { gpr_log(GPR_INFO, "[xdslb %p] Received initial LB response message; " "client load reporting interval = %" PRId64 " milliseconds", xdslb_policy, lb_calld->client_stats_report_interval_); + } else { + gpr_log(GPR_INFO, + "[xdslb %p] Received initial LB response message; client load " + "reporting NOT enabled", + xdslb_policy); } - } else if (grpc_lb_xds_trace.enabled()) { - gpr_log(GPR_INFO, - "[xdslb %p] Received initial LB response message; client load " - "reporting NOT enabled", - xdslb_policy); } xds_grpclb_initial_response_destroy(initial_response); lb_calld->seen_initial_response_ = true; @@ -755,7 +871,23 @@ void XdsLb::BalancerCallState::OnBalancerMessageReceivedLocked( } } /* update serverlist */ + // TODO(juanlishen): Don't ingore empty serverlist. if (serverlist->num_servers > 0) { + // Pending LB channel receives a serverlist; promote it. + // Note that this call can't be on a discarded pending channel, because + // such channels don't have any current call but we have checked this call + // is a current call. + if (!lb_calld->lb_chand_->IsCurrentChannel()) { + if (grpc_lb_xds_trace.enabled()) { + gpr_log(GPR_INFO, + "[xdslb %p] Promoting pending LB channel %p to replace " + "current LB channel %p", + xdslb_policy, lb_calld->lb_chand_.get(), + lb_calld->xdslb_policy()->lb_chand_.get()); + } + lb_calld->xdslb_policy()->lb_chand_ = + std::move(lb_calld->xdslb_policy()->pending_lb_chand_); + } // Start sending client load report only after we start using the // serverlist returned from the current LB call. 
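The initial-response handling above now enables client load reporting only when the balancer sends a positive interval, and clamps that interval to at least one second (GPR_MAX(GPR_MS_PER_SEC, interval)) so a tiny value cannot turn into a hot loop of load reports. The same guard as a tiny helper (hypothetical name):

    #include <cstdint>

    constexpr int64_t kMillisPerSecond = 1000;

    // Returns 0 when reporting should stay disabled, otherwise an interval of
    // at least one second, mirroring the clamping above.
    int64_t EffectiveLoadReportIntervalMs(int64_t balancer_interval_ms) {
      if (balancer_interval_ms <= 0) return 0;
      return balancer_interval_ms < kMillisPerSecond ? kMillisPerSecond
                                                     : balancer_interval_ms;
    }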
if (lb_calld->client_stats_report_interval_ > 0 && @@ -828,37 +960,53 @@ void XdsLb::BalancerCallState::OnBalancerMessageReceivedLocked( } } -void XdsLb::BalancerCallState::OnBalancerStatusReceivedLocked( - void* arg, grpc_error* error) { +void XdsLb::BalancerChannelState::BalancerCallState:: + OnBalancerStatusReceivedLocked(void* arg, grpc_error* error) { BalancerCallState* lb_calld = static_cast(arg); XdsLb* xdslb_policy = lb_calld->xdslb_policy(); + BalancerChannelState* lb_chand = lb_calld->lb_chand_.get(); GPR_ASSERT(lb_calld->lb_call_ != nullptr); if (grpc_lb_xds_trace.enabled()) { char* status_details = grpc_slice_to_c_string(lb_calld->lb_call_status_details_); gpr_log(GPR_INFO, "[xdslb %p] Status from LB server received. Status = %d, details " - "= '%s', (lb_calld: %p, lb_call: %p), error '%s'", - xdslb_policy, lb_calld->lb_call_status_, status_details, lb_calld, - lb_calld->lb_call_, grpc_error_string(error)); + "= '%s', (lb_chand: %p, lb_calld: %p, lb_call: %p), error '%s'", + xdslb_policy, lb_calld->lb_call_status_, status_details, lb_chand, + lb_calld, lb_calld->lb_call_, grpc_error_string(error)); gpr_free(status_details); } - // If this lb_calld is still in use, this call ended because of a failure so - // we want to retry connecting. Otherwise, we have deliberately ended this - // call and no further action is required. - if (lb_calld == xdslb_policy->lb_calld_.get()) { - xdslb_policy->lb_calld_.reset(); + // Ignore status from a stale call. + if (lb_calld->IsCurrentCallOnChannel()) { + // Because this call is the current one on the channel, the channel can't + // have been swapped out; otherwise, the call should have been reset. + GPR_ASSERT(lb_chand->IsCurrentChannel() || lb_chand->IsPendingChannel()); GPR_ASSERT(!xdslb_policy->shutting_down_); - xdslb_policy->channel_control_helper()->RequestReresolution(); - if (lb_calld->seen_initial_response_) { - // If we lose connection to the LB server, reset the backoff and restart - // the LB call immediately. - xdslb_policy->lb_call_backoff_.Reset(); - xdslb_policy->StartBalancerCallLocked(); + if (lb_chand != xdslb_policy->LatestLbChannel()) { + // This channel must be the current one and there is a pending one. Swap + // in the pending one and we are done. + if (grpc_lb_xds_trace.enabled()) { + gpr_log(GPR_INFO, + "[xdslb %p] Promoting pending LB channel %p to replace " + "current LB channel %p", + xdslb_policy, lb_calld->lb_chand_.get(), + lb_calld->xdslb_policy()->lb_chand_.get()); + } + xdslb_policy->lb_chand_ = std::move(xdslb_policy->pending_lb_chand_); } else { - // If this LB call fails establishing any connection to the LB server, - // retry later. - xdslb_policy->StartBalancerCallRetryTimerLocked(); + // This channel is the most recently created one. Try to restart the call + // and reresolve. + lb_chand->lb_calld_.reset(); + if (lb_calld->seen_initial_response_) { + // If we lost connection to the LB server, reset the backoff and restart + // the LB call immediately. + lb_chand->lb_call_backoff_.Reset(); + lb_chand->StartCallLocked(); + } else { + // If we failed to connect to the LB server, retry later. 
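OnBalancerStatusReceivedLocked above now has to reason about two channels. The outcome boils down to: ignore the status if the call is stale; promote the pending channel if the ended call was on an older current channel; otherwise restart on the same channel, immediately (with the backoff reset) if the call had seen an initial response, or after a backoff delay if it never connected. A condensed decision table (illustrative types, not the gRPC code):

    enum class CallEndAction {
      kIgnoreStaleCall,       // not the channel's current call any more
      kPromotePendingChannel,
      kRestartImmediately,    // had seen an initial response: reset backoff
      kRetryAfterBackoff,     // never connected: arm the retry timer
    };

    struct CallEndInfo {
      bool is_current_call_on_channel;
      bool channel_is_latest;  // no newer pending channel exists
      bool seen_initial_response;
    };

    CallEndAction OnBalancerCallEnded(const CallEndInfo& info) {
      if (!info.is_current_call_on_channel) {
        return CallEndAction::kIgnoreStaleCall;
      }
      if (!info.channel_is_latest) {
        return CallEndAction::kPromotePendingChannel;
      }
      return info.seen_initial_response ? CallEndAction::kRestartImmediately
                                        : CallEndAction::kRetryAfterBackoff;
    }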
+ lb_chand->StartCallRetryTimerLocked(); + } + xdslb_policy->channel_control_helper()->RequestReresolution(); } } lb_calld->Unref(DEBUG_LOCATION, "lb_call_ended"); @@ -868,53 +1016,23 @@ void XdsLb::BalancerCallState::OnBalancerStatusReceivedLocked( // helper code for creating balancer channel // -UniquePtr ExtractBalancerAddresses( - const ServerAddressList& addresses) { - auto balancer_addresses = MakeUnique(); - for (size_t i = 0; i < addresses.size(); ++i) { - if (addresses[i].IsBalancer()) { - balancer_addresses->emplace_back(addresses[i]); - } - } - return balancer_addresses; -} - -/* Returns the channel args for the LB channel, used to create a bidirectional - * stream for the reception of load balancing updates. - * - * Inputs: - * - \a addresses: corresponding to the balancers. - * - \a response_generator: in order to propagate updates from the resolver - * above the grpclb policy. - * - \a args: other args inherited from the xds policy. */ -grpc_channel_args* BuildBalancerChannelArgs( - const ServerAddressList& addresses, - FakeResolverResponseGenerator* response_generator, - const grpc_channel_args* args) { - UniquePtr balancer_addresses = - ExtractBalancerAddresses(addresses); - // Channel args to remove. +// Returns the channel args for the LB channel, used to create a bidirectional +// stream for the reception of load balancing updates. +grpc_channel_args* BuildBalancerChannelArgs(const grpc_channel_args* args) { static const char* args_to_remove[] = { // LB policy name, since we want to use the default (pick_first) in // the LB channel. GRPC_ARG_LB_POLICY_NAME, + // The service config that contains the LB config. We don't want to + // recursively use xds in the LB channel. + GRPC_ARG_SERVICE_CONFIG, // The channel arg for the server URI, since that will be different for // the LB channel than for the parent channel. The client channel // factory will re-add this arg with the right value. GRPC_ARG_SERVER_URI, // The resolved addresses, which will be generated by the name resolver - // used in the LB channel. Note that the LB channel will use the fake - // resolver, so this won't actually generate a query to DNS (or some - // other name service). However, the addresses returned by the fake - // resolver will have is_balancer=false, whereas our own addresses have - // is_balancer=true. We need the LB channel to return addresses with - // is_balancer=false so that it does not wind up recursively using the - // xds LB policy, as per the special case logic in client_channel.c. + // used in the LB channel. GRPC_ARG_SERVER_ADDRESS_LIST, - // The fake resolver response generator, because we are replacing it - // with the one from the xds policy, used to propagate updates to - // the LB channel. - GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR, // The LB channel should use the authority indicated by the target // authority table (see \a grpc_lb_policy_xds_modify_lb_channel_args), // as opposed to the authority from the parent channel. @@ -926,14 +1044,6 @@ grpc_channel_args* BuildBalancerChannelArgs( }; // Channel args to add. const grpc_arg args_to_add[] = { - // New server address list. - // Note that we pass these in both when creating the LB channel - // and via the fake resolver. The latter is what actually gets used. - CreateServerAddressListChannelArg(balancer_addresses.get()), - // The fake resolver response generator, which we use to inject - // address updates into the LB channel. 
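With the fake resolver gone, BuildBalancerChannelArgs above reduces to a plain copy-and-filter: drop the args that must not leak into the LB channel (among them the LB policy name, the service config, the parent channel's server URI, and its resolved address list) and add the ones the LB channel needs. A generic sketch of that copy/add/remove operation over a simple key/value map rather than grpc_channel_args (the key strings are placeholders, not the real GRPC_ARG_* values):

    #include <map>
    #include <set>
    #include <string>

    using ChannelArgs = std::map<std::string, std::string>;

    // Copy `base`, dropping every key in `to_remove`, then overlay `to_add`.
    ChannelArgs CopyAndAddAndRemove(const ChannelArgs& base,
                                    const std::set<std::string>& to_remove,
                                    const ChannelArgs& to_add) {
      ChannelArgs result;
      for (const auto& kv : base) {
        if (to_remove.count(kv.first) == 0) result.insert(kv);
      }
      for (const auto& kv : to_add) result[kv.first] = kv.second;
      return result;
    }

    // e.g. for the LB channel (placeholder key names):
    //   CopyAndAddAndRemove(parent_args,
    //                       {"lb_policy_name", "service_config",
    //                        "server_uri", "server_address_list"},
    //                       {{"address_is_xds_load_balancer", "1"}});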
- grpc_core::FakeResolverResponseGenerator::MakeChannelArg( - response_generator), // A channel arg indicating the target is a xds load balancer. grpc_channel_arg_integer_create( const_cast(GRPC_ARG_ADDRESS_IS_XDS_LOAD_BALANCER), 1), @@ -954,21 +1064,8 @@ grpc_channel_args* BuildBalancerChannelArgs( // ctor and dtor // -XdsLb::XdsLb(Args args) - : LoadBalancingPolicy(std::move(args)), - response_generator_(MakeRefCounted()), - lb_call_backoff_( - BackOff::Options() - .set_initial_backoff(GRPC_XDS_INITIAL_CONNECT_BACKOFF_SECONDS * - 1000) - .set_multiplier(GRPC_XDS_RECONNECT_BACKOFF_MULTIPLIER) - .set_jitter(GRPC_XDS_RECONNECT_JITTER) - .set_max_backoff(GRPC_XDS_RECONNECT_MAX_BACKOFF_SECONDS * 1000)) { - // Initialization. - gpr_mu_init(&lb_channel_mu_); - GRPC_CLOSURE_INIT(&lb_channel_on_connectivity_changed_, - &XdsLb::OnBalancerChannelConnectivityChangedLocked, this, - grpc_combiner_scheduler(args.combiner)); +XdsLb::XdsLb(Args args) : LoadBalancingPolicy(std::move(args)) { + gpr_mu_init(&lb_chand_mu_); // Record server name. const grpc_arg* arg = grpc_channel_args_find(args.args, GRPC_ARG_SERVER_URI); const char* server_uri = grpc_channel_arg_get_string(arg); @@ -992,7 +1089,7 @@ XdsLb::XdsLb(Args args) } XdsLb::~XdsLb() { - gpr_mu_destroy(&lb_channel_mu_); + gpr_mu_destroy(&lb_chand_mu_); gpr_free((void*)server_name_); grpc_channel_args_destroy(args_); if (serverlist_ != nullptr) { @@ -1002,10 +1099,6 @@ XdsLb::~XdsLb() { void XdsLb::ShutdownLocked() { shutting_down_ = true; - lb_calld_.reset(); - if (retry_timer_callback_pending_) { - grpc_timer_cancel(&lb_call_retry_timer_); - } if (fallback_timer_callback_pending_) { grpc_timer_cancel(&lb_fallback_timer_); } @@ -1014,11 +1107,10 @@ void XdsLb::ShutdownLocked() { // destroying the channel triggers a last callback to // OnBalancerChannelConnectivityChangedLocked(), and we need to be // alive when that callback is invoked. - if (lb_channel_ != nullptr) { - gpr_mu_lock(&lb_channel_mu_); - grpc_channel_destroy(lb_channel_); - lb_channel_ = nullptr; - gpr_mu_unlock(&lb_channel_mu_); + { + MutexLock lock(&lb_chand_mu_); + lb_chand_.reset(); + pending_lb_chand_.reset(); } } @@ -1027,8 +1119,11 @@ void XdsLb::ShutdownLocked() { // void XdsLb::ResetBackoffLocked() { - if (lb_channel_ != nullptr) { - grpc_channel_reset_connect_backoff(lb_channel_); + if (lb_chand_ != nullptr) { + grpc_channel_reset_connect_backoff(lb_chand_->channel()); + } + if (pending_lb_chand_ != nullptr) { + grpc_channel_reset_connect_backoff(pending_lb_chand_->channel()); } if (child_policy_ != nullptr) { child_policy_->ResetBackoffLocked(); @@ -1037,12 +1132,19 @@ void XdsLb::ResetBackoffLocked() { void XdsLb::FillChildRefsForChannelz(channelz::ChildRefsList* child_subchannels, channelz::ChildRefsList* child_channels) { - // delegate to the child_policy_ to fill the children subchannels. + // Delegate to the child_policy_ to fill the children subchannels. 
child_policy_->FillChildRefsForChannelz(child_subchannels, child_channels); - MutexLock lock(&lb_channel_mu_); - if (lb_channel_ != nullptr) { + MutexLock lock(&lb_chand_mu_); + if (lb_chand_ != nullptr) { grpc_core::channelz::ChannelNode* channel_node = - grpc_channel_get_channelz_node(lb_channel_); + grpc_channel_get_channelz_node(lb_chand_->channel()); + if (channel_node != nullptr) { + child_channels->push_back(channel_node->uuid()); + } + } + if (pending_lb_chand_ != nullptr) { + grpc_core::channelz::ChannelNode* channel_node = + grpc_channel_get_channelz_node(pending_lb_chand_->channel()); if (channel_node != nullptr) { child_channels->push_back(channel_node->uuid()); } @@ -1069,22 +1171,29 @@ void XdsLb::ProcessChannelArgsLocked(const grpc_channel_args& args) { args_ = grpc_channel_args_copy_and_add_and_remove( &args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1); // Construct args for balancer channel. - grpc_channel_args* lb_channel_args = - BuildBalancerChannelArgs(*addresses, response_generator_.get(), &args); - // Create balancer channel if needed. - if (lb_channel_ == nullptr) { - char* uri_str; - gpr_asprintf(&uri_str, "fake:///%s", server_name_); - gpr_mu_lock(&lb_channel_mu_); - lb_channel_ = channel_control_helper()->CreateChannel( - uri_str, GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, *lb_channel_args); - gpr_mu_unlock(&lb_channel_mu_); - GPR_ASSERT(lb_channel_ != nullptr); - gpr_free(uri_str); + grpc_channel_args* lb_channel_args = BuildBalancerChannelArgs(&args); + // Create an LB channel if we don't have one yet or the balancer name has + // changed from the last received one. + bool create_lb_channel = lb_chand_ == nullptr; + if (lb_chand_ != nullptr) { + UniquePtr last_balancer_name( + grpc_channel_get_target(LatestLbChannel()->channel())); + create_lb_channel = + strcmp(last_balancer_name.get(), balancer_name_.get()) != 0; + } + if (create_lb_channel) { + OrphanablePtr lb_chand = + MakeOrphanable(balancer_name_.get(), + *lb_channel_args, Ref()); + if (lb_chand_ == nullptr || !lb_chand_->HasActiveCall()) { + GPR_ASSERT(pending_lb_chand_ == nullptr); + // If we do not have a working LB channel yet, use the newly created one. + lb_chand_ = std::move(lb_chand); + } else { + // Otherwise, wait until the new LB channel to be ready to swap it in. + pending_lb_chand_ = std::move(lb_chand); + } } - // Propagate updates to the LB channel (pick_first) through the fake - // resolver. - response_generator_->SetResponse(lb_channel_args); grpc_channel_args_destroy(lb_channel_args); } @@ -1124,12 +1233,13 @@ void XdsLb::ParseLbConfig(Config* xds_config) { void XdsLb::UpdateLocked(const grpc_channel_args& args, RefCountedPtr lb_config) { - const bool is_initial_update = lb_channel_ == nullptr; + const bool is_initial_update = lb_chand_ == nullptr; ParseLbConfig(lb_config.get()); // TODO(juanlishen): Pass fallback policy config update after fallback policy // is added. if (balancer_name_ == nullptr) { gpr_log(GPR_ERROR, "[xdslb %p] LB config parsing fails.", this); + return; } ProcessChannelArgsLocked(args); // Update the existing child policy. @@ -1149,24 +1259,6 @@ void XdsLb::UpdateLocked(const grpc_channel_args& args, fallback_timer_callback_pending_ = true; grpc_timer_init(&lb_fallback_timer_, deadline, &lb_on_fallback_); } - StartBalancerCallLocked(); - } else if (!watching_lb_channel_) { - // If this is not the initial update and we're not already watching - // the LB channel's connectivity state, start a watch now. 
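ProcessChannelArgsLocked above now creates a new balancer channel only when there is none yet or the balancer name differs from the most recently created channel's, and then decides whether to swap it in immediately (no current channel, or the current one has no active call) or park it as pending_lb_chand_ until it proves itself. The same decision as a standalone function (illustrative names):

    #include <string>

    enum class LbChannelAction {
      kKeepExisting,    // same balancer name as the most recent channel
      kReplaceCurrent,  // no current channel, or it has no active call yet
      kSetAsPending,    // current channel is serving; promote the new one
                        // only once it delivers a serverlist
    };

    LbChannelAction OnBalancerNameUpdate(bool have_current_channel,
                                         bool current_has_active_call,
                                         const std::string& latest_balancer_name,
                                         const std::string& new_balancer_name) {
      if (have_current_channel && latest_balancer_name == new_balancer_name) {
        return LbChannelAction::kKeepExisting;
      }
      if (!have_current_channel || !current_has_active_call) {
        return LbChannelAction::kReplaceCurrent;
      }
      return LbChannelAction::kSetAsPending;
    }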
This - // ensures that we'll know when to switch to a new balancer call. - lb_channel_connectivity_ = grpc_channel_check_connectivity_state( - lb_channel_, true /* try to connect */); - grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element( - grpc_channel_get_channel_stack(lb_channel_)); - GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter); - watching_lb_channel_ = true; - // Ref held by closure. - Ref(DEBUG_LOCATION, "watch_lb_channel_connectivity").release(); - grpc_client_channel_watch_connectivity_state( - client_channel_elem, - grpc_polling_entity_create_from_pollset_set(interested_parties()), - &lb_channel_connectivity_, &lb_channel_on_connectivity_changed_, - nullptr); } } @@ -1174,20 +1266,6 @@ void XdsLb::UpdateLocked(const grpc_channel_args& args, // code for balancer channel and call // -void XdsLb::StartBalancerCallLocked() { - GPR_ASSERT(lb_channel_ != nullptr); - if (shutting_down_) return; - // Init the LB call data. - GPR_ASSERT(lb_calld_ == nullptr); - lb_calld_ = MakeOrphanable(Ref()); - if (grpc_lb_xds_trace.enabled()) { - gpr_log(GPR_INFO, - "[xdslb %p] Query for backends (lb_channel: %p, lb_calld: %p)", - this, lb_channel_, lb_calld_.get()); - } - lb_calld_->StartQuery(); -} - void XdsLb::OnFallbackTimerLocked(void* arg, grpc_error* error) { XdsLb* xdslb_policy = static_cast(arg); xdslb_policy->fallback_timer_callback_pending_ = false; @@ -1204,88 +1282,6 @@ void XdsLb::OnFallbackTimerLocked(void* arg, grpc_error* error) { xdslb_policy->Unref(DEBUG_LOCATION, "on_fallback_timer"); } -void XdsLb::StartBalancerCallRetryTimerLocked() { - grpc_millis next_try = lb_call_backoff_.NextAttemptTime(); - if (grpc_lb_xds_trace.enabled()) { - gpr_log(GPR_INFO, "[xdslb %p] Connection to LB server lost...", this); - grpc_millis timeout = next_try - ExecCtx::Get()->Now(); - if (timeout > 0) { - gpr_log(GPR_INFO, "[xdslb %p] ... retry_timer_active in %" PRId64 "ms.", - this, timeout); - } else { - gpr_log(GPR_INFO, "[xdslb %p] ... retry_timer_active immediately.", this); - } - } - // TODO(roth): We currently track this ref manually. Once the - // ClosureRef API is ready, we should pass the RefCountedPtr<> along - // with the callback. - auto self = Ref(DEBUG_LOCATION, "on_balancer_call_retry_timer"); - self.release(); - GRPC_CLOSURE_INIT(&lb_on_call_retry_, &XdsLb::OnBalancerCallRetryTimerLocked, - this, grpc_combiner_scheduler(combiner())); - retry_timer_callback_pending_ = true; - grpc_timer_init(&lb_call_retry_timer_, next_try, &lb_on_call_retry_); -} - -void XdsLb::OnBalancerCallRetryTimerLocked(void* arg, grpc_error* error) { - XdsLb* xdslb_policy = static_cast(arg); - xdslb_policy->retry_timer_callback_pending_ = false; - if (!xdslb_policy->shutting_down_ && error == GRPC_ERROR_NONE && - xdslb_policy->lb_calld_ == nullptr) { - if (grpc_lb_xds_trace.enabled()) { - gpr_log(GPR_INFO, "[xdslb %p] Restarting call to LB server", - xdslb_policy); - } - xdslb_policy->StartBalancerCallLocked(); - } - xdslb_policy->Unref(DEBUG_LOCATION, "on_balancer_call_retry_timer"); -} - -// Invoked as part of the update process. It continues watching the LB channel -// until it shuts down or becomes READY. It's invoked even if the LB channel -// stayed READY throughout the update (for example if the update is identical). -void XdsLb::OnBalancerChannelConnectivityChangedLocked(void* arg, - grpc_error* error) { - XdsLb* xdslb_policy = static_cast(arg); - if (xdslb_policy->shutting_down_) goto done; - // Re-initialize the lb_call. 
This should also take care of updating the - // child policy. Note that the current child policy, if any, will - // stay in effect until an update from the new lb_call is received. - switch (xdslb_policy->lb_channel_connectivity_) { - case GRPC_CHANNEL_CONNECTING: - case GRPC_CHANNEL_TRANSIENT_FAILURE: { - // Keep watching the LB channel. - grpc_channel_element* client_channel_elem = - grpc_channel_stack_last_element( - grpc_channel_get_channel_stack(xdslb_policy->lb_channel_)); - GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter); - grpc_client_channel_watch_connectivity_state( - client_channel_elem, - grpc_polling_entity_create_from_pollset_set( - xdslb_policy->interested_parties()), - &xdslb_policy->lb_channel_connectivity_, - &xdslb_policy->lb_channel_on_connectivity_changed_, nullptr); - break; - } - // The LB channel may be IDLE because it's shut down before the update. - // Restart the LB call to kick the LB channel into gear. - case GRPC_CHANNEL_IDLE: - case GRPC_CHANNEL_READY: - xdslb_policy->lb_calld_.reset(); - if (xdslb_policy->retry_timer_callback_pending_) { - grpc_timer_cancel(&xdslb_policy->lb_call_retry_timer_); - } - xdslb_policy->lb_call_backoff_.Reset(); - xdslb_policy->StartBalancerCallLocked(); - // Fall through. - case GRPC_CHANNEL_SHUTDOWN: - done: - xdslb_policy->watching_lb_channel_ = false; - xdslb_policy->Unref(DEBUG_LOCATION, - "watch_lb_channel_connectivity_cb_shutdown"); - } -} - // // code for interacting with the child policy // @@ -1307,11 +1303,14 @@ grpc_channel_args* XdsLb::CreateChildPolicyArgsLocked() { grpc_channel_arg_integer_create( const_cast(GRPC_ARG_ADDRESS_IS_BACKEND_FROM_XDS_LOAD_BALANCER), 1), + // Inhibit client-side health checking, since the balancer does + // this for us. + grpc_channel_arg_integer_create( + const_cast(GRPC_ARG_INHIBIT_HEALTH_CHECKING), 1), }; - grpc_channel_args* args = grpc_channel_args_copy_and_add_and_remove( + return grpc_channel_args_copy_and_add_and_remove( args_, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), args_to_add, GPR_ARRAY_SIZE(args_to_add)); - return args; } void XdsLb::CreateChildPolicyLocked(const char* name, Args args) { @@ -1367,18 +1366,6 @@ class XdsFactory : public LoadBalancingPolicyFactory { public: OrphanablePtr CreateLoadBalancingPolicy( LoadBalancingPolicy::Args args) const override { - /* Count the number of gRPC-LB addresses. There must be at least one. 
*/ - const ServerAddressList* addresses = - FindServerAddressListChannelArg(args.args); - if (addresses == nullptr) return nullptr; - bool found_balancer_address = false; - for (size_t i = 0; i < addresses->size(); ++i) { - if ((*addresses)[i].IsBalancer()) { - found_balancer_address = true; - break; - } - } - if (!found_balancer_address) return nullptr; return OrphanablePtr<LoadBalancingPolicy>(New<XdsLb>(std::move(args))); } diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc index 55c646e6eed..7f8c232d6d0 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc +++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc @@ -33,55 +33,12 @@ #include "src/core/lib/security/transport/target_authority_table.h" #include "src/core/lib/slice/slice_internal.h" -namespace grpc_core { -namespace { - -int BalancerNameCmp(const grpc_core::UniquePtr<char>& a, - const grpc_core::UniquePtr<char>& b) { - return strcmp(a.get(), b.get()); } - -RefCountedPtr<TargetAuthorityTable> CreateTargetAuthorityTable( - const ServerAddressList& addresses) { - TargetAuthorityTable::Entry* target_authority_entries = - static_cast<TargetAuthorityTable::Entry*>( - gpr_zalloc(sizeof(*target_authority_entries) * addresses.size())); - for (size_t i = 0; i < addresses.size(); ++i) { - char* addr_str; - GPR_ASSERT( - grpc_sockaddr_to_string(&addr_str, &addresses[i].address(), true) > 0); - target_authority_entries[i].key = grpc_slice_from_copied_string(addr_str); - gpr_free(addr_str); - char* balancer_name = grpc_channel_arg_get_string(grpc_channel_args_find( - addresses[i].args(), GRPC_ARG_ADDRESS_BALANCER_NAME)); - target_authority_entries[i].value.reset(gpr_strdup(balancer_name)); - } - RefCountedPtr<TargetAuthorityTable> target_authority_table = - TargetAuthorityTable::Create(addresses.size(), target_authority_entries, - BalancerNameCmp); - gpr_free(target_authority_entries); - return target_authority_table; -} - -} // namespace -} // namespace grpc_core - grpc_channel_args* grpc_lb_policy_xds_modify_lb_channel_args( grpc_channel_args* args) { const char* args_to_remove[1]; size_t num_args_to_remove = 0; grpc_arg args_to_add[2]; size_t num_args_to_add = 0; - // Add arg for targets info table. - grpc_core::ServerAddressList* addresses = - grpc_core::FindServerAddressListChannelArg(args); - GPR_ASSERT(addresses != nullptr); - grpc_core::RefCountedPtr<grpc_core::TargetAuthorityTable> - target_authority_table = - grpc_core::CreateTargetAuthorityTable(*addresses); - args_to_add[num_args_to_add++] = - grpc_core::CreateTargetAuthorityTableChannelArg( - target_authority_table.get()); // Substitute the channel credentials with a version without call // credentials: the load balancer is not necessarily trusted to handle // bearer token credentials.
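For readers following the CreateChildPolicyArgsLocked() hunk above: the new GRPC_ARG_INHIBIT_HEALTH_CHECKING arg is attached through the usual grpc_channel_args helpers. The following is a minimal sketch of that pattern, not code from this PR; the key to remove and the function name are made-up placeholders, and only the helper calls that already appear in the hunk (plus the include paths in this tree) are assumed.

    // Sketch (C++): build a derived grpc_channel_args, adding two integer args
    // and dropping one existing key, mirroring the shape of
    // XdsLb::CreateChildPolicyArgsLocked() in the hunk above.
    // Assumes the two GRPC_ARG_* constants are visible from the xds LB policy /
    // client channel headers.
    #include "src/core/lib/channel/channel_args.h"  // grpc_channel_args helpers
    #include "src/core/lib/gpr/useful.h"            // GPR_ARRAY_SIZE

    grpc_channel_args* BuildChildPolicyArgs(const grpc_channel_args* base_args) {
      // Placeholder key; a real caller lists the args it wants stripped.
      static const char* kKeysToRemove[] = {"grpc.example_key_to_remove"};
      grpc_arg args_to_add[] = {
          grpc_channel_arg_integer_create(
              const_cast<char*>(GRPC_ARG_ADDRESS_IS_BACKEND_FROM_XDS_LOAD_BALANCER),
              1),
          grpc_channel_arg_integer_create(
              const_cast<char*>(GRPC_ARG_INHIBIT_HEALTH_CHECKING), 1),
      };
      // Returns a freshly allocated copy; the caller owns it and must release it
      // with grpc_channel_args_destroy().
      return grpc_channel_args_copy_and_add_and_remove(
          base_args, kKeysToRemove, GPR_ARRAY_SIZE(kKeysToRemove), args_to_add,
          GPR_ARRAY_SIZE(args_to_add));
    }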
diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc index 79b7bdbe338..90094974a14 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc +++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc @@ -161,10 +161,10 @@ void xds_grpclb_request_destroy(xds_grpclb_request* request) { typedef grpc_lb_v1_LoadBalanceResponse xds_grpclb_response; xds_grpclb_initial_response* xds_grpclb_initial_response_parse( - grpc_slice encoded_xds_grpclb_response) { - pb_istream_t stream = - pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_xds_grpclb_response), - GRPC_SLICE_LENGTH(encoded_xds_grpclb_response)); + const grpc_slice& encoded_xds_grpclb_response) { + pb_istream_t stream = pb_istream_from_buffer( + const_cast<uint8_t*>(GRPC_SLICE_START_PTR(encoded_xds_grpclb_response)), + GRPC_SLICE_LENGTH(encoded_xds_grpclb_response)); xds_grpclb_response res; memset(&res, 0, sizeof(xds_grpclb_response)); if (GPR_UNLIKELY( @@ -185,10 +185,10 @@ xds_grpclb_initial_response* xds_grpclb_initial_response_parse( } xds_grpclb_serverlist* xds_grpclb_response_parse_serverlist( - grpc_slice encoded_xds_grpclb_response) { - pb_istream_t stream = - pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_xds_grpclb_response), - GRPC_SLICE_LENGTH(encoded_xds_grpclb_response)); + const grpc_slice& encoded_xds_grpclb_response) { + pb_istream_t stream = pb_istream_from_buffer( + const_cast<uint8_t*>(GRPC_SLICE_START_PTR(encoded_xds_grpclb_response)), + GRPC_SLICE_LENGTH(encoded_xds_grpclb_response)); pb_istream_t stream_at_start = stream; xds_grpclb_serverlist* sl = static_cast<xds_grpclb_serverlist*>( gpr_zalloc(sizeof(xds_grpclb_serverlist))); diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h b/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h index 67049956417..e52d20f8658 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h +++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h @@ -55,11 +55,11 @@ void xds_grpclb_request_destroy(xds_grpclb_request* request); /** Parse (ie, decode) the bytes in \a encoded_xds_grpclb_response as a \a * xds_grpclb_initial_response */ xds_grpclb_initial_response* xds_grpclb_initial_response_parse( - grpc_slice encoded_xds_grpclb_response); + const grpc_slice& encoded_xds_grpclb_response); /** Parse the list of servers from an encoded \a xds_grpclb_response */ xds_grpclb_serverlist* xds_grpclb_response_parse_serverlist( - grpc_slice encoded_xds_grpclb_response); + const grpc_slice& encoded_xds_grpclb_response); /** Return a copy of \a sl. The caller is responsible for calling \a * xds_grpclb_destroy_serverlist on the returned copy. */ diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.h b/src/core/ext/filters/client_channel/lb_policy_factory.h index 79503f2a562..1da4b7c6956 100644 --- a/src/core/ext/filters/client_channel/lb_policy_factory.h +++ b/src/core/ext/filters/client_channel/lb_policy_factory.h @@ -31,10 +31,7 @@ class LoadBalancingPolicyFactory { public: /// Returns a new LB policy instance. virtual OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy( - LoadBalancingPolicy::Args args) const { - std::move(args); // Suppress clang-tidy complaint. - GRPC_ABSTRACT; - } + LoadBalancingPolicy::Args) const GRPC_ABSTRACT; /// Returns the LB policy name that this factory provides. /// Caller does NOT take ownership of result.
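A recurring mechanical change in this PR, visible above and again in the bin_decoder, bin_encoder, and frame-parser hunks further down, is switching grpc_slice parameters from pass-by-value to const grpc_slice&. Only the slice struct itself (refcount pointer plus inlined payload) stops being copied; the byte payload was never copied either way, and reads still go through the usual slice macros. A small illustrative sketch, independent of the PR; the helper name is invented, and the only assumed APIs are the public slice macros and grpc_slice_from_static_string from <grpc/slice.h>.

    #include <grpc/slice.h>

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Hypothetical helper: count trailing '=' padding bytes of a base64 slice.
    // Passing by const reference avoids copying the grpc_slice struct on each
    // call. On a const slice the start/end macros hand back pointers that are
    // treated as pointing to const bytes, which is consistent with the
    // const uint8_t* cursors and const_cast added in the hunks above and below.
    static size_t CountBase64Padding(const grpc_slice& input) {
      const uint8_t* start = GRPC_SLICE_START_PTR(input);
      const uint8_t* cur = GRPC_SLICE_END_PTR(input);
      size_t pad = 0;
      while (cur != start && *(cur - 1) == '=') {
        --cur;
        ++pad;
      }
      return pad;
    }

    int main() {
      // Static-string slices own no memory, so nothing needs to be released here.
      grpc_slice s = grpc_slice_from_static_string("aGVsbG8=");
      std::printf("padding bytes: %zu\n", CountBase64Padding(s));
      return 0;
    }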
diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc index 258339491c1..3489f3d491b 100644 --- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc +++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc @@ -86,7 +86,14 @@ FakeResolver::FakeResolver(const ResolverArgs& args) : Resolver(args.combiner) { channel_args_ = grpc_channel_args_copy(args.args); FakeResolverResponseGenerator* response_generator = FakeResolverResponseGenerator::GetFromArgs(args.args); - if (response_generator != nullptr) response_generator->resolver_ = this; + if (response_generator != nullptr) { + response_generator->resolver_ = this; + if (response_generator->response_ != nullptr) { + response_generator->SetResponse(response_generator->response_); + grpc_channel_args_destroy(response_generator->response_); + response_generator->response_ = nullptr; + } + } } FakeResolver::~FakeResolver() { @@ -114,6 +121,9 @@ void FakeResolver::RequestReresolutionLocked() { void FakeResolver::MaybeFinishNextLocked() { if (next_completion_ != nullptr && (next_results_ != nullptr || return_failure_)) { + // When both next_results_ and channel_args_ contain an arg with the same + // name, only the one in next_results_ will be kept since next_results_ is + // before channel_args_. *target_result_ = return_failure_ ? nullptr : grpc_channel_args_union(next_results_, channel_args_); @@ -157,15 +167,19 @@ void FakeResolverResponseGenerator::SetResponseLocked(void* arg, void FakeResolverResponseGenerator::SetResponse(grpc_channel_args* response) { GPR_ASSERT(response != nullptr); - GPR_ASSERT(resolver_ != nullptr); - SetResponseClosureArg* closure_arg = New(); - closure_arg->generator = this; - closure_arg->response = grpc_channel_args_copy(response); - GRPC_CLOSURE_SCHED( - GRPC_CLOSURE_INIT(&closure_arg->set_response_closure, SetResponseLocked, - closure_arg, - grpc_combiner_scheduler(resolver_->combiner())), - GRPC_ERROR_NONE); + if (resolver_ != nullptr) { + SetResponseClosureArg* closure_arg = New(); + closure_arg->generator = this; + closure_arg->response = grpc_channel_args_copy(response); + GRPC_CLOSURE_SCHED( + GRPC_CLOSURE_INIT(&closure_arg->set_response_closure, SetResponseLocked, + closure_arg, + grpc_combiner_scheduler(resolver_->combiner())), + GRPC_ERROR_NONE); + } else { + GPR_ASSERT(response_ == nullptr); + response_ = grpc_channel_args_copy(response); + } } void FakeResolverResponseGenerator::SetReresolutionResponseLocked( diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h index d86111c3829..f423e6d46db 100644 --- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h +++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h @@ -44,7 +44,9 @@ class FakeResolverResponseGenerator FakeResolverResponseGenerator() {} // Instructs the fake resolver associated with the response generator - // instance to trigger a new resolution with the specified response. + // instance to trigger a new resolution with the specified response. If the + // resolver is not available yet, delays response setting until it is. This + // can be called at most once before the resolver is available. 
void SetResponse(grpc_channel_args* next_response); // Sets the re-resolution response, which is returned by the fake resolver @@ -79,6 +81,7 @@ class FakeResolverResponseGenerator static void SetFailureLocked(void* arg, grpc_error* error); FakeResolver* resolver_ = nullptr; // Do not own. + grpc_channel_args* response_ = nullptr; }; } // namespace grpc_core diff --git a/src/core/ext/filters/client_channel/resolving_lb_policy.cc b/src/core/ext/filters/client_channel/resolving_lb_policy.cc index 02a7af54588..a02a7e8acdb 100644 --- a/src/core/ext/filters/client_channel/resolving_lb_policy.cc +++ b/src/core/ext/filters/client_channel/resolving_lb_policy.cc @@ -80,10 +80,10 @@ class ResolvingLoadBalancingPolicy::ResolvingControlHelper return parent_->channel_control_helper()->CreateSubchannel(args); } - grpc_channel* CreateChannel(const char* target, grpc_client_channel_type type, + grpc_channel* CreateChannel(const char* target, const grpc_channel_args& args) override { if (parent_->resolver_ == nullptr) return nullptr; // Shutting down. - return parent_->channel_control_helper()->CreateChannel(target, type, args); + return parent_->channel_control_helper()->CreateChannel(target, args); } void UpdateState(grpc_connectivity_state state, grpc_error* state_error, diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create.cc b/src/core/ext/transport/chttp2/client/insecure/channel_create.cc index 8aabcfa2000..0d61abd2a01 100644 --- a/src/core/ext/transport/chttp2/client/insecure/channel_create.cc +++ b/src/core/ext/transport/chttp2/client/insecure/channel_create.cc @@ -33,50 +33,53 @@ #include "src/core/lib/surface/api_trace.h" #include "src/core/lib/surface/channel.h" -static void client_channel_factory_ref( - grpc_client_channel_factory* cc_factory) {} +namespace grpc_core { -static void client_channel_factory_unref( - grpc_client_channel_factory* cc_factory) {} - -static grpc_core::Subchannel* client_channel_factory_create_subchannel( - grpc_client_channel_factory* cc_factory, const grpc_channel_args* args) { - grpc_channel_args* new_args = grpc_default_authority_add_if_not_present(args); - grpc_connector* connector = grpc_chttp2_connector_create(); - grpc_core::Subchannel* s = grpc_core::Subchannel::Create(connector, new_args); - grpc_connector_unref(connector); - grpc_channel_args_destroy(new_args); - return s; -} +class Chttp2InsecureClientChannelFactory : public ClientChannelFactory { + public: + Subchannel* CreateSubchannel(const grpc_channel_args* args) override { + grpc_channel_args* new_args = + grpc_default_authority_add_if_not_present(args); + grpc_connector* connector = grpc_chttp2_connector_create(); + Subchannel* s = Subchannel::Create(connector, new_args); + grpc_connector_unref(connector); + grpc_channel_args_destroy(new_args); + return s; + } -static grpc_channel* client_channel_factory_create_channel( - grpc_client_channel_factory* cc_factory, const char* target, - grpc_client_channel_type type, const grpc_channel_args* args) { - if (target == nullptr) { - gpr_log(GPR_ERROR, "cannot create channel with NULL target name"); - return nullptr; + grpc_channel* CreateChannel(const char* target, + const grpc_channel_args* args) override { + if (target == nullptr) { + gpr_log(GPR_ERROR, "cannot create channel with NULL target name"); + return nullptr; + } + // Add channel arg containing the server URI. 
+ UniquePtr<char> canonical_target = + ResolverRegistry::AddDefaultPrefixIfNeeded(target); + grpc_arg arg = grpc_channel_arg_string_create( + const_cast<char*>(GRPC_ARG_SERVER_URI), canonical_target.get()); + const char* to_remove[] = {GRPC_ARG_SERVER_URI}; + grpc_channel_args* new_args = + grpc_channel_args_copy_and_add_and_remove(args, to_remove, 1, &arg, 1); + grpc_channel* channel = + grpc_channel_create(target, new_args, GRPC_CLIENT_CHANNEL, nullptr); + grpc_channel_args_destroy(new_args); + return channel; } - // Add channel arg containing the server URI. - grpc_core::UniquePtr<char> canonical_target = - grpc_core::ResolverRegistry::AddDefaultPrefixIfNeeded(target); - grpc_arg arg = grpc_channel_arg_string_create( - const_cast<char*>(GRPC_ARG_SERVER_URI), canonical_target.get()); - const char* to_remove[] = {GRPC_ARG_SERVER_URI}; - grpc_channel_args* new_args = - grpc_channel_args_copy_and_add_and_remove(args, to_remove, 1, &arg, 1); - grpc_channel* channel = - grpc_channel_create(target, new_args, GRPC_CLIENT_CHANNEL, nullptr); - grpc_channel_args_destroy(new_args); - return channel; -} +}; -static const grpc_client_channel_factory_vtable client_channel_factory_vtable = - {client_channel_factory_ref, client_channel_factory_unref, - client_channel_factory_create_subchannel, - client_channel_factory_create_channel}; +} // namespace grpc_core -static grpc_client_channel_factory client_channel_factory = { - &client_channel_factory_vtable}; +namespace { + +grpc_core::Chttp2InsecureClientChannelFactory* g_factory; +gpr_once g_factory_once = GPR_ONCE_INIT; + +void FactoryInit() { + g_factory = grpc_core::New<grpc_core::Chttp2InsecureClientChannelFactory>(); +} + +} // namespace /* Create a client channel: Asynchronously: - resolve target @@ -91,16 +94,13 @@ grpc_channel* grpc_insecure_channel_create(const char* target, (target, args, reserved)); GPR_ASSERT(reserved == nullptr); // Add channel arg containing the client channel factory. - grpc_arg arg = - grpc_client_channel_factory_create_channel_arg(&client_channel_factory); + gpr_once_init(&g_factory_once, FactoryInit); + grpc_arg arg = grpc_core::ClientChannelFactory::CreateChannelArg(g_factory); grpc_channel_args* new_args = grpc_channel_args_copy_and_add(args, &arg, 1); // Create channel. - grpc_channel* channel = client_channel_factory_create_channel( - &client_channel_factory, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, - new_args); + grpc_channel* channel = g_factory->CreateChannel(target, new_args); // Clean up. grpc_channel_args_destroy(new_args); - return channel != nullptr ?
channel : grpc_lame_client_channel_create( target, GRPC_STATUS_INTERNAL, diff --git a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc index eb2fee2af91..bc38ff25c79 100644 --- a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc +++ b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc @@ -40,148 +40,148 @@ #include "src/core/lib/surface/channel.h" #include "src/core/lib/uri/uri_parser.h" -static void client_channel_factory_ref( - grpc_client_channel_factory* cc_factory) {} +namespace grpc_core { -static void client_channel_factory_unref( - grpc_client_channel_factory* cc_factory) {} - -static grpc_channel_args* get_secure_naming_channel_args( - const grpc_channel_args* args) { - grpc_channel_credentials* channel_credentials = - grpc_channel_credentials_find_in_args(args); - if (channel_credentials == nullptr) { - gpr_log(GPR_ERROR, - "Can't create subchannel: channel credentials missing for secure " - "channel."); - return nullptr; - } - // Make sure security connector does not already exist in args. - if (grpc_security_connector_find_in_args(args) != nullptr) { - gpr_log(GPR_ERROR, - "Can't create subchannel: security connector already present in " - "channel args."); - return nullptr; - } - // To which address are we connecting? By default, use the server URI. - const grpc_arg* server_uri_arg = - grpc_channel_args_find(args, GRPC_ARG_SERVER_URI); - const char* server_uri_str = grpc_channel_arg_get_string(server_uri_arg); - GPR_ASSERT(server_uri_str != nullptr); - grpc_uri* server_uri = - grpc_uri_parse(server_uri_str, true /* supress errors */); - GPR_ASSERT(server_uri != nullptr); - const grpc_core::TargetAuthorityTable* target_authority_table = - grpc_core::FindTargetAuthorityTableInArgs(args); - grpc_core::UniquePtr authority; - if (target_authority_table != nullptr) { - // Find the authority for the target. - const char* target_uri_str = - grpc_core::Subchannel::GetUriFromSubchannelAddressArg(args); - grpc_uri* target_uri = - grpc_uri_parse(target_uri_str, false /* suppress errors */); - GPR_ASSERT(target_uri != nullptr); - if (target_uri->path[0] != '\0') { // "path" may be empty - const grpc_slice key = grpc_slice_from_static_string( - target_uri->path[0] == '/' ? target_uri->path + 1 : target_uri->path); - const grpc_core::UniquePtr* value = - target_authority_table->Get(key); - if (value != nullptr) authority.reset(gpr_strdup(value->get())); - grpc_slice_unref_internal(key); +class Chttp2SecureClientChannelFactory : public ClientChannelFactory { + public: + Subchannel* CreateSubchannel(const grpc_channel_args* args) override { + grpc_channel_args* new_args = GetSecureNamingChannelArgs(args); + if (new_args == nullptr) { + gpr_log(GPR_ERROR, + "Failed to create channel args during subchannel creation."); + return nullptr; } - grpc_uri_destroy(target_uri); - } - // If the authority hasn't already been set (either because no target - // authority table was present or because the target was not present - // in the table), fall back to using the original server URI. 
- if (authority == nullptr) { - authority = - grpc_core::ResolverRegistry::GetDefaultAuthority(server_uri_str); + grpc_connector* connector = grpc_chttp2_connector_create(); + Subchannel* s = Subchannel::Create(connector, new_args); + grpc_connector_unref(connector); + grpc_channel_args_destroy(new_args); + return s; } - grpc_arg args_to_add[2]; - size_t num_args_to_add = 0; - if (grpc_channel_args_find(args, GRPC_ARG_DEFAULT_AUTHORITY) == nullptr) { - // If the channel args don't already contain GRPC_ARG_DEFAULT_AUTHORITY, add - // the arg, setting it to the value just obtained. - args_to_add[num_args_to_add++] = grpc_channel_arg_string_create( - const_cast(GRPC_ARG_DEFAULT_AUTHORITY), authority.get()); + + grpc_channel* CreateChannel(const char* target, + const grpc_channel_args* args) override { + if (target == nullptr) { + gpr_log(GPR_ERROR, "cannot create channel with NULL target name"); + return nullptr; + } + // Add channel arg containing the server URI. + UniquePtr canonical_target = + ResolverRegistry::AddDefaultPrefixIfNeeded(target); + grpc_arg arg = grpc_channel_arg_string_create( + const_cast(GRPC_ARG_SERVER_URI), canonical_target.get()); + const char* to_remove[] = {GRPC_ARG_SERVER_URI}; + grpc_channel_args* new_args = + grpc_channel_args_copy_and_add_and_remove(args, to_remove, 1, &arg, 1); + grpc_channel* channel = + grpc_channel_create(target, new_args, GRPC_CLIENT_CHANNEL, nullptr); + grpc_channel_args_destroy(new_args); + return channel; } - grpc_channel_args* args_with_authority = - grpc_channel_args_copy_and_add(args, args_to_add, num_args_to_add); - grpc_uri_destroy(server_uri); - // Create the security connector using the credentials and target name. - grpc_channel_args* new_args_from_connector = nullptr; - grpc_core::RefCountedPtr - subchannel_security_connector = - channel_credentials->create_security_connector( - /*call_creds=*/nullptr, authority.get(), args_with_authority, - &new_args_from_connector); - if (subchannel_security_connector == nullptr) { - gpr_log(GPR_ERROR, - "Failed to create secure subchannel for secure name '%s'", - authority.get()); + + private: + static grpc_channel_args* GetSecureNamingChannelArgs( + const grpc_channel_args* args) { + grpc_channel_credentials* channel_credentials = + grpc_channel_credentials_find_in_args(args); + if (channel_credentials == nullptr) { + gpr_log(GPR_ERROR, + "Can't create subchannel: channel credentials missing for secure " + "channel."); + return nullptr; + } + // Make sure security connector does not already exist in args. + if (grpc_security_connector_find_in_args(args) != nullptr) { + gpr_log(GPR_ERROR, + "Can't create subchannel: security connector already present in " + "channel args."); + return nullptr; + } + // To which address are we connecting? By default, use the server URI. + const grpc_arg* server_uri_arg = + grpc_channel_args_find(args, GRPC_ARG_SERVER_URI); + const char* server_uri_str = grpc_channel_arg_get_string(server_uri_arg); + GPR_ASSERT(server_uri_str != nullptr); + grpc_uri* server_uri = + grpc_uri_parse(server_uri_str, true /* suppress errors */); + GPR_ASSERT(server_uri != nullptr); + const TargetAuthorityTable* target_authority_table = + FindTargetAuthorityTableInArgs(args); + UniquePtr authority; + if (target_authority_table != nullptr) { + // Find the authority for the target. 
+ const char* target_uri_str = + Subchannel::GetUriFromSubchannelAddressArg(args); + grpc_uri* target_uri = + grpc_uri_parse(target_uri_str, false /* suppress errors */); + GPR_ASSERT(target_uri != nullptr); + if (target_uri->path[0] != '\0') { // "path" may be empty + const grpc_slice key = grpc_slice_from_static_string( + target_uri->path[0] == '/' ? target_uri->path + 1 + : target_uri->path); + const UniquePtr* value = target_authority_table->Get(key); + if (value != nullptr) authority.reset(gpr_strdup(value->get())); + grpc_slice_unref_internal(key); + } + grpc_uri_destroy(target_uri); + } + // If the authority hasn't already been set (either because no target + // authority table was present or because the target was not present + // in the table), fall back to using the original server URI. + if (authority == nullptr) { + authority = ResolverRegistry::GetDefaultAuthority(server_uri_str); + } + grpc_arg args_to_add[2]; + size_t num_args_to_add = 0; + if (grpc_channel_args_find(args, GRPC_ARG_DEFAULT_AUTHORITY) == nullptr) { + // If the channel args don't already contain GRPC_ARG_DEFAULT_AUTHORITY, + // add the arg, setting it to the value just obtained. + args_to_add[num_args_to_add++] = grpc_channel_arg_string_create( + const_cast(GRPC_ARG_DEFAULT_AUTHORITY), authority.get()); + } + grpc_channel_args* args_with_authority = + grpc_channel_args_copy_and_add(args, args_to_add, num_args_to_add); + grpc_uri_destroy(server_uri); + // Create the security connector using the credentials and target name. + grpc_channel_args* new_args_from_connector = nullptr; + RefCountedPtr + subchannel_security_connector = + channel_credentials->create_security_connector( + /*call_creds=*/nullptr, authority.get(), args_with_authority, + &new_args_from_connector); + if (subchannel_security_connector == nullptr) { + gpr_log(GPR_ERROR, + "Failed to create secure subchannel for secure name '%s'", + authority.get()); + grpc_channel_args_destroy(args_with_authority); + return nullptr; + } + grpc_arg new_security_connector_arg = + grpc_security_connector_to_arg(subchannel_security_connector.get()); + grpc_channel_args* new_args = grpc_channel_args_copy_and_add( + new_args_from_connector != nullptr ? new_args_from_connector + : args_with_authority, + &new_security_connector_arg, 1); + subchannel_security_connector.reset(DEBUG_LOCATION, "lb_channel_create"); + if (new_args_from_connector != nullptr) { + grpc_channel_args_destroy(new_args_from_connector); + } grpc_channel_args_destroy(args_with_authority); - return nullptr; + return new_args; } - grpc_arg new_security_connector_arg = - grpc_security_connector_to_arg(subchannel_security_connector.get()); +}; - grpc_channel_args* new_args = grpc_channel_args_copy_and_add( - new_args_from_connector != nullptr ? 
new_args_from_connector - : args_with_authority, - &new_security_connector_arg, 1); +} // namespace grpc_core - subchannel_security_connector.reset(DEBUG_LOCATION, "lb_channel_create"); - if (new_args_from_connector != nullptr) { - grpc_channel_args_destroy(new_args_from_connector); - } - grpc_channel_args_destroy(args_with_authority); - return new_args; -} +namespace { -static grpc_core::Subchannel* client_channel_factory_create_subchannel( - grpc_client_channel_factory* cc_factory, const grpc_channel_args* args) { - grpc_channel_args* new_args = get_secure_naming_channel_args(args); - if (new_args == nullptr) { - gpr_log(GPR_ERROR, - "Failed to create channel args during subchannel creation."); - return nullptr; - } - grpc_connector* connector = grpc_chttp2_connector_create(); - grpc_core::Subchannel* s = grpc_core::Subchannel::Create(connector, new_args); - grpc_connector_unref(connector); - grpc_channel_args_destroy(new_args); - return s; -} +grpc_core::Chttp2SecureClientChannelFactory* g_factory; +gpr_once g_factory_once = GPR_ONCE_INIT; -static grpc_channel* client_channel_factory_create_channel( - grpc_client_channel_factory* cc_factory, const char* target, - grpc_client_channel_type type, const grpc_channel_args* args) { - if (target == nullptr) { - gpr_log(GPR_ERROR, "cannot create channel with NULL target name"); - return nullptr; - } - // Add channel arg containing the server URI. - grpc_core::UniquePtr canonical_target = - grpc_core::ResolverRegistry::AddDefaultPrefixIfNeeded(target); - grpc_arg arg = grpc_channel_arg_string_create((char*)GRPC_ARG_SERVER_URI, - canonical_target.get()); - const char* to_remove[] = {GRPC_ARG_SERVER_URI}; - grpc_channel_args* new_args = - grpc_channel_args_copy_and_add_and_remove(args, to_remove, 1, &arg, 1); - grpc_channel* channel = - grpc_channel_create(target, new_args, GRPC_CLIENT_CHANNEL, nullptr); - grpc_channel_args_destroy(new_args); - return channel; +void FactoryInit() { + g_factory = grpc_core::New(); } -static const grpc_client_channel_factory_vtable client_channel_factory_vtable = - {client_channel_factory_ref, client_channel_factory_unref, - client_channel_factory_create_subchannel, - client_channel_factory_create_channel}; - -static grpc_client_channel_factory client_channel_factory = { - &client_channel_factory_vtable}; +} // namespace // Create a secure client channel: // Asynchronously: - resolve target @@ -201,16 +201,15 @@ grpc_channel* grpc_secure_channel_create(grpc_channel_credentials* creds, if (creds != nullptr) { // Add channel args containing the client channel factory and channel // credentials. + gpr_once_init(&g_factory_once, FactoryInit); grpc_arg args_to_add[] = { - grpc_client_channel_factory_create_channel_arg(&client_channel_factory), + grpc_core::ClientChannelFactory::CreateChannelArg(g_factory), grpc_channel_credentials_to_arg(creds)}; grpc_channel_args* new_args = grpc_channel_args_copy_and_add( args, args_to_add, GPR_ARRAY_SIZE(args_to_add)); new_args = creds->update_arguments(new_args); // Create channel. - channel = client_channel_factory_create_channel( - &client_channel_factory, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, - new_args); + channel = g_factory->CreateChannel(target, new_args); // Clean up. 
grpc_channel_args_destroy(new_args); } diff --git a/src/core/ext/transport/chttp2/transport/bin_decoder.cc b/src/core/ext/transport/chttp2/transport/bin_decoder.cc index b660a456521..249035d7e89 100644 --- a/src/core/ext/transport/chttp2/transport/bin_decoder.cc +++ b/src/core/ext/transport/chttp2/transport/bin_decoder.cc @@ -51,7 +51,7 @@ static uint8_t decode_table[] = { static const uint8_t tail_xtra[4] = {0, 0, 1, 2}; -static bool input_is_valid(uint8_t* input_ptr, size_t length) { +static bool input_is_valid(const uint8_t* input_ptr, size_t length) { size_t i; for (i = 0; i < length; ++i) { @@ -158,7 +158,7 @@ bool grpc_base64_decode_partial(struct grpc_base64_decode_context* ctx) { return true; } -grpc_slice grpc_chttp2_base64_decode(grpc_slice input) { +grpc_slice grpc_chttp2_base64_decode(const grpc_slice& input) { size_t input_length = GRPC_SLICE_LENGTH(input); size_t output_length = input_length / 4 * 3; struct grpc_base64_decode_context ctx; @@ -174,7 +174,7 @@ grpc_slice grpc_chttp2_base64_decode(grpc_slice input) { } if (input_length > 0) { - uint8_t* input_end = GRPC_SLICE_END_PTR(input); + const uint8_t* input_end = GRPC_SLICE_END_PTR(input); if (*(--input_end) == '=') { output_length--; if (*(--input_end) == '=') { @@ -202,7 +202,7 @@ grpc_slice grpc_chttp2_base64_decode(grpc_slice input) { return output; } -grpc_slice grpc_chttp2_base64_decode_with_length(grpc_slice input, +grpc_slice grpc_chttp2_base64_decode_with_length(const grpc_slice& input, size_t output_length) { size_t input_length = GRPC_SLICE_LENGTH(input); grpc_slice output = GRPC_SLICE_MALLOC(output_length); diff --git a/src/core/ext/transport/chttp2/transport/bin_decoder.h b/src/core/ext/transport/chttp2/transport/bin_decoder.h index 8a4d4a71790..1cbca033a1f 100644 --- a/src/core/ext/transport/chttp2/transport/bin_decoder.h +++ b/src/core/ext/transport/chttp2/transport/bin_decoder.h @@ -26,8 +26,8 @@ struct grpc_base64_decode_context { /* input/output: */ - uint8_t* input_cur; - uint8_t* input_end; + const uint8_t* input_cur; + const uint8_t* input_end; uint8_t* output_cur; uint8_t* output_end; /* Indicate if the decoder should handle the tail of input data*/ @@ -42,12 +42,12 @@ bool grpc_base64_decode_partial(struct grpc_base64_decode_context* ctx); /* base64 decode a slice with pad chars. Returns a new slice, does not take ownership of the input. Returns an empty slice if decoding is failed. */ -grpc_slice grpc_chttp2_base64_decode(grpc_slice input); +grpc_slice grpc_chttp2_base64_decode(const grpc_slice& input); /* base64 decode a slice without pad chars, data length is needed. Returns a new slice, does not take ownership of the input. Returns an empty slice if decoding is failed. */ -grpc_slice grpc_chttp2_base64_decode_with_length(grpc_slice input, +grpc_slice grpc_chttp2_base64_decode_with_length(const grpc_slice& input, size_t output_length); /* Infer the length of decoded data from encoded data. 
*/ diff --git a/src/core/ext/transport/chttp2/transport/bin_encoder.cc b/src/core/ext/transport/chttp2/transport/bin_encoder.cc index bad29e3421c..c816aba991f 100644 --- a/src/core/ext/transport/chttp2/transport/bin_encoder.cc +++ b/src/core/ext/transport/chttp2/transport/bin_encoder.cc @@ -48,13 +48,13 @@ static const b64_huff_sym huff_alphabet[64] = { static const uint8_t tail_xtra[3] = {0, 2, 3}; -grpc_slice grpc_chttp2_base64_encode(grpc_slice input) { +grpc_slice grpc_chttp2_base64_encode(const grpc_slice& input) { size_t input_length = GRPC_SLICE_LENGTH(input); size_t input_triplets = input_length / 3; size_t tail_case = input_length % 3; size_t output_length = input_triplets * 4 + tail_xtra[tail_case]; grpc_slice output = GRPC_SLICE_MALLOC(output_length); - uint8_t* in = GRPC_SLICE_START_PTR(input); + const uint8_t* in = GRPC_SLICE_START_PTR(input); char* out = reinterpret_cast GRPC_SLICE_START_PTR(output); size_t i; @@ -92,9 +92,9 @@ grpc_slice grpc_chttp2_base64_encode(grpc_slice input) { return output; } -grpc_slice grpc_chttp2_huffman_compress(grpc_slice input) { +grpc_slice grpc_chttp2_huffman_compress(const grpc_slice& input) { size_t nbits; - uint8_t* in; + const uint8_t* in; uint8_t* out; grpc_slice output; uint32_t temp = 0; @@ -166,7 +166,8 @@ static void enc_add1(huff_out* out, uint8_t a) { enc_flush_some(out); } -grpc_slice grpc_chttp2_base64_encode_and_huffman_compress(grpc_slice input) { +grpc_slice grpc_chttp2_base64_encode_and_huffman_compress( + const grpc_slice& input) { size_t input_length = GRPC_SLICE_LENGTH(input); size_t input_triplets = input_length / 3; size_t tail_case = input_length % 3; @@ -174,7 +175,7 @@ grpc_slice grpc_chttp2_base64_encode_and_huffman_compress(grpc_slice input) { size_t max_output_bits = 11 * output_syms; size_t max_output_length = max_output_bits / 8 + (max_output_bits % 8 != 0); grpc_slice output = GRPC_SLICE_MALLOC(max_output_length); - uint8_t* in = GRPC_SLICE_START_PTR(input); + const uint8_t* in = GRPC_SLICE_START_PTR(input); uint8_t* start_out = GRPC_SLICE_START_PTR(output); huff_out out; size_t i; diff --git a/src/core/ext/transport/chttp2/transport/bin_encoder.h b/src/core/ext/transport/chttp2/transport/bin_encoder.h index 1b7bb1574af..4f7ee67bd31 100644 --- a/src/core/ext/transport/chttp2/transport/bin_encoder.h +++ b/src/core/ext/transport/chttp2/transport/bin_encoder.h @@ -25,17 +25,18 @@ /* base64 encode a slice. Returns a new slice, does not take ownership of the input */ -grpc_slice grpc_chttp2_base64_encode(grpc_slice input); +grpc_slice grpc_chttp2_base64_encode(const grpc_slice& input); /* Compress a slice with the static huffman encoder detailed in the hpack standard. 
Returns a new slice, does not take ownership of the input */ -grpc_slice grpc_chttp2_huffman_compress(grpc_slice input); +grpc_slice grpc_chttp2_huffman_compress(const grpc_slice& input); /* equivalent to: grpc_slice x = grpc_chttp2_base64_encode(input); grpc_slice y = grpc_chttp2_huffman_compress(x); grpc_slice_unref_internal( x); return y; */ -grpc_slice grpc_chttp2_base64_encode_and_huffman_compress(grpc_slice input); +grpc_slice grpc_chttp2_base64_encode_and_huffman_compress( + const grpc_slice& input); #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_ENCODER_H */ diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc index 970c71b663d..306349b7910 100644 --- a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc @@ -824,10 +824,10 @@ static const char* write_state_name(grpc_chttp2_write_state st) { static void set_write_state(grpc_chttp2_transport* t, grpc_chttp2_write_state st, const char* reason) { - GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "W:%p %s state %s -> %s [%s]", t, - t->is_client ? "CLIENT" : "SERVER", - write_state_name(t->write_state), - write_state_name(st), reason)); + GRPC_CHTTP2_IF_TRACING( + gpr_log(GPR_INFO, "W:%p %s [%s] state %s -> %s [%s]", t, + t->is_client ? "CLIENT" : "SERVER", t->peer_string, + write_state_name(t->write_state), write_state_name(st), reason)); t->write_state = st; /* If the state is being reset back to idle, it means a write was just * finished. Make sure all the run_after_write closures are scheduled. @@ -1129,7 +1129,7 @@ static void queue_setting_update(grpc_chttp2_transport* t, void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t, uint32_t goaway_error, - grpc_slice goaway_text) { + const grpc_slice& goaway_text) { // Discard the error from a previous goaway frame (if any) if (t->goaway_error != GRPC_ERROR_NONE) { GRPC_ERROR_UNREF(t->goaway_error); @@ -2996,7 +2996,7 @@ void Chttp2IncomingByteStream::PublishError(grpc_error* error) { grpc_chttp2_cancel_stream(transport_, stream_, GRPC_ERROR_REF(error)); } -grpc_error* Chttp2IncomingByteStream::Push(grpc_slice slice, +grpc_error* Chttp2IncomingByteStream::Push(const grpc_slice& slice, grpc_slice* slice_out) { if (remaining_bytes_ < GRPC_SLICE_LENGTH(slice)) { grpc_error* error = diff --git a/src/core/ext/transport/chttp2/transport/frame_data.cc b/src/core/ext/transport/chttp2/transport/frame_data.cc index 1de00735cf3..6080a4bd1c4 100644 --- a/src/core/ext/transport/chttp2/transport/frame_data.cc +++ b/src/core/ext/transport/chttp2/transport/frame_data.cc @@ -287,7 +287,8 @@ grpc_error* grpc_deframe_unprocessed_incoming_frames( grpc_error* grpc_chttp2_data_parser_parse(void* parser, grpc_chttp2_transport* t, grpc_chttp2_stream* s, - grpc_slice slice, int is_last) { + const grpc_slice& slice, + int is_last) { if (!s->pending_byte_stream) { grpc_slice_ref_internal(slice); grpc_slice_buffer_add(&s->frame_storage, slice); diff --git a/src/core/ext/transport/chttp2/transport/frame_data.h b/src/core/ext/transport/chttp2/transport/frame_data.h index 2c5da99fa68..ec3890098ec 100644 --- a/src/core/ext/transport/chttp2/transport/frame_data.h +++ b/src/core/ext/transport/chttp2/transport/frame_data.h @@ -67,7 +67,7 @@ grpc_error* grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser* parser, grpc_error* grpc_chttp2_data_parser_parse(void* parser, grpc_chttp2_transport* t, grpc_chttp2_stream* s, - grpc_slice slice, int is_last); + 
const grpc_slice& slice, int is_last); void grpc_chttp2_encode_data(uint32_t id, grpc_slice_buffer* inbuf, uint32_t write_bytes, int is_eof, diff --git a/src/core/ext/transport/chttp2/transport/frame_goaway.cc b/src/core/ext/transport/chttp2/transport/frame_goaway.cc index 2a1dd3c3163..e901a6bdc76 100644 --- a/src/core/ext/transport/chttp2/transport/frame_goaway.cc +++ b/src/core/ext/transport/chttp2/transport/frame_goaway.cc @@ -57,10 +57,11 @@ grpc_error* grpc_chttp2_goaway_parser_begin_frame(grpc_chttp2_goaway_parser* p, grpc_error* grpc_chttp2_goaway_parser_parse(void* parser, grpc_chttp2_transport* t, grpc_chttp2_stream* s, - grpc_slice slice, int is_last) { - uint8_t* const beg = GRPC_SLICE_START_PTR(slice); - uint8_t* const end = GRPC_SLICE_END_PTR(slice); - uint8_t* cur = beg; + const grpc_slice& slice, + int is_last) { + const uint8_t* const beg = GRPC_SLICE_START_PTR(slice); + const uint8_t* const end = GRPC_SLICE_END_PTR(slice); + const uint8_t* cur = beg; grpc_chttp2_goaway_parser* p = static_cast(parser); @@ -149,7 +150,7 @@ grpc_error* grpc_chttp2_goaway_parser_parse(void* parser, } void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code, - grpc_slice debug_data, + const grpc_slice& debug_data, grpc_slice_buffer* slice_buffer) { grpc_slice header = GRPC_SLICE_MALLOC(9 + 4 + 4); uint8_t* p = GRPC_SLICE_START_PTR(header); diff --git a/src/core/ext/transport/chttp2/transport/frame_goaway.h b/src/core/ext/transport/chttp2/transport/frame_goaway.h index 66c7a68befe..6f65bb2d604 100644 --- a/src/core/ext/transport/chttp2/transport/frame_goaway.h +++ b/src/core/ext/transport/chttp2/transport/frame_goaway.h @@ -53,10 +53,11 @@ grpc_error* grpc_chttp2_goaway_parser_begin_frame( grpc_error* grpc_chttp2_goaway_parser_parse(void* parser, grpc_chttp2_transport* t, grpc_chttp2_stream* s, - grpc_slice slice, int is_last); + const grpc_slice& slice, + int is_last); void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code, - grpc_slice debug_data, + const grpc_slice& debug_data, grpc_slice_buffer* slice_buffer); #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_GOAWAY_H */ diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.cc b/src/core/ext/transport/chttp2/transport/frame_ping.cc index 205826b779a..9a56bf093f4 100644 --- a/src/core/ext/transport/chttp2/transport/frame_ping.cc +++ b/src/core/ext/transport/chttp2/transport/frame_ping.cc @@ -73,10 +73,11 @@ grpc_error* grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser* parser, grpc_error* grpc_chttp2_ping_parser_parse(void* parser, grpc_chttp2_transport* t, grpc_chttp2_stream* s, - grpc_slice slice, int is_last) { - uint8_t* const beg = GRPC_SLICE_START_PTR(slice); - uint8_t* const end = GRPC_SLICE_END_PTR(slice); - uint8_t* cur = beg; + const grpc_slice& slice, + int is_last) { + const uint8_t* const beg = GRPC_SLICE_START_PTR(slice); + const uint8_t* const end = GRPC_SLICE_END_PTR(slice); + const uint8_t* cur = beg; grpc_chttp2_ping_parser* p = static_cast(parser); while (p->byte != 8 && cur != end) { diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.h b/src/core/ext/transport/chttp2/transport/frame_ping.h index 55a4499ad59..915d023a34c 100644 --- a/src/core/ext/transport/chttp2/transport/frame_ping.h +++ b/src/core/ext/transport/chttp2/transport/frame_ping.h @@ -37,7 +37,7 @@ grpc_error* grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser* parser, grpc_error* grpc_chttp2_ping_parser_parse(void* parser, grpc_chttp2_transport* t, grpc_chttp2_stream* s, - 
grpc_slice slice, int is_last); + const grpc_slice& slice, int is_last); /* Test-only function for disabling ping ack */ void grpc_set_disable_ping_ack(bool disable_ping_ack); diff --git a/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc b/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc index a0a75345947..ccde36cbc48 100644 --- a/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc +++ b/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc @@ -74,10 +74,11 @@ grpc_error* grpc_chttp2_rst_stream_parser_begin_frame( grpc_error* grpc_chttp2_rst_stream_parser_parse(void* parser, grpc_chttp2_transport* t, grpc_chttp2_stream* s, - grpc_slice slice, int is_last) { - uint8_t* const beg = GRPC_SLICE_START_PTR(slice); - uint8_t* const end = GRPC_SLICE_END_PTR(slice); - uint8_t* cur = beg; + const grpc_slice& slice, + int is_last) { + const uint8_t* const beg = GRPC_SLICE_START_PTR(slice); + const uint8_t* const end = GRPC_SLICE_END_PTR(slice); + const uint8_t* cur = beg; grpc_chttp2_rst_stream_parser* p = static_cast(parser); diff --git a/src/core/ext/transport/chttp2/transport/frame_rst_stream.h b/src/core/ext/transport/chttp2/transport/frame_rst_stream.h index 6bcf9c44797..64707666181 100644 --- a/src/core/ext/transport/chttp2/transport/frame_rst_stream.h +++ b/src/core/ext/transport/chttp2/transport/frame_rst_stream.h @@ -38,6 +38,7 @@ grpc_error* grpc_chttp2_rst_stream_parser_begin_frame( grpc_error* grpc_chttp2_rst_stream_parser_parse(void* parser, grpc_chttp2_transport* t, grpc_chttp2_stream* s, - grpc_slice slice, int is_last); + const grpc_slice& slice, + int is_last); #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_RST_STREAM_H */ diff --git a/src/core/ext/transport/chttp2/transport/frame_settings.cc b/src/core/ext/transport/chttp2/transport/frame_settings.cc index 987ac0e79d0..ed1554e2fef 100644 --- a/src/core/ext/transport/chttp2/transport/frame_settings.cc +++ b/src/core/ext/transport/chttp2/transport/frame_settings.cc @@ -111,7 +111,8 @@ grpc_error* grpc_chttp2_settings_parser_begin_frame( grpc_error* grpc_chttp2_settings_parser_parse(void* p, grpc_chttp2_transport* t, grpc_chttp2_stream* s, - grpc_slice slice, int is_last) { + const grpc_slice& slice, + int is_last) { grpc_chttp2_settings_parser* parser = static_cast(p); const uint8_t* cur = GRPC_SLICE_START_PTR(slice); diff --git a/src/core/ext/transport/chttp2/transport/frame_settings.h b/src/core/ext/transport/chttp2/transport/frame_settings.h index 8d8d9b1a914..8a3ff0426b3 100644 --- a/src/core/ext/transport/chttp2/transport/frame_settings.h +++ b/src/core/ext/transport/chttp2/transport/frame_settings.h @@ -55,6 +55,7 @@ grpc_error* grpc_chttp2_settings_parser_begin_frame( grpc_error* grpc_chttp2_settings_parser_parse(void* parser, grpc_chttp2_transport* t, grpc_chttp2_stream* s, - grpc_slice slice, int is_last); + const grpc_slice& slice, + int is_last); #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_SETTINGS_H */ diff --git a/src/core/ext/transport/chttp2/transport/frame_window_update.cc b/src/core/ext/transport/chttp2/transport/frame_window_update.cc index b8738ea7ea0..80e799f17f1 100644 --- a/src/core/ext/transport/chttp2/transport/frame_window_update.cc +++ b/src/core/ext/transport/chttp2/transport/frame_window_update.cc @@ -69,11 +69,11 @@ grpc_error* grpc_chttp2_window_update_parser_begin_frame( grpc_error* grpc_chttp2_window_update_parser_parse(void* parser, grpc_chttp2_transport* t, grpc_chttp2_stream* s, - grpc_slice slice, + const grpc_slice& slice, int is_last) { - 
uint8_t* const beg = GRPC_SLICE_START_PTR(slice); - uint8_t* const end = GRPC_SLICE_END_PTR(slice); - uint8_t* cur = beg; + const uint8_t* const beg = GRPC_SLICE_START_PTR(slice); + const uint8_t* const end = GRPC_SLICE_END_PTR(slice); + const uint8_t* cur = beg; grpc_chttp2_window_update_parser* p = static_cast(parser); diff --git a/src/core/ext/transport/chttp2/transport/frame_window_update.h b/src/core/ext/transport/chttp2/transport/frame_window_update.h index 3d2391f637d..f6721a5bc5d 100644 --- a/src/core/ext/transport/chttp2/transport/frame_window_update.h +++ b/src/core/ext/transport/chttp2/transport/frame_window_update.h @@ -39,7 +39,7 @@ grpc_error* grpc_chttp2_window_update_parser_begin_frame( grpc_error* grpc_chttp2_window_update_parser_parse(void* parser, grpc_chttp2_transport* t, grpc_chttp2_stream* s, - grpc_slice slice, + const grpc_slice& slice, int is_last); #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_WINDOW_UPDATE_H */ diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.cc b/src/core/ext/transport/chttp2/transport/hpack_parser.cc index 7b47c9bc18e..5bcdb4e2326 100644 --- a/src/core/ext/transport/chttp2/transport/hpack_parser.cc +++ b/src/core/ext/transport/chttp2/transport/hpack_parser.cc @@ -1570,16 +1570,16 @@ void grpc_chttp2_hpack_parser_destroy(grpc_chttp2_hpack_parser* p) { } grpc_error* grpc_chttp2_hpack_parser_parse(grpc_chttp2_hpack_parser* p, - grpc_slice slice) { + const grpc_slice& slice) { /* max number of bytes to parse at a time... limits call stack depth on * compilers without TCO */ #define MAX_PARSE_LENGTH 1024 p->current_slice_refcount = slice.refcount; - uint8_t* start = GRPC_SLICE_START_PTR(slice); - uint8_t* end = GRPC_SLICE_END_PTR(slice); + const uint8_t* start = GRPC_SLICE_START_PTR(slice); + const uint8_t* end = GRPC_SLICE_END_PTR(slice); grpc_error* error = GRPC_ERROR_NONE; while (start != end && error == GRPC_ERROR_NONE) { - uint8_t* target = start + GPR_MIN(MAX_PARSE_LENGTH, end - start); + const uint8_t* target = start + GPR_MIN(MAX_PARSE_LENGTH, end - start); error = p->state(p, start, target); start = target; } @@ -1621,7 +1621,8 @@ static void parse_stream_compression_md(grpc_chttp2_transport* t, grpc_error* grpc_chttp2_header_parser_parse(void* hpack_parser, grpc_chttp2_transport* t, grpc_chttp2_stream* s, - grpc_slice slice, int is_last) { + const grpc_slice& slice, + int is_last) { GPR_TIMER_SCOPE("grpc_chttp2_header_parser_parse", 0); grpc_chttp2_hpack_parser* parser = static_cast(hpack_parser); diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.h b/src/core/ext/transport/chttp2/transport/hpack_parser.h index 3e05de4b925..3dc8e13bea2 100644 --- a/src/core/ext/transport/chttp2/transport/hpack_parser.h +++ b/src/core/ext/transport/chttp2/transport/hpack_parser.h @@ -97,13 +97,14 @@ void grpc_chttp2_hpack_parser_destroy(grpc_chttp2_hpack_parser* p); void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser* p); grpc_error* grpc_chttp2_hpack_parser_parse(grpc_chttp2_hpack_parser* p, - grpc_slice slice); + const grpc_slice& slice); /* wraps grpc_chttp2_hpack_parser_parse to provide a frame level parser for the transport */ grpc_error* grpc_chttp2_header_parser_parse(void* hpack_parser, grpc_chttp2_transport* t, grpc_chttp2_stream* s, - grpc_slice slice, int is_last); + const grpc_slice& slice, + int is_last); #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_PARSER_H */ diff --git a/src/core/ext/transport/chttp2/transport/internal.h 
b/src/core/ext/transport/chttp2/transport/internal.h index 341f5b3977f..760324c0c95 100644 --- a/src/core/ext/transport/chttp2/transport/internal.h +++ b/src/core/ext/transport/chttp2/transport/internal.h @@ -245,7 +245,7 @@ class Chttp2IncomingByteStream : public ByteStream { void PublishError(grpc_error* error); - grpc_error* Push(grpc_slice slice, grpc_slice* slice_out); + grpc_error* Push(const grpc_slice& slice, grpc_slice* slice_out); grpc_error* Finished(grpc_error* error, bool reset_on_error); @@ -438,7 +438,8 @@ struct grpc_chttp2_transport { void* parser_data = nullptr; grpc_chttp2_stream* incoming_stream = nullptr; grpc_error* (*parser)(void* parser_user_data, grpc_chttp2_transport* t, - grpc_chttp2_stream* s, grpc_slice slice, int is_last); + grpc_chttp2_stream* s, const grpc_slice& slice, + int is_last); grpc_chttp2_write_cb* write_cb_pool = nullptr; @@ -681,7 +682,7 @@ void grpc_chttp2_end_write(grpc_chttp2_transport* t, grpc_error* error); /** Process one slice of incoming data; return 1 if the connection is still viable after reading, or 0 if the connection should be torn down */ grpc_error* grpc_chttp2_perform_read(grpc_chttp2_transport* t, - grpc_slice slice); + const grpc_slice& slice); bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport* t, grpc_chttp2_stream* s); @@ -740,7 +741,7 @@ grpc_chttp2_stream* grpc_chttp2_parsing_accept_stream(grpc_chttp2_transport* t, void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t, uint32_t goaway_error, - grpc_slice goaway_text); + const grpc_slice& goaway_text); void grpc_chttp2_parsing_become_skip_parser(grpc_chttp2_transport* t); diff --git a/src/core/ext/transport/chttp2/transport/parsing.cc b/src/core/ext/transport/chttp2/transport/parsing.cc index 1ff96d3cd36..84b2275ebc4 100644 --- a/src/core/ext/transport/chttp2/transport/parsing.cc +++ b/src/core/ext/transport/chttp2/transport/parsing.cc @@ -45,14 +45,14 @@ static grpc_error* init_goaway_parser(grpc_chttp2_transport* t); static grpc_error* init_skip_frame_parser(grpc_chttp2_transport* t, int is_header); -static grpc_error* parse_frame_slice(grpc_chttp2_transport* t, grpc_slice slice, - int is_last); +static grpc_error* parse_frame_slice(grpc_chttp2_transport* t, + const grpc_slice& slice, int is_last); grpc_error* grpc_chttp2_perform_read(grpc_chttp2_transport* t, - grpc_slice slice) { - uint8_t* beg = GRPC_SLICE_START_PTR(slice); - uint8_t* end = GRPC_SLICE_END_PTR(slice); - uint8_t* cur = beg; + const grpc_slice& slice) { + const uint8_t* beg = GRPC_SLICE_START_PTR(slice); + const uint8_t* end = GRPC_SLICE_END_PTR(slice); + const uint8_t* cur = beg; grpc_error* err; if (cur == end) return GRPC_ERROR_NONE; @@ -312,7 +312,7 @@ static grpc_error* init_frame_parser(grpc_chttp2_transport* t) { } static grpc_error* skip_parser(void* parser, grpc_chttp2_transport* t, - grpc_chttp2_stream* s, grpc_slice slice, + grpc_chttp2_stream* s, const grpc_slice& slice, int is_last) { return GRPC_ERROR_NONE; } @@ -753,8 +753,8 @@ static grpc_error* init_settings_frame_parser(grpc_chttp2_transport* t) { return GRPC_ERROR_NONE; } -static grpc_error* parse_frame_slice(grpc_chttp2_transport* t, grpc_slice slice, - int is_last) { +static grpc_error* parse_frame_slice(grpc_chttp2_transport* t, + const grpc_slice& slice, int is_last) { grpc_chttp2_stream* s = t->incoming_stream; grpc_error* err = t->parser(t->parser_data, t, s, slice, is_last); intptr_t unused; diff --git a/src/core/ext/transport/chttp2/transport/writing.cc b/src/core/ext/transport/chttp2/transport/writing.cc 
index cf77ddc8278..bc8968a0209 100644 --- a/src/core/ext/transport/chttp2/transport/writing.cc +++ b/src/core/ext/transport/chttp2/transport/writing.cc @@ -108,7 +108,7 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) { GRPC_STATS_INC_HTTP2_PINGS_SENT(); t->ping_state.last_ping_sent_time = now; if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) { - gpr_log(GPR_INFO, "%s: Ping sent [%p]: %d/%d", + gpr_log(GPR_INFO, "%s: Ping sent [%s]: %d/%d", t->is_client ? "CLIENT" : "SERVER", t->peer_string, t->ping_state.pings_before_data_required, t->ping_policy.max_pings_without_data); diff --git a/src/core/lib/channel/channel_trace.cc b/src/core/lib/channel/channel_trace.cc index f0d21db32a8..d329ccc98de 100644 --- a/src/core/lib/channel/channel_trace.cc +++ b/src/core/lib/channel/channel_trace.cc @@ -41,7 +41,7 @@ namespace grpc_core { namespace channelz { -ChannelTrace::TraceEvent::TraceEvent(Severity severity, grpc_slice data, +ChannelTrace::TraceEvent::TraceEvent(Severity severity, const grpc_slice& data, RefCountedPtr referenced_entity) : severity_(severity), data_(data), @@ -51,7 +51,7 @@ ChannelTrace::TraceEvent::TraceEvent(Severity severity, grpc_slice data, referenced_entity_(std::move(referenced_entity)), memory_usage_(sizeof(TraceEvent) + grpc_slice_memory_usage(data)) {} -ChannelTrace::TraceEvent::TraceEvent(Severity severity, grpc_slice data) +ChannelTrace::TraceEvent::TraceEvent(Severity severity, const grpc_slice& data) : severity_(severity), data_(data), timestamp_(grpc_millis_to_timespec(grpc_core::ExecCtx::Get()->Now(), @@ -107,7 +107,7 @@ void ChannelTrace::AddTraceEventHelper(TraceEvent* new_trace_event) { } } -void ChannelTrace::AddTraceEvent(Severity severity, grpc_slice data) { +void ChannelTrace::AddTraceEvent(Severity severity, const grpc_slice& data) { if (max_event_memory_ == 0) { grpc_slice_unref_internal(data); return; // tracing is disabled if max_event_memory_ == 0 @@ -116,7 +116,7 @@ void ChannelTrace::AddTraceEvent(Severity severity, grpc_slice data) { } void ChannelTrace::AddTraceEventWithReference( - Severity severity, grpc_slice data, + Severity severity, const grpc_slice& data, RefCountedPtr referenced_entity) { if (max_event_memory_ == 0) { grpc_slice_unref_internal(data); diff --git a/src/core/lib/channel/channel_trace.h b/src/core/lib/channel/channel_trace.h index 8ff91ee8c81..f088185a423 100644 --- a/src/core/lib/channel/channel_trace.h +++ b/src/core/lib/channel/channel_trace.h @@ -62,7 +62,7 @@ class ChannelTrace { // TODO(ncteisen): as this call is used more and more throughout the gRPC // stack, determine if it makes more sense to accept a char* instead of a // slice. - void AddTraceEvent(Severity severity, grpc_slice data); + void AddTraceEvent(Severity severity, const grpc_slice& data); // Adds a new trace event to the tracing object. This trace event refers to a // an event that concerns a different channelz entity. For example, if this @@ -72,7 +72,7 @@ class ChannelTrace { // NOTE: see the note in the method above. // // TODO(ncteisen): see the todo in the method above. - void AddTraceEventWithReference(Severity severity, grpc_slice data, + void AddTraceEventWithReference(Severity severity, const grpc_slice& data, RefCountedPtr referenced_entity); // Creates and returns the raw grpc_json object, so a parent channelz @@ -87,12 +87,12 @@ class ChannelTrace { class TraceEvent { public: // Constructor for a TraceEvent that references a channel. 
- TraceEvent(Severity severity, grpc_slice data, + TraceEvent(Severity severity, const grpc_slice& data, RefCountedPtr referenced_entity_); // Constructor for a TraceEvent that does not reverence a different // channel. - TraceEvent(Severity severity, grpc_slice data); + TraceEvent(Severity severity, const grpc_slice& data); ~TraceEvent(); diff --git a/src/core/lib/channel/channelz.h b/src/core/lib/channel/channelz.h index e43792126f0..e543cda1c2b 100644 --- a/src/core/lib/channel/channelz.h +++ b/src/core/lib/channel/channelz.h @@ -180,11 +180,11 @@ class ChannelNode : public BaseNode { bool ChannelIsDestroyed() { return channel_ == nullptr; } // proxy methods to composed classes. - void AddTraceEvent(ChannelTrace::Severity severity, grpc_slice data) { + void AddTraceEvent(ChannelTrace::Severity severity, const grpc_slice& data) { trace_.AddTraceEvent(severity, data); } void AddTraceEventWithReference(ChannelTrace::Severity severity, - grpc_slice data, + const grpc_slice& data, RefCountedPtr referenced_channel) { trace_.AddTraceEventWithReference(severity, data, std::move(referenced_channel)); @@ -214,11 +214,11 @@ class ServerNode : public BaseNode { intptr_t pagination_limit); // proxy methods to composed classes. - void AddTraceEvent(ChannelTrace::Severity severity, grpc_slice data) { + void AddTraceEvent(ChannelTrace::Severity severity, const grpc_slice& data) { trace_.AddTraceEvent(severity, data); } void AddTraceEventWithReference(ChannelTrace::Severity severity, - grpc_slice data, + const grpc_slice& data, RefCountedPtr referenced_channel) { trace_.AddTraceEventWithReference(severity, data, std::move(referenced_channel)); diff --git a/src/core/lib/channel/context.h b/src/core/lib/channel/context.h index 763e4ffc9fe..81b84f1ca05 100644 --- a/src/core/lib/channel/context.h +++ b/src/core/lib/channel/context.h @@ -35,9 +35,6 @@ typedef enum { /// Reserved for traffic_class_context. GRPC_CONTEXT_TRAFFIC, - /// Value is a \a grpc_grpclb_client_stats. 
- GRPC_GRPCLB_CLIENT_STATS, - GRPC_CONTEXT_COUNT } grpc_context_index; diff --git a/src/core/lib/compression/algorithm_metadata.h b/src/core/lib/compression/algorithm_metadata.h index 1be79e59c00..d58d2f541a0 100644 --- a/src/core/lib/compression/algorithm_metadata.h +++ b/src/core/lib/compression/algorithm_metadata.h @@ -32,7 +32,7 @@ grpc_slice grpc_compression_algorithm_slice( /** Find compression algorithm based on passed in mdstr - returns * GRPC_COMPRESS_ALGORITHM_COUNT on failure */ grpc_compression_algorithm grpc_compression_algorithm_from_slice( - grpc_slice str); + const grpc_slice& str); /** Return compression algorithm based metadata element */ grpc_mdelem grpc_compression_encoding_mdelem( @@ -51,11 +51,11 @@ grpc_mdelem grpc_stream_compression_encoding_mdelem( /** Find compression algorithm based on passed in mdstr - returns * GRPC_COMPRESS_ALGORITHM_COUNT on failure */ grpc_message_compression_algorithm -grpc_message_compression_algorithm_from_slice(grpc_slice str); +grpc_message_compression_algorithm_from_slice(const grpc_slice& str); /** Find stream compression algorithm based on passed in mdstr - returns * GRPC_STREAM_COMPRESS_ALGORITHM_COUNT on failure */ grpc_stream_compression_algorithm grpc_stream_compression_algorithm_from_slice( - grpc_slice str); + const grpc_slice& str); #endif /* GRPC_CORE_LIB_COMPRESSION_ALGORITHM_METADATA_H */ diff --git a/src/core/lib/compression/compression.cc b/src/core/lib/compression/compression.cc index 48717541a76..9139fa04ee5 100644 --- a/src/core/lib/compression/compression.cc +++ b/src/core/lib/compression/compression.cc @@ -147,7 +147,7 @@ grpc_slice grpc_compression_algorithm_slice( } grpc_compression_algorithm grpc_compression_algorithm_from_slice( - grpc_slice str) { + const grpc_slice& str) { if (grpc_slice_eq(str, GRPC_MDSTR_IDENTITY)) return GRPC_COMPRESS_NONE; if (grpc_slice_eq(str, GRPC_MDSTR_DEFLATE)) return GRPC_COMPRESS_DEFLATE; if (grpc_slice_eq(str, GRPC_MDSTR_GZIP)) return GRPC_COMPRESS_GZIP; diff --git a/src/core/lib/compression/compression_internal.cc b/src/core/lib/compression/compression_internal.cc index 538514caf37..65a36de4290 100644 --- a/src/core/lib/compression/compression_internal.cc +++ b/src/core/lib/compression/compression_internal.cc @@ -32,7 +32,7 @@ /* Interfaces related to MD */ grpc_message_compression_algorithm -grpc_message_compression_algorithm_from_slice(grpc_slice str) { +grpc_message_compression_algorithm_from_slice(const grpc_slice& str) { if (grpc_slice_eq(str, GRPC_MDSTR_IDENTITY)) return GRPC_MESSAGE_COMPRESS_NONE; if (grpc_slice_eq(str, GRPC_MDSTR_DEFLATE)) @@ -42,7 +42,7 @@ grpc_message_compression_algorithm_from_slice(grpc_slice str) { } grpc_stream_compression_algorithm grpc_stream_compression_algorithm_from_slice( - grpc_slice str) { + const grpc_slice& str) { if (grpc_slice_eq(str, GRPC_MDSTR_IDENTITY)) return GRPC_STREAM_COMPRESS_NONE; if (grpc_slice_eq(str, GRPC_MDSTR_GZIP)) return GRPC_STREAM_COMPRESS_GZIP; return GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT; diff --git a/src/core/lib/compression/stream_compression_gzip.cc b/src/core/lib/compression/stream_compression_gzip.cc index 682f712843a..bffdb1fd17d 100644 --- a/src/core/lib/compression/stream_compression_gzip.cc +++ b/src/core/lib/compression/stream_compression_gzip.cc @@ -60,7 +60,7 @@ static bool gzip_flate(grpc_stream_compression_context_gzip* ctx, if (r < 0 && r != Z_BUF_ERROR) { gpr_log(GPR_ERROR, "zlib error (%d)", r); grpc_slice_unref_internal(slice_out); - + grpc_slice_unref_internal(slice); return false; } else if (r == 
Z_STREAM_END && ctx->flate == inflate) { eoc = true; diff --git a/src/core/lib/debug/trace.h b/src/core/lib/debug/trace.h index 4623494520e..6108fb239bd 100644 --- a/src/core/lib/debug/trace.h +++ b/src/core/lib/debug/trace.h @@ -53,7 +53,8 @@ void grpc_tracer_enable_flag(grpc_core::TraceFlag* flag); class TraceFlag { public: TraceFlag(bool default_enabled, const char* name); - // This needs to be trivially destructible as it is used as global variable. + // TraceFlag needs to be trivially destructible since it is used as global + // variable. ~TraceFlag() = default; const char* name() const { return name_; } diff --git a/src/core/lib/gprpp/thd.h b/src/core/lib/gprpp/thd.h index e61e1c8ed04..cae707061e0 100644 --- a/src/core/lib/gprpp/thd.h +++ b/src/core/lib/gprpp/thd.h @@ -47,6 +47,27 @@ class ThreadInternalsInterface { class Thread { public: + class Options { + public: + Options() : joinable_(true), tracked_(true) {} + /// Set whether the thread is joinable or detached. + Options& set_joinable(bool joinable) { + joinable_ = joinable; + return *this; + } + bool joinable() const { return joinable_; } + + /// Set whether the thread is tracked for fork support. + Options& set_tracked(bool tracked) { + tracked_ = tracked; + return *this; + } + bool tracked() const { return tracked_; } + + private: + bool joinable_; + bool tracked_; + }; /// Default constructor only to allow use in structs that lack constructors /// Does not produce a validly-constructed thread; must later /// use placement new to construct a real thread. Does not init mu_ and cv_ @@ -57,14 +78,17 @@ class Thread { /// with argument \a arg once it is started. /// The optional \a success argument indicates whether the thread /// is successfully created. + /// The optional \a options can be used to set the thread detachable. Thread(const char* thd_name, void (*thd_body)(void* arg), void* arg, - bool* success = nullptr); + bool* success = nullptr, const Options& options = Options()); /// Move constructor for thread. After this is called, the other thread /// no longer represents a living thread object - Thread(Thread&& other) : state_(other.state_), impl_(other.impl_) { + Thread(Thread&& other) + : state_(other.state_), impl_(other.impl_), options_(other.options_) { other.state_ = MOVED; other.impl_ = nullptr; + other.options_ = Options(); } /// Move assignment operator for thread. After this is called, the other @@ -79,27 +103,37 @@ class Thread { // assert it for the time being. state_ = other.state_; impl_ = other.impl_; + options_ = other.options_; other.state_ = MOVED; other.impl_ = nullptr; + other.options_ = Options(); } return *this; } /// The destructor is strictly optional; either the thread never came to life - /// and the constructor itself killed it or it has already been joined and - /// the Join function kills it. The destructor shouldn't have to do anything. - ~Thread() { GPR_ASSERT(impl_ == nullptr); } + /// and the constructor itself killed it, or it has already been joined and + /// the Join function kills it, or it was detached (non-joinable) and it has + /// run to completion and is now killing itself. The destructor shouldn't have + /// to do anything. + ~Thread() { GPR_ASSERT(!options_.joinable() || impl_ == nullptr); } void Start() { if (impl_ != nullptr) { GPR_ASSERT(state_ == ALIVE); state_ = STARTED; impl_->Start(); + // If the Thread is not joinable, then the impl_ will cause the deletion + // of this Thread object when the thread function completes. 
Since no + // other operation is allowed to a detached thread after Start, there is + // no need to change the value of the impl_ or state_ . The next operation + // on this object will be the deletion, which will trigger the destructor. } else { GPR_ASSERT(state_ == FAILED); } - }; + } + // It is only legal to call Join if the Thread is created as joinable. void Join() { if (impl_ != nullptr) { impl_->Join(); @@ -125,6 +159,7 @@ class Thread { enum ThreadState { FAKE, ALIVE, STARTED, DONE, FAILED, MOVED }; ThreadState state_; internal::ThreadInternalsInterface* impl_; + Options options_; }; } // namespace grpc_core diff --git a/src/core/lib/gprpp/thd_posix.cc b/src/core/lib/gprpp/thd_posix.cc index 2751b221a8f..28932081538 100644 --- a/src/core/lib/gprpp/thd_posix.cc +++ b/src/core/lib/gprpp/thd_posix.cc @@ -44,13 +44,14 @@ struct thd_arg { void (*body)(void* arg); /* body of a thread */ void* arg; /* argument to a thread */ const char* name; /* name of thread. Can be nullptr. */ + bool joinable; + bool tracked; }; -class ThreadInternalsPosix - : public grpc_core::internal::ThreadInternalsInterface { +class ThreadInternalsPosix : public internal::ThreadInternalsInterface { public: ThreadInternalsPosix(const char* thd_name, void (*thd_body)(void* arg), - void* arg, bool* success) + void* arg, bool* success, const Thread::Options& options) : started_(false) { gpr_mu_init(&mu_); gpr_cv_init(&ready_); @@ -63,11 +64,20 @@ class ThreadInternalsPosix info->body = thd_body; info->arg = arg; info->name = thd_name; - grpc_core::Fork::IncThreadCount(); + info->joinable = options.joinable(); + info->tracked = options.tracked(); + if (options.tracked()) { + Fork::IncThreadCount(); + } GPR_ASSERT(pthread_attr_init(&attr) == 0); - GPR_ASSERT(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE) == - 0); + if (options.joinable()) { + GPR_ASSERT(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE) == + 0); + } else { + GPR_ASSERT(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == + 0); + } *success = (pthread_create(&pthread_id_, &attr, @@ -97,8 +107,14 @@ class ThreadInternalsPosix } gpr_mu_unlock(&arg.thread->mu_); + if (!arg.joinable) { + Delete(arg.thread); + } + (*arg.body)(arg.arg); - grpc_core::Fork::DecThreadCount(); + if (arg.tracked) { + Fork::DecThreadCount(); + } return nullptr; }, info) == 0); @@ -108,9 +124,11 @@ class ThreadInternalsPosix if (!(*success)) { /* don't use gpr_free, as this was allocated using malloc (see above) */ free(info); - grpc_core::Fork::DecThreadCount(); + if (options.tracked()) { + Fork::DecThreadCount(); + } } - }; + } ~ThreadInternalsPosix() override { gpr_mu_destroy(&mu_); @@ -136,15 +154,15 @@ class ThreadInternalsPosix } // namespace Thread::Thread(const char* thd_name, void (*thd_body)(void* arg), void* arg, - bool* success) { + bool* success, const Options& options) + : options_(options) { bool outcome = false; - impl_ = - grpc_core::New(thd_name, thd_body, arg, &outcome); + impl_ = New(thd_name, thd_body, arg, &outcome, options); if (outcome) { state_ = ALIVE; } else { state_ = FAILED; - grpc_core::Delete(impl_); + Delete(impl_); impl_ = nullptr; } diff --git a/src/core/lib/gprpp/thd_windows.cc b/src/core/lib/gprpp/thd_windows.cc index 2512002a96c..bbb48a58cd6 100644 --- a/src/core/lib/gprpp/thd_windows.cc +++ b/src/core/lib/gprpp/thd_windows.cc @@ -46,6 +46,7 @@ struct thd_info { void (*body)(void* arg); /* body of a thread */ void* arg; /* argument to a thread */ HANDLE join_event; /* the join event */ + bool joinable; /* whether it 
is joinable */ }; thread_local struct thd_info* g_thd_info; @@ -53,7 +54,8 @@ thread_local struct thd_info* g_thd_info; class ThreadInternalsWindows : public grpc_core::internal::ThreadInternalsInterface { public: - ThreadInternalsWindows(void (*thd_body)(void* arg), void* arg, bool* success) + ThreadInternalsWindows(void (*thd_body)(void* arg), void* arg, bool* success, + const grpc_core::Thread::Options& options) : started_(false) { gpr_mu_init(&mu_); gpr_cv_init(&ready_); @@ -63,21 +65,24 @@ class ThreadInternalsWindows info_->thread = this; info_->body = thd_body; info_->arg = arg; - - info_->join_event = CreateEvent(nullptr, FALSE, FALSE, nullptr); - if (info_->join_event == nullptr) { - gpr_free(info_); - *success = false; - } else { - handle = CreateThread(nullptr, 64 * 1024, thread_body, info_, 0, nullptr); - if (handle == nullptr) { - destroy_thread(); + info_->join_event = nullptr; + info_->joinable = options.joinable(); + if (info_->joinable) { + info_->join_event = CreateEvent(nullptr, FALSE, FALSE, nullptr); + if (info_->join_event == nullptr) { + gpr_free(info_); *success = false; - } else { - CloseHandle(handle); - *success = true; + return; } } + handle = CreateThread(nullptr, 64 * 1024, thread_body, info_, 0, nullptr); + if (handle == nullptr) { + destroy_thread(); + *success = false; + } else { + CloseHandle(handle); + *success = true; + } } ~ThreadInternalsWindows() override { @@ -107,14 +112,24 @@ class ThreadInternalsWindows gpr_inf_future(GPR_CLOCK_MONOTONIC)); } gpr_mu_unlock(&g_thd_info->thread->mu_); + if (!g_thd_info->joinable) { + grpc_core::Delete(g_thd_info->thread); + g_thd_info->thread = nullptr; + } g_thd_info->body(g_thd_info->arg); - BOOL ret = SetEvent(g_thd_info->join_event); - GPR_ASSERT(ret); + if (g_thd_info->joinable) { + BOOL ret = SetEvent(g_thd_info->join_event); + GPR_ASSERT(ret); + } else { + gpr_free(g_thd_info); + } return 0; } void destroy_thread() { - CloseHandle(info_->join_event); + if (info_ != nullptr && info_->joinable) { + CloseHandle(info_->join_event); + } gpr_free(info_); } @@ -129,14 +144,15 @@ class ThreadInternalsWindows namespace grpc_core { Thread::Thread(const char* thd_name, void (*thd_body)(void* arg), void* arg, - bool* success) { + bool* success, const Options& options) + : options_(options) { bool outcome = false; - impl_ = grpc_core::New(thd_body, arg, &outcome); + impl_ = New(thd_body, arg, &outcome, options); if (outcome) { state_ = ALIVE; } else { state_ = FAILED; - grpc_core::Delete(impl_); + Delete(impl_); impl_ = nullptr; } diff --git a/src/core/lib/http/httpcli.cc b/src/core/lib/http/httpcli.cc index 3bd7a2ce590..8c9ce4da0d3 100644 --- a/src/core/lib/http/httpcli.cc +++ b/src/core/lib/http/httpcli.cc @@ -229,7 +229,8 @@ static void internal_request_begin(grpc_httpcli_context* context, const grpc_httpcli_request* request, grpc_millis deadline, grpc_closure* on_done, grpc_httpcli_response* response, - const char* name, grpc_slice request_text) { + const char* name, + const grpc_slice& request_text) { internal_request* req = static_cast(gpr_malloc(sizeof(internal_request))); memset(req, 0, sizeof(*req)); diff --git a/src/core/lib/http/parser.cc b/src/core/lib/http/parser.cc index a37fdda8ea7..7ca1cc9db5f 100644 --- a/src/core/lib/http/parser.cc +++ b/src/core/lib/http/parser.cc @@ -351,7 +351,8 @@ void grpc_http_response_destroy(grpc_http_response* response) { gpr_free(response->hdrs); } -grpc_error* grpc_http_parser_parse(grpc_http_parser* parser, grpc_slice slice, +grpc_error* 
grpc_http_parser_parse(grpc_http_parser* parser, + const grpc_slice& slice, size_t* start_of_body) { for (size_t i = 0; i < GRPC_SLICE_LENGTH(slice); i++) { bool found_body_start = false; diff --git a/src/core/lib/http/parser.h b/src/core/lib/http/parser.h index a8f47c96c85..b51fd5af09f 100644 --- a/src/core/lib/http/parser.h +++ b/src/core/lib/http/parser.h @@ -101,7 +101,8 @@ void grpc_http_parser_init(grpc_http_parser* parser, grpc_http_type type, void grpc_http_parser_destroy(grpc_http_parser* parser); /* Sets \a start_of_body to the offset in \a slice of the start of the body. */ -grpc_error* grpc_http_parser_parse(grpc_http_parser* parser, grpc_slice slice, +grpc_error* grpc_http_parser_parse(grpc_http_parser* parser, + const grpc_slice& slice, size_t* start_of_body); grpc_error* grpc_http_parser_eof(grpc_http_parser* parser); diff --git a/src/core/lib/iomgr/error.cc b/src/core/lib/iomgr/error.cc index f4abad9b288..f194eb62d48 100644 --- a/src/core/lib/iomgr/error.cc +++ b/src/core/lib/iomgr/error.cc @@ -150,13 +150,12 @@ static void unref_errs(grpc_error* err) { } } -static void unref_slice(grpc_slice slice) { grpc_slice_unref_internal(slice); } - static void unref_strs(grpc_error* err) { for (size_t which = 0; which < GRPC_ERROR_STR_MAX; ++which) { uint8_t slot = err->strs[which]; if (slot != UINT8_MAX) { - unref_slice(*reinterpret_cast(err->arena + slot)); + grpc_slice_unref_internal( + *reinterpret_cast(err->arena + slot)); } } } @@ -231,7 +230,7 @@ static void internal_set_int(grpc_error** err, grpc_error_ints which, } static void internal_set_str(grpc_error** err, grpc_error_strs which, - grpc_slice value) { + const grpc_slice& value) { uint8_t slot = (*err)->strs[which]; if (slot == UINT8_MAX) { slot = get_placement(err, sizeof(value)); @@ -243,7 +242,8 @@ static void internal_set_str(grpc_error** err, grpc_error_strs which, return; } } else { - unref_slice(*reinterpret_cast((*err)->arena + slot)); + grpc_slice_unref_internal( + *reinterpret_cast((*err)->arena + slot)); } (*err)->strs[which] = slot; memcpy((*err)->arena + slot, &value, sizeof(value)); @@ -313,8 +313,8 @@ void grpc_enable_error_creation() { gpr_atm_no_barrier_store(&g_error_creation_allowed, true); } -grpc_error* grpc_error_create(const char* file, int line, grpc_slice desc, - grpc_error** referencing, +grpc_error* grpc_error_create(const char* file, int line, + const grpc_slice& desc, grpc_error** referencing, size_t num_referencing) { GPR_TIMER_SCOPE("grpc_error_create", 0); uint8_t initial_arena_capacity = static_cast( @@ -472,7 +472,7 @@ bool grpc_error_get_int(grpc_error* err, grpc_error_ints which, intptr_t* p) { } grpc_error* grpc_error_set_str(grpc_error* src, grpc_error_strs which, - grpc_slice str) { + const grpc_slice& str) { GPR_TIMER_SCOPE("grpc_error_set_str", 0); grpc_error* new_err = copy_error_and_unref(src); internal_set_str(&new_err, which, str); @@ -620,7 +620,7 @@ static char* key_str(grpc_error_strs which) { return gpr_strdup(error_str_name(which)); } -static char* fmt_str(grpc_slice slice) { +static char* fmt_str(const grpc_slice& slice) { char* s = nullptr; size_t sz = 0; size_t cap = 0; diff --git a/src/core/lib/iomgr/error.h b/src/core/lib/iomgr/error.h index cb740d5b01c..fcc6f0761b3 100644 --- a/src/core/lib/iomgr/error.h +++ b/src/core/lib/iomgr/error.h @@ -138,8 +138,9 @@ void grpc_enable_error_creation(); const char* grpc_error_string(grpc_error* error); /// Create an error - but use GRPC_ERROR_CREATE instead -grpc_error* grpc_error_create(const char* file, int line, grpc_slice 
desc, - grpc_error** referencing, size_t num_referencing); +grpc_error* grpc_error_create(const char* file, int line, + const grpc_slice& desc, grpc_error** referencing, + size_t num_referencing); /// Create an error (this is the preferred way of generating an error that is /// not due to a system call - for system calls, use GRPC_OS_ERROR or /// GRPC_WSA_ERROR as appropriate) @@ -200,7 +201,7 @@ bool grpc_error_get_int(grpc_error* error, grpc_error_ints which, intptr_t* p); /// This call takes ownership of the slice; the error is responsible for /// eventually unref-ing it. grpc_error* grpc_error_set_str(grpc_error* src, grpc_error_strs which, - grpc_slice str) GRPC_MUST_USE_RESULT; + const grpc_slice& str) GRPC_MUST_USE_RESULT; /// Returns false if the specified string is not set. /// Caller does NOT own the slice. bool grpc_error_get_str(grpc_error* error, grpc_error_strs which, diff --git a/src/core/lib/iomgr/tcp_posix.cc b/src/core/lib/iomgr/tcp_posix.cc index 68cce8a4655..525288a77ae 100644 --- a/src/core/lib/iomgr/tcp_posix.cc +++ b/src/core/lib/iomgr/tcp_posix.cc @@ -250,8 +250,6 @@ static void notify_on_read(grpc_tcp* tcp) { if (grpc_tcp_trace.enabled()) { gpr_log(GPR_INFO, "TCP:%p notify_on_read", tcp); } - GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp, - grpc_schedule_on_exec_ctx); grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_done_closure); } @@ -1157,6 +1155,8 @@ grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd, grpc_resource_quota_unref_internal(resource_quota); gpr_mu_init(&tcp->tb_mu); tcp->tb_head = nullptr; + GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp, + grpc_schedule_on_exec_ctx); /* Start being notified on errors if event engine can track errors. */ if (grpc_event_engine_can_track_errors()) { /* Grab a ref to tcp so that we can safely access the tcp struct when diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.cc b/src/core/lib/security/credentials/jwt/jwt_verifier.cc index 303f13300d9..5b120eddb43 100644 --- a/src/core/lib/security/credentials/jwt/jwt_verifier.cc +++ b/src/core/lib/security/credentials/jwt/jwt_verifier.cc @@ -134,7 +134,8 @@ static void jose_header_destroy(jose_header* h) { } /* Takes ownership of json and buffer. */ -static jose_header* jose_header_from_json(grpc_json* json, grpc_slice buffer) { +static jose_header* jose_header_from_json(grpc_json* json, + const grpc_slice& buffer) { grpc_json* cur; jose_header* h = static_cast(gpr_zalloc(sizeof(jose_header))); h->buffer = buffer; @@ -235,7 +236,8 @@ gpr_timespec grpc_jwt_claims_not_before(const grpc_jwt_claims* claims) { } /* Takes ownership of json and buffer even in case of failure. */ -grpc_jwt_claims* grpc_jwt_claims_from_json(grpc_json* json, grpc_slice buffer) { +grpc_jwt_claims* grpc_jwt_claims_from_json(grpc_json* json, + const grpc_slice& buffer) { grpc_json* cur; grpc_jwt_claims* claims = static_cast(gpr_malloc(sizeof(grpc_jwt_claims))); @@ -350,7 +352,7 @@ typedef struct { /* Takes ownership of the header, claims and signature. 
*/ static verifier_cb_ctx* verifier_cb_ctx_create( grpc_jwt_verifier* verifier, grpc_pollset* pollset, jose_header* header, - grpc_jwt_claims* claims, const char* audience, grpc_slice signature, + grpc_jwt_claims* claims, const char* audience, const grpc_slice& signature, const char* signed_jwt, size_t signed_jwt_len, void* user_data, grpc_jwt_verification_done_cb cb) { grpc_core::ApplicationCallbackExecCtx callback_exec_ctx; @@ -602,7 +604,8 @@ static EVP_PKEY* find_verification_key(const grpc_json* json, } static int verify_jwt_signature(EVP_PKEY* key, const char* alg, - grpc_slice signature, grpc_slice signed_data) { + const grpc_slice& signature, + const grpc_slice& signed_data) { EVP_MD_CTX* md_ctx = EVP_MD_CTX_create(); const EVP_MD* md = evp_md_from_alg(alg); int result = 0; diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.h b/src/core/lib/security/credentials/jwt/jwt_verifier.h index cdb09870bd5..3f69ada98d5 100644 --- a/src/core/lib/security/credentials/jwt/jwt_verifier.h +++ b/src/core/lib/security/credentials/jwt/jwt_verifier.h @@ -115,7 +115,8 @@ void grpc_jwt_verifier_verify(grpc_jwt_verifier* verifier, /* --- TESTING ONLY exposed functions. --- */ -grpc_jwt_claims* grpc_jwt_claims_from_json(grpc_json* json, grpc_slice buffer); +grpc_jwt_claims* grpc_jwt_claims_from_json(grpc_json* json, + const grpc_slice& buffer); grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims* claims, const char* audience); const char* grpc_jwt_issuer_email_domain(const char* issuer); diff --git a/src/core/lib/security/security_connector/fake/fake_security_connector.cc b/src/core/lib/security/security_connector/fake/fake_security_connector.cc index a0e2e6f030b..c55fd34d0e2 100644 --- a/src/core/lib/security/security_connector/fake/fake_security_connector.cc +++ b/src/core/lib/security/security_connector/fake/fake_security_connector.cc @@ -26,6 +26,8 @@ #include #include +#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h" +#include "src/core/ext/filters/client_channel/lb_policy/xds/xds.h" #include "src/core/ext/transport/chttp2/alpn/alpn.h" #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/channel/handshaker.h" @@ -53,8 +55,11 @@ class grpc_fake_channel_security_connector final target_(gpr_strdup(target)), expected_targets_( gpr_strdup(grpc_fake_transport_get_expected_targets(args))), - is_lb_channel_(grpc_core::FindTargetAuthorityTableInArgs(args) != - nullptr) { + is_lb_channel_( + grpc_channel_args_find( + args, GRPC_ARG_ADDRESS_IS_XDS_LOAD_BALANCER) != nullptr || + grpc_channel_args_find( + args, GRPC_ARG_ADDRESS_IS_GRPCLB_LOAD_BALANCER) != nullptr) { const grpc_arg* target_name_override_arg = grpc_channel_args_find(args, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG); if (target_name_override_arg != nullptr) { diff --git a/src/core/lib/security/transport/auth_filters.h b/src/core/lib/security/transport/auth_filters.h index af2104cfbcd..16a8e58ed9a 100644 --- a/src/core/lib/security/transport/auth_filters.h +++ b/src/core/lib/security/transport/auth_filters.h @@ -28,8 +28,8 @@ extern const grpc_channel_filter grpc_client_auth_filter; extern const grpc_channel_filter grpc_server_auth_filter; void grpc_auth_metadata_context_build( - const char* url_scheme, grpc_slice call_host, grpc_slice call_method, - grpc_auth_context* auth_context, + const char* url_scheme, const grpc_slice& call_host, + const grpc_slice& call_method, grpc_auth_context* auth_context, grpc_auth_metadata_context* auth_md_context); void 
grpc_auth_metadata_context_reset(grpc_auth_metadata_context* context); diff --git a/src/core/lib/security/transport/client_auth_filter.cc b/src/core/lib/security/transport/client_auth_filter.cc index 66f86b8bc52..f90c92efdc2 100644 --- a/src/core/lib/security/transport/client_auth_filter.cc +++ b/src/core/lib/security/transport/client_auth_filter.cc @@ -41,12 +41,42 @@ #define MAX_CREDENTIALS_METADATA_COUNT 4 namespace { + +/* We can have a per-channel credentials. */ +struct channel_data { + channel_data(grpc_channel_security_connector* security_connector, + grpc_auth_context* auth_context) + : security_connector( + security_connector->Ref(DEBUG_LOCATION, "client_auth_filter")), + auth_context(auth_context->Ref(DEBUG_LOCATION, "client_auth_filter")) {} + ~channel_data() { + security_connector.reset(DEBUG_LOCATION, "client_auth_filter"); + auth_context.reset(DEBUG_LOCATION, "client_auth_filter"); + } + + grpc_core::RefCountedPtr security_connector; + grpc_core::RefCountedPtr auth_context; +}; + /* We can have a per-call credentials. */ struct call_data { call_data(grpc_call_element* elem, const grpc_call_element_args& args) - : arena(args.arena), - owning_call(args.call_stack), - call_combiner(args.call_combiner) {} + : owning_call(args.call_stack), call_combiner(args.call_combiner) { + channel_data* chand = static_cast(elem->channel_data); + GPR_ASSERT(args.context != nullptr); + if (args.context[GRPC_CONTEXT_SECURITY].value == nullptr) { + args.context[GRPC_CONTEXT_SECURITY].value = + grpc_client_security_context_create(args.arena, /*creds=*/nullptr); + args.context[GRPC_CONTEXT_SECURITY].destroy = + grpc_client_security_context_destroy; + } + grpc_client_security_context* sec_ctx = + static_cast( + args.context[GRPC_CONTEXT_SECURITY].value); + sec_ctx->auth_context.reset(DEBUG_LOCATION, "client_auth_filter"); + sec_ctx->auth_context = + chand->auth_context->Ref(DEBUG_LOCATION, "client_auth_filter"); + } // This method is technically the dtor of this class. However, since // `get_request_metadata_cancel_closure` can run in parallel to @@ -61,7 +91,6 @@ struct call_data { grpc_auth_metadata_context_reset(&auth_md_context); } - gpr_arena* arena; grpc_call_stack* owning_call; grpc_call_combiner* call_combiner; grpc_core::RefCountedPtr creds; @@ -81,21 +110,6 @@ struct call_data { grpc_closure get_request_metadata_cancel_closure; }; -/* We can have a per-channel credentials. 
*/ -struct channel_data { - channel_data(grpc_channel_security_connector* security_connector, - grpc_auth_context* auth_context) - : security_connector( - security_connector->Ref(DEBUG_LOCATION, "client_auth_filter")), - auth_context(auth_context->Ref(DEBUG_LOCATION, "client_auth_filter")) {} - ~channel_data() { - security_connector.reset(DEBUG_LOCATION, "client_auth_filter"); - auth_context.reset(DEBUG_LOCATION, "client_auth_filter"); - } - - grpc_core::RefCountedPtr security_connector; - grpc_core::RefCountedPtr auth_context; -}; } // namespace void grpc_auth_metadata_context_reset( @@ -155,8 +169,8 @@ static void on_credentials_metadata(void* arg, grpc_error* input_error) { } void grpc_auth_metadata_context_build( - const char* url_scheme, grpc_slice call_host, grpc_slice call_method, - grpc_auth_context* auth_context, + const char* url_scheme, const grpc_slice& call_host, + const grpc_slice& call_method, grpc_auth_context* auth_context, grpc_auth_metadata_context* auth_md_context) { char* service = grpc_slice_to_c_string(call_method); char* last_slash = strrchr(service, '/'); @@ -307,24 +321,6 @@ static void auth_start_transport_stream_op_batch( call_data* calld = static_cast(elem->call_data); channel_data* chand = static_cast(elem->channel_data); - if (!batch->cancel_stream) { - // TODO(hcaseyal): move this to init_call_elem once issue #15927 is - // resolved. - GPR_ASSERT(batch->payload->context != nullptr); - if (batch->payload->context[GRPC_CONTEXT_SECURITY].value == nullptr) { - batch->payload->context[GRPC_CONTEXT_SECURITY].value = - grpc_client_security_context_create(calld->arena, /*creds=*/nullptr); - batch->payload->context[GRPC_CONTEXT_SECURITY].destroy = - grpc_client_security_context_destroy; - } - grpc_client_security_context* sec_ctx = - static_cast( - batch->payload->context[GRPC_CONTEXT_SECURITY].value); - sec_ctx->auth_context.reset(DEBUG_LOCATION, "client_auth_filter"); - sec_ctx->auth_context = - chand->auth_context->Ref(DEBUG_LOCATION, "client_auth_filter"); - } - if (batch->send_initial_metadata) { grpc_metadata_batch* metadata = batch->payload->send_initial_metadata.send_initial_metadata; diff --git a/src/core/lib/slice/percent_encoding.cc b/src/core/lib/slice/percent_encoding.cc index 45cd2cc47f4..79a4805bc91 100644 --- a/src/core/lib/slice/percent_encoding.cc +++ b/src/core/lib/slice/percent_encoding.cc @@ -38,7 +38,7 @@ static bool is_unreserved_character(uint8_t c, return ((unreserved_bytes[c / 8] >> (c % 8)) & 1) != 0; } -grpc_slice grpc_percent_encode_slice(grpc_slice slice, +grpc_slice grpc_percent_encode_slice(const grpc_slice& slice, const uint8_t* unreserved_bytes) { static const uint8_t hex[] = "0123456789ABCDEF"; @@ -86,7 +86,7 @@ static uint8_t dehex(uint8_t c) { GPR_UNREACHABLE_CODE(return 255); } -bool grpc_strict_percent_decode_slice(grpc_slice slice_in, +bool grpc_strict_percent_decode_slice(const grpc_slice& slice_in, const uint8_t* unreserved_bytes, grpc_slice* slice_out) { const uint8_t* p = GRPC_SLICE_START_PTR(slice_in); @@ -126,7 +126,7 @@ bool grpc_strict_percent_decode_slice(grpc_slice slice_in, return true; } -grpc_slice grpc_permissive_percent_decode_slice(grpc_slice slice_in) { +grpc_slice grpc_permissive_percent_decode_slice(const grpc_slice& slice_in) { const uint8_t* p = GRPC_SLICE_START_PTR(slice_in); const uint8_t* in_end = GRPC_SLICE_END_PTR(slice_in); size_t out_length = 0; diff --git a/src/core/lib/slice/percent_encoding.h b/src/core/lib/slice/percent_encoding.h index 6b13ffc3fee..43b20f090f0 100644 --- 
a/src/core/lib/slice/percent_encoding.h +++ b/src/core/lib/slice/percent_encoding.h @@ -46,7 +46,7 @@ extern const uint8_t grpc_compatible_percent_encoding_unreserved_bytes[256 / 8]; /* Percent-encode a slice, returning the new slice (this cannot fail): unreserved_bytes is a bitfield indicating which bytes are considered unreserved and thus do not need percent encoding */ -grpc_slice grpc_percent_encode_slice(grpc_slice slice, +grpc_slice grpc_percent_encode_slice(const grpc_slice& slice, const uint8_t* unreserved_bytes); /* Percent-decode a slice, strictly. If the input is legal (contains no unreserved bytes, and legal % encodings), @@ -54,12 +54,12 @@ grpc_slice grpc_percent_encode_slice(grpc_slice slice, If the input is not legal, returns false and leaves *slice_out untouched. unreserved_bytes is a bitfield indicating which bytes are considered unreserved and thus do not need percent encoding */ -bool grpc_strict_percent_decode_slice(grpc_slice slice_in, +bool grpc_strict_percent_decode_slice(const grpc_slice& slice_in, const uint8_t* unreserved_bytes, grpc_slice* slice_out); /* Percent-decode a slice, permissively. If a % triplet can not be decoded, pass it through verbatim. This cannot fail. */ -grpc_slice grpc_permissive_percent_decode_slice(grpc_slice slice_in); +grpc_slice grpc_permissive_percent_decode_slice(const grpc_slice& slice_in); #endif /* GRPC_CORE_LIB_SLICE_PERCENT_ENCODING_H */ diff --git a/src/core/lib/slice/slice.cc b/src/core/lib/slice/slice.cc index 31437aa4600..ac935f13e28 100644 --- a/src/core/lib/slice/slice.cc +++ b/src/core/lib/slice/slice.cc @@ -50,19 +50,6 @@ grpc_slice grpc_slice_copy(grpc_slice s) { return out; } -grpc_slice grpc_slice_ref_internal(grpc_slice slice) { - if (slice.refcount) { - slice.refcount->vtable->ref(slice.refcount); - } - return slice; -} - -void grpc_slice_unref_internal(grpc_slice slice) { - if (slice.refcount) { - slice.refcount->vtable->unref(slice.refcount); - } -} - /* Public API */ grpc_slice grpc_slice_ref(grpc_slice slice) { return grpc_slice_ref_internal(slice); diff --git a/src/core/lib/slice/slice_hash_table.h b/src/core/lib/slice/slice_hash_table.h index 4bbcf88e895..942830a3e9c 100644 --- a/src/core/lib/slice/slice_hash_table.h +++ b/src/core/lib/slice/slice_hash_table.h @@ -88,7 +88,7 @@ class SliceHashTable : public RefCounted> { SliceHashTable(size_t num_entries, Entry* entries, ValueCmp value_cmp); virtual ~SliceHashTable(); - void Add(grpc_slice key, T& value); + void Add(const grpc_slice& key, T& value); // Default value comparison function, if none specified by caller. 
static int DefaultValueCmp(const T& a, const T& b) { return GPR_ICMP(a, b); } @@ -137,7 +137,7 @@ SliceHashTable::~SliceHashTable() { } template -void SliceHashTable::Add(grpc_slice key, T& value) { +void SliceHashTable::Add(const grpc_slice& key, T& value) { const size_t hash = grpc_slice_hash(key); for (size_t offset = 0; offset < size_; ++offset) { const size_t idx = (hash + offset) % size_; diff --git a/src/core/lib/slice/slice_intern.cc b/src/core/lib/slice/slice_intern.cc index e53c040e1aa..0eef38d3f35 100644 --- a/src/core/lib/slice/slice_intern.cc +++ b/src/core/lib/slice/slice_intern.cc @@ -196,7 +196,7 @@ grpc_slice grpc_slice_maybe_static_intern(grpc_slice slice, return slice; } -bool grpc_slice_is_interned(grpc_slice slice) { +bool grpc_slice_is_interned(const grpc_slice& slice) { return (slice.refcount && slice.refcount->vtable == &interned_slice_vtable) || GRPC_IS_STATIC_METADATA_STRING(slice); } diff --git a/src/core/lib/slice/slice_internal.h b/src/core/lib/slice/slice_internal.h index 5b05951522f..0e50866b70e 100644 --- a/src/core/lib/slice/slice_internal.h +++ b/src/core/lib/slice/slice_internal.h @@ -24,15 +24,26 @@ #include #include -grpc_slice grpc_slice_ref_internal(grpc_slice slice); -void grpc_slice_unref_internal(grpc_slice slice); +inline const grpc_slice& grpc_slice_ref_internal(const grpc_slice& slice) { + if (slice.refcount) { + slice.refcount->vtable->ref(slice.refcount); + } + return slice; +} + +inline void grpc_slice_unref_internal(const grpc_slice& slice) { + if (slice.refcount) { + slice.refcount->vtable->unref(slice.refcount); + } +} + void grpc_slice_buffer_reset_and_unref_internal(grpc_slice_buffer* sb); void grpc_slice_buffer_partial_unref_internal(grpc_slice_buffer* sb, size_t idx); void grpc_slice_buffer_destroy_internal(grpc_slice_buffer* sb); /* Check if a slice is interned */ -bool grpc_slice_is_interned(grpc_slice slice); +bool grpc_slice_is_interned(const grpc_slice& slice); void grpc_slice_intern_init(void); void grpc_slice_intern_shutdown(void); diff --git a/src/core/lib/slice/slice_traits.h b/src/core/lib/slice/slice_traits.h index ee01916525e..07d13cd8b54 100644 --- a/src/core/lib/slice/slice_traits.h +++ b/src/core/lib/slice/slice_traits.h @@ -24,8 +24,8 @@ #include #include -bool grpc_slice_is_legal_header(grpc_slice s); -bool grpc_slice_is_legal_nonbin_header(grpc_slice s); -bool grpc_slice_is_bin_suffixed(grpc_slice s); +bool grpc_slice_is_legal_header(const grpc_slice& s); +bool grpc_slice_is_legal_nonbin_header(const grpc_slice& s); +bool grpc_slice_is_bin_suffixed(const grpc_slice& s); #endif /* GRPC_CORE_LIB_SLICE_SLICE_TRAITS_H */ diff --git a/src/core/lib/slice/slice_weak_hash_table.h b/src/core/lib/slice/slice_weak_hash_table.h index dc3ccc5dadd..1335c817a39 100644 --- a/src/core/lib/slice/slice_weak_hash_table.h +++ b/src/core/lib/slice/slice_weak_hash_table.h @@ -46,7 +46,7 @@ class SliceWeakHashTable : public RefCounted> { /// Add a mapping from \a key to \a value, taking ownership of \a key. This /// operation will always succeed. It may discard older entries. - void Add(grpc_slice key, T value) { + void Add(const grpc_slice& key, T value) { const size_t idx = grpc_slice_hash(key) % Size; entries_[idx].Set(key, std::move(value)); return; @@ -54,7 +54,7 @@ class SliceWeakHashTable : public RefCounted> { /// Returns the value from the table associated with / \a key or null if not /// found. 
- const T* Get(const grpc_slice key) const { + const T* Get(const grpc_slice& key) const { const size_t idx = grpc_slice_hash(key) % Size; const auto& entry = entries_[idx]; return grpc_slice_eq(entry.key(), key) ? entry.value() : nullptr; @@ -79,7 +79,7 @@ class SliceWeakHashTable : public RefCounted> { ~Entry() { if (is_set_) grpc_slice_unref_internal(key_); } - grpc_slice key() const { return key_; } + const grpc_slice& key() const { return key_; } /// Return the entry's value, or null if unset. const T* value() const { @@ -88,7 +88,7 @@ class SliceWeakHashTable : public RefCounted> { } /// Set the \a key and \a value (which is moved) for the entry. - void Set(grpc_slice key, T&& value) { + void Set(const grpc_slice& key, T&& value) { if (is_set_) grpc_slice_unref_internal(key_); key_ = key; value_ = std::move(value); diff --git a/src/core/lib/surface/init.cc b/src/core/lib/surface/init.cc index e507de87c2a..fdb584da68f 100644 --- a/src/core/lib/surface/init.cc +++ b/src/core/lib/surface/init.cc @@ -33,6 +33,7 @@ #include "src/core/lib/debug/stats.h" #include "src/core/lib/debug/trace.h" #include "src/core/lib/gprpp/fork.h" +#include "src/core/lib/gprpp/mutex_lock.h" #include "src/core/lib/http/parser.h" #include "src/core/lib/iomgr/call_combiner.h" #include "src/core/lib/iomgr/combiner.h" @@ -61,10 +62,15 @@ extern void grpc_register_built_in_plugins(void); static gpr_once g_basic_init = GPR_ONCE_INIT; static gpr_mu g_init_mu; static int g_initializations; +static gpr_cv* g_shutting_down_cv; +static bool g_shutting_down; static void do_basic_init(void) { gpr_log_verbosity_init(); gpr_mu_init(&g_init_mu); + g_shutting_down_cv = static_cast(malloc(sizeof(gpr_cv))); + gpr_cv_init(g_shutting_down_cv); + g_shutting_down = false; grpc_register_built_in_plugins(); grpc_cq_global_init(); g_initializations = 0; @@ -118,8 +124,12 @@ void grpc_init(void) { int i; gpr_once_init(&g_basic_init, do_basic_init); - gpr_mu_lock(&g_init_mu); + grpc_core::MutexLock lock(&g_init_mu); if (++g_initializations == 1) { + if (g_shutting_down) { + g_shutting_down = false; + gpr_cv_broadcast(g_shutting_down_cv); + } grpc_core::Fork::GlobalInit(); grpc_fork_handlers_auto_register(); gpr_time_init(); @@ -150,50 +160,88 @@ void grpc_init(void) { grpc_channel_init_finalize(); grpc_iomgr_start(); } - gpr_mu_unlock(&g_init_mu); GRPC_API_TRACE("grpc_init(void)", 0, ()); } -void grpc_shutdown(void) { +void grpc_shutdown_internal_locked(void) { int i; - GRPC_API_TRACE("grpc_shutdown(void)", 0, ()); - gpr_mu_lock(&g_init_mu); - if (--g_initializations == 0) { + { + grpc_core::ExecCtx exec_ctx(0); + grpc_iomgr_shutdown_background_closure(); { - grpc_core::ExecCtx exec_ctx(0); - grpc_iomgr_shutdown_background_closure(); - { - grpc_timer_manager_set_threading( - false); // shutdown timer_manager thread - grpc_core::Executor::ShutdownAll(); - for (i = g_number_of_plugins; i >= 0; i--) { - if (g_all_of_the_plugins[i].destroy != nullptr) { - g_all_of_the_plugins[i].destroy(); - } + grpc_timer_manager_set_threading(false); // shutdown timer_manager thread + grpc_core::Executor::ShutdownAll(); + for (i = g_number_of_plugins; i >= 0; i--) { + if (g_all_of_the_plugins[i].destroy != nullptr) { + g_all_of_the_plugins[i].destroy(); } } - grpc_iomgr_shutdown(); - gpr_timers_global_destroy(); - grpc_tracer_shutdown(); - grpc_mdctx_global_shutdown(); - grpc_core::HandshakerRegistry::Shutdown(); - grpc_slice_intern_shutdown(); - grpc_core::channelz::ChannelzRegistry::Shutdown(); - grpc_stats_shutdown(); - grpc_core::Fork::GlobalShutdown(); 
} - grpc_core::ExecCtx::GlobalShutdown(); - grpc_core::ApplicationCallbackExecCtx::GlobalShutdown(); + grpc_iomgr_shutdown(); + gpr_timers_global_destroy(); + grpc_tracer_shutdown(); + grpc_mdctx_global_shutdown(); + grpc_core::HandshakerRegistry::Shutdown(); + grpc_slice_intern_shutdown(); + grpc_core::channelz::ChannelzRegistry::Shutdown(); + grpc_stats_shutdown(); + grpc_core::Fork::GlobalShutdown(); + } + grpc_core::ExecCtx::GlobalShutdown(); + grpc_core::ApplicationCallbackExecCtx::GlobalShutdown(); + g_shutting_down = false; + gpr_cv_broadcast(g_shutting_down_cv); +} + +void grpc_shutdown_internal(void* ignored) { + GRPC_API_TRACE("grpc_shutdown_internal", 0, ()); + grpc_core::MutexLock lock(&g_init_mu); + // We have released lock from the shutdown thread and it is possible that + // another grpc_init has been called, and do nothing if that is the case. + if (--g_initializations != 0) { + return; + } + grpc_shutdown_internal_locked(); +} + +void grpc_shutdown(void) { + GRPC_API_TRACE("grpc_shutdown(void)", 0, ()); + grpc_core::MutexLock lock(&g_init_mu); + if (--g_initializations == 0) { + g_initializations++; + g_shutting_down = true; + // spawn a detached thread to do the actual clean up in case we are + // currently in an executor thread. + grpc_core::Thread cleanup_thread( + "grpc_shutdown", grpc_shutdown_internal, nullptr, nullptr, + grpc_core::Thread::Options().set_joinable(false).set_tracked(false)); + cleanup_thread.Start(); + } +} + +void grpc_shutdown_blocking(void) { + GRPC_API_TRACE("grpc_shutdown_blocking(void)", 0, ()); + grpc_core::MutexLock lock(&g_init_mu); + if (--g_initializations == 0) { + g_shutting_down = true; + grpc_shutdown_internal_locked(); } - gpr_mu_unlock(&g_init_mu); } int grpc_is_initialized(void) { int r; gpr_once_init(&g_basic_init, do_basic_init); - gpr_mu_lock(&g_init_mu); + grpc_core::MutexLock lock(&g_init_mu); r = g_initializations > 0; - gpr_mu_unlock(&g_init_mu); return r; } + +void grpc_maybe_wait_for_async_shutdown(void) { + gpr_once_init(&g_basic_init, do_basic_init); + grpc_core::MutexLock lock(&g_init_mu); + while (g_shutting_down) { + gpr_cv_wait(g_shutting_down_cv, &g_init_mu, + gpr_inf_future(GPR_CLOCK_REALTIME)); + } +} diff --git a/src/core/lib/surface/init.h b/src/core/lib/surface/init.h index 193f51447d9..6eaa488d054 100644 --- a/src/core/lib/surface/init.h +++ b/src/core/lib/surface/init.h @@ -22,5 +22,6 @@ void grpc_register_security_filters(void); void grpc_security_pre_init(void); void grpc_security_init(void); +void grpc_maybe_wait_for_async_shutdown(void); #endif /* GRPC_CORE_LIB_SURFACE_INIT_H */ diff --git a/src/core/lib/transport/metadata.cc b/src/core/lib/transport/metadata.cc index 30482a1b3b1..b7e7fd40c00 100644 --- a/src/core/lib/transport/metadata.cc +++ b/src/core/lib/transport/metadata.cc @@ -71,6 +71,12 @@ grpc_core::DebugOnlyTraceFlag grpc_trace_metadata(false, "metadata"); typedef void (*destroy_user_data_func)(void* user_data); +struct UserData { + gpr_mu mu_user_data; + gpr_atm destroy_user_data; + gpr_atm user_data; +}; + /* Shadow structure for grpc_mdelem_data for interned elements */ typedef struct interned_metadata { /* must be byte compatible with grpc_mdelem_data */ @@ -80,9 +86,7 @@ typedef struct interned_metadata { /* private only data */ gpr_atm refcnt; - gpr_mu mu_user_data; - gpr_atm destroy_user_data; - gpr_atm user_data; + UserData user_data; struct interned_metadata* bucket_next; } interned_metadata; @@ -95,6 +99,8 @@ typedef struct allocated_metadata { /* private only data */ gpr_atm refcnt; + 
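Stepping back to the surface/init.cc hunks just above: grpc_shutdown() now only decrements the init count and hands the real teardown to a detached, fork-untracked grpc_core::Thread, so it can be called from inside an executor or timer thread without self-joining; grpc_shutdown_blocking() keeps the old synchronous behavior, and grpc_maybe_wait_for_async_shutdown() lets callers such as tests park until the asynchronous cleanup finishes. A rough usage sketch follows; the worker function and demo wrapper are illustrative only, while the Options calls mirror what the patch introduces.

    #include <grpc/grpc.h>

    #include "src/core/lib/gprpp/thd.h"
    #include "src/core/lib/surface/init.h"

    static void worker(void* /*arg*/) { /* do some background work */ }

    static void demo() {
      // A detached, untracked thread: the same Options combination the new
      // grpc_shutdown() uses for its cleanup thread. Join() is not legal for
      // a non-joinable thread; its internals free themselves when the body
      // returns.
      grpc_core::Thread t(
          "demo_worker", worker, nullptr, /*success=*/nullptr,
          grpc_core::Thread::Options().set_joinable(false).set_tracked(false));
      t.Start();

      grpc_init();
      grpc_shutdown();  // returns promptly; teardown runs on a detached thread
      grpc_maybe_wait_for_async_shutdown();  // e.g. in tests, before re-init
    }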
+ UserData user_data; } allocated_metadata; typedef struct mdtab_shard { @@ -178,16 +184,17 @@ static void gc_mdtab(mdtab_shard* shard) { for (i = 0; i < shard->capacity; i++) { prev_next = &shard->elems[i]; for (md = shard->elems[i]; md; md = next) { - void* user_data = (void*)gpr_atm_no_barrier_load(&md->user_data); + void* user_data = + (void*)gpr_atm_no_barrier_load(&md->user_data.user_data); next = md->bucket_next; if (gpr_atm_acq_load(&md->refcnt) == 0) { grpc_slice_unref_internal(md->key); grpc_slice_unref_internal(md->value); - if (md->user_data) { + if (md->user_data.user_data) { ((destroy_user_data_func)gpr_atm_no_barrier_load( - &md->destroy_user_data))(user_data); + &md->user_data.destroy_user_data))(user_data); } - gpr_mu_destroy(&md->mu_user_data); + gpr_mu_destroy(&md->user_data.mu_user_data); gpr_free(md); *prev_next = next; num_freed++; @@ -251,6 +258,9 @@ grpc_mdelem grpc_mdelem_create( allocated->key = grpc_slice_ref_internal(key); allocated->value = grpc_slice_ref_internal(value); gpr_atm_rel_store(&allocated->refcnt, 1); + allocated->user_data.user_data = 0; + allocated->user_data.destroy_user_data = 0; + gpr_mu_init(&allocated->user_data.mu_user_data); #ifndef NDEBUG if (grpc_trace_metadata.enabled()) { char* key_str = grpc_slice_to_c_string(allocated->key); @@ -299,11 +309,11 @@ grpc_mdelem grpc_mdelem_create( gpr_atm_rel_store(&md->refcnt, 1); md->key = grpc_slice_ref_internal(key); md->value = grpc_slice_ref_internal(value); - md->user_data = 0; - md->destroy_user_data = 0; + md->user_data.user_data = 0; + md->user_data.destroy_user_data = 0; md->bucket_next = shard->elems[idx]; shard->elems[idx] = md; - gpr_mu_init(&md->mu_user_data); + gpr_mu_init(&md->user_data.mu_user_data); #ifndef NDEBUG if (grpc_trace_metadata.enabled()) { char* key_str = grpc_slice_to_c_string(md->key); @@ -450,6 +460,13 @@ void grpc_mdelem_unref(grpc_mdelem gmd DEBUG_ARGS) { if (1 == prev_refcount) { grpc_slice_unref_internal(md->key); grpc_slice_unref_internal(md->value); + if (md->user_data.user_data) { + destroy_user_data_func destroy_user_data = + (destroy_user_data_func)gpr_atm_no_barrier_load( + &md->user_data.destroy_user_data); + destroy_user_data((void*)md->user_data.user_data); + } + gpr_mu_destroy(&md->user_data.mu_user_data); gpr_free(md); } break; @@ -457,58 +474,74 @@ void grpc_mdelem_unref(grpc_mdelem gmd DEBUG_ARGS) { } } +static void* get_user_data(UserData* user_data, void (*destroy_func)(void*)) { + if (gpr_atm_acq_load(&user_data->destroy_user_data) == + (gpr_atm)destroy_func) { + return (void*)gpr_atm_no_barrier_load(&user_data->user_data); + } else { + return nullptr; + } +} + void* grpc_mdelem_get_user_data(grpc_mdelem md, void (*destroy_func)(void*)) { switch (GRPC_MDELEM_STORAGE(md)) { case GRPC_MDELEM_STORAGE_EXTERNAL: - case GRPC_MDELEM_STORAGE_ALLOCATED: return nullptr; case GRPC_MDELEM_STORAGE_STATIC: return (void*)grpc_static_mdelem_user_data[GRPC_MDELEM_DATA(md) - grpc_static_mdelem_table]; + case GRPC_MDELEM_STORAGE_ALLOCATED: { + allocated_metadata* am = + reinterpret_cast(GRPC_MDELEM_DATA(md)); + return get_user_data(&am->user_data, destroy_func); + } case GRPC_MDELEM_STORAGE_INTERNED: { interned_metadata* im = reinterpret_cast GRPC_MDELEM_DATA(md); - void* result; - if (gpr_atm_acq_load(&im->destroy_user_data) == (gpr_atm)destroy_func) { - return (void*)gpr_atm_no_barrier_load(&im->user_data); - } else { - return nullptr; - } - return result; + return get_user_data(&im->user_data, destroy_func); } } GPR_UNREACHABLE_CODE(return nullptr); } +static void* 
set_user_data(UserData* ud, void (*destroy_func)(void*), + void* user_data) { + GPR_ASSERT((user_data == nullptr) == (destroy_func == nullptr)); + gpr_mu_lock(&ud->mu_user_data); + if (gpr_atm_no_barrier_load(&ud->destroy_user_data)) { + /* user data can only be set once */ + gpr_mu_unlock(&ud->mu_user_data); + if (destroy_func != nullptr) { + destroy_func(user_data); + } + return (void*)gpr_atm_no_barrier_load(&ud->user_data); + } + gpr_atm_no_barrier_store(&ud->user_data, (gpr_atm)user_data); + gpr_atm_rel_store(&ud->destroy_user_data, (gpr_atm)destroy_func); + gpr_mu_unlock(&ud->mu_user_data); + return user_data; +} + void* grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void*), void* user_data) { switch (GRPC_MDELEM_STORAGE(md)) { case GRPC_MDELEM_STORAGE_EXTERNAL: - case GRPC_MDELEM_STORAGE_ALLOCATED: destroy_func(user_data); return nullptr; case GRPC_MDELEM_STORAGE_STATIC: destroy_func(user_data); return (void*)grpc_static_mdelem_user_data[GRPC_MDELEM_DATA(md) - grpc_static_mdelem_table]; + case GRPC_MDELEM_STORAGE_ALLOCATED: { + allocated_metadata* am = + reinterpret_cast(GRPC_MDELEM_DATA(md)); + return set_user_data(&am->user_data, destroy_func, user_data); + } case GRPC_MDELEM_STORAGE_INTERNED: { interned_metadata* im = reinterpret_cast GRPC_MDELEM_DATA(md); GPR_ASSERT(!is_mdelem_static(md)); - GPR_ASSERT((user_data == nullptr) == (destroy_func == nullptr)); - gpr_mu_lock(&im->mu_user_data); - if (gpr_atm_no_barrier_load(&im->destroy_user_data)) { - /* user data can only be set once */ - gpr_mu_unlock(&im->mu_user_data); - if (destroy_func != nullptr) { - destroy_func(user_data); - } - return (void*)gpr_atm_no_barrier_load(&im->user_data); - } - gpr_atm_no_barrier_store(&im->user_data, (gpr_atm)user_data); - gpr_atm_rel_store(&im->destroy_user_data, (gpr_atm)destroy_func); - gpr_mu_unlock(&im->mu_user_data); - return user_data; + return set_user_data(&im->user_data, destroy_func, user_data); } } GPR_UNREACHABLE_CODE(return nullptr); diff --git a/src/core/lib/transport/metadata_batch.cc b/src/core/lib/transport/metadata_batch.cc index 928ed73cdad..49a56e709d5 100644 --- a/src/core/lib/transport/metadata_batch.cc +++ b/src/core/lib/transport/metadata_batch.cc @@ -227,7 +227,7 @@ void grpc_metadata_batch_remove(grpc_metadata_batch* batch, } void grpc_metadata_batch_set_value(grpc_linked_mdelem* storage, - grpc_slice value) { + const grpc_slice& value) { grpc_mdelem old_mdelem = storage->md; grpc_mdelem new_mdelem = grpc_mdelem_from_slices( grpc_slice_ref_internal(GRPC_MDKEY(old_mdelem)), value); diff --git a/src/core/lib/transport/metadata_batch.h b/src/core/lib/transport/metadata_batch.h index f6e8bbf2052..d87a8b0886d 100644 --- a/src/core/lib/transport/metadata_batch.h +++ b/src/core/lib/transport/metadata_batch.h @@ -74,7 +74,7 @@ grpc_error* grpc_metadata_batch_substitute(grpc_metadata_batch* batch, grpc_mdelem new_value); void grpc_metadata_batch_set_value(grpc_linked_mdelem* storage, - grpc_slice value); + const grpc_slice& value); /** Add \a storage to the beginning of \a batch. storage->md is assumed to be valid. diff --git a/src/core/lib/transport/service_config.h b/src/core/lib/transport/service_config.h index af24501e3df..224c6dd576c 100644 --- a/src/core/lib/transport/service_config.h +++ b/src/core/lib/transport/service_config.h @@ -92,7 +92,7 @@ class ServiceConfig : public RefCounted { /// Caller does NOT own a reference to the result. 
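On the metadata.cc hunks above: the user-data fields move into a shared UserData struct so that GRPC_MDELEM_STORAGE_ALLOCATED elements gain the same set-once user_data support interned elements already had (previously a set on an allocated element simply destroyed the value and returned nullptr), and the final unref now runs the destroy function for allocated elements as well. A small sketch of the resulting contract follows; destroy_payload, the x-demo key, and the demo wrapper are illustrative, while the mdelem calls themselves are existing core APIs.

    #include <grpc/grpc.h>
    #include <grpc/slice.h>
    #include <grpc/support/alloc.h>
    #include <grpc/support/log.h>

    #include "src/core/lib/iomgr/exec_ctx.h"
    #include "src/core/lib/transport/metadata.h"

    static void destroy_payload(void* p) { gpr_free(p); }

    static void demo() {
      grpc_init();
      {
        grpc_core::ExecCtx exec_ctx;
        // Non-interned key/value slices yield an ALLOCATED mdelem.
        grpc_mdelem md = grpc_mdelem_from_slices(
            grpc_slice_from_static_string("x-demo"),
            grpc_slice_from_static_string("payload"));
        void* cached = gpr_malloc(16);
        // The first set wins and returns the stored pointer; later sets
        // destroy their argument and hand back the original value.
        GPR_ASSERT(grpc_mdelem_set_user_data(md, destroy_payload, cached) ==
                   cached);
        // A lookup must pass the same destroy function to get a hit.
        GPR_ASSERT(grpc_mdelem_get_user_data(md, destroy_payload) == cached);
        // With this patch, the final unref also runs destroy_payload for
        // allocated elements, so cached is not leaked.
        GRPC_MDELEM_UNREF(md);
      }
      grpc_shutdown();
    }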
template static RefCountedPtr MethodConfigTableLookup( - const SliceHashTable>& table, grpc_slice path); + const SliceHashTable>& table, const grpc_slice& path); private: // So New() can call our private ctor. @@ -223,7 +223,7 @@ ServiceConfig::CreateMethodConfigTable(CreateValue create_value) { template RefCountedPtr ServiceConfig::MethodConfigTableLookup( - const SliceHashTable>& table, grpc_slice path) { + const SliceHashTable>& table, const grpc_slice& path) { const RefCountedPtr* value = table.Get(path); // If we didn't find a match for the path, try looking for a wildcard // entry (i.e., change "/service/method" to "/service/*"). diff --git a/src/core/lib/transport/timeout_encoding.cc b/src/core/lib/transport/timeout_encoding.cc index c37249920bd..fe22c15fa6d 100644 --- a/src/core/lib/transport/timeout_encoding.cc +++ b/src/core/lib/transport/timeout_encoding.cc @@ -89,7 +89,7 @@ static int is_all_whitespace(const char* p, const char* end) { return p == end; } -int grpc_http2_decode_timeout(grpc_slice text, grpc_millis* timeout) { +int grpc_http2_decode_timeout(const grpc_slice& text, grpc_millis* timeout) { grpc_millis x = 0; const uint8_t* p = GRPC_SLICE_START_PTR(text); const uint8_t* end = GRPC_SLICE_END_PTR(text); diff --git a/src/core/lib/transport/timeout_encoding.h b/src/core/lib/transport/timeout_encoding.h index 8505e32ff09..cc0d37452fd 100644 --- a/src/core/lib/transport/timeout_encoding.h +++ b/src/core/lib/transport/timeout_encoding.h @@ -32,6 +32,6 @@ /* Encode/decode timeouts to the GRPC over HTTP/2 format; encoding may round up arbitrarily */ void grpc_http2_encode_timeout(grpc_millis timeout, char* buffer); -int grpc_http2_decode_timeout(grpc_slice text, grpc_millis* timeout); +int grpc_http2_decode_timeout(const grpc_slice& text, grpc_millis* timeout); #endif /* GRPC_CORE_LIB_TRANSPORT_TIMEOUT_ENCODING_H */ diff --git a/src/core/tsi/alts/handshaker/alts_handshaker_client.cc b/src/core/tsi/alts/handshaker/alts_handshaker_client.cc index 43d0979f4b9..464de9e00d0 100644 --- a/src/core/tsi/alts/handshaker/alts_handshaker_client.cc +++ b/src/core/tsi/alts/handshaker/alts_handshaker_client.cc @@ -363,7 +363,7 @@ static tsi_result handshaker_client_next(alts_handshaker_client* c, alts_grpc_handshaker_client* client = reinterpret_cast(c); grpc_slice_unref_internal(client->recv_bytes); - client->recv_bytes = grpc_slice_ref(*bytes_received); + client->recv_bytes = grpc_slice_ref_internal(*bytes_received); grpc_byte_buffer* buffer = get_serialized_next(bytes_received); if (buffer == nullptr) { gpr_log(GPR_ERROR, "get_serialized_next() failed"); @@ -406,7 +406,7 @@ static const alts_handshaker_client_vtable vtable = { alts_handshaker_client* alts_grpc_handshaker_client_create( alts_tsi_handshaker* handshaker, grpc_channel* channel, const char* handshaker_service_url, grpc_pollset_set* interested_parties, - grpc_alts_credentials_options* options, grpc_slice target_name, + grpc_alts_credentials_options* options, const grpc_slice& target_name, grpc_iomgr_cb_func grpc_cb, tsi_handshaker_on_next_done_cb cb, void* user_data, alts_handshaker_client_vtable* vtable_for_testing, bool is_client) { @@ -487,7 +487,7 @@ void alts_handshaker_client_set_recv_bytes_for_testing( GPR_ASSERT(c != nullptr); alts_grpc_handshaker_client* client = reinterpret_cast(c); - client->recv_bytes = grpc_slice_ref(*recv_bytes); + client->recv_bytes = grpc_slice_ref_internal(*recv_bytes); } void alts_handshaker_client_set_fields_for_testing( diff --git a/src/core/tsi/alts/handshaker/alts_handshaker_client.h 
b/src/core/tsi/alts/handshaker/alts_handshaker_client.h index 4b489875f3c..319a23c88c7 100644 --- a/src/core/tsi/alts/handshaker/alts_handshaker_client.h +++ b/src/core/tsi/alts/handshaker/alts_handshaker_client.h @@ -138,7 +138,7 @@ void alts_handshaker_client_destroy(alts_handshaker_client* client); alts_handshaker_client* alts_grpc_handshaker_client_create( alts_tsi_handshaker* handshaker, grpc_channel* channel, const char* handshaker_service_url, grpc_pollset_set* interested_parties, - grpc_alts_credentials_options* options, grpc_slice target_name, + grpc_alts_credentials_options* options, const grpc_slice& target_name, grpc_iomgr_cb_func grpc_cb, tsi_handshaker_on_next_done_cb cb, void* user_data, alts_handshaker_client_vtable* vtable_for_testing, bool is_client); diff --git a/src/core/tsi/alts/handshaker/transport_security_common_api.cc b/src/core/tsi/alts/handshaker/transport_security_common_api.cc index 8a7edb53d4f..6c518c1ff31 100644 --- a/src/core/tsi/alts/handshaker/transport_security_common_api.cc +++ b/src/core/tsi/alts/handshaker/transport_security_common_api.cc @@ -106,15 +106,16 @@ bool grpc_gcp_rpc_protocol_versions_encode( } bool grpc_gcp_rpc_protocol_versions_decode( - grpc_slice slice, grpc_gcp_rpc_protocol_versions* versions) { + const grpc_slice& slice, grpc_gcp_rpc_protocol_versions* versions) { if (versions == nullptr) { gpr_log(GPR_ERROR, "version is nullptr in " "grpc_gcp_rpc_protocol_versions_decode()."); return false; } - pb_istream_t stream = pb_istream_from_buffer(GRPC_SLICE_START_PTR(slice), - GRPC_SLICE_LENGTH(slice)); + pb_istream_t stream = + pb_istream_from_buffer(const_cast(GRPC_SLICE_START_PTR(slice)), + GRPC_SLICE_LENGTH(slice)); if (!pb_decode(&stream, grpc_gcp_RpcProtocolVersions_fields, versions)) { gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(&stream)); return false; diff --git a/src/core/tsi/alts/handshaker/transport_security_common_api.h b/src/core/tsi/alts/handshaker/transport_security_common_api.h index ec2a0b4b5e3..27942c8ae4c 100644 --- a/src/core/tsi/alts/handshaker/transport_security_common_api.h +++ b/src/core/tsi/alts/handshaker/transport_security_common_api.h @@ -112,7 +112,7 @@ bool grpc_gcp_rpc_protocol_versions_encode( * The method returns true on success and false otherwise. */ bool grpc_gcp_rpc_protocol_versions_decode( - grpc_slice slice, grpc_gcp_rpc_protocol_versions* versions); + const grpc_slice& slice, grpc_gcp_rpc_protocol_versions* versions); /** * This method performs a deep copy operation on rpc protocol versions diff --git a/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc b/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc index 58aba9b747e..fc40aaa698c 100644 --- a/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc +++ b/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc @@ -105,7 +105,7 @@ static bool read_frame_size(const grpc_slice_buffer* sb, * Creates an alts_grpc_record_protocol object, given key, key size, and flags * to indicate whether the record_protocol object uses the rekeying AEAD, * whether the object is for client or server, whether the object is for - * integrity-only or privacy-integrity mode, and whether the object is is used + * integrity-only or privacy-integrity mode, and whether the object is used * for protect or unprotect. 
*/ static tsi_result create_alts_grpc_record_protocol( diff --git a/src/csharp/README.md b/src/csharp/README.md index 9a91035d06a..291772ff939 100644 --- a/src/csharp/README.md +++ b/src/csharp/README.md @@ -103,5 +103,5 @@ THE NATIVE DEPENDENCY Internally, gRPC C# uses a native library written in C (gRPC C core) and invokes its functionality via P/Invoke. The fact that a native library is used should be fully transparent to the users and just installing the `Grpc.Core` NuGet package is the only step needed to use gRPC C# on all supported platforms. [API Reference]: https://grpc.io/grpc/csharp/api/Grpc.Core.html -[Helloworld Example]: ../../examples/csharp/helloworld +[Helloworld Example]: ../../examples/csharp/Helloworld [RouteGuide Tutorial]: https://grpc.io/docs/tutorials/basic/csharp.html diff --git a/src/objective-c/manual_tests/GrpcIosTest.xcodeproj/project.pbxproj b/src/objective-c/manual_tests/GrpcIosTest.xcodeproj/project.pbxproj index 9063719aa2e..3ad16c3af6e 100644 --- a/src/objective-c/manual_tests/GrpcIosTest.xcodeproj/project.pbxproj +++ b/src/objective-c/manual_tests/GrpcIosTest.xcodeproj/project.pbxproj @@ -12,8 +12,19 @@ 5EDA909C220DF1B00046D27A /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 5EDA9096220DF1B00046D27A /* main.m */; }; 5EDA909E220DF1B00046D27A /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 5EDA9098220DF1B00046D27A /* Main.storyboard */; }; 5EDA909F220DF1B00046D27A /* AppDelegate.m in Sources */ = {isa = PBXBuildFile; fileRef = 5EDA9099220DF1B00046D27A /* AppDelegate.m */; }; + B0C18CA7222DEF140002B502 /* GrpcIosTestUITests.m in Sources */ = {isa = PBXBuildFile; fileRef = B0C18CA6222DEF140002B502 /* GrpcIosTestUITests.m */; }; /* End PBXBuildFile section */ +/* Begin PBXContainerItemProxy section */ + B0C18CA9222DEF140002B502 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 5EDA9073220DF0BC0046D27A /* Project object */; + proxyType = 1; + remoteGlobalIDString = 5EDA907A220DF0BC0046D27A; + remoteInfo = GrpcIosTest; + }; +/* End PBXContainerItemProxy section */ + /* Begin PBXFileReference section */ 1D22EC48A487B02F76135EA3 /* libPods-GrpcIosTest.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-GrpcIosTest.a"; sourceTree = BUILT_PRODUCTS_DIR; }; 5EDA907B220DF0BC0046D27A /* GrpcIosTest.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = GrpcIosTest.app; sourceTree = BUILT_PRODUCTS_DIR; }; @@ -24,6 +35,9 @@ 5EDA9099220DF1B00046D27A /* AppDelegate.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = SOURCE_ROOT; }; 7C9FAFB11727DCA50888C1B8 /* Pods-GrpcIosTest.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-GrpcIosTest.debug.xcconfig"; path = "Pods/Target Support Files/Pods-GrpcIosTest/Pods-GrpcIosTest.debug.xcconfig"; sourceTree = ""; }; A4E7CA72304A7B43FE8A5BC7 /* Pods-GrpcIosTest.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-GrpcIosTest.release.xcconfig"; path = "Pods/Target Support Files/Pods-GrpcIosTest/Pods-GrpcIosTest.release.xcconfig"; sourceTree = ""; }; + B0C18CA4222DEF140002B502 /* GrpcIosTestUITests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = GrpcIosTestUITests.xctest; sourceTree = BUILT_PRODUCTS_DIR; }; + B0C18CA6222DEF140002B502 /* 
GrpcIosTestUITests.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = GrpcIosTestUITests.m; sourceTree = ""; }; + B0C18CA8222DEF140002B502 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; /* End PBXFileReference section */ /* Begin PBXFrameworksBuildPhase section */ @@ -35,6 +49,13 @@ ); runOnlyForDeploymentPostprocessing = 0; }; + B0C18CA1222DEF140002B502 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; /* End PBXFrameworksBuildPhase section */ /* Begin PBXGroup section */ @@ -55,6 +76,7 @@ 5EDA9096220DF1B00046D27A /* main.m */, 5EDA9098220DF1B00046D27A /* Main.storyboard */, 5EDA9094220DF1B00046D27A /* ViewController.m */, + B0C18CA5222DEF140002B502 /* GrpcIosTestUITests */, 5EDA907C220DF0BC0046D27A /* Products */, 2B8131AC634883AFEC02557C /* Pods */, E73D92116C1C328622A8C77F /* Frameworks */, @@ -65,10 +87,20 @@ isa = PBXGroup; children = ( 5EDA907B220DF0BC0046D27A /* GrpcIosTest.app */, + B0C18CA4222DEF140002B502 /* GrpcIosTestUITests.xctest */, ); name = Products; sourceTree = ""; }; + B0C18CA5222DEF140002B502 /* GrpcIosTestUITests */ = { + isa = PBXGroup; + children = ( + B0C18CA6222DEF140002B502 /* GrpcIosTestUITests.m */, + B0C18CA8222DEF140002B502 /* Info.plist */, + ); + path = GrpcIosTestUITests; + sourceTree = ""; + }; E73D92116C1C328622A8C77F /* Frameworks */ = { isa = PBXGroup; children = ( @@ -99,6 +131,24 @@ productReference = 5EDA907B220DF0BC0046D27A /* GrpcIosTest.app */; productType = "com.apple.product-type.application"; }; + B0C18CA3222DEF140002B502 /* GrpcIosTestUITests */ = { + isa = PBXNativeTarget; + buildConfigurationList = B0C18CAD222DEF140002B502 /* Build configuration list for PBXNativeTarget "GrpcIosTestUITests" */; + buildPhases = ( + B0C18CA0222DEF140002B502 /* Sources */, + B0C18CA1222DEF140002B502 /* Frameworks */, + B0C18CA2222DEF140002B502 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + B0C18CAA222DEF140002B502 /* PBXTargetDependency */, + ); + name = GrpcIosTestUITests; + productName = GrpcIosTestUITests; + productReference = B0C18CA4222DEF140002B502 /* GrpcIosTestUITests.xctest */; + productType = "com.apple.product-type.bundle.ui-testing"; + }; /* End PBXNativeTarget section */ /* Begin PBXProject section */ @@ -111,6 +161,10 @@ 5EDA907A220DF0BC0046D27A = { CreatedOnToolsVersion = 10.0; }; + B0C18CA3222DEF140002B502 = { + CreatedOnToolsVersion = 10.0; + TestTargetID = 5EDA907A220DF0BC0046D27A; + }; }; }; buildConfigurationList = 5EDA9076220DF0BC0046D27A /* Build configuration list for PBXProject "GrpcIosTest" */; @@ -127,6 +181,7 @@ projectRoot = ""; targets = ( 5EDA907A220DF0BC0046D27A /* GrpcIosTest */, + B0C18CA3222DEF140002B502 /* GrpcIosTestUITests */, ); }; /* End PBXProject section */ @@ -140,6 +195,13 @@ ); runOnlyForDeploymentPostprocessing = 0; }; + B0C18CA2222DEF140002B502 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; /* End PBXResourcesBuildPhase section */ /* Begin PBXShellScriptBuildPhase section */ @@ -200,8 +262,24 @@ ); runOnlyForDeploymentPostprocessing = 0; }; + B0C18CA0222DEF140002B502 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + B0C18CA7222DEF140002B502 /* GrpcIosTestUITests.m in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; /* End PBXSourcesBuildPhase 
section */ +/* Begin PBXTargetDependency section */ + B0C18CAA222DEF140002B502 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 5EDA907A220DF0BC0046D27A /* GrpcIosTest */; + targetProxy = B0C18CA9222DEF140002B502 /* PBXContainerItemProxy */; + }; +/* End PBXTargetDependency section */ + /* Begin XCBuildConfiguration section */ 5EDA908F220DF0BD0046D27A /* Debug */ = { isa = XCBuildConfiguration; @@ -321,7 +399,7 @@ buildSettings = { ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; CODE_SIGN_STYLE = Manual; - DEVELOPMENT_TEAM = ""; + DEVELOPMENT_TEAM = EQHXZ8M8AV; INFOPLIST_FILE = Info.plist; IPHONEOS_DEPLOYMENT_TARGET = 11.0; LD_RUNPATH_SEARCH_PATHS = ( @@ -330,7 +408,7 @@ ); PRODUCT_BUNDLE_IDENTIFIER = io.grpc.GrpcIosTest; PRODUCT_NAME = "$(TARGET_NAME)"; - PROVISIONING_PROFILE_SPECIFIER = ""; + PROVISIONING_PROFILE_SPECIFIER = "Google Development"; TARGETED_DEVICE_FAMILY = "1,2"; }; name = Debug; @@ -341,7 +419,7 @@ buildSettings = { ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; CODE_SIGN_STYLE = Manual; - DEVELOPMENT_TEAM = ""; + DEVELOPMENT_TEAM = EQHXZ8M8AV; INFOPLIST_FILE = Info.plist; IPHONEOS_DEPLOYMENT_TARGET = 11.0; LD_RUNPATH_SEARCH_PATHS = ( @@ -350,11 +428,51 @@ ); PRODUCT_BUNDLE_IDENTIFIER = io.grpc.GrpcIosTest; PRODUCT_NAME = "$(TARGET_NAME)"; - PROVISIONING_PROFILE_SPECIFIER = ""; + PROVISIONING_PROFILE_SPECIFIER = "Google Development"; TARGETED_DEVICE_FAMILY = "1,2"; }; name = Release; }; + B0C18CAB222DEF140002B502 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + CODE_SIGN_STYLE = Manual; + DEVELOPMENT_TEAM = EQHXZ8M8AV; + INFOPLIST_FILE = GrpcIosTestUITests/Info.plist; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + "@loader_path/Frameworks", + ); + PRODUCT_BUNDLE_IDENTIFIER = com.google.GrpcIosTestUITests; + PRODUCT_NAME = "$(TARGET_NAME)"; + PROVISIONING_PROFILE = ""; + PROVISIONING_PROFILE_SPECIFIER = "Google Development"; + TARGETED_DEVICE_FAMILY = "1,2"; + TEST_TARGET_NAME = GrpcIosTest; + }; + name = Debug; + }; + B0C18CAC222DEF140002B502 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + CODE_SIGN_STYLE = Manual; + DEVELOPMENT_TEAM = EQHXZ8M8AV; + INFOPLIST_FILE = GrpcIosTestUITests/Info.plist; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + "@loader_path/Frameworks", + ); + PRODUCT_BUNDLE_IDENTIFIER = com.google.GrpcIosTestUITests; + PRODUCT_NAME = "$(TARGET_NAME)"; + PROVISIONING_PROFILE = ""; + PROVISIONING_PROFILE_SPECIFIER = "Google Development"; + TARGETED_DEVICE_FAMILY = "1,2"; + TEST_TARGET_NAME = GrpcIosTest; + }; + name = Release; + }; /* End XCBuildConfiguration section */ /* Begin XCConfigurationList section */ @@ -376,6 +494,15 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; + B0C18CAD222DEF140002B502 /* Build configuration list for PBXNativeTarget "GrpcIosTestUITests" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + B0C18CAB222DEF140002B502 /* Debug */, + B0C18CAC222DEF140002B502 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; /* End XCConfigurationList section */ }; rootObject = 5EDA9073220DF0BC0046D27A /* Project object */; diff --git a/src/objective-c/manual_tests/GrpcIosTestUITests/GrpcIosTestUITests.m b/src/objective-c/manual_tests/GrpcIosTestUITests/GrpcIosTestUITests.m new file mode 100644 index 00000000000..b0a929e689d --- /dev/null +++ b/src/objective-c/manual_tests/GrpcIosTestUITests/GrpcIosTestUITests.m @@ -0,0 +1,174 @@ 
+/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#import + +NSTimeInterval const kWaitTime = 30; + +@interface GrpcIosTestUITests : XCTestCase +@end + +@implementation GrpcIosTestUITests { + XCUIApplication *testApp; + XCUIApplication *settingsApp; +} + +- (void)setUp { + self.continueAfterFailure = NO; + [[[XCUIApplication alloc] init] launch]; + testApp = [[XCUIApplication alloc] initWithBundleIdentifier:@"io.grpc.GrpcIosTest"]; + settingsApp = [[XCUIApplication alloc] initWithBundleIdentifier:@"com.apple.Preferences"]; + [settingsApp activate]; + // Go back to the first page of Settings. + XCUIElement *backButton = settingsApp.navigationBars.buttons.firstMatch; + while (backButton.exists) { + [backButton tap]; + } + XCTAssert([settingsApp.navigationBars[@"Settings"] waitForExistenceWithTimeout:kWaitTime]); + // Turn off airplane mode + [self setAirplaneMode:NO]; +} + +- (void)tearDown { +} + +- (void)doUnaryCall { + [testApp activate]; + [testApp.buttons[@"Unary call"] tap]; +} + +- (void)doStreamingCall { + [testApp activate]; + [testApp.buttons[@"Start streaming call"] tap]; + [testApp.buttons[@"Send Message"] tap]; + [testApp.buttons[@"Stop streaming call"] tap]; +} + +- (void)expectCallSuccess { + XCTAssert([testApp.staticTexts[@"Call done"] waitForExistenceWithTimeout:kWaitTime]); +} + +- (void)expectCallFailed { + XCTAssert([testApp.staticTexts[@"Call failed"] waitForExistenceWithTimeout:kWaitTime]); +} + +- (void)setAirplaneMode:(BOOL)to { + [settingsApp activate]; + XCUIElement *mySwitch = settingsApp.tables.element.cells.switches[@"Airplane Mode"]; + BOOL from = [(NSString *)mySwitch.value boolValue]; + if (from != to) { + [mySwitch tap]; + // wait for gRPC to detect the change + sleep(10); + } + XCTAssert([(NSString *)mySwitch.value boolValue] == to); +} + +- (void)testBackgroundBeforeUnaryCall { + // Open test app + [testApp activate]; + + // Send test app to background + [XCUIDevice.sharedDevice pressButton:XCUIDeviceButtonHome]; + sleep(5); + + // Bring test app to foreground and make a unary call. Call should succeed + [self doUnaryCall]; + [self expectCallSuccess]; +} + +- (void)testBackgroundBeforeStreamingCall { + // Open test app + [testApp activate]; + + // Send test app to background + [XCUIDevice.sharedDevice pressButton:XCUIDeviceButtonHome]; + sleep(5); + + // Bring test app to foreground and make a streaming call. Call should succeed. + [self doStreamingCall]; + [self expectCallSuccess]; +} + +- (void)testUnaryCallAfterNetworkFlap { + // Open test app and make a unary call. Channel to server should be open after this. + [self doUnaryCall]; + [self expectCallSuccess]; + + // Toggle airplane mode on and off + [self setAirplaneMode:YES]; + [self setAirplaneMode:NO]; + + // Bring test app to foreground and make a unary call. The call should succeed + [self doUnaryCall]; + [self expectCallSuccess]; +} + +- (void)testStreamingCallAfterNetworkFlap { + // Open test app and make a unary call. 
Channel to server should be open after this. + [self doUnaryCall]; + [self expectCallSuccess]; + + // Toggle airplane mode on and off + [self setAirplaneMode:YES]; + [self setAirplaneMode:NO]; + + [self doStreamingCall]; + [self expectCallSuccess]; +} + +- (void)testUnaryCallWhileNetworkDown { + // Open test app and make a unary call. Channel to server should be open after this. + [self doUnaryCall]; + [self expectCallSuccess]; + + // Turn on airplane mode + [self setAirplaneMode:YES]; + + // Unary call should fail + [self doUnaryCall]; + [self expectCallFailed]; + + // Turn off airplane mode + [self setAirplaneMode:NO]; + + // Unary call should succeed + [self doUnaryCall]; + [self expectCallSuccess]; +} + +- (void)testStreamingCallWhileNetworkDown { + // Open test app and make a unary call. Channel to server should be open after this. + [self doUnaryCall]; + [self expectCallSuccess]; + + // Turn on airplane mode + [self setAirplaneMode:YES]; + + // Streaming call should fail + [self doStreamingCall]; + [self expectCallFailed]; + + // Turn off airplane mode + [self setAirplaneMode:NO]; + + // Unary call should succeed + [self doStreamingCall]; + [self expectCallSuccess]; +} +@end diff --git a/src/objective-c/manual_tests/GrpcIosTestUITests/Info.plist b/src/objective-c/manual_tests/GrpcIosTestUITests/Info.plist new file mode 100644 index 00000000000..6c40a6cd0c4 --- /dev/null +++ b/src/objective-c/manual_tests/GrpcIosTestUITests/Info.plist @@ -0,0 +1,22 @@ + + + + + CFBundleDevelopmentRegion + $(DEVELOPMENT_LANGUAGE) + CFBundleExecutable + $(EXECUTABLE_NAME) + CFBundleIdentifier + $(PRODUCT_BUNDLE_IDENTIFIER) + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + $(PRODUCT_NAME) + CFBundlePackageType + BNDL + CFBundleShortVersionString + 1.0 + CFBundleVersion + 1 + + diff --git a/src/objective-c/manual_tests/Main.storyboard b/src/objective-c/manual_tests/Main.storyboard index e88f30e324b..e7e0530efcd 100644 --- a/src/objective-c/manual_tests/Main.storyboard +++ b/src/objective-c/manual_tests/Main.storyboard @@ -4,6 +4,7 @@ + diff --git a/src/objective-c/manual_tests/Podfile b/src/objective-c/manual_tests/Podfile index 7cb650a3412..919e649be71 100644 --- a/src/objective-c/manual_tests/Podfile +++ b/src/objective-c/manual_tests/Podfile @@ -18,8 +18,8 @@ GrpcIosTest pod 'BoringSSL-GRPC', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c", :inhibit_warnings => true - pod 'gRPC', :path => GRPC_LOCAL_SRC - pod 'gRPC-Core', :path => GRPC_LOCAL_SRC + pod 'gRPC/CFStream', :path => GRPC_LOCAL_SRC + pod 'gRPC-Core/CFStream-Implementation', :path => GRPC_LOCAL_SRC pod 'gRPC-RxLibrary', :path => GRPC_LOCAL_SRC pod 'gRPC-ProtoRPC', :path => GRPC_LOCAL_SRC, :inhibit_warnings => true pod 'RemoteTest', :path => "../tests/RemoteTestClient", :inhibit_warnings => true diff --git a/src/objective-c/manual_tests/ViewController.m b/src/objective-c/manual_tests/ViewController.m index 00bb516bdfc..813b176f3e8 100644 --- a/src/objective-c/manual_tests/ViewController.m +++ b/src/objective-c/manual_tests/ViewController.m @@ -27,7 +27,7 @@ NSString *const kRemoteHost = @"grpc-test.sandbox.googleapis.com"; const int32_t kMessageSize = 100; @interface ViewController : UIViewController - +@property(strong, nonatomic) UILabel *fromLabel; @end @implementation ViewController { @@ -35,16 +35,25 @@ const int32_t kMessageSize = 100; dispatch_queue_t _dispatchQueue; GRPCStreamingProtoCall *_call; } +- (instancetype)init { + self = [super init]; + return self; +} - (void)viewDidLoad { [super viewDidLoad]; _dispatchQueue = 
dispatch_queue_create(NULL, DISPATCH_QUEUE_SERIAL); + _fromLabel = [[UILabel alloc] initWithFrame:CGRectMake(100, 500, 200, 20)]; + _fromLabel.textColor = [UIColor blueColor]; + _fromLabel.backgroundColor = [UIColor whiteColor]; + [self.view addSubview:_fromLabel]; } - (IBAction)tapUnaryCall:(id)sender { if (_service == nil) { _service = [RMTTestService serviceWithHost:kRemoteHost]; } + self->_fromLabel.text = @""; // Set up request proto message RMTSimpleRequest *request = [RMTSimpleRequest message]; @@ -61,6 +70,7 @@ const int32_t kMessageSize = 100; if (_service == nil) { _service = [RMTTestService serviceWithHost:kRemoteHost]; } + self->_fromLabel.text = @""; // Set up request proto message RMTStreamingOutputCallRequest *request = RMTStreamingOutputCallRequest.message; @@ -92,7 +102,6 @@ const int32_t kMessageSize = 100; if (_call == nil) return; [_call finish]; - _call = nil; } @@ -107,6 +116,15 @@ const int32_t kMessageSize = 100; - (void)didCloseWithTrailingMetadata:(NSDictionary *)trailingMetadata error:(nullable NSError *)error { NSLog(@"Recv trailing metadata: %@, error: %@", trailingMetadata, error); + if (error == nil) { + dispatch_async(dispatch_get_main_queue(), ^{ + self->_fromLabel.text = @"Call done"; + }); + } else { + dispatch_async(dispatch_get_main_queue(), ^{ + self->_fromLabel.text = @"Call failed"; + }); + } } - (dispatch_queue_t)dispatchQueue { diff --git a/src/objective-c/manual_tests/main.m b/src/objective-c/manual_tests/main.m index 2797c6f17f2..451b50cc0e2 100644 --- a/src/objective-c/manual_tests/main.m +++ b/src/objective-c/manual_tests/main.m @@ -21,6 +21,8 @@ int main(int argc, char* argv[]) { @autoreleasepool { + // enable CFStream API + setenv("grpc_cfstream", "1", 1); return UIApplicationMain(argc, argv, nil, NSStringFromClass([AppDelegate class])); } } diff --git a/src/php/ext/grpc/php_grpc.c b/src/php/ext/grpc/php_grpc.c index 111c6f4867d..fa6f0be837b 100644 --- a/src/php/ext/grpc/php_grpc.c +++ b/src/php/ext/grpc/php_grpc.c @@ -361,7 +361,7 @@ PHP_MSHUTDOWN_FUNCTION(grpc) { zend_hash_destroy(&grpc_target_upper_bound_map); grpc_shutdown_timeval(TSRMLS_C); grpc_php_shutdown_completion_queue(TSRMLS_C); - grpc_shutdown(); + grpc_shutdown_blocking(); GRPC_G(initialized) = 0; } return SUCCESS; diff --git a/src/python/grpcio/grpc/_channel.py b/src/python/grpcio/grpc/_channel.py index 1d2495cdd21..ed4c871b684 100644 --- a/src/python/grpcio/grpc/_channel.py +++ b/src/python/grpcio/grpc/_channel.py @@ -247,7 +247,7 @@ def _consume_request_iterator(request_iterator, state, call, request_serializer, consumption_thread.start() -class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call): +class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call): # pylint: disable=too-many-ancestors def __init__(self, state, call, response_deserializer, deadline): super(_Rendezvous, self).__init__() @@ -488,6 +488,18 @@ def _stream_unary_invocation_operationses_and_tags(metadata, metadata, initial_metadata_flags)) +def _determine_deadline(user_deadline): + parent_deadline = cygrpc.get_deadline_from_context() + if parent_deadline is None and user_deadline is None: + return None + elif parent_deadline is not None and user_deadline is None: + return parent_deadline + elif user_deadline is not None and parent_deadline is None: + return user_deadline + else: + return min(parent_deadline, user_deadline) + + class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable): # pylint: disable=too-many-arguments @@ -527,9 +539,10 @@ class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable): if 
state is None: raise rendezvous # pylint: disable-msg=raising-bad-type else: + deadline_to_propagate = _determine_deadline(deadline) call = self._channel.segregated_call( cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, - self._method, None, deadline, metadata, None + self._method, None, deadline_to_propagate, metadata, None if credentials is None else credentials._credentials, (( operations, None, @@ -619,8 +632,8 @@ class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable): event_handler = _event_handler(state, self._response_deserializer) call = self._managed_call( cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, - self._method, None, deadline, metadata, None - if credentials is None else credentials._credentials, + self._method, None, _determine_deadline(deadline), metadata, + None if credentials is None else credentials._credentials, operationses, event_handler, self._context) return _Rendezvous(state, call, self._response_deserializer, deadline) @@ -644,9 +657,10 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable): state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None) initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready( wait_for_ready) + deadline_to_propagate = _determine_deadline(deadline) call = self._channel.segregated_call( cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method, - None, deadline, metadata, None + None, deadline_to_propagate, metadata, None if credentials is None else credentials._credentials, _stream_unary_invocation_operationses_and_tags( metadata, initial_metadata_flags), self._context) @@ -734,9 +748,10 @@ class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable): (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),), ) event_handler = _event_handler(state, self._response_deserializer) + deadline_to_propagate = _determine_deadline(deadline) call = self._managed_call( cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method, - None, deadline, metadata, None + None, deadline_to_propagate, metadata, None if credentials is None else credentials._credentials, operationses, event_handler, self._context) _consume_request_iterator(request_iterator, state, call, diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/_hooks.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/_hooks.pyx.pxi index 6d1c36b2b35..de4d71b8196 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/_hooks.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/_hooks.pyx.pxi @@ -16,7 +16,7 @@ cdef object _custom_op_on_c_call(int op, grpc_call *call): raise NotImplementedError("No custom hooks are implemented") -def install_context_from_call(Call call): +def install_context_from_request_call_event(RequestCallEvent event): pass def uninstall_context(): @@ -30,3 +30,6 @@ cdef class CensusContext: def set_census_context_on_call(_CallState call_state, CensusContext census_ctx): pass + +def get_deadline_from_context(): + return None diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi index 24e85b08e72..0a31d9c52ff 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi @@ -87,7 +87,7 @@ cdef class Call: def __dealloc__(self): if self.c_call != NULL: grpc_call_unref(self.c_call) - grpc_shutdown() + grpc_shutdown_blocking() # The object *should* always be valid from Python. Used for debugging. 
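The Cython `__dealloc__` hooks here (and the PHP and core-test changes elsewhere in this patch) now call `grpc_shutdown_blocking()` instead of `grpc_shutdown()`, so library teardown has actually finished before the destructor returns instead of completing asynchronously in the background. A minimal C++ sketch of that pairing; only `grpc_init()`/`grpc_shutdown_blocking()` come from this patch, the RAII wrapper name is illustrative:

```cpp
#include <grpc/grpc.h>

// Illustrative RAII guard (not part of the gRPC API): the destructor uses the
// blocking shutdown so no background teardown is still running afterwards.
class GrpcRuntimeGuard {
 public:
  GrpcRuntimeGuard() { grpc_init(); }
  ~GrpcRuntimeGuard() { grpc_shutdown_blocking(); }
};

int main() {
  GrpcRuntimeGuard grpc_runtime;
  // ... create channels, servers, completion queues here ...
  return 0;  // teardown is fully complete before main() returns
}
```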
@property diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi index 70d4abb7308..24c11e63a6b 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi @@ -399,7 +399,7 @@ cdef _close(Channel channel, grpc_status_code code, object details, _destroy_c_completion_queue(state.c_connectivity_completion_queue) grpc_channel_destroy(state.c_channel) state.c_channel = NULL - grpc_shutdown() + grpc_shutdown_blocking() state.condition.notify_all() else: # Another call to close already completed in the past or is currently diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi index 3c33b46dbb8..a4d425ac564 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi @@ -118,4 +118,4 @@ cdef class CompletionQueue: self.c_completion_queue, c_deadline, NULL) self._interpret_event(event) grpc_completion_queue_destroy(self.c_completion_queue) - grpc_shutdown() + grpc_shutdown_blocking() diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi index 1cef7269707..af069acc287 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi @@ -53,9 +53,6 @@ cdef class ChannelCredentials: cdef grpc_channel_credentials *c(self) except * - # TODO(https://github.com/grpc/grpc/issues/12531): remove. - cdef grpc_channel_credentials *c_credentials - cdef class SSLSessionCacheLRU: diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi index 2f51be40ce4..5fb9ddf7b7d 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi @@ -61,7 +61,7 @@ cdef int _get_metadata( cdef void _destroy(void *state) with gil: cpython.Py_DECREF(state) - grpc_shutdown() + grpc_shutdown_blocking() cdef class MetadataPluginCallCredentials(CallCredentials): @@ -125,7 +125,7 @@ cdef class SSLSessionCacheLRU: def __dealloc__(self): if self._cache != NULL: grpc_ssl_session_cache_destroy(self._cache) - grpc_shutdown() + grpc_shutdown_blocking() cdef class SSLChannelCredentials(ChannelCredentials): @@ -191,7 +191,7 @@ cdef class ServerCertificateConfig: def __dealloc__(self): grpc_ssl_server_certificate_config_destroy(self.c_cert_config) gpr_free(self.c_ssl_pem_key_cert_pairs) - grpc_shutdown() + grpc_shutdown_blocking() cdef class ServerCredentials: @@ -207,7 +207,7 @@ cdef class ServerCredentials: def __dealloc__(self): if self.c_credentials != NULL: grpc_server_credentials_release(self.c_credentials) - grpc_shutdown() + grpc_shutdown_blocking() cdef const char* _get_c_pem_root_certs(pem_root_certs): if pem_root_certs is None: diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi index fc7a9ba4395..759479089d4 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi @@ -319,7 +319,7 @@ cdef extern from "grpc/grpc.h": grpc_op_data data void grpc_init() nogil - void grpc_shutdown() nogil + void grpc_shutdown_blocking() nogil int grpc_is_initialized() nogil ctypedef struct grpc_completion_queue_factory: diff --git 
a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi index fe98d559f34..d612199a482 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi @@ -134,7 +134,7 @@ cdef class CallDetails: def __dealloc__(self): with nogil: grpc_call_details_destroy(&self.c_details) - grpc_shutdown() + grpc_shutdown_blocking() @property def method(self): diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi index ef74f61e043..fe55ea885e4 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi @@ -151,4 +151,4 @@ cdef class Server: def __dealloc__(self): if self.c_server == NULL: - grpc_shutdown() + grpc_shutdown_blocking() diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/tag.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/tag.pyx.pxi index be5013c8f7b..e80dc88767e 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/tag.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/tag.pyx.pxi @@ -56,18 +56,19 @@ cdef class _BatchOperationTag: self._retained_call = call cdef void prepare(self) except *: + cdef Operation operation self.c_nops = 0 if self._operations is None else len(self._operations) if 0 < self.c_nops: self.c_ops = gpr_malloc(sizeof(grpc_op) * self.c_nops) for index, operation in enumerate(self._operations): - (operation).c() - self.c_ops[index] = (operation).c_op + operation.c() + self.c_ops[index] = operation.c_op cdef BatchOperationEvent event(self, grpc_event c_event): + cdef Operation operation if 0 < self.c_nops: - for index, operation in enumerate(self._operations): - (operation).c_op = self.c_ops[index] - (operation).un_c() + for operation in self._operations: + operation.un_c() gpr_free(self.c_ops) return BatchOperationEvent( c_event.type, c_event.success, self._user_tag, self._operations) @@ -84,4 +85,4 @@ cdef class _ServerShutdownTag(_Tag): cdef ServerShutdownEvent event(self, grpc_event c_event): self._shutting_down_server.notify_shutdown_complete() - return ServerShutdownEvent(c_event.type, c_event.success, self._user_tag) \ No newline at end of file + return ServerShutdownEvent(c_event.type, c_event.success, self._user_tag) diff --git a/src/python/grpcio/grpc/_interceptor.py b/src/python/grpcio/grpc/_interceptor.py index fc0ad77eb9e..6c4e396ac23 100644 --- a/src/python/grpcio/grpc/_interceptor.py +++ b/src/python/grpcio/grpc/_interceptor.py @@ -80,7 +80,7 @@ def _unwrap_client_call_details(call_details, default_details): return method, timeout, metadata, credentials, wait_for_ready -class _FailureOutcome(grpc.RpcError, grpc.Future, grpc.Call): +class _FailureOutcome(grpc.RpcError, grpc.Future, grpc.Call): # pylint: disable=too-many-ancestors def __init__(self, exception, traceback): super(_FailureOutcome, self).__init__() @@ -126,7 +126,7 @@ class _FailureOutcome(grpc.RpcError, grpc.Future, grpc.Call): def traceback(self, ignored_timeout=None): return self._traceback - def add_callback(self, callback): + def add_callback(self, unused_callback): return False def add_done_callback(self, fn): diff --git a/src/python/grpcio/grpc/_server.py b/src/python/grpcio/grpc/_server.py index 9224b2ac672..90136aef3c2 100644 --- a/src/python/grpcio/grpc/_server.py +++ b/src/python/grpcio/grpc/_server.py @@ -498,7 +498,7 @@ def _status(rpc_event, state, serialized_response): def _unary_response_in_pool(rpc_event, state, behavior, 
argument_thunk, request_deserializer, response_serializer): - cygrpc.install_context_from_call(rpc_event.call) + cygrpc.install_context_from_request_call_event(rpc_event) try: argument = argument_thunk() if argument is not None: @@ -515,7 +515,7 @@ def _unary_response_in_pool(rpc_event, state, behavior, argument_thunk, def _stream_response_in_pool(rpc_event, state, behavior, argument_thunk, request_deserializer, response_serializer): - cygrpc.install_context_from_call(rpc_event.call) + cygrpc.install_context_from_request_call_event(rpc_event) def send_response(response): if response is None: diff --git a/src/python/grpcio_tests/tests/unit/_resource_exhausted_test.py b/src/python/grpcio_tests/tests/unit/_resource_exhausted_test.py index 517c2d2f97b..ecd2ccadbde 100644 --- a/src/python/grpcio_tests/tests/unit/_resource_exhausted_test.py +++ b/src/python/grpcio_tests/tests/unit/_resource_exhausted_test.py @@ -42,7 +42,7 @@ class _TestTrigger(object): self._finish_condition = threading.Condition() self._start_condition = threading.Condition() - # Wait for all calls be be blocked in their handler + # Wait for all calls be blocked in their handler def await_calls(self): with self._start_condition: while self._pending_calls < self._total_call_count: diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.c b/src/ruby/ext/grpc/rb_grpc_imports.generated.c index 47250ec7141..fdbe0df4e52 100644 --- a/src/ruby/ext/grpc/rb_grpc_imports.generated.c +++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.c @@ -39,6 +39,7 @@ grpc_register_plugin_type grpc_register_plugin_import; grpc_init_type grpc_init_import; grpc_shutdown_type grpc_shutdown_import; grpc_is_initialized_type grpc_is_initialized_import; +grpc_shutdown_blocking_type grpc_shutdown_blocking_import; grpc_version_string_type grpc_version_string_import; grpc_g_stands_for_type grpc_g_stands_for_import; grpc_completion_queue_factory_lookup_type grpc_completion_queue_factory_lookup_import; @@ -306,6 +307,7 @@ void grpc_rb_load_imports(HMODULE library) { grpc_init_import = (grpc_init_type) GetProcAddress(library, "grpc_init"); grpc_shutdown_import = (grpc_shutdown_type) GetProcAddress(library, "grpc_shutdown"); grpc_is_initialized_import = (grpc_is_initialized_type) GetProcAddress(library, "grpc_is_initialized"); + grpc_shutdown_blocking_import = (grpc_shutdown_blocking_type) GetProcAddress(library, "grpc_shutdown_blocking"); grpc_version_string_import = (grpc_version_string_type) GetProcAddress(library, "grpc_version_string"); grpc_g_stands_for_import = (grpc_g_stands_for_type) GetProcAddress(library, "grpc_g_stands_for"); grpc_completion_queue_factory_lookup_import = (grpc_completion_queue_factory_lookup_type) GetProcAddress(library, "grpc_completion_queue_factory_lookup"); diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.h b/src/ruby/ext/grpc/rb_grpc_imports.generated.h index 9437f6d3918..cf16f0ca33b 100644 --- a/src/ruby/ext/grpc/rb_grpc_imports.generated.h +++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.h @@ -92,6 +92,9 @@ extern grpc_shutdown_type grpc_shutdown_import; typedef int(*grpc_is_initialized_type)(void); extern grpc_is_initialized_type grpc_is_initialized_import; #define grpc_is_initialized grpc_is_initialized_import +typedef void(*grpc_shutdown_blocking_type)(void); +extern grpc_shutdown_blocking_type grpc_shutdown_blocking_import; +#define grpc_shutdown_blocking grpc_shutdown_blocking_import typedef const char*(*grpc_version_string_type)(void); extern grpc_version_string_type grpc_version_string_import; #define 
grpc_version_string grpc_version_string_import diff --git a/templates/Makefile.template b/templates/Makefile.template index 31cf14a71c1..71391f8139a 100644 --- a/templates/Makefile.template +++ b/templates/Makefile.template @@ -274,6 +274,28 @@ LDFLAGS += -pthread endif + # If we are installing into a non-default prefix, both + # the libraries we build, and the apps users build, + # need to know how to find the libraries they depend on. + # There is much gnashing of teeth about this subject. + # It's tricky to do that without editing images during install, + # as you don't want tests during build to find previously installed and + # now stale libraries, etc. + ifeq ($(SYSTEM),Linux) + ifneq ($(prefix),/usr) + # Linux best practice for rpath on installed files is probably: + # 1) .pc file provides -Wl,-rpath,$(prefix)/lib + # 2) binaries we install into $(prefix)/bin use -Wl,-rpath,$ORIGIN/../lib + # 3) libraries we install into $(prefix)/lib use -Wl,-rpath,$ORIGIN + # cf. https://www.akkadia.org/drepper/dsohowto.pdf + # Doing all of that right is hard, but using -Wl,-rpath,$ORIGIN is always + # safe, and solves problems seen in the wild. Note that $ORIGIN + # is a literal string interpreted much later by ld.so. Escape it + # here with a dollar sign so Make doesn't expand $O. + LDFLAGS += '-Wl,-rpath,$$ORIGIN' + endif + endif + # # The steps for cross-compiling are as follows: # First, clone and make install of grpc using the native compilers for the host. diff --git a/test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc b/test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc index 16210b8164b..3157d6019f3 100644 --- a/test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc +++ b/test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc @@ -18,6 +18,7 @@ #include +#include #include #include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" @@ -281,7 +282,7 @@ int main(int argc, char** argv) { grpc_core::ExecCtx exec_ctx; GRPC_COMBINER_UNREF(g_combiner, "test"); } - grpc_shutdown(); + grpc_shutdown_blocking(); GPR_ASSERT(g_all_callbacks_invoked); return 0; } diff --git a/test/core/end2end/end2end_nosec_tests.cc b/test/core/end2end/end2end_nosec_tests.cc index 614d1f98e2b..3ab55527da6 100644 --- a/test/core/end2end/end2end_nosec_tests.cc +++ b/test/core/end2end/end2end_nosec_tests.cc @@ -70,6 +70,8 @@ extern void filter_call_init_fails(grpc_end2end_test_config config); extern void filter_call_init_fails_pre_init(void); extern void filter_causes_close(grpc_end2end_test_config config); extern void filter_causes_close_pre_init(void); +extern void filter_context(grpc_end2end_test_config config); +extern void filter_context_pre_init(void); extern void filter_latency(grpc_end2end_test_config config); extern void filter_latency_pre_init(void); extern void filter_status_code(grpc_end2end_test_config config); @@ -207,6 +209,7 @@ void grpc_end2end_tests_pre_init(void) { empty_batch_pre_init(); filter_call_init_fails_pre_init(); filter_causes_close_pre_init(); + filter_context_pre_init(); filter_latency_pre_init(); filter_status_code_pre_init(); graceful_server_shutdown_pre_init(); @@ -292,6 +295,7 @@ void grpc_end2end_tests(int argc, char **argv, empty_batch(config); filter_call_init_fails(config); filter_causes_close(config); + filter_context(config); filter_latency(config); filter_status_code(config); graceful_server_shutdown(config); @@ -432,6 +436,10 @@ void grpc_end2end_tests(int argc, char **argv, filter_causes_close(config); 
continue; } + if (0 == strcmp("filter_context", argv[i])) { + filter_context(config); + continue; + } if (0 == strcmp("filter_latency", argv[i])) { filter_latency(config); continue; diff --git a/test/core/end2end/end2end_tests.cc b/test/core/end2end/end2end_tests.cc index 9d3d231b3c5..b680da4433f 100644 --- a/test/core/end2end/end2end_tests.cc +++ b/test/core/end2end/end2end_tests.cc @@ -72,6 +72,8 @@ extern void filter_call_init_fails(grpc_end2end_test_config config); extern void filter_call_init_fails_pre_init(void); extern void filter_causes_close(grpc_end2end_test_config config); extern void filter_causes_close_pre_init(void); +extern void filter_context(grpc_end2end_test_config config); +extern void filter_context_pre_init(void); extern void filter_latency(grpc_end2end_test_config config); extern void filter_latency_pre_init(void); extern void filter_status_code(grpc_end2end_test_config config); @@ -210,6 +212,7 @@ void grpc_end2end_tests_pre_init(void) { empty_batch_pre_init(); filter_call_init_fails_pre_init(); filter_causes_close_pre_init(); + filter_context_pre_init(); filter_latency_pre_init(); filter_status_code_pre_init(); graceful_server_shutdown_pre_init(); @@ -296,6 +299,7 @@ void grpc_end2end_tests(int argc, char **argv, empty_batch(config); filter_call_init_fails(config); filter_causes_close(config); + filter_context(config); filter_latency(config); filter_status_code(config); graceful_server_shutdown(config); @@ -440,6 +444,10 @@ void grpc_end2end_tests(int argc, char **argv, filter_causes_close(config); continue; } + if (0 == strcmp("filter_context", argv[i])) { + filter_context(config); + continue; + } if (0 == strcmp("filter_latency", argv[i])) { filter_latency(config); continue; diff --git a/test/core/end2end/fuzzers/api_fuzzer.cc b/test/core/end2end/fuzzers/api_fuzzer.cc index 57bc8ad768c..74a30913b24 100644 --- a/test/core/end2end/fuzzers/api_fuzzer.cc +++ b/test/core/end2end/fuzzers/api_fuzzer.cc @@ -1200,6 +1200,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_resource_quota_unref(g_resource_quota); - grpc_shutdown(); + grpc_shutdown_blocking(); return 0; } diff --git a/test/core/end2end/fuzzers/client_fuzzer.cc b/test/core/end2end/fuzzers/client_fuzzer.cc index 8520fb53755..55e6ce695ad 100644 --- a/test/core/end2end/fuzzers/client_fuzzer.cc +++ b/test/core/end2end/fuzzers/client_fuzzer.cc @@ -40,9 +40,8 @@ static void dont_log(gpr_log_func_args* args) {} extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_test_only_set_slice_hash_seed(0); - struct grpc_memory_counters counters; if (squelch) gpr_set_log_function(dont_log); - if (leak_check) grpc_memory_counters_init(); + grpc_core::testing::LeakDetector leak_detector(leak_check); grpc_init(); { grpc_core::ExecCtx exec_ctx; @@ -159,11 +158,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_byte_buffer_destroy(response_payload_recv); } } - grpc_shutdown(); - if (leak_check) { - counters = grpc_memory_counters_snapshot(); - grpc_memory_counters_destroy(); - GPR_ASSERT(counters.total_size_relative == 0); - } + grpc_shutdown_blocking(); return 0; } diff --git a/test/core/end2end/fuzzers/client_fuzzer_corpus/clusterfuzz-testcase-minimized-grpc_client_fuzzer-5765697914404864 b/test/core/end2end/fuzzers/client_fuzzer_corpus/clusterfuzz-testcase-minimized-grpc_client_fuzzer-5765697914404864 new file mode 100644 index 00000000000..e8a60f5a9b5 Binary files /dev/null and 
b/test/core/end2end/fuzzers/client_fuzzer_corpus/clusterfuzz-testcase-minimized-grpc_client_fuzzer-5765697914404864 differ diff --git a/test/core/end2end/fuzzers/server_fuzzer.cc b/test/core/end2end/fuzzers/server_fuzzer.cc index 644f98e37ac..f010066ea27 100644 --- a/test/core/end2end/fuzzers/server_fuzzer.cc +++ b/test/core/end2end/fuzzers/server_fuzzer.cc @@ -37,9 +37,8 @@ static void dont_log(gpr_log_func_args* args) {} extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_test_only_set_slice_hash_seed(0); - struct grpc_memory_counters counters; if (squelch) gpr_set_log_function(dont_log); - if (leak_check) grpc_memory_counters_init(); + grpc_core::testing::LeakDetector leak_detector(leak_check); grpc_init(); { grpc_core::ExecCtx exec_ctx; @@ -136,10 +135,5 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_completion_queue_destroy(cq); } grpc_shutdown(); - if (leak_check) { - counters = grpc_memory_counters_snapshot(); - grpc_memory_counters_destroy(); - GPR_ASSERT(counters.total_size_relative == 0); - } return 0; } diff --git a/test/core/end2end/gen_build_yaml.py b/test/core/end2end/gen_build_yaml.py index 0ff1b7ee796..f8ac7036530 100755 --- a/test/core/end2end/gen_build_yaml.py +++ b/test/core/end2end/gen_build_yaml.py @@ -124,6 +124,7 @@ END2END_TESTS = { 'empty_batch': default_test_options._replace(cpu_cost=LOWCPU), 'filter_causes_close': default_test_options._replace(cpu_cost=LOWCPU), 'filter_call_init_fails': default_test_options, + 'filter_context': default_test_options, 'filter_latency': default_test_options._replace(cpu_cost=LOWCPU), 'filter_status_code': default_test_options._replace(cpu_cost=LOWCPU), 'graceful_server_shutdown': default_test_options._replace( diff --git a/test/core/end2end/generate_tests.bzl b/test/core/end2end/generate_tests.bzl index ec32aa5102c..5174a7e5af5 100755 --- a/test/core/end2end/generate_tests.bzl +++ b/test/core/end2end/generate_tests.bzl @@ -215,6 +215,7 @@ END2END_TESTS = { "empty_batch": _test_options(), "filter_causes_close": _test_options(), "filter_call_init_fails": _test_options(), + "filter_context": _test_options(), "graceful_server_shutdown": _test_options(exclude_inproc = True), "hpack_size": _test_options( proxyable = False, diff --git a/test/core/end2end/tests/filter_context.cc b/test/core/end2end/tests/filter_context.cc new file mode 100644 index 00000000000..1d5d9e5e46a --- /dev/null +++ b/test/core/end2end/tests/filter_context.cc @@ -0,0 +1,318 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include "test/core/end2end/end2end_tests.h" + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "src/core/lib/channel/channel_stack_builder.h" +#include "src/core/lib/surface/channel_init.h" +#include "test/core/end2end/cq_verifier.h" + +enum { TIMEOUT = 200000 }; + +static bool g_enable_filter = false; + +static void* tag(intptr_t t) { return (void*)t; } + +static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config, + const char* test_name, + grpc_channel_args* client_args, + grpc_channel_args* server_args) { + grpc_end2end_test_fixture f; + gpr_log(GPR_INFO, "Running test: %s/%s", test_name, config.name); + f = config.create_fixture(client_args, server_args); + config.init_server(&f, server_args); + config.init_client(&f, client_args); + return f; +} + +static gpr_timespec n_seconds_from_now(int n) { + return grpc_timeout_seconds_to_deadline(n); +} + +static gpr_timespec five_seconds_from_now(void) { + return n_seconds_from_now(5); +} + +static void drain_cq(grpc_completion_queue* cq) { + grpc_event ev; + do { + ev = grpc_completion_queue_next(cq, five_seconds_from_now(), nullptr); + } while (ev.type != GRPC_QUEUE_SHUTDOWN); +} + +static void shutdown_server(grpc_end2end_test_fixture* f) { + if (!f->server) return; + grpc_server_shutdown_and_notify(f->server, f->shutdown_cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->shutdown_cq, tag(1000), + grpc_timeout_seconds_to_deadline(5), + nullptr) + .type == GRPC_OP_COMPLETE); + grpc_server_destroy(f->server); + f->server = nullptr; +} + +static void shutdown_client(grpc_end2end_test_fixture* f) { + if (!f->client) return; + grpc_channel_destroy(f->client); + f->client = nullptr; +} + +static void end_test(grpc_end2end_test_fixture* f) { + shutdown_server(f); + shutdown_client(f); + + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); + grpc_completion_queue_destroy(f->shutdown_cq); +} + +// Simple request to test that filters see a consistent view of the +// call context. 
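For orientation before the full test body below, the helpers above follow the standard end2end fixture lifecycle. A condensed outline using the names defined in this file (editorial sketch only; the real batch operations appear in the function that follows):

```cpp
// Condensed outline of how the helpers above compose.
static void fixture_lifecycle_outline(grpc_end2end_test_config config) {
  grpc_end2end_test_fixture f =
      begin_test(config, "filter_context", nullptr, nullptr);
  cq_verifier* cqv = cq_verifier_create(f.cq);
  // ... start client/server batches, then pair each tag with
  //     CQ_EXPECT_COMPLETION(cqv, tag(n), 1) and cq_verify(cqv) ...
  cq_verifier_destroy(cqv);
  end_test(&f);               // shutdown server + client, drain and destroy cqs
  config.tear_down_data(&f);  // fixture-specific cleanup
}
```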
+static void test_request(grpc_end2end_test_config config) { + grpc_call* c; + grpc_call* s; + grpc_slice request_payload_slice = + grpc_slice_from_copied_string("hello world"); + grpc_byte_buffer* request_payload = + grpc_raw_byte_buffer_create(&request_payload_slice, 1); + grpc_end2end_test_fixture f = + begin_test(config, "filter_context", nullptr, nullptr); + cq_verifier* cqv = cq_verifier_create(f.cq); + grpc_op ops[6]; + grpc_op* op; + grpc_metadata_array initial_metadata_recv; + grpc_metadata_array trailing_metadata_recv; + grpc_metadata_array request_metadata_recv; + grpc_byte_buffer* request_payload_recv = nullptr; + grpc_call_details call_details; + grpc_status_code status; + grpc_call_error error; + grpc_slice details; + int was_cancelled = 2; + + gpr_timespec deadline = five_seconds_from_now(); + c = grpc_channel_create_call(f.client, nullptr, GRPC_PROPAGATE_DEFAULTS, f.cq, + grpc_slice_from_static_string("/foo"), nullptr, + deadline, nullptr); + GPR_ASSERT(c); + + grpc_metadata_array_init(&initial_metadata_recv); + grpc_metadata_array_init(&trailing_metadata_recv); + grpc_metadata_array_init(&request_metadata_recv); + grpc_call_details_init(&call_details); + + memset(ops, 0, sizeof(ops)); + op = ops; + op->op = GRPC_OP_SEND_INITIAL_METADATA; + op->data.send_initial_metadata.count = 0; + op->data.send_initial_metadata.metadata = nullptr; + op->flags = 0; + op->reserved = nullptr; + op++; + op->op = GRPC_OP_SEND_MESSAGE; + op->data.send_message.send_message = request_payload; + op->flags = 0; + op->reserved = nullptr; + op++; + op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; + op->reserved = nullptr; + op++; + op->op = GRPC_OP_RECV_INITIAL_METADATA; + op->data.recv_initial_metadata.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; + op->reserved = nullptr; + op++; + op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; + op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; + op->data.recv_status_on_client.status = &status; + op->data.recv_status_on_client.status_details = &details; + op->flags = 0; + op->reserved = nullptr; + op++; + error = grpc_call_start_batch(c, ops, static_cast(op - ops), tag(1), + nullptr); + GPR_ASSERT(GRPC_CALL_OK == error); + + error = + grpc_server_request_call(f.server, &s, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(101)); + GPR_ASSERT(GRPC_CALL_OK == error); + + CQ_EXPECT_COMPLETION(cqv, tag(101), 1); + cq_verify(cqv); + + memset(ops, 0, sizeof(ops)); + op = ops; + op->op = GRPC_OP_SEND_INITIAL_METADATA; + op->data.send_initial_metadata.count = 0; + op->flags = 0; + op->reserved = nullptr; + op++; + op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; + op->data.send_status_from_server.trailing_metadata_count = 0; + op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED; + grpc_slice status_string = grpc_slice_from_static_string("xyz"); + op->data.send_status_from_server.status_details = &status_string; + op->flags = 0; + op->reserved = nullptr; + op++; + op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; + op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; + op->reserved = nullptr; + op++; + error = grpc_call_start_batch(s, ops, static_cast(op - ops), tag(102), + nullptr); + GPR_ASSERT(GRPC_CALL_OK == error); + + CQ_EXPECT_COMPLETION(cqv, tag(102), 1); + CQ_EXPECT_COMPLETION(cqv, tag(1), 1); + cq_verify(cqv); + + GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED); + GPR_ASSERT(0 == grpc_slice_str_cmp(details, "xyz")); + + grpc_slice_unref(details); + 
grpc_metadata_array_destroy(&initial_metadata_recv); + grpc_metadata_array_destroy(&trailing_metadata_recv); + grpc_metadata_array_destroy(&request_metadata_recv); + grpc_call_details_destroy(&call_details); + + grpc_call_unref(s); + grpc_call_unref(c); + + cq_verifier_destroy(cqv); + + grpc_byte_buffer_destroy(request_payload); + grpc_byte_buffer_destroy(request_payload_recv); + + end_test(&f); + config.tear_down_data(&f); +} + +/******************************************************************************* + * Test context filter + */ + +struct call_data { + grpc_call_context_element* context; +}; + +static grpc_error* init_call_elem(grpc_call_element* elem, + const grpc_call_element_args* args) { + call_data* calld = static_cast(elem->call_data); + calld->context = args->context; + gpr_log(GPR_INFO, "init_call_elem(): context=%p", args->context); + return GRPC_ERROR_NONE; +} + +static void start_transport_stream_op_batch( + grpc_call_element* elem, grpc_transport_stream_op_batch* batch) { + call_data* calld = static_cast(elem->call_data); + // If batch payload context is not null (which will happen in some + // cancellation cases), make sure we get the same context here that we + // saw in init_call_elem(). + gpr_log(GPR_INFO, "start_transport_stream_op_batch(): context=%p", + batch->payload->context); + if (batch->payload->context != nullptr) { + GPR_ASSERT(calld->context == batch->payload->context); + } + grpc_call_next_op(elem, batch); +} + +static void destroy_call_elem(grpc_call_element* elem, + const grpc_call_final_info* final_info, + grpc_closure* ignored) {} + +static grpc_error* init_channel_elem(grpc_channel_element* elem, + grpc_channel_element_args* args) { + return GRPC_ERROR_NONE; +} + +static void destroy_channel_elem(grpc_channel_element* elem) {} + +static const grpc_channel_filter test_filter = { + start_transport_stream_op_batch, + grpc_channel_next_op, + sizeof(call_data), + init_call_elem, + grpc_call_stack_ignore_set_pollset_or_pollset_set, + destroy_call_elem, + 0, + init_channel_elem, + destroy_channel_elem, + grpc_channel_next_get_info, + "filter_context"}; + +/******************************************************************************* + * Registration + */ + +static bool maybe_add_filter(grpc_channel_stack_builder* builder, void* arg) { + grpc_channel_filter* filter = static_cast(arg); + if (g_enable_filter) { + // Want to add the filter as close to the end as possible, to make + // sure that all of the filters work well together. However, we + // can't add it at the very end, because the connected channel filter + // must be the last one. So we add it right before the last one. 
+ grpc_channel_stack_builder_iterator* it = + grpc_channel_stack_builder_create_iterator_at_last(builder); + GPR_ASSERT(grpc_channel_stack_builder_move_prev(it)); + const bool retval = grpc_channel_stack_builder_add_filter_before( + it, filter, nullptr, nullptr); + grpc_channel_stack_builder_iterator_destroy(it); + return retval; + } else { + return true; + } +} + +static void init_plugin(void) { + grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MAX, + maybe_add_filter, (void*)&test_filter); + grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL, INT_MAX, + maybe_add_filter, (void*)&test_filter); + grpc_channel_init_register_stage(GRPC_CLIENT_DIRECT_CHANNEL, INT_MAX, + maybe_add_filter, (void*)&test_filter); + grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX, + maybe_add_filter, (void*)&test_filter); +} + +static void destroy_plugin(void) {} + +void filter_context(grpc_end2end_test_config config) { + g_enable_filter = true; + test_request(config); + g_enable_filter = false; +} + +void filter_context_pre_init(void) { + grpc_register_plugin(init_plugin, destroy_plugin); +} diff --git a/test/core/handshake/readahead_handshaker_server_ssl.cc b/test/core/handshake/readahead_handshaker_server_ssl.cc index e4584105e65..d91f2d2fe63 100644 --- a/test/core/handshake/readahead_handshaker_server_ssl.cc +++ b/test/core/handshake/readahead_handshaker_server_ssl.cc @@ -83,6 +83,6 @@ int main(int argc, char* argv[]) { UniquePtr(New())); const char* full_alpn_list[] = {"grpc-exp", "h2"}; GPR_ASSERT(server_ssl_test(full_alpn_list, 2, "grpc-exp")); - grpc_shutdown(); + grpc_shutdown_blocking(); return 0; } diff --git a/test/core/iomgr/resolve_address_test.cc b/test/core/iomgr/resolve_address_test.cc index b041a15ff34..f59a992416d 100644 --- a/test/core/iomgr/resolve_address_test.cc +++ b/test/core/iomgr/resolve_address_test.cc @@ -323,7 +323,11 @@ static bool mock_ipv6_disabled_source_addr_factory_get_source_addr( } void mock_ipv6_disabled_source_addr_factory_destroy( - address_sorting_source_addr_factory* factory) {} + address_sorting_source_addr_factory* factory) { + mock_ipv6_disabled_source_addr_factory* f = + reinterpret_cast(factory); + gpr_free(f); +} const address_sorting_source_addr_factory_vtable kMockIpv6DisabledSourceAddrFactoryVtable = { @@ -390,9 +394,11 @@ int main(int argc, char** argv) { // Run a test case in which c-ares's address sorter // thinks that IPv4 is available and IPv6 isn't. 
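The resolve_address_test hunk above (continued just below) moves the mock address-sorting factory from a stack object in main() to a `gpr_malloc()`-ed one, because its destroy vtable hook now calls `gpr_free()`. A toy sketch of that ownership rule, using an illustrative struct name rather than the real layout:

```cpp
#include <grpc/support/alloc.h>

// Illustrative stand-in for the mock factory (not the real struct).
struct toy_factory {
  int ipv6_disabled;
};

// Destroy hook in the style of the vtable above: it frees the object, so the
// object must come from gpr_malloc(), never from the caller's stack.
static void toy_factory_destroy(toy_factory* f) { gpr_free(f); }

int main() {
  toy_factory* f =
      static_cast<toy_factory*>(gpr_malloc(sizeof(toy_factory)));
  f->ipv6_disabled = 1;
  // ... hand `f` to the code under test, which later runs the destroy hook ...
  toy_factory_destroy(f);
  return 0;
}
```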
grpc_init(); - mock_ipv6_disabled_source_addr_factory factory; - factory.base.vtable = &kMockIpv6DisabledSourceAddrFactoryVtable; - address_sorting_override_source_addr_factory_for_testing(&factory.base); + mock_ipv6_disabled_source_addr_factory* factory = + static_cast( + gpr_malloc(sizeof(mock_ipv6_disabled_source_addr_factory))); + factory->base.vtable = &kMockIpv6DisabledSourceAddrFactoryVtable; + address_sorting_override_source_addr_factory_for_testing(&factory->base); test_localhost_result_has_ipv4_first_when_ipv6_isnt_available(); grpc_shutdown(); } diff --git a/test/core/json/fuzzer.cc b/test/core/json/fuzzer.cc index 6dafabb95b3..8b3e9792d15 100644 --- a/test/core/json/fuzzer.cc +++ b/test/core/json/fuzzer.cc @@ -31,8 +31,7 @@ bool leak_check = true; extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { char* s; - struct grpc_memory_counters counters; - grpc_memory_counters_init(); + grpc_core::testing::LeakDetector leak_detector(true); s = static_cast(gpr_malloc(size)); memcpy(s, data, size); grpc_json* x; @@ -40,8 +39,5 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_json_destroy(x); } gpr_free(s); - counters = grpc_memory_counters_snapshot(); - grpc_memory_counters_destroy(); - GPR_ASSERT(counters.total_size_relative == 0); return 0; } diff --git a/test/core/memory_usage/client.cc b/test/core/memory_usage/client.cc index 467586ea5f4..097288c5efa 100644 --- a/test/core/memory_usage/client.cc +++ b/test/core/memory_usage/client.cc @@ -285,7 +285,7 @@ int main(int argc, char** argv) { grpc_slice_unref(slice); grpc_completion_queue_destroy(cq); - grpc_shutdown(); + grpc_shutdown_blocking(); gpr_log(GPR_INFO, "---------client stats--------"); gpr_log( diff --git a/test/core/memory_usage/server.cc b/test/core/memory_usage/server.cc index 7424797e6f5..6fb14fa31a0 100644 --- a/test/core/memory_usage/server.cc +++ b/test/core/memory_usage/server.cc @@ -318,7 +318,7 @@ int main(int argc, char** argv) { grpc_server_destroy(server); grpc_completion_queue_destroy(cq); - grpc_shutdown(); + grpc_shutdown_blocking(); grpc_memory_counters_destroy(); return 0; } diff --git a/test/core/security/alts_credentials_fuzzer.cc b/test/core/security/alts_credentials_fuzzer.cc index bf18f0a589e..abe50031687 100644 --- a/test/core/security/alts_credentials_fuzzer.cc +++ b/test/core/security/alts_credentials_fuzzer.cc @@ -66,10 +66,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { gpr_set_log_function(dont_log); } gpr_free(grpc_trace_fuzzer); - struct grpc_memory_counters counters; - if (leak_check) { - grpc_memory_counters_init(); - } + grpc_core::testing::LeakDetector leak_detector(leak_check); input_stream inp = {data, data + size}; grpc_init(); bool is_on_gcp = grpc_alts_is_running_on_gcp(); @@ -111,10 +108,5 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { gpr_free(handshaker_service_url); } grpc_shutdown(); - if (leak_check) { - counters = grpc_memory_counters_snapshot(); - grpc_memory_counters_destroy(); - GPR_ASSERT(counters.total_size_relative == 0); - } return 0; } diff --git a/test/core/security/ssl_server_fuzzer.cc b/test/core/security/ssl_server_fuzzer.cc index 8533644aceb..5846964eb90 100644 --- a/test/core/security/ssl_server_fuzzer.cc +++ b/test/core/security/ssl_server_fuzzer.cc @@ -52,9 +52,8 @@ static void on_handshake_done(void* arg, grpc_error* error) { } extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { - struct grpc_memory_counters counters; if 
(squelch) gpr_set_log_function(dont_log); - if (leak_check) grpc_memory_counters_init(); + grpc_core::testing::LeakDetector leak_detector(leak_check); grpc_init(); { grpc_core::ExecCtx exec_ctx; @@ -118,11 +117,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_core::ExecCtx::Get()->Flush(); } - grpc_shutdown(); - if (leak_check) { - counters = grpc_memory_counters_snapshot(); - grpc_memory_counters_destroy(); - GPR_ASSERT(counters.total_size_relative == 0); - } + grpc_shutdown_blocking(); return 0; } diff --git a/test/core/slice/percent_decode_fuzzer.cc b/test/core/slice/percent_decode_fuzzer.cc index 81eb031014f..11f71d92c46 100644 --- a/test/core/slice/percent_decode_fuzzer.cc +++ b/test/core/slice/percent_decode_fuzzer.cc @@ -31,24 +31,23 @@ bool squelch = true; bool leak_check = true; extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { - struct grpc_memory_counters counters; grpc_init(); - grpc_memory_counters_init(); - grpc_slice input = grpc_slice_from_copied_buffer((const char*)data, size); - grpc_slice output; - if (grpc_strict_percent_decode_slice( - input, grpc_url_percent_encoding_unreserved_bytes, &output)) { - grpc_slice_unref(output); + { + grpc_core::testing::LeakDetector leak_detector(true); + grpc_slice input = grpc_slice_from_copied_buffer((const char*)data, size); + grpc_slice output; + if (grpc_strict_percent_decode_slice( + input, grpc_url_percent_encoding_unreserved_bytes, &output)) { + grpc_slice_unref(output); + } + if (grpc_strict_percent_decode_slice( + input, grpc_compatible_percent_encoding_unreserved_bytes, + &output)) { + grpc_slice_unref(output); + } + grpc_slice_unref(grpc_permissive_percent_decode_slice(input)); + grpc_slice_unref(input); } - if (grpc_strict_percent_decode_slice( - input, grpc_compatible_percent_encoding_unreserved_bytes, &output)) { - grpc_slice_unref(output); - } - grpc_slice_unref(grpc_permissive_percent_decode_slice(input)); - grpc_slice_unref(input); - counters = grpc_memory_counters_snapshot(); - grpc_memory_counters_destroy(); - grpc_shutdown(); - GPR_ASSERT(counters.total_size_relative == 0); + grpc_shutdown_blocking(); return 0; } diff --git a/test/core/slice/percent_encode_fuzzer.cc b/test/core/slice/percent_encode_fuzzer.cc index 1fd197e180a..1da982bba28 100644 --- a/test/core/slice/percent_encode_fuzzer.cc +++ b/test/core/slice/percent_encode_fuzzer.cc @@ -31,28 +31,26 @@ bool squelch = true; bool leak_check = true; static void test(const uint8_t* data, size_t size, const uint8_t* dict) { - struct grpc_memory_counters counters; grpc_init(); - grpc_memory_counters_init(); - grpc_slice input = - grpc_slice_from_copied_buffer(reinterpret_cast(data), size); - grpc_slice output = grpc_percent_encode_slice(input, dict); - grpc_slice decoded_output; - // encoder must always produce decodable output - GPR_ASSERT(grpc_strict_percent_decode_slice(output, dict, &decoded_output)); - grpc_slice permissive_decoded_output = - grpc_permissive_percent_decode_slice(output); - // and decoded output must always match the input - GPR_ASSERT(grpc_slice_eq(input, decoded_output)); - GPR_ASSERT(grpc_slice_eq(input, permissive_decoded_output)); - grpc_slice_unref(input); - grpc_slice_unref(output); - grpc_slice_unref(decoded_output); - grpc_slice_unref(permissive_decoded_output); - counters = grpc_memory_counters_snapshot(); - grpc_memory_counters_destroy(); - grpc_shutdown(); - GPR_ASSERT(counters.total_size_relative == 0); + { + grpc_core::testing::LeakDetector leak_detector(true); + grpc_slice 
input = grpc_slice_from_copied_buffer( + reinterpret_cast<const char*>(data), size); + grpc_slice output = grpc_percent_encode_slice(input, dict); + grpc_slice decoded_output; + // encoder must always produce decodable output + GPR_ASSERT(grpc_strict_percent_decode_slice(output, dict, &decoded_output)); + grpc_slice permissive_decoded_output = + grpc_permissive_percent_decode_slice(output); + // and decoded output must always match the input + GPR_ASSERT(grpc_slice_eq(input, decoded_output)); + GPR_ASSERT(grpc_slice_eq(input, permissive_decoded_output)); + grpc_slice_unref(input); + grpc_slice_unref(output); + grpc_slice_unref(decoded_output); + grpc_slice_unref(permissive_decoded_output); + } + grpc_shutdown_blocking(); } extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { diff --git a/test/core/surface/init_test.cc b/test/core/surface/init_test.cc index 1bcd13a0b89..583dd1b6de9 100644 --- a/test/core/surface/init_test.cc +++ b/test/core/surface/init_test.cc @@ -18,6 +18,9 @@ #include #include +#include + +#include "src/core/lib/surface/init.h" #include "test/core/util/test_config.h" static int g_flag; @@ -30,6 +33,17 @@ static void test(int rounds) { for (i = 0; i < rounds; i++) { grpc_shutdown(); } + grpc_maybe_wait_for_async_shutdown(); +} + +static void test_blocking(int rounds) { + int i; + for (i = 0; i < rounds; i++) { + grpc_init(); + } + for (i = 0; i < rounds; i++) { + grpc_shutdown_blocking(); + } } static void test_mixed(void) { @@ -39,6 +53,7 @@ static void test_mixed(void) { grpc_init(); grpc_shutdown(); grpc_shutdown(); + grpc_maybe_wait_for_async_shutdown(); } static void plugin_init(void) { g_flag = 1; } @@ -48,7 +63,7 @@ static void test_plugin() { grpc_register_plugin(plugin_init, plugin_destroy); grpc_init(); GPR_ASSERT(g_flag == 1); - grpc_shutdown(); + grpc_shutdown_blocking(); GPR_ASSERT(g_flag == 2); } @@ -57,6 +72,7 @@ static void test_repeatedly() { grpc_init(); grpc_shutdown(); } + grpc_maybe_wait_for_async_shutdown(); } int main(int argc, char** argv) { @@ -64,6 +80,9 @@ int main(int argc, char** argv) { test(1); test(2); test(3); + test_blocking(1); + test_blocking(2); + test_blocking(3); test_mixed(); test_plugin(); test_repeatedly(); diff --git a/test/core/surface/public_headers_must_be_c89.c b/test/core/surface/public_headers_must_be_c89.c index 1c9b67027c5..04d0506b3c2 100644 --- a/test/core/surface/public_headers_must_be_c89.c +++ b/test/core/surface/public_headers_must_be_c89.c @@ -78,6 +78,7 @@ int main(int argc, char **argv) { printf("%lx", (unsigned long) grpc_init); printf("%lx", (unsigned long) grpc_shutdown); printf("%lx", (unsigned long) grpc_is_initialized); + printf("%lx", (unsigned long) grpc_shutdown_blocking); printf("%lx", (unsigned long) grpc_version_string); printf("%lx", (unsigned long) grpc_g_stands_for); printf("%lx", (unsigned long) grpc_completion_queue_factory_lookup); diff --git a/test/core/transport/metadata_test.cc b/test/core/transport/metadata_test.cc index 9a49d28ccce..e6b73de2de5 100644 --- a/test/core/transport/metadata_test.cc +++ b/test/core/transport/metadata_test.cc @@ -289,6 +289,28 @@ static void test_user_data_works(void) { grpc_shutdown(); } +static void test_user_data_works_for_allocated_md(void) { + int* ud1; + int* ud2; + grpc_mdelem md; + gpr_log(GPR_INFO, "test_user_data_works_for_allocated_md"); + + grpc_init(); + grpc_core::ExecCtx exec_ctx; + ud1 = static_cast<int*>(gpr_malloc(sizeof(int))); + *ud1 = 1; + ud2 = static_cast<int*>(gpr_malloc(sizeof(int))); + *ud2 = 2; + md =
grpc_mdelem_from_slices(grpc_slice_from_static_string("abc"), + grpc_slice_from_static_string("123")); + grpc_mdelem_set_user_data(md, gpr_free, ud1); + grpc_mdelem_set_user_data(md, gpr_free, ud2); + GPR_ASSERT(grpc_mdelem_get_user_data(md, gpr_free) == ud1); + GRPC_MDELEM_UNREF(md); + + grpc_shutdown(); +} + static void verify_ascii_header_size(const char* key, const char* value, bool intern_key, bool intern_value) { grpc_mdelem elem = grpc_mdelem_from_slices( @@ -386,6 +408,7 @@ int main(int argc, char** argv) { test_create_many_persistant_metadata(); test_things_stick_around(); test_user_data_works(); + test_user_data_works_for_allocated_md(); grpc_shutdown(); return 0; } diff --git a/test/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector_test.cc b/test/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector_test.cc index 3ee8323a310..62d799f18b3 100644 --- a/test/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector_test.cc +++ b/test/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector_test.cc @@ -175,7 +175,7 @@ static void seal_unseal_small_buffer(tsi_zero_copy_grpc_protector* sender, GPR_ASSERT(tsi_zero_copy_grpc_protector_protect( sender, &var->original_sb, &var->protected_sb) == TSI_OK); /* Splits protected slice buffer into two: first one is staging_sb, and - * second one is is protected_sb. */ + * second one is protected_sb. */ uint32_t staging_sb_size = gsec_test_bias_random_uint32( static_cast<uint32_t>(var->protected_sb.length - 1)) + diff --git a/test/core/util/memory_counters.cc b/test/core/util/memory_counters.cc index d0da05d9b4d..787fb76e48b 100644 --- a/test/core/util/memory_counters.cc +++ b/test/core/util/memory_counters.cc @@ -16,13 +16,18 @@ * */ +#include #include #include +#include #include +#include #include +#include #include "src/core/lib/gpr/alloc.h" +#include "src/core/lib/surface/init.h" #include "test/core/util/memory_counters.h" static struct grpc_memory_counters g_memory_counters; @@ -110,3 +115,29 @@ struct grpc_memory_counters grpc_memory_counters_snapshot() { NO_BARRIER_LOAD(&g_memory_counters.total_allocs_absolute); return counters; } + +namespace grpc_core { +namespace testing { + +LeakDetector::LeakDetector(bool enable) : enabled_(enable) { + if (enabled_) { + grpc_memory_counters_init(); + } +} + +LeakDetector::~LeakDetector() { + // Wait for grpc_shutdown() to finish its async work. + grpc_maybe_wait_for_async_shutdown(); + if (enabled_) { + struct grpc_memory_counters counters = grpc_memory_counters_snapshot(); + if (counters.total_size_relative != 0) { + gpr_log(GPR_ERROR, "Leaking %" PRIuPTR " bytes", + static_cast<uintptr_t>(counters.total_size_relative)); + GPR_ASSERT(0); + } + grpc_memory_counters_destroy(); + } +} + +} // namespace testing +} // namespace grpc_core diff --git a/test/core/util/memory_counters.h b/test/core/util/memory_counters.h index c23a13e5c85..c92a001ff13 100644 --- a/test/core/util/memory_counters.h +++ b/test/core/util/memory_counters.h @@ -32,4 +32,22 @@ void grpc_memory_counters_init(); void grpc_memory_counters_destroy(); struct grpc_memory_counters grpc_memory_counters_snapshot(); +namespace grpc_core { +namespace testing { + +// At destruction time, it verifies that no memory has been leaked. +// The object should be created before grpc_init() is called and destroyed +// after grpc_shutdown() has returned.
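// Usage sketch (illustrative only; mirrors the fuzzer changes above, e.g.
// test/core/json/fuzzer.cc):
//
//   extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
//     grpc_core::testing::LeakDetector leak_detector(true /* enable */);
//     grpc_init();
//     /* ... exercise the code under test ... */
//     grpc_shutdown();
//     return 0;  // ~LeakDetector() waits for the async shutdown to finish,
//                // then asserts that total_size_relative is back to zero.
//   }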
+class LeakDetector { + public: + explicit LeakDetector(bool enable); + ~LeakDetector(); + + private: + const bool enabled_; +}; + +} // namespace testing +} // namespace grpc_core + #endif diff --git a/test/core/util/port.cc b/test/core/util/port.cc index 303306de452..fe4caa6faf6 100644 --- a/test/core/util/port.cc +++ b/test/core/util/port.cc @@ -66,7 +66,7 @@ static void free_chosen_ports(void) { for (i = 0; i < num_chosen_ports; i++) { grpc_free_port_using_server(chosen_ports[i]); } - grpc_shutdown(); + grpc_shutdown_blocking(); gpr_free(chosen_ports); } diff --git a/test/core/util/test_config.cc b/test/core/util/test_config.cc index fe80bb2d4d0..0c0492fdbbd 100644 --- a/test/core/util/test_config.cc +++ b/test/core/util/test_config.cc @@ -31,6 +31,7 @@ #include "src/core/lib/gpr/env.h" #include "src/core/lib/gpr/string.h" #include "src/core/lib/gpr/useful.h" +#include "src/core/lib/surface/init.h" int64_t g_fixture_slowdown_factor = 1; int64_t g_poller_slowdown_factor = 1; @@ -405,7 +406,7 @@ TestEnvironment::TestEnvironment(int argc, char** argv) { grpc_test_init(argc, argv); } -TestEnvironment::~TestEnvironment() {} +TestEnvironment::~TestEnvironment() { grpc_maybe_wait_for_async_shutdown(); } } // namespace testing } // namespace grpc diff --git a/test/core/util/test_lb_policies.cc b/test/core/util/test_lb_policies.cc index 0a01e483f13..745162f637f 100644 --- a/test/core/util/test_lb_policies.cc +++ b/test/core/util/test_lb_policies.cc @@ -147,10 +147,8 @@ class InterceptRecvTrailingMetadataLoadBalancingPolicy } grpc_channel* CreateChannel(const char* target, - grpc_client_channel_type type, const grpc_channel_args& args) override { - return parent_->channel_control_helper()->CreateChannel(target, type, - args); + return parent_->channel_control_helper()->CreateChannel(target, args); } void UpdateState(grpc_connectivity_state state, grpc_error* state_error, diff --git a/test/cpp/end2end/BUILD b/test/cpp/end2end/BUILD index d80fa33a83a..43dee177e7a 100644 --- a/test/cpp/end2end/BUILD +++ b/test/cpp/end2end/BUILD @@ -439,6 +439,28 @@ grpc_cc_test( ], ) +grpc_cc_test( + name = "xds_end2end_test", + srcs = ["xds_end2end_test.cc"], + external_deps = [ + "gmock", + "gtest", + ], + deps = [ + ":test_service_impl", + "//:gpr", + "//:grpc", + "//:grpc++", + "//:grpc_resolver_fake", + "//src/proto/grpc/lb/v1:load_balancer_proto", + "//src/proto/grpc/testing:echo_messages_proto", + "//src/proto/grpc/testing:echo_proto", + "//src/proto/grpc/testing/duplicate:echo_duplicate_proto", + "//test/core/util:grpc_test_util", + "//test/cpp/util:test_util", + ], +) + grpc_cc_test( name = "proto_server_reflection_test", srcs = ["proto_server_reflection_test.cc"], diff --git a/test/cpp/end2end/cfstream_test.cc b/test/cpp/end2end/cfstream_test.cc index 9039329d815..6ca206e5f36 100644 --- a/test/cpp/end2end/cfstream_test.cc +++ b/test/cpp/end2end/cfstream_test.cc @@ -270,9 +270,6 @@ int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); grpc_test_init(argc, argv); gpr_setenv("grpc_cfstream", "1"); - // TODO (pjaikumar): remove the line below when - // https://github.com/grpc/grpc/issues/18080 has been fixed. 
- gpr_setenv("GRPC_DNS_RESOLVER", "native"); const auto result = RUN_ALL_TESTS(); return result; } diff --git a/test/cpp/end2end/client_lb_end2end_test.cc b/test/cpp/end2end/client_lb_end2end_test.cc index 049b732e1a0..996ba0edbbe 100644 --- a/test/cpp/end2end/client_lb_end2end_test.cc +++ b/test/cpp/end2end/client_lb_end2end_test.cc @@ -153,7 +153,13 @@ class ClientLbEnd2endTest : public ::testing::Test { for (size_t i = 0; i < servers_.size(); ++i) { servers_[i]->Shutdown(); } - grpc_shutdown(); + // Explicitly destroy all the members so that we can make sure grpc_shutdown + // has finished by the end of this function, and thus all the registered + // LB policy factories are removed. + stub_.reset(); + servers_.clear(); + creds_.reset(); + grpc_shutdown_blocking(); } void CreateServers(size_t num_servers, diff --git a/test/cpp/end2end/flaky_network_test.cc b/test/cpp/end2end/flaky_network_test.cc index 20c8fb59fa2..63a6897f931 100644 --- a/test/cpp/end2end/flaky_network_test.cc +++ b/test/cpp/end2end/flaky_network_test.cc @@ -339,12 +339,18 @@ TEST_F(FlakyNetworkTest, NetworkTransition) { TEST_F(FlakyNetworkTest, ServerUnreachableWithKeepalive) { const int kKeepAliveTimeMs = 1000; const int kKeepAliveTimeoutMs = 1000; + const int kReconnectBackoffMs = 1000; ChannelArguments args; args.SetInt(GRPC_ARG_KEEPALIVE_TIME_MS, kKeepAliveTimeMs); args.SetInt(GRPC_ARG_KEEPALIVE_TIMEOUT_MS, kKeepAliveTimeoutMs); args.SetInt(GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS, 1); args.SetInt(GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA, 0); + // max time for a connection attempt + args.SetInt(GRPC_ARG_MIN_RECONNECT_BACKOFF_MS, kReconnectBackoffMs); + // max time between reconnect attempts + args.SetInt(GRPC_ARG_MAX_RECONNECT_BACKOFF_MS, kReconnectBackoffMs); + gpr_log(GPR_DEBUG, "FlakyNetworkTest.ServerUnreachableWithKeepalive start"); auto channel = BuildChannel("pick_first", args); auto stub = BuildStub(channel); // Channel should be in READY state after we send an RPC @@ -363,15 +369,18 @@ TEST_F(FlakyNetworkTest, ServerUnreachableWithKeepalive) { }); // break network connectivity + gpr_log(GPR_DEBUG, "Adding iptables rule to drop packets"); DropPackets(); std::this_thread::sleep_for(std::chrono::milliseconds(10000)); EXPECT_TRUE(WaitForChannelNotReady(channel.get())); // bring network interface back up RestoreNetwork(); + gpr_log(GPR_DEBUG, "Removed iptables rule to drop packets"); EXPECT_TRUE(WaitForChannelReady(channel.get())); EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY); shutdown.store(true); sender.join(); + gpr_log(GPR_DEBUG, "FlakyNetworkTest.ServerUnreachableWithKeepalive end"); } // @@ -421,7 +430,7 @@ TEST_F(FlakyNetworkTest, FlakyNetwork) { // simulate flaky network (packet loss, corruption and delays) FlakeNetwork(); for (int i = 0; i < kMessageCount; ++i) { - EXPECT_TRUE(SendRpc(stub)); + SendRpc(stub); } // remove network flakiness UnflakeNetwork(); diff --git a/test/cpp/end2end/grpclb_end2end_test.cc b/test/cpp/end2end/grpclb_end2end_test.cc index b56e65e50af..31353ba1304 100644 --- a/test/cpp/end2end/grpclb_end2end_test.cc +++ b/test/cpp/end2end/grpclb_end2end_test.cc @@ -723,6 +723,150 @@ TEST_F(SingleBalancerTest, SelectGrpclbWithMigrationServiceConfig) { EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName()); } +TEST_F(SingleBalancerTest, UsePickFirstChildPolicy) { + SetNextResolutionAllBalancers( + "{\n" + " \"loadBalancingConfig\":[\n" + " { \"grpclb\":{\n" + " \"childPolicy\":[\n" + " { \"pick_first\":{} }\n" + " ]\n" + " } }\n" + " ]\n" + "}"); + ScheduleResponseForBalancer( 
+ 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}), + 0); + const size_t kNumRpcs = num_backends_ * 2; + CheckRpcSendOk(kNumRpcs, 1000 /* timeout_ms */, true /* wait_for_ready */); + balancers_[0]->NotifyDoneWithServerlists(); + // Check that all requests went to the first backend. This verifies + // that we used pick_first instead of round_robin as the child policy. + EXPECT_EQ(backend_servers_[0].service_->request_count(), kNumRpcs); + for (size_t i = 1; i < backends_.size(); ++i) { + EXPECT_EQ(backend_servers_[i].service_->request_count(), 0UL); + } + // The balancer got a single request. + EXPECT_EQ(1U, balancer_servers_[0].service_->request_count()); + // and sent a single response. + EXPECT_EQ(1U, balancer_servers_[0].service_->response_count()); + // Check LB policy name for the channel. + EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName()); +} + +TEST_F(SingleBalancerTest, SwapChildPolicy) { + SetNextResolutionAllBalancers( + "{\n" + " \"loadBalancingConfig\":[\n" + " { \"grpclb\":{\n" + " \"childPolicy\":[\n" + " { \"pick_first\":{} }\n" + " ]\n" + " } }\n" + " ]\n" + "}"); + ScheduleResponseForBalancer( + 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}), + 0); + const size_t kNumRpcs = num_backends_ * 2; + CheckRpcSendOk(kNumRpcs, 1000 /* timeout_ms */, true /* wait_for_ready */); + // Check that all requests went to the first backend. This verifies + // that we used pick_first instead of round_robin as the child policy. + EXPECT_EQ(backend_servers_[0].service_->request_count(), kNumRpcs); + for (size_t i = 1; i < backends_.size(); ++i) { + EXPECT_EQ(backend_servers_[i].service_->request_count(), 0UL); + } + // Send new resolution that removes child policy from service config. + SetNextResolutionAllBalancers("{}"); + WaitForAllBackends(); + CheckRpcSendOk(kNumRpcs, 1000 /* timeout_ms */, true /* wait_for_ready */); + // Check that every backend saw the same number of requests. This verifies + // that we used round_robin. + for (size_t i = 0; i < backends_.size(); ++i) { + EXPECT_EQ(backend_servers_[i].service_->request_count(), 2UL); + } + // Done. + balancers_[0]->NotifyDoneWithServerlists(); + // The balancer got a single request. + EXPECT_EQ(1U, balancer_servers_[0].service_->request_count()); + // and sent a single response. + EXPECT_EQ(1U, balancer_servers_[0].service_->response_count()); + // Check LB policy name for the channel. + EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName()); +} + +TEST_F(SingleBalancerTest, UpdatesGoToMostRecentChildPolicy) { + const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor(); + ResetStub(kFallbackTimeoutMs); + int unreachable_balancer_port = grpc_pick_unused_port_or_die(); + int unreachable_backend_port = grpc_pick_unused_port_or_die(); + // Phase 1: Start with RR pointing to first backend. + gpr_log(GPR_INFO, "PHASE 1: Initial setup with RR with first backend"); + SetNextResolution( + { + // Unreachable balancer. + {unreachable_balancer_port, true, ""}, + // Fallback address: first backend. + {backend_servers_[0].port_, false, ""}, + }, + "{\n" + " \"loadBalancingConfig\":[\n" + " { \"grpclb\":{\n" + " \"childPolicy\":[\n" + " { \"round_robin\":{} }\n" + " ]\n" + " } }\n" + " ]\n" + "}"); + // RPCs should go to first backend. + WaitForBackend(0); + // Phase 2: Switch to PF pointing to unreachable backend. + gpr_log(GPR_INFO, "PHASE 2: Update to use PF with unreachable backend"); + SetNextResolution( + { + // Unreachable balancer. 
+ {unreachable_balancer_port, true, ""}, + // Fallback address: unreachable backend. + {unreachable_backend_port, false, ""}, + }, + "{\n" + " \"loadBalancingConfig\":[\n" + " { \"grpclb\":{\n" + " \"childPolicy\":[\n" + " { \"pick_first\":{} }\n" + " ]\n" + " } }\n" + " ]\n" + "}"); + // RPCs should continue to go to the first backend, because the new + // PF child policy will never go into state READY. + WaitForBackend(0); + // Phase 3: Switch back to RR pointing to second and third backends. + // This ensures that we create a new policy rather than updating the + // pending PF policy. + gpr_log(GPR_INFO, "PHASE 3: Update to use RR again with two backends"); + SetNextResolution( + { + // Unreachable balancer. + {unreachable_balancer_port, true, ""}, + // Fallback address: second and third backends. + {backend_servers_[1].port_, false, ""}, + {backend_servers_[2].port_, false, ""}, + }, + "{\n" + " \"loadBalancingConfig\":[\n" + " { \"grpclb\":{\n" + " \"childPolicy\":[\n" + " { \"round_robin\":{} }\n" + " ]\n" + " } }\n" + " ]\n" + "}"); + // RPCs should go to the second and third backends. + WaitForBackend(1); + WaitForBackend(2); +} + TEST_F(SingleBalancerTest, SameBackendListedMultipleTimes) { SetNextResolutionAllBalancers(); // Same backend listed twice. @@ -1483,6 +1627,9 @@ class SingleBalancerWithClientLoadReportingTest : public GrpclbEnd2endTest { SingleBalancerWithClientLoadReportingTest() : GrpclbEnd2endTest(4, 1, 3) {} }; +// TODO(roth): Add test that when switching balancers, we don't include +// any calls that were sent prior to connecting to the new balancer. + TEST_F(SingleBalancerWithClientLoadReportingTest, Vanilla) { SetNextResolutionAllBalancers(); const size_t kNumRpcsPerAddress = 100; diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc new file mode 100644 index 00000000000..09556675d43 --- /dev/null +++ b/test/cpp/end2end/xds_end2end_test.cc @@ -0,0 +1,1214 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "src/core/ext/filters/client_channel/parse_address.h" +#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h" +#include "src/core/ext/filters/client_channel/server_address.h" +#include "src/core/lib/gpr/env.h" +#include "src/core/lib/gprpp/ref_counted_ptr.h" +#include "src/core/lib/iomgr/sockaddr.h" +#include "src/core/lib/security/credentials/fake/fake_credentials.h" +#include "src/cpp/client/secure_credentials.h" +#include "src/cpp/server/secure_server_credentials.h" + +#include "test/core/util/port.h" +#include "test/core/util/test_config.h" +#include "test/cpp/end2end/test_service_impl.h" + +#include "src/proto/grpc/lb/v1/load_balancer.grpc.pb.h" +#include "src/proto/grpc/testing/echo.grpc.pb.h" + +#include +#include + +// TODO(dgq): Other scenarios in need of testing: +// - Send a serverlist with faulty ip:port addresses (port > 2^16, etc). +// - Test reception of invalid serverlist +// - Test pinging +// - Test against a non-LB server. +// - Random LB server closing the stream unexpectedly. +// - Test using DNS-resolvable names (localhost?) +// - Test handling of creation of faulty RR instance by having the LB return a +// serverlist with non-existent backends after having initially returned a +// valid one. +// +// Findings from end to end testing to be covered here: +// - Handling of LB servers restart, including reconnection after backing-off +// retries. +// - Destruction of load balanced channel (and therefore of xds instance) +// while: +// 1) the internal LB call is still active. This should work by virtue +// of the weak reference the LB call holds. The call should be terminated as +// part of the xds shutdown process. +// 2) the retry timer is active. Again, the weak reference it holds should +// prevent a premature call to \a glb_destroy. +// - Restart of backend servers with no changes to serverlist. This exercises +// the RR handover mechanism. + +using std::chrono::system_clock; + +using grpc::lb::v1::LoadBalanceRequest; +using grpc::lb::v1::LoadBalanceResponse; +using grpc::lb::v1::LoadBalancer; + +namespace grpc { +namespace testing { +namespace { + +template +class CountedService : public ServiceType { + public: + size_t request_count() { + std::unique_lock lock(mu_); + return request_count_; + } + + size_t response_count() { + std::unique_lock lock(mu_); + return response_count_; + } + + void IncreaseResponseCount() { + std::unique_lock lock(mu_); + ++response_count_; + } + void IncreaseRequestCount() { + std::unique_lock lock(mu_); + ++request_count_; + } + + void ResetCounters() { + std::unique_lock lock(mu_); + request_count_ = 0; + response_count_ = 0; + } + + protected: + std::mutex mu_; + + private: + size_t request_count_ = 0; + size_t response_count_ = 0; +}; + +using BackendService = CountedService; +using BalancerService = CountedService; + +const char g_kCallCredsMdKey[] = "Balancer should not ..."; +const char g_kCallCredsMdValue[] = "... receive me"; + +class BackendServiceImpl : public BackendService { + public: + BackendServiceImpl() {} + + Status Echo(ServerContext* context, const EchoRequest* request, + EchoResponse* response) override { + // Backend should receive the call credentials metadata. 
+ auto call_credentials_entry = + context->client_metadata().find(g_kCallCredsMdKey); + EXPECT_NE(call_credentials_entry, context->client_metadata().end()); + if (call_credentials_entry != context->client_metadata().end()) { + EXPECT_EQ(call_credentials_entry->second, g_kCallCredsMdValue); + } + IncreaseRequestCount(); + const auto status = TestServiceImpl::Echo(context, request, response); + IncreaseResponseCount(); + AddClient(context->peer()); + return status; + } + + // Returns true on its first invocation, false otherwise. + bool Shutdown() { + std::unique_lock lock(mu_); + const bool prev = !shutdown_; + shutdown_ = true; + gpr_log(GPR_INFO, "Backend: shut down"); + return prev; + } + + std::set clients() { + std::unique_lock lock(clients_mu_); + return clients_; + } + + private: + void AddClient(const grpc::string& client) { + std::unique_lock lock(clients_mu_); + clients_.insert(client); + } + + std::mutex mu_; + bool shutdown_ = false; + std::mutex clients_mu_; + std::set clients_; +}; + +grpc::string Ip4ToPackedString(const char* ip_str) { + struct in_addr ip4; + GPR_ASSERT(inet_pton(AF_INET, ip_str, &ip4) == 1); + return grpc::string(reinterpret_cast(&ip4), sizeof(ip4)); +} + +struct ClientStats { + size_t num_calls_started = 0; + size_t num_calls_finished = 0; + size_t num_calls_finished_with_client_failed_to_send = 0; + size_t num_calls_finished_known_received = 0; + std::map drop_token_counts; + + ClientStats& operator+=(const ClientStats& other) { + num_calls_started += other.num_calls_started; + num_calls_finished += other.num_calls_finished; + num_calls_finished_with_client_failed_to_send += + other.num_calls_finished_with_client_failed_to_send; + num_calls_finished_known_received += + other.num_calls_finished_known_received; + for (const auto& p : other.drop_token_counts) { + drop_token_counts[p.first] += p.second; + } + return *this; + } +}; + +class BalancerServiceImpl : public BalancerService { + public: + using Stream = ServerReaderWriter; + using ResponseDelayPair = std::pair; + + explicit BalancerServiceImpl(int client_load_reporting_interval_seconds) + : client_load_reporting_interval_seconds_( + client_load_reporting_interval_seconds), + shutdown_(false) {} + + Status BalanceLoad(ServerContext* context, Stream* stream) override { + // TODO(juanlishen): Clean up the scoping. + gpr_log(GPR_INFO, "LB[%p]: BalanceLoad", this); + { + std::unique_lock lock(mu_); + if (shutdown_) goto done; + } + + { + // Balancer shouldn't receive the call credentials metadata. 
+ EXPECT_EQ(context->client_metadata().find(g_kCallCredsMdKey), + context->client_metadata().end()); + LoadBalanceRequest request; + std::vector responses_and_delays; + + if (!stream->Read(&request)) { + goto done; + } + IncreaseRequestCount(); + gpr_log(GPR_INFO, "LB[%p]: received initial message '%s'", this, + request.DebugString().c_str()); + + { + LoadBalanceResponse initial_response; + initial_response.mutable_initial_response() + ->mutable_client_stats_report_interval() + ->set_seconds(client_load_reporting_interval_seconds_); + stream->Write(initial_response); + } + + { + std::unique_lock lock(mu_); + responses_and_delays = responses_and_delays_; + } + for (const auto& response_and_delay : responses_and_delays) { + { + std::unique_lock lock(mu_); + if (shutdown_) goto done; + } + SendResponse(stream, response_and_delay.first, + response_and_delay.second); + } + { + std::unique_lock lock(mu_); + if (shutdown_) goto done; + serverlist_cond_.wait(lock, [this] { return serverlist_ready_; }); + } + + if (client_load_reporting_interval_seconds_ > 0) { + request.Clear(); + if (stream->Read(&request)) { + gpr_log(GPR_INFO, "LB[%p]: received client load report message '%s'", + this, request.DebugString().c_str()); + GPR_ASSERT(request.has_client_stats()); + // We need to acquire the lock here in order to prevent the notify_one + // below from firing before its corresponding wait is executed. + std::lock_guard lock(mu_); + client_stats_.num_calls_started += + request.client_stats().num_calls_started(); + client_stats_.num_calls_finished += + request.client_stats().num_calls_finished(); + client_stats_.num_calls_finished_with_client_failed_to_send += + request.client_stats() + .num_calls_finished_with_client_failed_to_send(); + client_stats_.num_calls_finished_known_received += + request.client_stats().num_calls_finished_known_received(); + for (const auto& drop_token_count : + request.client_stats().calls_finished_with_drop()) { + client_stats_ + .drop_token_counts[drop_token_count.load_balance_token()] += + drop_token_count.num_calls(); + } + load_report_ready_ = true; + load_report_cond_.notify_one(); + } + } + } + done: + gpr_log(GPR_INFO, "LB[%p]: done", this); + return Status::OK; + } + + void add_response(const LoadBalanceResponse& response, int send_after_ms) { + std::unique_lock lock(mu_); + responses_and_delays_.push_back(std::make_pair(response, send_after_ms)); + } + + // Returns true on its first invocation, false otherwise. 
+ bool Shutdown() { + bool prev; + { + std::unique_lock lock(mu_); + prev = !shutdown_; + shutdown_ = true; + } + NotifyDoneWithServerlists(); + gpr_log(GPR_INFO, "LB[%p]: shut down", this); + return prev; + } + + static LoadBalanceResponse BuildResponseForBackends( + const std::vector& backend_ports, + const std::map& drop_token_counts) { + LoadBalanceResponse response; + for (const auto& drop_token_count : drop_token_counts) { + for (size_t i = 0; i < drop_token_count.second; ++i) { + auto* server = response.mutable_server_list()->add_servers(); + server->set_drop(true); + server->set_load_balance_token(drop_token_count.first); + } + } + for (const int& backend_port : backend_ports) { + auto* server = response.mutable_server_list()->add_servers(); + server->set_ip_address(Ip4ToPackedString("127.0.0.1")); + server->set_port(backend_port); + static int token_count = 0; + char* token; + gpr_asprintf(&token, "token%03d", ++token_count); + server->set_load_balance_token(token); + gpr_free(token); + } + return response; + } + + const ClientStats& WaitForLoadReport() { + std::unique_lock lock(mu_); + load_report_cond_.wait(lock, [this] { return load_report_ready_; }); + load_report_ready_ = false; + return client_stats_; + } + + void NotifyDoneWithServerlists() { + std::lock_guard lock(mu_); + serverlist_ready_ = true; + serverlist_cond_.notify_all(); + } + + private: + void SendResponse(Stream* stream, const LoadBalanceResponse& response, + int delay_ms) { + gpr_log(GPR_INFO, "LB[%p]: sleeping for %d ms...", this, delay_ms); + if (delay_ms > 0) { + gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(delay_ms)); + } + gpr_log(GPR_INFO, "LB[%p]: Woke up! Sending response '%s'", this, + response.DebugString().c_str()); + IncreaseResponseCount(); + stream->Write(response); + } + + const int client_load_reporting_interval_seconds_; + std::vector responses_and_delays_; + std::mutex mu_; + std::condition_variable load_report_cond_; + bool load_report_ready_ = false; + std::condition_variable serverlist_cond_; + bool serverlist_ready_ = false; + ClientStats client_stats_; + bool shutdown_; +}; + +class XdsEnd2endTest : public ::testing::Test { + protected: + XdsEnd2endTest(int num_backends, int num_balancers, + int client_load_reporting_interval_seconds) + : server_host_("localhost"), + num_backends_(num_backends), + num_balancers_(num_balancers), + client_load_reporting_interval_seconds_( + client_load_reporting_interval_seconds) { + // Make the backup poller poll very frequently in order to pick up + // updates from all the subchannels's FDs. + gpr_setenv("GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS", "1"); + } + + void SetUp() override { + response_generator_ = + grpc_core::MakeRefCounted(); + lb_channel_response_generator_ = + grpc_core::MakeRefCounted(); + // Start the backends. + for (size_t i = 0; i < num_backends_; ++i) { + backends_.emplace_back(new BackendServiceImpl()); + backend_servers_.emplace_back(ServerThread( + "backend", server_host_, backends_.back().get())); + } + // Start the load balancers. 
+ for (size_t i = 0; i < num_balancers_; ++i) { + balancers_.emplace_back( + new BalancerServiceImpl(client_load_reporting_interval_seconds_)); + balancer_servers_.emplace_back(ServerThread( + "balancer", server_host_, balancers_.back().get())); + } + ResetStub(); + } + + void TearDown() override { + for (size_t i = 0; i < backends_.size(); ++i) { + if (backends_[i]->Shutdown()) backend_servers_[i].Shutdown(); + } + for (size_t i = 0; i < balancers_.size(); ++i) { + if (balancers_[i]->Shutdown()) balancer_servers_[i].Shutdown(); + } + } + + void ResetStub(int fallback_timeout = 0, + const grpc::string& expected_targets = "") { + ChannelArguments args; + // TODO(juanlishen): Add setter to ChannelArguments. + args.SetInt(GRPC_ARG_XDS_FALLBACK_TIMEOUT_MS, fallback_timeout); + args.SetPointer(GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR, + response_generator_.get()); + if (!expected_targets.empty()) { + args.SetString(GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS, expected_targets); + } + std::ostringstream uri; + uri << "fake:///" << kApplicationTargetName_; + // TODO(dgq): templatize tests to run everything using both secure and + // insecure channel credentials. + grpc_channel_credentials* channel_creds = + grpc_fake_transport_security_credentials_create(); + grpc_call_credentials* call_creds = grpc_md_only_test_credentials_create( + g_kCallCredsMdKey, g_kCallCredsMdValue, false); + std::shared_ptr creds( + new SecureChannelCredentials(grpc_composite_channel_credentials_create( + channel_creds, call_creds, nullptr))); + call_creds->Unref(); + channel_creds->Unref(); + channel_ = CreateCustomChannel(uri.str(), creds, args); + stub_ = grpc::testing::EchoTestService::NewStub(channel_); + } + + void ResetBackendCounters() { + for (const auto& backend : backends_) backend->ResetCounters(); + } + + ClientStats WaitForLoadReports() { + ClientStats client_stats; + for (const auto& balancer : balancers_) { + client_stats += balancer->WaitForLoadReport(); + } + return client_stats; + } + + bool SeenAllBackends() { + for (const auto& backend : backends_) { + if (backend->request_count() == 0) return false; + } + return true; + } + + void SendRpcAndCount(int* num_total, int* num_ok, int* num_failure, + int* num_drops) { + const Status status = SendRpc(); + if (status.ok()) { + ++*num_ok; + } else { + if (status.error_message() == "Call dropped by load balancing policy") { + ++*num_drops; + } else { + ++*num_failure; + } + } + ++*num_total; + } + + std::tuple WaitForAllBackends( + int num_requests_multiple_of = 1) { + int num_ok = 0; + int num_failure = 0; + int num_drops = 0; + int num_total = 0; + while (!SeenAllBackends()) { + SendRpcAndCount(&num_total, &num_ok, &num_failure, &num_drops); + } + while (num_total % num_requests_multiple_of != 0) { + SendRpcAndCount(&num_total, &num_ok, &num_failure, &num_drops); + } + ResetBackendCounters(); + gpr_log(GPR_INFO, + "Performed %d warm up requests (a multiple of %d) against the " + "backends. 
%d succeeded, %d failed, %d dropped.", + num_total, num_requests_multiple_of, num_ok, num_failure, + num_drops); + return std::make_tuple(num_ok, num_failure, num_drops); + } + + void WaitForBackend(size_t backend_idx) { + do { + (void)SendRpc(); + } while (backends_[backend_idx]->request_count() == 0); + ResetBackendCounters(); + } + + grpc_core::ServerAddressList CreateLbAddressesFromPortList( + const std::vector& ports) { + grpc_core::ServerAddressList addresses; + for (int port : ports) { + char* lb_uri_str; + gpr_asprintf(&lb_uri_str, "ipv4:127.0.0.1:%d", port); + grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str, true); + GPR_ASSERT(lb_uri != nullptr); + grpc_resolved_address address; + GPR_ASSERT(grpc_parse_uri(lb_uri, &address)); + std::vector args_to_add; + grpc_channel_args* args = grpc_channel_args_copy_and_add( + nullptr, args_to_add.data(), args_to_add.size()); + addresses.emplace_back(address.addr, address.len, args); + grpc_uri_destroy(lb_uri); + gpr_free(lb_uri_str); + } + return addresses; + } + + void SetNextResolution(const std::vector& ports, + const char* service_config_json = nullptr, + grpc_core::FakeResolverResponseGenerator* + lb_channel_response_generator = nullptr) { + grpc_core::ExecCtx exec_ctx; + grpc_core::ServerAddressList addresses = + CreateLbAddressesFromPortList(ports); + std::vector args = { + CreateServerAddressListChannelArg(&addresses), + grpc_core::FakeResolverResponseGenerator::MakeChannelArg( + lb_channel_response_generator == nullptr + ? lb_channel_response_generator_.get() + : lb_channel_response_generator)}; + if (service_config_json != nullptr) { + args.push_back(grpc_channel_arg_string_create( + const_cast(GRPC_ARG_SERVICE_CONFIG), + const_cast(service_config_json))); + } + grpc_channel_args fake_result = {args.size(), args.data()}; + response_generator_->SetResponse(&fake_result); + } + + void SetNextResolutionForLbChannelAllBalancers( + const char* service_config_json = nullptr, + grpc_core::FakeResolverResponseGenerator* lb_channel_response_generator = + nullptr) { + std::vector ports; + for (size_t i = 0; i < balancer_servers_.size(); ++i) { + ports.emplace_back(balancer_servers_[i].port_); + } + SetNextResolutionForLbChannel(ports, service_config_json, + lb_channel_response_generator); + } + + void SetNextResolutionForLbChannel( + const std::vector& ports, const char* service_config_json = nullptr, + grpc_core::FakeResolverResponseGenerator* lb_channel_response_generator = + nullptr) { + grpc_core::ExecCtx exec_ctx; + grpc_core::ServerAddressList addresses = + CreateLbAddressesFromPortList(ports); + std::vector args = { + CreateServerAddressListChannelArg(&addresses), + }; + if (service_config_json != nullptr) { + args.push_back(grpc_channel_arg_string_create( + const_cast(GRPC_ARG_SERVICE_CONFIG), + const_cast(service_config_json))); + } + grpc_channel_args fake_result = {args.size(), args.data()}; + if (lb_channel_response_generator == nullptr) { + lb_channel_response_generator = lb_channel_response_generator_.get(); + } + lb_channel_response_generator->SetResponse(&fake_result); + } + + void SetNextReresolutionResponse(const std::vector& ports) { + grpc_core::ExecCtx exec_ctx; + grpc_core::ServerAddressList addresses = + CreateLbAddressesFromPortList(ports); + grpc_arg fake_addresses = CreateServerAddressListChannelArg(&addresses); + grpc_channel_args fake_result = {1, &fake_addresses}; + response_generator_->SetReresolutionResponse(&fake_result); + } + + const std::vector GetBackendPorts(const size_t start_index = 0) const { + std::vector 
backend_ports; + for (size_t i = start_index; i < backend_servers_.size(); ++i) { + backend_ports.push_back(backend_servers_[i].port_); + } + return backend_ports; + } + + void ScheduleResponseForBalancer(size_t i, + const LoadBalanceResponse& response, + int delay_ms) { + balancers_.at(i)->add_response(response, delay_ms); + } + + Status SendRpc(EchoResponse* response = nullptr, int timeout_ms = 1000, + bool wait_for_ready = false) { + const bool local_response = (response == nullptr); + if (local_response) response = new EchoResponse; + EchoRequest request; + request.set_message(kRequestMessage_); + ClientContext context; + context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms)); + if (wait_for_ready) context.set_wait_for_ready(true); + Status status = stub_->Echo(&context, request, response); + if (local_response) delete response; + return status; + } + + void CheckRpcSendOk(const size_t times = 1, const int timeout_ms = 1000, + bool wait_for_ready = false) { + for (size_t i = 0; i < times; ++i) { + EchoResponse response; + const Status status = SendRpc(&response, timeout_ms, wait_for_ready); + EXPECT_TRUE(status.ok()) << "code=" << status.error_code() + << " message=" << status.error_message(); + EXPECT_EQ(response.message(), kRequestMessage_); + } + } + + void CheckRpcSendFailure() { + const Status status = SendRpc(); + EXPECT_FALSE(status.ok()); + } + + template + struct ServerThread { + explicit ServerThread(const grpc::string& type, + const grpc::string& server_host, T* service) + : type_(type), service_(service) { + std::mutex mu; + // We need to acquire the lock here in order to prevent the notify_one + // by ServerThread::Start from firing before the wait below is hit. + std::unique_lock lock(mu); + port_ = grpc_pick_unused_port_or_die(); + gpr_log(GPR_INFO, "starting %s server on port %d", type_.c_str(), port_); + std::condition_variable cond; + thread_.reset(new std::thread( + std::bind(&ServerThread::Start, this, server_host, &mu, &cond))); + cond.wait(lock); + gpr_log(GPR_INFO, "%s server startup complete", type_.c_str()); + } + + void Start(const grpc::string& server_host, std::mutex* mu, + std::condition_variable* cond) { + // We need to acquire the lock here in order to prevent the notify_one + // below from firing before its corresponding wait is executed. 
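// (The constructor still holds *mu while it calls cond.wait(), and the wait
// releases the mutex only once the constructor is actually waiting; so the
// lock_guard below cannot be acquired, and notify_one() cannot run, before
// there is a waiter. The wakeup therefore cannot be lost.)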
+ std::lock_guard lock(*mu); + std::ostringstream server_address; + server_address << server_host << ":" << port_; + ServerBuilder builder; + std::shared_ptr creds(new SecureServerCredentials( + grpc_fake_transport_security_server_credentials_create())); + builder.AddListeningPort(server_address.str(), creds); + builder.RegisterService(service_); + server_ = builder.BuildAndStart(); + cond->notify_one(); + } + + void Shutdown() { + gpr_log(GPR_INFO, "%s about to shutdown", type_.c_str()); + server_->Shutdown(grpc_timeout_milliseconds_to_deadline(0)); + thread_->join(); + gpr_log(GPR_INFO, "%s shutdown completed", type_.c_str()); + } + + int port_; + grpc::string type_; + std::unique_ptr server_; + T* service_; + std::unique_ptr thread_; + }; + + const grpc::string server_host_; + const size_t num_backends_; + const size_t num_balancers_; + const int client_load_reporting_interval_seconds_; + std::shared_ptr channel_; + std::unique_ptr stub_; + std::vector> backends_; + std::vector> balancers_; + std::vector> backend_servers_; + std::vector> balancer_servers_; + grpc_core::RefCountedPtr + response_generator_; + grpc_core::RefCountedPtr + lb_channel_response_generator_; + const grpc::string kRequestMessage_ = "Live long and prosper."; + const grpc::string kApplicationTargetName_ = "application_target_name"; + const grpc::string kDefaultServiceConfig_ = + "{\n" + " \"loadBalancingConfig\":[\n" + " { \"does_not_exist\":{} },\n" + " { \"xds_experimental\":{ \"balancerName\": \"fake:///lb\" } }\n" + " ]\n" + "}"; +}; + +class SingleBalancerTest : public XdsEnd2endTest { + public: + SingleBalancerTest() : XdsEnd2endTest(4, 1, 0) {} +}; + +TEST_F(SingleBalancerTest, Vanilla) { + SetNextResolution({}, kDefaultServiceConfig_.c_str()); + SetNextResolutionForLbChannelAllBalancers(); + const size_t kNumRpcsPerAddress = 100; + ScheduleResponseForBalancer( + 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}), + 0); + // Make sure that trying to connect works without a call. + channel_->GetState(true /* try_to_connect */); + // We need to wait for all backends to come online. + WaitForAllBackends(); + // Send kNumRpcsPerAddress RPCs per server. + CheckRpcSendOk(kNumRpcsPerAddress * num_backends_); + // Each backend should have gotten 100 requests. + for (size_t i = 0; i < backends_.size(); ++i) { + EXPECT_EQ(kNumRpcsPerAddress, + backend_servers_[i].service_->request_count()); + } + balancers_[0]->NotifyDoneWithServerlists(); + // The balancer got a single request. + EXPECT_EQ(1U, balancer_servers_[0].service_->request_count()); + // and sent a single response. + EXPECT_EQ(1U, balancer_servers_[0].service_->response_count()); + + // Check LB policy name for the channel. + EXPECT_EQ("xds_experimental", channel_->GetLoadBalancingPolicyName()); +} + +TEST_F(SingleBalancerTest, SameBackendListedMultipleTimes) { + SetNextResolution({}, kDefaultServiceConfig_.c_str()); + SetNextResolutionForLbChannelAllBalancers(); + // Same backend listed twice. + std::vector ports; + ports.push_back(backend_servers_[0].port_); + ports.push_back(backend_servers_[0].port_); + const size_t kNumRpcsPerAddress = 10; + ScheduleResponseForBalancer( + 0, BalancerServiceImpl::BuildResponseForBackends(ports, {}), 0); + // We need to wait for the backend to come online. + WaitForBackend(0); + // Send kNumRpcsPerAddress RPCs per server. + CheckRpcSendOk(kNumRpcsPerAddress * ports.size()); + // Backend should have gotten 20 requests. 
+ EXPECT_EQ(kNumRpcsPerAddress * 2, + backend_servers_[0].service_->request_count()); + // And they should have come from a single client port, because of + // subchannel sharing. + EXPECT_EQ(1UL, backends_[0]->clients().size()); + balancers_[0]->NotifyDoneWithServerlists(); +} + +TEST_F(SingleBalancerTest, SecureNaming) { + // TODO(juanlishen): Use separate fake creds for the balancer channel. + ResetStub(0, kApplicationTargetName_ + ";lb"); + SetNextResolution({}, kDefaultServiceConfig_.c_str()); + SetNextResolutionForLbChannel({balancer_servers_[0].port_}); + const size_t kNumRpcsPerAddress = 100; + ScheduleResponseForBalancer( + 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}), + 0); + // Make sure that trying to connect works without a call. + channel_->GetState(true /* try_to_connect */); + // We need to wait for all backends to come online. + WaitForAllBackends(); + // Send kNumRpcsPerAddress RPCs per server. + CheckRpcSendOk(kNumRpcsPerAddress * num_backends_); + + // Each backend should have gotten 100 requests. + for (size_t i = 0; i < backends_.size(); ++i) { + EXPECT_EQ(kNumRpcsPerAddress, + backend_servers_[i].service_->request_count()); + } + // The balancer got a single request. + EXPECT_EQ(1U, balancer_servers_[0].service_->request_count()); + // and sent a single response. + EXPECT_EQ(1U, balancer_servers_[0].service_->response_count()); +} + +TEST_F(SingleBalancerTest, SecureNamingDeathTest) { + ::testing::FLAGS_gtest_death_test_style = "threadsafe"; + // Make sure that we blow up (via abort() from the security connector) when + // the name from the balancer doesn't match expectations. + ASSERT_DEATH( + { + ResetStub(0, kApplicationTargetName_ + ";lb"); + SetNextResolution({}, + "{\n" + " \"loadBalancingConfig\":[\n" + " { \"does_not_exist\":{} },\n" + " { \"xds_experimental\":{ \"balancerName\": " + "\"fake:///wrong_lb\" } }\n" + " ]\n" + "}"); + SetNextResolutionForLbChannel({balancer_servers_[0].port_}); + channel_->WaitForConnected(grpc_timeout_seconds_to_deadline(1)); + }, + ""); +} + +TEST_F(SingleBalancerTest, InitiallyEmptyServerlist) { + SetNextResolution({}, kDefaultServiceConfig_.c_str()); + SetNextResolutionForLbChannelAllBalancers(); + const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor(); + const int kCallDeadlineMs = kServerlistDelayMs * 2; + // First response is an empty serverlist, sent right away. + ScheduleResponseForBalancer(0, LoadBalanceResponse(), 0); + // Send non-empty serverlist only after kServerlistDelayMs. + ScheduleResponseForBalancer( + 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}), + kServerlistDelayMs); + const auto t0 = system_clock::now(); + // Client will block: LB will initially send empty serverlist. + CheckRpcSendOk(1, kCallDeadlineMs, true /* wait_for_ready */); + const auto elapsed_ms = + std::chrono::duration_cast<std::chrono::milliseconds>( + system_clock::now() - t0); + // but eventually, the LB sends a serverlist update that allows the call to + // proceed. The elapsed time must be larger than the delay in sending the + // populated serverlist, yet still within the call's deadline (which is + // kCallDeadlineMs, set on the RPC above). + EXPECT_GT(elapsed_ms.count(), kServerlistDelayMs); + balancers_[0]->NotifyDoneWithServerlists(); + // The balancer got a single request. + EXPECT_EQ(1U, balancer_servers_[0].service_->request_count()); + // and sent two responses.
+ EXPECT_EQ(2U, balancer_servers_[0].service_->response_count()); +} + +TEST_F(SingleBalancerTest, AllServersUnreachableFailFast) { + SetNextResolution({}, kDefaultServiceConfig_.c_str()); + SetNextResolutionForLbChannelAllBalancers(); + const size_t kNumUnreachableServers = 5; + std::vector ports; + for (size_t i = 0; i < kNumUnreachableServers; ++i) { + ports.push_back(grpc_pick_unused_port_or_die()); + } + ScheduleResponseForBalancer( + 0, BalancerServiceImpl::BuildResponseForBackends(ports, {}), 0); + const Status status = SendRpc(); + // The error shouldn't be DEADLINE_EXCEEDED. + EXPECT_EQ(StatusCode::UNAVAILABLE, status.error_code()); + balancers_[0]->NotifyDoneWithServerlists(); + // The balancer got a single request. + EXPECT_EQ(1U, balancer_servers_[0].service_->request_count()); + // and sent a single response. + EXPECT_EQ(1U, balancer_servers_[0].service_->response_count()); +} + +// The fallback tests are deferred because the fallback mode hasn't been +// supported yet. + +// TODO(juanlishen): Add TEST_F(SingleBalancerTest, Fallback) + +// TODO(juanlishen): Add TEST_F(SingleBalancerTest, FallbackUpdate) + +TEST_F(SingleBalancerTest, BackendsRestart) { + SetNextResolution({}, kDefaultServiceConfig_.c_str()); + SetNextResolutionForLbChannelAllBalancers(); + const size_t kNumRpcsPerAddress = 100; + ScheduleResponseForBalancer( + 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}), + 0); + // Make sure that trying to connect works without a call. + channel_->GetState(true /* try_to_connect */); + // Send kNumRpcsPerAddress RPCs per server. + CheckRpcSendOk(kNumRpcsPerAddress * num_backends_); + balancers_[0]->NotifyDoneWithServerlists(); + // The balancer got a single request. + EXPECT_EQ(1U, balancer_servers_[0].service_->request_count()); + // and sent a single response. + EXPECT_EQ(1U, balancer_servers_[0].service_->response_count()); + for (size_t i = 0; i < backends_.size(); ++i) { + if (backends_[i]->Shutdown()) backend_servers_[i].Shutdown(); + } + CheckRpcSendFailure(); + for (size_t i = 0; i < num_backends_; ++i) { + backends_.emplace_back(new BackendServiceImpl()); + backend_servers_.emplace_back(ServerThread( + "backend", server_host_, backends_.back().get())); + } + // The following RPC will fail due to the backend ports having changed. It + // will nonetheless exercise the xds-roundrobin handling of the RR policy + // having gone into shutdown. + // TODO(dgq): implement the "backend restart" component as well. We need extra + // machinery to either update the LB responses "on the fly" or instruct + // backends which ports to restart on. + CheckRpcSendFailure(); +} + +class UpdatesTest : public XdsEnd2endTest { + public: + UpdatesTest() : XdsEnd2endTest(4, 3, 0) {} +}; + +TEST_F(UpdatesTest, UpdateBalancersButKeepUsingOriginalBalancer) { + SetNextResolution({}, kDefaultServiceConfig_.c_str()); + SetNextResolutionForLbChannelAllBalancers(); + const std::vector first_backend{GetBackendPorts()[0]}; + const std::vector second_backend{GetBackendPorts()[1]}; + ScheduleResponseForBalancer( + 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0); + ScheduleResponseForBalancer( + 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0); + + // Wait until the first backend is ready. + WaitForBackend(0); + + // Send 10 requests. 
+ gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH =========="); + CheckRpcSendOk(10); + gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH =========="); + + // All 10 requests should have gone to the first backend. + EXPECT_EQ(10U, backend_servers_[0].service_->request_count()); + + // Balancer 0 got a single request. + EXPECT_EQ(1U, balancer_servers_[0].service_->request_count()); + // and sent a single response. + EXPECT_EQ(1U, balancer_servers_[0].service_->response_count()); + EXPECT_EQ(0U, balancer_servers_[1].service_->request_count()); + EXPECT_EQ(0U, balancer_servers_[1].service_->response_count()); + EXPECT_EQ(0U, balancer_servers_[2].service_->request_count()); + EXPECT_EQ(0U, balancer_servers_[2].service_->response_count()); + + gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 =========="); + SetNextResolutionForLbChannel({balancer_servers_[1].port_}); + gpr_log(GPR_INFO, "========= UPDATE 1 DONE =========="); + + EXPECT_EQ(0U, backend_servers_[1].service_->request_count()); + gpr_timespec deadline = gpr_time_add( + gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(10000, GPR_TIMESPAN)); + // Send 10 seconds worth of RPCs + do { + CheckRpcSendOk(); + } while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0); + // The current LB call is still working, so xds continued using it to the + // first balancer, which doesn't assign the second backend. + EXPECT_EQ(0U, backend_servers_[1].service_->request_count()); + + EXPECT_EQ(1U, balancer_servers_[0].service_->request_count()); + EXPECT_EQ(1U, balancer_servers_[0].service_->response_count()); + EXPECT_EQ(0U, balancer_servers_[1].service_->request_count()); + EXPECT_EQ(0U, balancer_servers_[1].service_->response_count()); + EXPECT_EQ(0U, balancer_servers_[2].service_->request_count()); + EXPECT_EQ(0U, balancer_servers_[2].service_->response_count()); +} + +TEST_F(UpdatesTest, UpdateBalancerName) { + SetNextResolution({}, kDefaultServiceConfig_.c_str()); + SetNextResolutionForLbChannelAllBalancers(); + const std::vector first_backend{GetBackendPorts()[0]}; + const std::vector second_backend{GetBackendPorts()[1]}; + ScheduleResponseForBalancer( + 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0); + ScheduleResponseForBalancer( + 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0); + + // Wait until the first backend is ready. + WaitForBackend(0); + + // Send 10 requests. + gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH =========="); + CheckRpcSendOk(10); + gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH =========="); + + // All 10 requests should have gone to the first backend. + EXPECT_EQ(10U, backend_servers_[0].service_->request_count()); + + // Balancer 0 got a single request. + EXPECT_EQ(1U, balancer_servers_[0].service_->request_count()); + // and sent a single response. 
+ EXPECT_EQ(1U, balancer_servers_[0].service_->response_count()); + EXPECT_EQ(0U, balancer_servers_[1].service_->request_count()); + EXPECT_EQ(0U, balancer_servers_[1].service_->response_count()); + EXPECT_EQ(0U, balancer_servers_[2].service_->request_count()); + EXPECT_EQ(0U, balancer_servers_[2].service_->response_count()); + + std::vector ports; + ports.emplace_back(balancer_servers_[1].port_); + auto new_lb_channel_response_generator = + grpc_core::MakeRefCounted(); + SetNextResolutionForLbChannel(ports, nullptr, + new_lb_channel_response_generator.get()); + gpr_log(GPR_INFO, "========= ABOUT TO UPDATE BALANCER NAME =========="); + SetNextResolution({}, + "{\n" + " \"loadBalancingConfig\":[\n" + " { \"does_not_exist\":{} },\n" + " { \"xds_experimental\":{ \"balancerName\": " + "\"fake:///updated_lb\" } }\n" + " ]\n" + "}", + new_lb_channel_response_generator.get()); + gpr_log(GPR_INFO, "========= UPDATED BALANCER NAME =========="); + + // Wait until update has been processed, as signaled by the second backend + // receiving a request. + EXPECT_EQ(0U, backend_servers_[1].service_->request_count()); + WaitForBackend(1); + + backend_servers_[1].service_->ResetCounters(); + gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH =========="); + CheckRpcSendOk(10); + gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH =========="); + // All 10 requests should have gone to the second backend. + EXPECT_EQ(10U, backend_servers_[1].service_->request_count()); + + EXPECT_EQ(1U, balancer_servers_[0].service_->request_count()); + EXPECT_EQ(1U, balancer_servers_[0].service_->response_count()); + EXPECT_EQ(1U, balancer_servers_[1].service_->request_count()); + EXPECT_EQ(1U, balancer_servers_[1].service_->response_count()); + EXPECT_EQ(0U, balancer_servers_[2].service_->request_count()); + EXPECT_EQ(0U, balancer_servers_[2].service_->response_count()); +} + +// Send an update with the same set of LBs as the one in SetUp() in order to +// verify that the LB channel inside xds keeps the initial connection (which +// by definition is also present in the update). +TEST_F(UpdatesTest, UpdateBalancersRepeated) { + SetNextResolution({}, kDefaultServiceConfig_.c_str()); + SetNextResolutionForLbChannelAllBalancers(); + const std::vector first_backend{GetBackendPorts()[0]}; + const std::vector second_backend{GetBackendPorts()[0]}; + + ScheduleResponseForBalancer( + 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0); + ScheduleResponseForBalancer( + 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0); + + // Wait until the first backend is ready. + WaitForBackend(0); + + // Send 10 requests. + gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH =========="); + CheckRpcSendOk(10); + gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH =========="); + + // All 10 requests should have gone to the first backend. + EXPECT_EQ(10U, backend_servers_[0].service_->request_count()); + + // Balancer 0 got a single request. + EXPECT_EQ(1U, balancer_servers_[0].service_->request_count()); + // and sent a single response. 
+ EXPECT_EQ(1U, balancer_servers_[0].service_->response_count()); + EXPECT_EQ(0U, balancer_servers_[1].service_->request_count()); + EXPECT_EQ(0U, balancer_servers_[1].service_->response_count()); + EXPECT_EQ(0U, balancer_servers_[2].service_->request_count()); + EXPECT_EQ(0U, balancer_servers_[2].service_->response_count()); + + std::vector ports; + ports.emplace_back(balancer_servers_[0].port_); + ports.emplace_back(balancer_servers_[1].port_); + ports.emplace_back(balancer_servers_[2].port_); + gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 =========="); + SetNextResolutionForLbChannel(ports); + gpr_log(GPR_INFO, "========= UPDATE 1 DONE =========="); + + EXPECT_EQ(0U, backend_servers_[1].service_->request_count()); + gpr_timespec deadline = gpr_time_add( + gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(10000, GPR_TIMESPAN)); + // Send 10 seconds worth of RPCs + do { + CheckRpcSendOk(); + } while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0); + // xds continued using the original LB call to the first balancer, which + // doesn't assign the second backend. + EXPECT_EQ(0U, backend_servers_[1].service_->request_count()); + + ports.clear(); + ports.emplace_back(balancer_servers_[0].port_); + ports.emplace_back(balancer_servers_[1].port_); + gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 2 =========="); + SetNextResolutionForLbChannel(ports); + gpr_log(GPR_INFO, "========= UPDATE 2 DONE =========="); + + EXPECT_EQ(0U, backend_servers_[1].service_->request_count()); + deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_millis(10000, GPR_TIMESPAN)); + // Send 10 seconds worth of RPCs + do { + CheckRpcSendOk(); + } while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0); + // xds continued using the original LB call to the first balancer, which + // doesn't assign the second backend. + EXPECT_EQ(0U, backend_servers_[1].service_->request_count()); +} + +TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) { + SetNextResolution({}, kDefaultServiceConfig_.c_str()); + SetNextResolutionForLbChannel({balancer_servers_[0].port_}); + const std::vector first_backend{GetBackendPorts()[0]}; + const std::vector second_backend{GetBackendPorts()[1]}; + + ScheduleResponseForBalancer( + 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0); + ScheduleResponseForBalancer( + 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0); + + // Start servers and send 10 RPCs per server. + gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH =========="); + CheckRpcSendOk(10); + gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH =========="); + // All 10 requests should have gone to the first backend. + EXPECT_EQ(10U, backend_servers_[0].service_->request_count()); + + // Kill balancer 0 + gpr_log(GPR_INFO, "********** ABOUT TO KILL BALANCER 0 *************"); + if (balancers_[0]->Shutdown()) balancer_servers_[0].Shutdown(); + gpr_log(GPR_INFO, "********** KILLED BALANCER 0 *************"); + + // This is serviced by the existing child policy. + gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH =========="); + CheckRpcSendOk(10); + gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH =========="); + // All 10 requests should again have gone to the first backend. + EXPECT_EQ(20U, backend_servers_[0].service_->request_count()); + EXPECT_EQ(0U, backend_servers_[1].service_->request_count()); + + // Balancer 0 got a single request. + EXPECT_EQ(1U, balancer_servers_[0].service_->request_count()); + // and sent a single response. 
+ EXPECT_EQ(1U, balancer_servers_[0].service_->response_count()); + EXPECT_EQ(0U, balancer_servers_[1].service_->request_count()); + EXPECT_EQ(0U, balancer_servers_[1].service_->response_count()); + EXPECT_EQ(0U, balancer_servers_[2].service_->request_count()); + EXPECT_EQ(0U, balancer_servers_[2].service_->response_count()); + + gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 =========="); + SetNextResolutionForLbChannel({balancer_servers_[1].port_}); + gpr_log(GPR_INFO, "========= UPDATE 1 DONE =========="); + + // Wait until update has been processed, as signaled by the second backend + // receiving a request. In the meantime, the client continues to be serviced + // (by the first backend) without interruption. + EXPECT_EQ(0U, backend_servers_[1].service_->request_count()); + WaitForBackend(1); + + // This is serviced by the updated RR policy + backend_servers_[1].service_->ResetCounters(); + gpr_log(GPR_INFO, "========= BEFORE THIRD BATCH =========="); + CheckRpcSendOk(10); + gpr_log(GPR_INFO, "========= DONE WITH THIRD BATCH =========="); + // All 10 requests should have gone to the second backend. + EXPECT_EQ(10U, backend_servers_[1].service_->request_count()); + + EXPECT_EQ(1U, balancer_servers_[0].service_->request_count()); + EXPECT_EQ(1U, balancer_servers_[0].service_->response_count()); + // The second balancer, published as part of the first update, may end up + // getting two requests (that is, 1 <= #req <= 2) if the LB call retry timer + // firing races with the arrival of the update containing the second + // balancer. + EXPECT_GE(balancer_servers_[1].service_->request_count(), 1U); + EXPECT_GE(balancer_servers_[1].service_->response_count(), 1U); + EXPECT_LE(balancer_servers_[1].service_->request_count(), 2U); + EXPECT_LE(balancer_servers_[1].service_->response_count(), 2U); + EXPECT_EQ(0U, balancer_servers_[2].service_->request_count()); + EXPECT_EQ(0U, balancer_servers_[2].service_->response_count()); +} + +// The re-resolution tests are deferred because they rely on the fallback mode, +// which hasn't been supported. + +// TODO(juanlishen): Add TEST_F(UpdatesTest, ReresolveDeadBackend). + +// TODO(juanlishen): Add TEST_F(UpdatesWithClientLoadReportingTest, +// ReresolveDeadBalancer) + +// The drop tests are deferred because the drop handling hasn't been added yet. + +// TODO(roth): Add TEST_F(SingleBalancerTest, Drop) + +// TODO(roth): Add TEST_F(SingleBalancerTest, DropAllFirst) + +// TODO(roth): Add TEST_F(SingleBalancerTest, DropAll) + +class SingleBalancerWithClientLoadReportingTest : public XdsEnd2endTest { + public: + SingleBalancerWithClientLoadReportingTest() : XdsEnd2endTest(4, 1, 3) {} +}; + +// The client load reporting tests are deferred because the client load +// reporting hasn't been supported yet. 
+
+// TODO(vpowar): Add TEST_F(SingleBalancerWithClientLoadReportingTest, Vanilla)
+
+// TODO(roth): Add TEST_F(SingleBalancerWithClientLoadReportingTest, Drop)
+
+} // namespace
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ grpc_init();
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ const auto result = RUN_ALL_TESTS();
+ grpc_shutdown();
+ return result;
+}
diff --git a/test/cpp/microbenchmarks/bm_call_create.cc b/test/cpp/microbenchmarks/bm_call_create.cc
index e57650fe5b7..c1c8651ba43 100644
--- a/test/cpp/microbenchmarks/bm_call_create.cc
+++ b/test/cpp/microbenchmarks/bm_call_create.cc
@@ -318,30 +318,18 @@ static void FilterDestroy(void* arg, grpc_error* error) { gpr_free(arg); }
 static void DoNothing(void* arg, grpc_error* error) {}
-class FakeClientChannelFactory : public grpc_client_channel_factory {
+class FakeClientChannelFactory : public grpc_core::ClientChannelFactory {
 public:
- FakeClientChannelFactory() { vtable = &vtable_; }
-
- private:
- static void NoRef(grpc_client_channel_factory* factory) {}
- static void NoUnref(grpc_client_channel_factory* factory) {}
- static grpc_core::Subchannel* CreateSubchannel(
- grpc_client_channel_factory* factory, const grpc_channel_args* args) {
+ grpc_core::Subchannel* CreateSubchannel(
+ const grpc_channel_args* args) override {
 return nullptr;
 }
- static grpc_channel* CreateClientChannel(grpc_client_channel_factory* factory,
- const char* target,
- grpc_client_channel_type type,
- const grpc_channel_args* args) {
+ grpc_channel* CreateChannel(const char* target,
+ const grpc_channel_args* args) override {
 return nullptr;
 }
-
- static const grpc_client_channel_factory_vtable vtable_;
 };
-const grpc_client_channel_factory_vtable FakeClientChannelFactory::vtable_ = {
- NoRef, NoUnref, CreateSubchannel, CreateClientChannel};
-
 static grpc_arg StringArg(const char* key, const char* value) {
 grpc_arg a;
 a.type = GRPC_ARG_STRING;
@@ -506,13 +494,13 @@ static void BM_IsolatedFilter(benchmark::State& state) {
 TrackCounters track_counters;
 Fixture fixture;
 std::ostringstream label;
-
- std::vector<grpc_arg> args;
 FakeClientChannelFactory fake_client_channel_factory;
- args.push_back(grpc_client_channel_factory_create_channel_arg(
- &fake_client_channel_factory));
- args.push_back(StringArg(GRPC_ARG_SERVER_URI, "localhost"));
+ std::vector<grpc_arg> args = {
+ grpc_core::ClientChannelFactory::CreateChannelArg(
+ &fake_client_channel_factory),
+ StringArg(GRPC_ARG_SERVER_URI, "localhost"),
+ };
 grpc_channel_args channel_args = {args.size(), &args[0]};
 std::vector<const grpc_channel_filter*> filters;
diff --git a/test/cpp/naming/address_sorting_test.cc b/test/cpp/naming/address_sorting_test.cc
index db784a6476a..78ad49ec984 100644
--- a/test/cpp/naming/address_sorting_test.cc
+++ b/test/cpp/naming/address_sorting_test.cc
@@ -197,7 +197,7 @@ void VerifyLbAddrOutputs(const grpc_core::ServerAddressList addresses,
 class AddressSortingTest : public ::testing::Test {
 protected:
 void SetUp() override { grpc_init(); }
- void TearDown() override { grpc_shutdown(); }
+ void TearDown() override { grpc_shutdown_blocking(); }
 };
 /* Tests for rule 1 */
diff --git a/test/cpp/util/grpc_tool_test.cc b/test/cpp/util/grpc_tool_test.cc
index b96b00f2db2..57cdbeb7b76 100644
--- a/test/cpp/util/grpc_tool_test.cc
+++ b/test/cpp/util/grpc_tool_test.cc
@@ -258,14 +258,6 @@ class GrpcToolTest : public ::testing::Test {
 void ShutdownServer() { server_->Shutdown(); }
- void ExitWhenError(int argc, const char** argv, const CliCredentials& cred,
- GrpcToolOutputCallback callback) {
- int result = GrpcToolMainLib(argc, argv, cred, callback);
- if (result) {
- exit(result);
- }
- }
-
 std::unique_ptr<Server> server_;
 TestServiceImpl service_;
 reflection::ProtoServerReflectionPlugin plugin_;
@@ -418,11 +410,9 @@ TEST_F(GrpcToolTest, TypeNotFound) {
 const char* argv[] = {"grpc_cli", "type", server_address.c_str(),
 "grpc.testing.DummyRequest"};
- EXPECT_DEATH(ExitWhenError(ArraySize(argv), argv, TestCliCredentials(),
- std::bind(PrintStream, &output_stream,
- std::placeholders::_1)),
- ".*Type grpc.testing.DummyRequest not found.*");
-
+ EXPECT_TRUE(1 == GrpcToolMainLib(ArraySize(argv), argv, TestCliCredentials(),
+ std::bind(PrintStream, &output_stream,
+ std::placeholders::_1)));
 ShutdownServer();
 }
diff --git a/third_party/cares/cares.BUILD b/third_party/cares/cares.BUILD
index 54b8c57b1d6..ffa03aeb12c 100644
--- a/third_party/cares/cares.BUILD
+++ b/third_party/cares/cares.BUILD
@@ -170,4 +170,5 @@ cc_library(
 visibility = [
 "//visibility:public",
 ],
+ alwayslink = 1,
 )
diff --git a/tools/internal_ci/README.md b/tools/internal_ci/README.md
index af582c471e4..fdf70774327 100644
--- a/tools/internal_ci/README.md
+++ b/tools/internal_ci/README.md
@@ -1,7 +1,7 @@
 # Kokoro CI job configurations and testing scripts
-gRPC uses a continous integration tool called "Kokoro" (a.k.a "internal CI")
+gRPC uses a continuous integration tool called "Kokoro" (a.k.a "internal CI")
 for running majority of its open source tests.
 This directory contains the external part of kokoro test job configurations
 (the actual job definitions live in an internal repository) and the shell
-scripts that act as entry points to exectute the actual tests.
+scripts that act as entry points to execute the actual tests.
diff --git a/tools/internal_ci/linux/grpc_flaky_network_in_docker.sh b/tools/internal_ci/linux/grpc_flaky_network_in_docker.sh
index eb6216c62c3..7fc8f146727 100755
--- a/tools/internal_ci/linux/grpc_flaky_network_in_docker.sh
+++ b/tools/internal_ci/linux/grpc_flaky_network_in_docker.sh
@@ -28,4 +28,4 @@ cd /var/local/git/grpc/test/cpp/end2end
 # iptables is used to drop traffic between client and server
 apt-get install -y iptables
-bazel test --spawn_strategy=standalone --genrule_strategy=standalone --test_output=all :flaky_network_test --test_env=GRPC_VERBOSITY=debug --test_env=GRPC_TRACE=channel,client_channel,call_error,connectivity_state
+bazel test --spawn_strategy=standalone --genrule_strategy=standalone --test_output=all :flaky_network_test --test_env=GRPC_VERBOSITY=debug --test_env=GRPC_TRACE=channel,client_channel,call_error,connectivity_state,tcp
diff --git a/tools/interop_matrix/README.md b/tools/interop_matrix/README.md
index ecd71be7f87..9d5c777cdee 100644
--- a/tools/interop_matrix/README.md
+++ b/tools/interop_matrix/README.md
@@ -3,7 +3,7 @@
 This directory contains scripts that facilitate building and running gRPC
 interoperability tests for combinations of language/runtimes (known as matrix).
 The setup builds gRPC docker images for each language/runtime and upload it to
 Google Container Registry (GCR). These images, encapsulating gRPC stack
-from specific releases/tag, are used to test version compatiblity between gRPC release versions.
+from specific releases/tag, are used to test version compatibility between gRPC release versions.
 ## Step-by-step instructions for adding a GCR image for a new release for compatibility test
 We have continuous nightly test setup to test gRPC backward compatibility between old clients and latest server.
When a gRPC developer creates a new gRPC release, s/he is also responsible to add the just-released gRPC client to the nightly test. The steps are: diff --git a/tools/interop_matrix/client_matrix.py b/tools/interop_matrix/client_matrix.py index dba10c7e7fd..654dfd6faef 100644 --- a/tools/interop_matrix/client_matrix.py +++ b/tools/interop_matrix/client_matrix.py @@ -142,6 +142,7 @@ LANG_RELEASE_MATRIX = { ('v1.16.1', ReleaseInfo()), ('v1.17.1', ReleaseInfo()), ('v1.18.0', ReleaseInfo()), + ('v1.19.0', ReleaseInfo()), ]), 'python': OrderedDict([ diff --git a/tools/interop_matrix/run_interop_matrix_tests.py b/tools/interop_matrix/run_interop_matrix_tests.py index de054e5d878..3f92c8e6641 100755 --- a/tools/interop_matrix/run_interop_matrix_tests.py +++ b/tools/interop_matrix/run_interop_matrix_tests.py @@ -224,7 +224,8 @@ def _pull_images_for_lang(lang, images): cmdline=cmdline, shortname='pull_image_%s' % (image), timeout_seconds=_PULL_IMAGE_TIMEOUT_SECONDS, - shell=True) + shell=True, + flake_retries=2) download_specs.append(spec) # too many image downloads at once tend to get stuck max_pull_jobs = min(args.jobs, _MAX_PARALLEL_DOWNLOADS) diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json index 95b6ae65008..5a1eafda6c2 100644 --- a/tools/run_tests/generated/sources_and_headers.json +++ b/tools/run_tests/generated/sources_and_headers.json @@ -5002,6 +5002,28 @@ "third_party": false, "type": "target" }, + { + "deps": [ + "gpr", + "grpc", + "grpc++", + "grpc++_test_util", + "grpc_test_util" + ], + "headers": [ + "src/proto/grpc/lb/v1/load_balancer.grpc.pb.h", + "src/proto/grpc/lb/v1/load_balancer.pb.h", + "src/proto/grpc/lb/v1/load_balancer_mock.grpc.pb.h" + ], + "is_filegroup": false, + "language": "c++", + "name": "xds_end2end_test", + "src": [ + "test/cpp/end2end/xds_end2end_test.cc" + ], + "third_party": false, + "type": "target" + }, { "deps": [ "gpr", @@ -8869,6 +8891,7 @@ "test/core/end2end/tests/empty_batch.cc", "test/core/end2end/tests/filter_call_init_fails.cc", "test/core/end2end/tests/filter_causes_close.cc", + "test/core/end2end/tests/filter_context.cc", "test/core/end2end/tests/filter_latency.cc", "test/core/end2end/tests/filter_status_code.cc", "test/core/end2end/tests/graceful_server_shutdown.cc", @@ -8967,6 +8990,7 @@ "test/core/end2end/tests/empty_batch.cc", "test/core/end2end/tests/filter_call_init_fails.cc", "test/core/end2end/tests/filter_causes_close.cc", + "test/core/end2end/tests/filter_context.cc", "test/core/end2end/tests/filter_latency.cc", "test/core/end2end/tests/filter_status_code.cc", "test/core/end2end/tests/graceful_server_shutdown.cc", diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json index 9a202ecf167..9df57b5e151 100644 --- a/tools/run_tests/generated/tests.json +++ b/tools/run_tests/generated/tests.json @@ -5686,6 +5686,30 @@ ], "uses_polling": true }, + { + "args": [], + "benchmark": false, + "ci_platforms": [ + "linux", + "mac", + "posix", + "windows" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "gtest": true, + "language": "c++", + "name": "xds_end2end_test", + "platforms": [ + "linux", + "mac", + "posix", + "windows" + ], + "uses_polling": true + }, { "args": [], "benchmark": false, @@ -7951,6 +7975,29 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": 
false, + "language": "c", + "name": "h2_census_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -9703,6 +9750,29 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_compress_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -11411,6 +11481,28 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_fakesec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -12991,6 +13083,29 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_fd_test", + "platforms": [ + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -14304,6 +14419,29 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_full_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -15970,6 +16108,25 @@ "linux" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "linux" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_full+pipe_test", + "platforms": [ + "linux" + ] + }, { "args": [ "filter_latency" @@ -17500,6 +17657,29 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_full+trace_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -19206,6 +19386,29 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_full+workarounds_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -20979,14 +21182,14 @@ }, { "args": [ - "filter_latency" + "filter_context" ], "ci_platforms": [ "windows", "linux", "posix" ], - "cpu_cost": 0.1, + "cpu_cost": 1.0, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -21003,7 +21206,7 @@ }, { "args": [ - "filter_status_code" + "filter_latency" ], "ci_platforms": [ "windows", @@ -21027,7 +21230,7 @@ }, { "args": [ - "graceful_server_shutdown" + "filter_status_code" ], "ci_platforms": [ "windows", @@ -21051,7 +21254,7 @@ }, { "args": [ - "high_initial_seqno" + "graceful_server_shutdown" ], "ci_platforms": [ "windows", @@ -21075,7 +21278,7 @@ }, { "args": [ - "hpack_size" + "high_initial_seqno" ], "ci_platforms": [ "windows", @@ -21099,14 +21302,14 @@ }, { "args": [ - "idempotent_request" + "hpack_size" ], "ci_platforms": [ "windows", "linux", "posix" ], - "cpu_cost": 
1.0, + "cpu_cost": 0.1, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -21123,7 +21326,7 @@ }, { "args": [ - "invoke_large_request" + "idempotent_request" ], "ci_platforms": [ "windows", @@ -21147,31 +21350,7 @@ }, { "args": [ - "keepalive_timeout" - ], - "ci_platforms": [ - "windows", - "linux", - "posix" - ], - "cpu_cost": 0.1, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_http_proxy_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, - { - "args": [ - "large_metadata" + "invoke_large_request" ], "ci_platforms": [ "windows", @@ -21195,7 +21374,7 @@ }, { "args": [ - "max_concurrent_streams" + "keepalive_timeout" ], "ci_platforms": [ "windows", @@ -21219,14 +21398,14 @@ }, { "args": [ - "max_connection_age" + "large_metadata" ], "ci_platforms": [ "windows", "linux", "posix" ], - "cpu_cost": 0.1, + "cpu_cost": 1.0, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -21243,7 +21422,7 @@ }, { "args": [ - "max_connection_idle" + "max_concurrent_streams" ], "ci_platforms": [ "windows", @@ -21267,7 +21446,7 @@ }, { "args": [ - "max_message_length" + "max_connection_age" ], "ci_platforms": [ "windows", @@ -21291,14 +21470,14 @@ }, { "args": [ - "negative_deadline" + "max_connection_idle" ], "ci_platforms": [ "windows", "linux", "posix" ], - "cpu_cost": 1.0, + "cpu_cost": 0.1, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -21315,14 +21494,14 @@ }, { "args": [ - "no_error_on_hotpath" + "max_message_length" ], "ci_platforms": [ "windows", "linux", "posix" ], - "cpu_cost": 1.0, + "cpu_cost": 0.1, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -21339,7 +21518,55 @@ }, { "args": [ - "no_logging" + "negative_deadline" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_http_proxy_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, + { + "args": [ + "no_error_on_hotpath" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_http_proxy_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, + { + "args": [ + "no_logging" ], "ci_platforms": [ "windows", @@ -22758,6 +22985,29 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_local_ipv4_test", + "platforms": [ + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -24460,6 +24710,29 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_local_ipv6_test", + "platforms": [ + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -26162,6 +26435,29 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_local_uds_test", + "platforms": [ + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -27933,14 +28229,14 @@ }, { "args": [ - "filter_latency" + 
"filter_context" ], "ci_platforms": [ "windows", "linux", "posix" ], - "cpu_cost": 0.1, + "cpu_cost": 1.0, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -27957,7 +28253,7 @@ }, { "args": [ - "filter_status_code" + "filter_latency" ], "ci_platforms": [ "windows", @@ -27981,7 +28277,7 @@ }, { "args": [ - "graceful_server_shutdown" + "filter_status_code" ], "ci_platforms": [ "windows", @@ -28005,7 +28301,7 @@ }, { "args": [ - "high_initial_seqno" + "graceful_server_shutdown" ], "ci_platforms": [ "windows", @@ -28029,7 +28325,7 @@ }, { "args": [ - "hpack_size" + "high_initial_seqno" ], "ci_platforms": [ "windows", @@ -28053,55 +28349,7 @@ }, { "args": [ - "idempotent_request" - ], - "ci_platforms": [ - "windows", - "linux", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_oauth2_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, - { - "args": [ - "invoke_large_request" - ], - "ci_platforms": [ - "windows", - "linux", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_oauth2_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, - { - "args": [ - "keepalive_timeout" + "hpack_size" ], "ci_platforms": [ "windows", @@ -28125,7 +28373,7 @@ }, { "args": [ - "large_metadata" + "idempotent_request" ], "ci_platforms": [ "windows", @@ -28149,62 +28397,14 @@ }, { "args": [ - "max_concurrent_streams" - ], - "ci_platforms": [ - "windows", - "linux", - "posix" - ], - "cpu_cost": 0.1, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_oauth2_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, - { - "args": [ - "max_connection_age" - ], - "ci_platforms": [ - "windows", - "linux", - "posix" - ], - "cpu_cost": 0.1, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_oauth2_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, - { - "args": [ - "max_connection_idle" + "invoke_large_request" ], "ci_platforms": [ "windows", "linux", "posix" ], - "cpu_cost": 0.1, + "cpu_cost": 1.0, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -28221,7 +28421,7 @@ }, { "args": [ - "max_message_length" + "keepalive_timeout" ], "ci_platforms": [ "windows", @@ -28245,31 +28445,151 @@ }, { "args": [ - "negative_deadline" - ], - "ci_platforms": [ - "windows", - "linux", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_oauth2_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, - { - "args": [ - "no_error_on_hotpath" + "large_metadata" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_oauth2_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, + { + "args": [ + "max_concurrent_streams" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 0.1, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_oauth2_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, + { + "args": [ + "max_connection_age" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + 
"cpu_cost": 0.1, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_oauth2_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, + { + "args": [ + "max_connection_idle" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 0.1, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_oauth2_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, + { + "args": [ + "max_message_length" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 0.1, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_oauth2_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, + { + "args": [ + "negative_deadline" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_oauth2_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, + { + "args": [ + "no_error_on_hotpath" ], "ci_platforms": [ "windows", @@ -29683,6 +30003,30 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_proxy_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -30787,6 +31131,30 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_sockpair_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -32037,14 +32405,14 @@ }, { "args": [ - "filter_latency" + "filter_context" ], "ci_platforms": [ "windows", "linux", "posix" ], - "cpu_cost": 0.1, + "cpu_cost": 1.0, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -32061,7 +32429,7 @@ }, { "args": [ - "filter_status_code" + "filter_latency" ], "ci_platforms": [ "windows", @@ -32085,7 +32453,7 @@ }, { "args": [ - "graceful_server_shutdown" + "filter_status_code" ], "ci_platforms": [ "windows", @@ -32109,7 +32477,7 @@ }, { "args": [ - "high_initial_seqno" + "graceful_server_shutdown" ], "ci_platforms": [ "windows", @@ -32133,14 +32501,14 @@ }, { "args": [ - "idempotent_request" + "high_initial_seqno" ], "ci_platforms": [ "windows", "linux", "posix" ], - "cpu_cost": 1.0, + "cpu_cost": 0.1, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -32157,7 +32525,7 @@ }, { "args": [ - "invoke_large_request" + "idempotent_request" ], "ci_platforms": [ "windows", @@ -32181,31 +32549,7 @@ }, { "args": [ - "keepalive_timeout" - ], - "ci_platforms": [ - "windows", - "linux", - "posix" - ], - "cpu_cost": 0.1, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_sockpair+trace_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, - { - "args": [ - "large_metadata" + "invoke_large_request" ], "ci_platforms": [ "windows", @@ -32229,7 +32573,7 @@ }, { "args": [ - "max_concurrent_streams" + "keepalive_timeout" ], "ci_platforms": [ "windows", @@ -32253,14 +32597,14 @@ }, { "args": [ - "max_connection_age" + "large_metadata" ], 
"ci_platforms": [ "windows", "linux", "posix" ], - "cpu_cost": 0.1, + "cpu_cost": 1.0, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -32277,7 +32621,7 @@ }, { "args": [ - "max_message_length" + "max_concurrent_streams" ], "ci_platforms": [ "windows", @@ -32301,14 +32645,14 @@ }, { "args": [ - "negative_deadline" + "max_connection_age" ], "ci_platforms": [ "windows", "linux", "posix" ], - "cpu_cost": 1.0, + "cpu_cost": 0.1, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -32325,14 +32669,14 @@ }, { "args": [ - "no_error_on_hotpath" + "max_message_length" ], "ci_platforms": [ "windows", "linux", "posix" ], - "cpu_cost": 1.0, + "cpu_cost": 0.1, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -32349,7 +32693,7 @@ }, { "args": [ - "no_op" + "negative_deadline" ], "ci_platforms": [ "windows", @@ -32373,7 +32717,7 @@ }, { "args": [ - "payload" + "no_error_on_hotpath" ], "ci_platforms": [ "windows", @@ -32397,14 +32741,14 @@ }, { "args": [ - "ping_pong_streaming" + "no_op" ], "ci_platforms": [ "windows", "linux", "posix" ], - "cpu_cost": 0.1, + "cpu_cost": 1.0, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -32421,7 +32765,55 @@ }, { "args": [ - "registered_call" + "payload" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_sockpair+trace_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, + { + "args": [ + "ping_pong_streaming" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 0.1, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_sockpair+trace_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, + { + "args": [ + "registered_call" ], "ci_platforms": [ "windows", @@ -33243,6 +33635,32 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [ + "msan" + ], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_sockpair_1byte_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -34638,6 +35056,29 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_ssl_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -36337,6 +36778,30 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_ssl_proxy_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -37494,6 +37959,29 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_uds_test", + "platforms": [ + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -39081,6 +39569,29 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + 
"exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "inproc_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -40164,6 +40675,29 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_census_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -41893,6 +42427,29 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_compress_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -43482,6 +44039,29 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_fd_nosec_test", + "platforms": [ + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -44772,6 +45352,29 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_full_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -46419,6 +47022,25 @@ "linux" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "linux" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_full+pipe_nosec_test", + "platforms": [ + "linux" + ] + }, { "args": [ "filter_latency" @@ -47926,6 +48548,29 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_full+trace_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -49609,6 +50254,29 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_full+workarounds_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -51356,6 +52024,30 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_http_proxy_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -53108,6 +53800,30 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_proxy_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + 
"posix" + ] + }, { "args": [ "filter_latency" @@ -54188,6 +54904,30 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_sockpair_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -55412,6 +56152,30 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_sockpair+trace_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -56594,6 +57358,32 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [ + "msan" + ], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_sockpair_1byte_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -57918,6 +58708,29 @@ "posix" ] }, + { + "args": [ + "filter_context" + ], + "ci_platforms": [ + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_uds_nosec_test", + "platforms": [ + "linux", + "mac", + "posix" + ] + }, { "args": [ "filter_latency" @@ -136665,6 +137478,29 @@ ], "uses_polling": false }, + { + "args": [ + "test/core/end2end/fuzzers/client_fuzzer_corpus/clusterfuzz-testcase-minimized-grpc_client_fuzzer-5765697914404864" + ], + "ci_platforms": [ + "linux" + ], + "cpu_cost": 0.1, + "exclude_configs": [ + "tsan" + ], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "client_fuzzer_one_entry", + "platforms": [ + "mac", + "linux" + ], + "uses_polling": false + }, { "args": [ "test/core/end2end/fuzzers/client_fuzzer_corpus/crash-12b69708d452b3cefe2da4a708a1030a661d37fc" diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py index 986e7cd58c2..ed1c41e3256 100755 --- a/tools/run_tests/run_tests.py +++ b/tools/run_tests/run_tests.py @@ -106,6 +106,7 @@ def platform_string(): _DEFAULT_TIMEOUT_SECONDS = 5 * 60 +_PRE_BUILD_STEP_TIMEOUT_SECONDS = 10 * 60 def run_shell_command(cmd, env=None, cwd=None): @@ -1634,7 +1635,10 @@ def build_step_environ(cfg): build_steps = list( set( jobset.JobSpec( - cmdline, environ=build_step_environ(build_config), flake_retries=2) + cmdline, + environ=build_step_environ(build_config), + timeout_seconds=_PRE_BUILD_STEP_TIMEOUT_SECONDS, + flake_retries=2) for l in languages for cmdline in l.pre_build_steps())) if make_targets: