diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 9011a669d3d..f02cf704d1b 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -2,7 +2,7 @@ name: Report a bug about: Create a report to help us improve labels: kind/bug, priority/P2 -assignees: markdroth +assignees: nicolasnoble --- diff --git a/.github/ISSUE_TEMPLATE/cleanup_request.md b/.github/ISSUE_TEMPLATE/cleanup_request.md index 9cf36e6fa0b..11e8be8ef3e 100644 --- a/.github/ISSUE_TEMPLATE/cleanup_request.md +++ b/.github/ISSUE_TEMPLATE/cleanup_request.md @@ -2,7 +2,7 @@ name: Request a cleanup about: Suggest a cleanup in our repository labels: kind/internal cleanup, priority/P2 -assignees: markdroth +assignees: nicolasnoble --- diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 0d9906a9eb0..fb3dae79227 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -2,7 +2,7 @@ name: Request a feature about: Suggest an idea for this project labels: kind/enhancement, priority/P2 -assignees: markdroth +assignees: nicolasnoble --- diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index a85cfad9c7a..57af6c21597 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -8,4 +8,4 @@ If you know who should review your pull request, please remove the mentioning be --> -@markdroth +@nicolasnoble diff --git a/BUILD b/BUILD index a62ca7e8ec2..81de8203a03 100644 --- a/BUILD +++ b/BUILD @@ -319,8 +319,9 @@ grpc_cc_library( deps = [ "grpc_common", "grpc_lb_policy_cds", + "grpc_lb_policy_eds", "grpc_lb_policy_grpclb", - "grpc_lb_policy_xds", + "grpc_lb_policy_lrs", "grpc_resolver_xds", ], ) @@ -337,8 +338,9 @@ grpc_cc_library( deps = [ "grpc_common", "grpc_lb_policy_cds_secure", + "grpc_lb_policy_eds_secure", "grpc_lb_policy_grpclb_secure", - "grpc_lb_policy_xds_secure", + "grpc_lb_policy_lrs_secure", "grpc_resolver_xds_secure", "grpc_secure", "grpc_transport_chttp2_client_secure", @@ -1023,7 +1025,9 @@ grpc_cc_library( "grpc_deadline_filter", "grpc_client_authority_filter", "grpc_lb_policy_pick_first", + "grpc_lb_policy_priority", "grpc_lb_policy_round_robin", + "grpc_lb_policy_weighted_target", "grpc_client_idle_filter", "grpc_max_age_filter", "grpc_message_size_filter", @@ -1235,6 +1239,21 @@ grpc_cc_library( ], ) +grpc_cc_library( + name = "grpc_grpclb_balancer_addresses", + srcs = [ + "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc", + ], + hdrs = [ + "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h", + ], + language = "c++", + deps = [ + "grpc_base", + "grpc_client_channel", + ], +) + grpc_cc_library( name = "grpc_lb_policy_grpclb", srcs = [ @@ -1255,6 +1274,7 @@ grpc_cc_library( deps = [ "grpc_base", "grpc_client_channel", + "grpc_grpclb_balancer_addresses", "grpc_lb_upb", "grpc_resolver_fake", "grpc_transport_chttp2_client_insecure", @@ -1281,6 +1301,7 @@ grpc_cc_library( deps = [ "grpc_base", "grpc_client_channel", + "grpc_grpclb_balancer_addresses", "grpc_lb_upb", "grpc_resolver_fake", "grpc_secure", @@ -1340,41 +1361,75 @@ grpc_cc_library( ) grpc_cc_library( - name = "grpc_lb_policy_xds", + name = "grpc_lb_policy_cds", srcs = [ - "src/core/ext/filters/client_channel/lb_policy/xds/xds.cc", + "src/core/ext/filters/client_channel/lb_policy/xds/cds.cc", + ], + language = "c++", + deps = [ + "grpc_base", + "grpc_client_channel", + 
"grpc_xds_client", + ], +) + +grpc_cc_library( + name = "grpc_lb_policy_cds_secure", + srcs = [ + "src/core/ext/filters/client_channel/lb_policy/xds/cds.cc", + ], + language = "c++", + deps = [ + "grpc_base", + "grpc_client_channel", + "grpc_xds_client_secure", + ], +) + +grpc_cc_library( + name = "grpc_lb_policy_eds", + srcs = [ + "src/core/ext/filters/client_channel/lb_policy/xds/eds.cc", ], hdrs = [ "src/core/ext/filters/client_channel/lb_policy/xds/xds.h", ], + external_deps = [ + "absl/strings", + ], language = "c++", deps = [ "grpc_base", "grpc_client_channel", + "grpc_lb_address_filtering", "grpc_xds_client", ], ) grpc_cc_library( - name = "grpc_lb_policy_xds_secure", + name = "grpc_lb_policy_eds_secure", srcs = [ - "src/core/ext/filters/client_channel/lb_policy/xds/xds.cc", + "src/core/ext/filters/client_channel/lb_policy/xds/eds.cc", ], hdrs = [ "src/core/ext/filters/client_channel/lb_policy/xds/xds.h", ], + external_deps = [ + "absl/strings", + ], language = "c++", deps = [ "grpc_base", "grpc_client_channel", + "grpc_lb_address_filtering", "grpc_xds_client_secure", ], ) grpc_cc_library( - name = "grpc_lb_policy_cds", + name = "grpc_lb_policy_lrs", srcs = [ - "src/core/ext/filters/client_channel/lb_policy/xds/cds.cc", + "src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc", ], language = "c++", deps = [ @@ -1385,9 +1440,9 @@ grpc_cc_library( ) grpc_cc_library( - name = "grpc_lb_policy_cds_secure", + name = "grpc_lb_policy_lrs_secure", srcs = [ - "src/core/ext/filters/client_channel/lb_policy/xds/cds.cc", + "src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc", ], language = "c++", deps = [ @@ -1397,6 +1452,24 @@ grpc_cc_library( ], ) +grpc_cc_library( + name = "grpc_lb_address_filtering", + srcs = [ + "src/core/ext/filters/client_channel/lb_policy/address_filtering.cc", + ], + hdrs = [ + "src/core/ext/filters/client_channel/lb_policy/address_filtering.h", + ], + external_deps = [ + "absl/strings", + ], + language = "c++", + deps = [ + "grpc_base", + "grpc_client_channel", + ], +) + grpc_cc_library( name = "grpc_lb_subchannel_list", hdrs = [ @@ -1435,6 +1508,35 @@ grpc_cc_library( ], ) +grpc_cc_library( + name = "grpc_lb_policy_priority", + srcs = [ + "src/core/ext/filters/client_channel/lb_policy/priority/priority.cc", + ], + external_deps = [ + "absl/strings", + ], + language = "c++", + deps = [ + "grpc_base", + "grpc_client_channel", + "grpc_lb_address_filtering", + ], +) + +grpc_cc_library( + name = "grpc_lb_policy_weighted_target", + srcs = [ + "src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc", + ], + language = "c++", + deps = [ + "grpc_base", + "grpc_client_channel", + "grpc_lb_address_filtering", + ], +) + grpc_cc_library( name = "lb_server_load_reporting_filter", srcs = [ @@ -1606,6 +1708,7 @@ grpc_cc_library( deps = [ "grpc_base", "grpc_client_channel", + "grpc_grpclb_balancer_addresses", "grpc_resolver_dns_selection", ], ) diff --git a/BUILD.gn b/BUILD.gn index ee40a4db057..129ad2b2639 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -223,12 +223,16 @@ config("grpc_config") { "src/core/ext/filters/client_channel/http_proxy.h", "src/core/ext/filters/client_channel/lb_policy.cc", "src/core/ext/filters/client_channel/lb_policy.h", + "src/core/ext/filters/client_channel/lb_policy/address_filtering.cc", + "src/core/ext/filters/client_channel/lb_policy/address_filtering.h", "src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc", "src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h", 
"src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc", "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h", "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc", "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h", + "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc", + "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h", "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h", "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc", "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc", @@ -236,10 +240,13 @@ config("grpc_config") { "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc", "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h", "src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc", + "src/core/ext/filters/client_channel/lb_policy/priority/priority.cc", "src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc", "src/core/ext/filters/client_channel/lb_policy/subchannel_list.h", + "src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc", "src/core/ext/filters/client_channel/lb_policy/xds/cds.cc", - "src/core/ext/filters/client_channel/lb_policy/xds/xds.cc", + "src/core/ext/filters/client_channel/lb_policy/xds/eds.cc", + "src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc", "src/core/ext/filters/client_channel/lb_policy/xds/xds.h", "src/core/ext/filters/client_channel/lb_policy_factory.h", "src/core/ext/filters/client_channel/lb_policy_registry.cc", @@ -960,6 +967,7 @@ config("grpc_config") { ":address_sorting", ":upb", ":absl/types:optional", + ":absl/strings:strings", ":absl/container:inlined_vector", "//third_party/cares", ":address_sorting", diff --git a/CMakeLists.txt b/CMakeLists.txt index c0b5279d257..27bf340b65a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -457,7 +457,6 @@ if(gRPC_BUILD_TESTS) add_dependencies(buildtests_c compression_test) add_dependencies(buildtests_c concurrent_connectivity_test) add_dependencies(buildtests_c connection_refused_test) - add_dependencies(buildtests_c control_plane_credentials_test) add_dependencies(buildtests_c cpu_test) add_dependencies(buildtests_c dns_resolver_connectivity_using_ares_resolver_test) add_dependencies(buildtests_c dns_resolver_connectivity_using_native_resolver_test) @@ -1316,16 +1315,21 @@ add_library(grpc src/core/ext/filters/client_channel/http_connect_handshaker.cc src/core/ext/filters/client_channel/http_proxy.cc src/core/ext/filters/client_channel/lb_policy.cc + src/core/ext/filters/client_channel/lb_policy/address_filtering.cc src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc + src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc + src/core/ext/filters/client_channel/lb_policy/priority/priority.cc src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc + 
src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc src/core/ext/filters/client_channel/lb_policy/xds/cds.cc - src/core/ext/filters/client_channel/lb_policy/xds/xds.cc + src/core/ext/filters/client_channel/lb_policy/xds/eds.cc + src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc src/core/ext/filters/client_channel/lb_policy_registry.cc src/core/ext/filters/client_channel/local_subchannel_pool.cc src/core/ext/filters/client_channel/parse_address.cc @@ -1743,6 +1747,7 @@ target_link_libraries(grpc address_sorting upb absl::optional + absl::strings absl::inlined_vector ) if(_gRPC_PLATFORM_IOS OR _gRPC_PLATFORM_MAC) @@ -1969,16 +1974,21 @@ add_library(grpc_unsecure src/core/ext/filters/client_channel/http_connect_handshaker.cc src/core/ext/filters/client_channel/http_proxy.cc src/core/ext/filters/client_channel/lb_policy.cc + src/core/ext/filters/client_channel/lb_policy/address_filtering.cc src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc + src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc + src/core/ext/filters/client_channel/lb_policy/priority/priority.cc src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc + src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc src/core/ext/filters/client_channel/lb_policy/xds/cds.cc - src/core/ext/filters/client_channel/lb_policy/xds/xds.cc + src/core/ext/filters/client_channel/lb_policy/xds/eds.cc + src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc src/core/ext/filters/client_channel/lb_policy_registry.cc src/core/ext/filters/client_channel/local_subchannel_pool.cc src/core/ext/filters/client_channel/parse_address.cc @@ -2320,6 +2330,7 @@ target_link_libraries(grpc_unsecure address_sorting upb absl::optional + absl::strings absl::inlined_vector ) if(_gRPC_PLATFORM_IOS OR _gRPC_PLATFORM_MAC) @@ -4823,40 +4834,6 @@ target_link_libraries(connection_refused_test ) -endif() -if(gRPC_BUILD_TESTS) - -add_executable(control_plane_credentials_test - test/core/end2end/cq_verifier.cc - test/core/end2end/data/client_certs.cc - test/core/end2end/data/server1_cert.cc - test/core/end2end/data/server1_key.cc - test/core/end2end/data/test_root_cert.cc - test/core/security/control_plane_credentials_test.cc -) - -target_include_directories(control_plane_credentials_test - PRIVATE - ${CMAKE_CURRENT_SOURCE_DIR} - ${CMAKE_CURRENT_SOURCE_DIR}/include - ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} - ${_gRPC_SSL_INCLUDE_DIR} - ${_gRPC_UPB_GENERATED_DIR} - ${_gRPC_UPB_GRPC_GENERATED_DIR} - ${_gRPC_UPB_INCLUDE_DIR} - ${_gRPC_ZLIB_INCLUDE_DIR} -) - -target_link_libraries(control_plane_credentials_test - ${_gRPC_ALLTARGETS_LIBRARIES} - grpc_test_util - grpc - gpr - address_sorting - upb -) - - endif() if(gRPC_BUILD_TESTS) diff --git a/Makefile b/Makefile index c4c6332736b..d6c8cd19efa 100644 --- a/Makefile +++ b/Makefile @@ -1047,7 +1047,6 @@ completion_queue_threading_test: $(BINDIR)/$(CONFIG)/completion_queue_threading_ compression_test: $(BINDIR)/$(CONFIG)/compression_test concurrent_connectivity_test: 
$(BINDIR)/$(CONFIG)/concurrent_connectivity_test connection_refused_test: $(BINDIR)/$(CONFIG)/connection_refused_test -control_plane_credentials_test: $(BINDIR)/$(CONFIG)/control_plane_credentials_test cpu_test: $(BINDIR)/$(CONFIG)/cpu_test dns_resolver_connectivity_using_ares_resolver_test: $(BINDIR)/$(CONFIG)/dns_resolver_connectivity_using_ares_resolver_test dns_resolver_connectivity_using_native_resolver_test: $(BINDIR)/$(CONFIG)/dns_resolver_connectivity_using_native_resolver_test @@ -1424,7 +1423,6 @@ buildtests_c: privatelibs_c \ $(BINDIR)/$(CONFIG)/compression_test \ $(BINDIR)/$(CONFIG)/concurrent_connectivity_test \ $(BINDIR)/$(CONFIG)/connection_refused_test \ - $(BINDIR)/$(CONFIG)/control_plane_credentials_test \ $(BINDIR)/$(CONFIG)/cpu_test \ $(BINDIR)/$(CONFIG)/dns_resolver_connectivity_using_ares_resolver_test \ $(BINDIR)/$(CONFIG)/dns_resolver_connectivity_using_native_resolver_test \ @@ -1916,16 +1914,12 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/cmdline_test || ( echo test cmdline_test failed ; exit 1 ) $(E) "[RUN] Testing combiner_test" $(Q) $(BINDIR)/$(CONFIG)/combiner_test || ( echo test combiner_test failed ; exit 1 ) - $(E) "[RUN] Testing completion_queue_threading_test" - $(Q) $(BINDIR)/$(CONFIG)/completion_queue_threading_test || ( echo test completion_queue_threading_test failed ; exit 1 ) $(E) "[RUN] Testing compression_test" $(Q) $(BINDIR)/$(CONFIG)/compression_test || ( echo test compression_test failed ; exit 1 ) $(E) "[RUN] Testing concurrent_connectivity_test" $(Q) $(BINDIR)/$(CONFIG)/concurrent_connectivity_test || ( echo test concurrent_connectivity_test failed ; exit 1 ) $(E) "[RUN] Testing connection_refused_test" $(Q) $(BINDIR)/$(CONFIG)/connection_refused_test || ( echo test connection_refused_test failed ; exit 1 ) - $(E) "[RUN] Testing control_plane_credentials_test" - $(Q) $(BINDIR)/$(CONFIG)/control_plane_credentials_test || ( echo test control_plane_credentials_test failed ; exit 1 ) $(E) "[RUN] Testing cpu_test" $(Q) $(BINDIR)/$(CONFIG)/cpu_test || ( echo test cpu_test failed ; exit 1 ) $(E) "[RUN] Testing dns_resolver_connectivity_using_ares_resolver_test" @@ -2182,6 +2176,8 @@ test_cxx: buildtests_cxx $(Q) $(BINDIR)/$(CONFIG)/bm_error || ( echo test bm_error failed ; exit 1 ) $(E) "[RUN] Testing bm_fullstack_streaming_ping_pong" $(Q) $(BINDIR)/$(CONFIG)/bm_fullstack_streaming_ping_pong || ( echo test bm_fullstack_streaming_ping_pong failed ; exit 1 ) + $(E) "[RUN] Testing bm_fullstack_streaming_pump" + $(Q) $(BINDIR)/$(CONFIG)/bm_fullstack_streaming_pump || ( echo test bm_fullstack_streaming_pump failed ; exit 1 ) $(E) "[RUN] Testing bm_fullstack_unary_ping_pong" $(Q) $(BINDIR)/$(CONFIG)/bm_fullstack_unary_ping_pong || ( echo test bm_fullstack_unary_ping_pong failed ; exit 1 ) $(E) "[RUN] Testing bm_metadata" @@ -2210,8 +2206,6 @@ test_cxx: buildtests_cxx $(Q) $(BINDIR)/$(CONFIG)/cli_call_test || ( echo test cli_call_test failed ; exit 1 ) $(E) "[RUN] Testing client_callback_end2end_test" $(Q) $(BINDIR)/$(CONFIG)/client_callback_end2end_test || ( echo test client_callback_end2end_test failed ; exit 1 ) - $(E) "[RUN] Testing client_channel_stress_test" - $(Q) $(BINDIR)/$(CONFIG)/client_channel_stress_test || ( echo test client_channel_stress_test failed ; exit 1 ) $(E) "[RUN] Testing client_interceptors_end2end_test" $(Q) $(BINDIR)/$(CONFIG)/client_interceptors_end2end_test || ( echo test client_interceptors_end2end_test failed ; exit 1 ) $(E) "[RUN] Testing codegen_test_full" @@ -3646,16 +3640,21 @@ LIBGRPC_SRC = \ 
src/core/ext/filters/client_channel/http_connect_handshaker.cc \ src/core/ext/filters/client_channel/http_proxy.cc \ src/core/ext/filters/client_channel/lb_policy.cc \ + src/core/ext/filters/client_channel/lb_policy/address_filtering.cc \ src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc \ src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc \ src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc \ + src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc \ src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc \ src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc \ src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc \ src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \ + src/core/ext/filters/client_channel/lb_policy/priority/priority.cc \ src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \ + src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc \ src/core/ext/filters/client_channel/lb_policy/xds/cds.cc \ - src/core/ext/filters/client_channel/lb_policy/xds/xds.cc \ + src/core/ext/filters/client_channel/lb_policy/xds/eds.cc \ + src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc \ src/core/ext/filters/client_channel/lb_policy_registry.cc \ src/core/ext/filters/client_channel/local_subchannel_pool.cc \ src/core/ext/filters/client_channel/parse_address.cc \ @@ -4274,16 +4273,21 @@ LIBGRPC_UNSECURE_SRC = \ src/core/ext/filters/client_channel/http_connect_handshaker.cc \ src/core/ext/filters/client_channel/http_proxy.cc \ src/core/ext/filters/client_channel/lb_policy.cc \ + src/core/ext/filters/client_channel/lb_policy/address_filtering.cc \ src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc \ src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc \ src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc \ + src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc \ src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc \ src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc \ src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc \ src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \ + src/core/ext/filters/client_channel/lb_policy/priority/priority.cc \ src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \ + src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc \ src/core/ext/filters/client_channel/lb_policy/xds/cds.cc \ - src/core/ext/filters/client_channel/lb_policy/xds/xds.cc \ + src/core/ext/filters/client_channel/lb_policy/xds/eds.cc \ + src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc \ src/core/ext/filters/client_channel/lb_policy_registry.cc \ src/core/ext/filters/client_channel/local_subchannel_pool.cc \ src/core/ext/filters/client_channel/parse_address.cc \ @@ -7840,53 +7844,6 @@ endif endif -CONTROL_PLANE_CREDENTIALS_TEST_SRC = \ - test/core/end2end/cq_verifier.cc \ - test/core/end2end/data/client_certs.cc \ - test/core/end2end/data/server1_cert.cc \ - test/core/end2end/data/server1_key.cc \ - test/core/end2end/data/test_root_cert.cc \ - test/core/security/control_plane_credentials_test.cc \ - -CONTROL_PLANE_CREDENTIALS_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(CONTROL_PLANE_CREDENTIALS_TEST_SRC)))) -ifeq 
($(NO_SECURE),true) - -# You can't build secure targets if you don't have OpenSSL. - -$(BINDIR)/$(CONFIG)/control_plane_credentials_test: openssl_dep_error - -else - - - -$(BINDIR)/$(CONFIG)/control_plane_credentials_test: $(CONTROL_PLANE_CREDENTIALS_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a - $(E) "[LD] Linking $@" - $(Q) mkdir -p `dirname $@` - $(Q) $(LDXX) $(LDFLAGS) $(CONTROL_PLANE_CREDENTIALS_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/control_plane_credentials_test - -endif - -$(OBJDIR)/$(CONFIG)/test/core/end2end/cq_verifier.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a - -$(OBJDIR)/$(CONFIG)/test/core/end2end/data/client_certs.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a - -$(OBJDIR)/$(CONFIG)/test/core/end2end/data/server1_cert.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a - -$(OBJDIR)/$(CONFIG)/test/core/end2end/data/server1_key.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a - -$(OBJDIR)/$(CONFIG)/test/core/end2end/data/test_root_cert.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a - -$(OBJDIR)/$(CONFIG)/test/core/security/control_plane_credentials_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a - -deps_control_plane_credentials_test: $(CONTROL_PLANE_CREDENTIALS_TEST_OBJS:.o=.dep) - -ifneq ($(NO_SECURE),true) -ifneq ($(NO_DEPS),true) --include $(CONTROL_PLANE_CREDENTIALS_TEST_OBJS:.o=.dep) -endif -endif - - CPU_TEST_SRC = \ test/core/gpr/cpu_test.cc \ diff --git a/build_autogenerated.yaml b/build_autogenerated.yaml index 56b33c42228..30dba593d4f 100644 --- a/build_autogenerated.yaml +++ b/build_autogenerated.yaml @@ -382,9 +382,11 @@ libs: - src/core/ext/filters/client_channel/http_connect_handshaker.h - src/core/ext/filters/client_channel/http_proxy.h - src/core/ext/filters/client_channel/lb_policy.h + - src/core/ext/filters/client_channel/lb_policy/address_filtering.h - src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h - src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h - src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h + - src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h - src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h - src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h - src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h @@ -739,16 +741,21 @@ libs: - src/core/ext/filters/client_channel/http_connect_handshaker.cc - 
src/core/ext/filters/client_channel/http_proxy.cc - src/core/ext/filters/client_channel/lb_policy.cc + - src/core/ext/filters/client_channel/lb_policy/address_filtering.cc - src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc - src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc - src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc + - src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc - src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc - src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc - src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc - src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc + - src/core/ext/filters/client_channel/lb_policy/priority/priority.cc - src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc + - src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc - src/core/ext/filters/client_channel/lb_policy/xds/cds.cc - - src/core/ext/filters/client_channel/lb_policy/xds/xds.cc + - src/core/ext/filters/client_channel/lb_policy/xds/eds.cc + - src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc - src/core/ext/filters/client_channel/lb_policy_registry.cc - src/core/ext/filters/client_channel/local_subchannel_pool.cc - src/core/ext/filters/client_channel/parse_address.cc @@ -1130,6 +1137,7 @@ libs: - address_sorting - upb - absl/types:optional + - absl/strings:strings - absl/container:inlined_vector baselib: true dll: true @@ -1276,9 +1284,11 @@ libs: - src/core/ext/filters/client_channel/http_connect_handshaker.h - src/core/ext/filters/client_channel/http_proxy.h - src/core/ext/filters/client_channel/lb_policy.h + - src/core/ext/filters/client_channel/lb_policy/address_filtering.h - src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h - src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h - src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h + - src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h - src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h - src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h - src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h @@ -1568,16 +1578,21 @@ libs: - src/core/ext/filters/client_channel/http_connect_handshaker.cc - src/core/ext/filters/client_channel/http_proxy.cc - src/core/ext/filters/client_channel/lb_policy.cc + - src/core/ext/filters/client_channel/lb_policy/address_filtering.cc - src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc - src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc - src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc + - src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc - src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc - src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc - src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc - src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc + - src/core/ext/filters/client_channel/lb_policy/priority/priority.cc - src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc + - src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc - 
src/core/ext/filters/client_channel/lb_policy/xds/cds.cc - - src/core/ext/filters/client_channel/lb_policy/xds/xds.cc + - src/core/ext/filters/client_channel/lb_policy/xds/eds.cc + - src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc - src/core/ext/filters/client_channel/lb_policy_registry.cc - src/core/ext/filters/client_channel/local_subchannel_pool.cc - src/core/ext/filters/client_channel/parse_address.cc @@ -1884,6 +1899,7 @@ libs: - address_sorting - upb - absl/types:optional + - absl/strings:strings - absl/container:inlined_vector baselib: true dll: true @@ -3090,6 +3106,7 @@ targets: - mac - name: completion_queue_threading_test build: test + run: false language: c headers: [] src: @@ -3139,25 +3156,6 @@ targets: - gpr - address_sorting - upb -- name: control_plane_credentials_test - build: test - language: c - headers: - - test/core/end2end/cq_verifier.h - - test/core/end2end/data/ssl_test_data.h - src: - - test/core/end2end/cq_verifier.cc - - test/core/end2end/data/client_certs.cc - - test/core/end2end/data/server1_cert.cc - - test/core/end2end/data/server1_key.cc - - test/core/end2end/data/test_root_cert.cc - - test/core/security/control_plane_credentials_test.cc - deps: - - grpc_test_util - - grpc - - gpr - - address_sorting - - upb - name: cpu_test build: test language: c @@ -5129,7 +5127,6 @@ targets: - posix - name: bm_fullstack_streaming_pump build: test - run: false language: c++ headers: - test/cpp/microbenchmarks/fullstack_streaming_pump.h @@ -5513,6 +5510,7 @@ targets: - name: client_channel_stress_test gtest: true build: test + run: false language: c++ headers: - test/cpp/end2end/test_service_impl.h diff --git a/config.m4 b/config.m4 index 7a38e36cd71..6b450315f4f 100644 --- a/config.m4 +++ b/config.m4 @@ -50,16 +50,21 @@ if test "$PHP_GRPC" != "no"; then src/core/ext/filters/client_channel/http_connect_handshaker.cc \ src/core/ext/filters/client_channel/http_proxy.cc \ src/core/ext/filters/client_channel/lb_policy.cc \ + src/core/ext/filters/client_channel/lb_policy/address_filtering.cc \ src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc \ src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc \ src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc \ + src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc \ src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc \ src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc \ src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc \ src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \ + src/core/ext/filters/client_channel/lb_policy/priority/priority.cc \ src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \ + src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc \ src/core/ext/filters/client_channel/lb_policy/xds/cds.cc \ - src/core/ext/filters/client_channel/lb_policy/xds/xds.cc \ + src/core/ext/filters/client_channel/lb_policy/xds/eds.cc \ + src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc \ src/core/ext/filters/client_channel/lb_policy_registry.cc \ src/core/ext/filters/client_channel/local_subchannel_pool.cc \ src/core/ext/filters/client_channel/parse_address.cc \ @@ -820,7 +825,9 @@ if test "$PHP_GRPC" != "no"; then PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy) PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/grpclb) 
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/pick_first) + PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/priority) PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/round_robin) + PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/weighted_target) PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/lb_policy/xds) PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/resolver/dns) PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/resolver/dns/c_ares) diff --git a/config.w32 b/config.w32 index 089504a1841..59cdbdb1ce4 100644 --- a/config.w32 +++ b/config.w32 @@ -19,16 +19,21 @@ if (PHP_GRPC != "no") { "src\\core\\ext\\filters\\client_channel\\http_connect_handshaker.cc " + "src\\core\\ext\\filters\\client_channel\\http_proxy.cc " + "src\\core\\ext\\filters\\client_channel\\lb_policy.cc " + + "src\\core\\ext\\filters\\client_channel\\lb_policy\\address_filtering.cc " + "src\\core\\ext\\filters\\client_channel\\lb_policy\\child_policy_handler.cc " + "src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb\\client_load_reporting_filter.cc " + "src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb\\grpclb.cc " + + "src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb\\grpclb_balancer_addresses.cc " + "src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb\\grpclb_channel_secure.cc " + "src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb\\grpclb_client_stats.cc " + "src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb\\load_balancer_api.cc " + "src\\core\\ext\\filters\\client_channel\\lb_policy\\pick_first\\pick_first.cc " + + "src\\core\\ext\\filters\\client_channel\\lb_policy\\priority\\priority.cc " + "src\\core\\ext\\filters\\client_channel\\lb_policy\\round_robin\\round_robin.cc " + + "src\\core\\ext\\filters\\client_channel\\lb_policy\\weighted_target\\weighted_target.cc " + "src\\core\\ext\\filters\\client_channel\\lb_policy\\xds\\cds.cc " + - "src\\core\\ext\\filters\\client_channel\\lb_policy\\xds\\xds.cc " + + "src\\core\\ext\\filters\\client_channel\\lb_policy\\xds\\eds.cc " + + "src\\core\\ext\\filters\\client_channel\\lb_policy\\xds\\lrs.cc " + "src\\core\\ext\\filters\\client_channel\\lb_policy_registry.cc " + "src\\core\\ext\\filters\\client_channel\\local_subchannel_pool.cc " + "src\\core\\ext\\filters\\client_channel\\parse_address.cc " + @@ -820,7 +825,9 @@ if (PHP_GRPC != "no") { FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\lb_policy"); FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb"); FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\lb_policy\\pick_first"); + FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\lb_policy\\priority"); FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\lb_policy\\round_robin"); + FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\lb_policy\\weighted_target"); FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\lb_policy\\xds"); FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\resolver"); FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\resolver\\dns"); diff --git a/doc/environment_variables.md b/doc/environment_variables.md index e79c390c9a3..ec1b4f86717 
100644 --- a/doc/environment_variables.md +++ b/doc/environment_variables.md @@ -57,6 +57,7 @@ some configuration as environment variables that can be set. - compression - traces compression operations - connectivity_state - traces connectivity state changes to channels - cronet - traces state in the cronet transport engine + - eds_lb - traces eds LB policy - executor - traces grpc's internal thread pool ('the executor') - glb - traces the grpclb load balancer - handshaker - traces handshaking state @@ -65,13 +66,16 @@ some configuration as environment variables that can be set. - http2_stream_state - traces all http2 stream state mutations. - http1 - traces HTTP/1.x operations performed by gRPC - inproc - traces the in-process transport + - http_keepalive - traces gRPC keepalive pings - flowctl - traces http2 flow control + - lrs_lb - traces lrs LB policy - op_failure - traces error information when failure is pushed onto a completion queue - pick_first - traces the pick first load balancing policy - plugin_credentials - traces plugin credentials - pollable_refcount - traces reference counting of 'pollable' objects (only in DEBUG) + - priority_lb - traces priority LB policy - resource_quota - trace resource quota objects internals - round_robin - traces the round_robin load balancing policy - queue_pluck @@ -84,8 +88,8 @@ some configuration as environment variables that can be set. - transport_security - traces metadata about secure channel establishment - tcp - traces bytes in and out of a channel - tsi - traces tsi transport security + - weighted_target_lb - traces weighted_target LB policy - xds_client - traces xds client - - xds_lb - traces xds LB policy - xds_resolver - traces xds resolver The following tracers will only run in binaries built in DEBUG mode. This is diff --git a/examples/cpp/compression/greeter_client.cc b/examples/cpp/compression/greeter_client.cc index a8428174644..99e2a5973fc 100644 --- a/examples/cpp/compression/greeter_client.cc +++ b/examples/cpp/compression/greeter_client.cc @@ -85,7 +85,7 @@ int main(int argc, char** argv) { args.SetCompressionAlgorithm(GRPC_COMPRESS_GZIP); GreeterClient greeter(grpc::CreateCustomChannel( "localhost:50051", grpc::InsecureChannelCredentials(), args)); - std::string user("world"); + std::string user("world world world world"); std::string reply = greeter.SayHello(user); std::cout << "Greeter received: " << reply << std::endl; diff --git a/examples/python/multiprocessing/BUILD b/examples/python/multiprocessing/BUILD index ea9b6a3ec6f..1d831e729b4 100644 --- a/examples/python/multiprocessing/BUILD +++ b/examples/python/multiprocessing/BUILD @@ -37,6 +37,7 @@ py_binary( name = "client", testonly = 1, srcs = ["client.py"], + imports = ["."], python_version = "PY3", srcs_version = "PY3", deps = [ @@ -50,6 +51,7 @@ py_binary( name = "server", testonly = 1, srcs = ["server.py"], + imports = ["."], python_version = "PY3", srcs_version = "PY3", deps = [ diff --git a/examples/python/multiprocessing/README.md b/examples/python/multiprocessing/README.md index 709a815aca5..5dce50ad3bd 100644 --- a/examples/python/multiprocessing/README.md +++ b/examples/python/multiprocessing/README.md @@ -1,28 +1,27 @@ ## Multiprocessing with gRPC Python Multiprocessing allows application developers to sidestep the Python global -interpreter lock and achieve true concurrency on multicore systems. +interpreter lock and achieve true parallelism on multicore systems. 
Unfortunately, using multiprocessing and gRPC Python is not yet as simple as instantiating your server with a `futures.ProcessPoolExecutor`. The library is implemented as a C extension, maintaining much of the state that drives the system in native code. As such, upon calling -[`fork`](http://man7.org/linux/man-pages/man2/fork.2.html), much of the -state copied into the child process is invalid, leading to hangs and crashes. - -However, calling `fork` without `exec` in your python process is supported -*before* any gRPC servers have been instantiated. Application developers can +[`fork`](http://man7.org/linux/man-pages/man2/fork.2.html), any threads in a +critical section may leave the state of the gRPC library invalid in the child +process. See this [excellent research +paper](https://www.microsoft.com/en-us/research/uploads/prod/2019/04/fork-hotos19.pdf) +for a thorough discussion of the topic. + +Calling `fork` without `exec` in your process *is* supported +before any gRPC servers have been instantiated. Application developers can take advantage of this to parallelize their CPU-intensive operations. ## Calculating Prime Numbers with Multiple Processes This example calculates the first 10,000 prime numbers as an RPC. We instantiate one server per subprocess, balancing requests between the servers using the -[`SO_REUSEPORT`](https://lwn.net/Articles/542629/) socket option. Note that this -option is not available in `manylinux1` distributions, which are, as of the time -of writing, the only gRPC Python wheels available on PyPI. To take advantage of this -feature, you'll need to build from source, either using bazel (as we do for -these examples) or via pip, using `pip install grpcio --no-binary grpcio`. +[`SO_REUSEPORT`](https://lwn.net/Articles/542629/) socket option. ```python _PROCESS_COUNT = multiprocessing.cpu_count() @@ -65,3 +64,11 @@ For example, ``` bazel run //examples/python/multiprocessing:client -- [::]:33915 ``` + +Alternatively, generate code using the following and then run the client and server +directly: + +```bash +cd examples/python/multiprocessing +python -m grpc_tools.protoc -I . prime.proto --python_out=. --grpc_python_out=. +``` diff --git a/examples/python/multiprocessing/client.py index b9acc65fdc5..7676bd4ec88 100644 --- a/examples/python/multiprocessing/client.py +++ b/examples/python/multiprocessing/client.py @@ -26,8 +26,8 @@ import sys import grpc -from examples.python.multiprocessing import prime_pb2 -from examples.python.multiprocessing import prime_pb2_grpc +import prime_pb2 +import prime_pb2_grpc _PROCESS_COUNT = 8 _MAXIMUM_CANDIDATE = 10000 diff --git a/examples/python/multiprocessing/server.py index ad788b8eb51..a5ee00755e6 100644 --- a/examples/python/multiprocessing/server.py +++ b/examples/python/multiprocessing/server.py @@ -29,8 +29,8 @@ import sys import grpc -from examples.python.multiprocessing import prime_pb2 -from examples.python.multiprocessing import prime_pb2_grpc +import prime_pb2 +import prime_pb2_grpc _LOGGER = logging.getLogger(__name__) @@ -67,12 +67,6 @@ def _run_server(bind_address): _LOGGER.info('Starting new server.') options = (('grpc.so_reuseport', 1),) - # WARNING: This example takes advantage of SO_REUSEPORT. Due to the - # limitations of manylinux1, none of our precompiled Linux wheels currently - # support this option. (https://github.com/grpc/grpc/issues/18210).
To take - # advantage of this feature, install from source with - # `pip install grpcio --no-binary grpcio`. - server = grpc.server(futures.ThreadPoolExecutor( max_workers=_THREAD_CONCURRENCY,), options=options) diff --git a/gRPC-C++.podspec b/gRPC-C++.podspec index 8315e96f6ad..77980999b59 100644 --- a/gRPC-C++.podspec +++ b/gRPC-C++.podspec @@ -233,9 +233,11 @@ Pod::Spec.new do |s| 'src/core/ext/filters/client_channel/http_connect_handshaker.h', 'src/core/ext/filters/client_channel/http_proxy.h', 'src/core/ext/filters/client_channel/lb_policy.h', + 'src/core/ext/filters/client_channel/lb_policy/address_filtering.h', 'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h', 'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h', 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h', + 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h', 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h', 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h', 'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h', @@ -682,9 +684,11 @@ Pod::Spec.new do |s| 'src/core/ext/filters/client_channel/http_connect_handshaker.h', 'src/core/ext/filters/client_channel/http_proxy.h', 'src/core/ext/filters/client_channel/lb_policy.h', + 'src/core/ext/filters/client_channel/lb_policy/address_filtering.h', 'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h', 'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h', 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h', + 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h', 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h', 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h', 'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h', diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec index f87d5f4603a..749e77b3fa7 100644 --- a/gRPC-Core.podspec +++ b/gRPC-Core.podspec @@ -206,12 +206,16 @@ Pod::Spec.new do |s| 'src/core/ext/filters/client_channel/http_proxy.h', 'src/core/ext/filters/client_channel/lb_policy.cc', 'src/core/ext/filters/client_channel/lb_policy.h', + 'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc', + 'src/core/ext/filters/client_channel/lb_policy/address_filtering.h', 'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc', 'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h', 'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc', 'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h', 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc', 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h', + 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc', + 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h', 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h', 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc', 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc', @@ -219,10 +223,13 @@ Pod::Spec.new do |s| 'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc', 'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h', 
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc', + 'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc', 'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc', 'src/core/ext/filters/client_channel/lb_policy/subchannel_list.h', + 'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc', 'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc', - 'src/core/ext/filters/client_channel/lb_policy/xds/xds.cc', + 'src/core/ext/filters/client_channel/lb_policy/xds/eds.cc', + 'src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc', 'src/core/ext/filters/client_channel/lb_policy/xds/xds.h', 'src/core/ext/filters/client_channel/lb_policy_factory.h', 'src/core/ext/filters/client_channel/lb_policy_registry.cc', @@ -1030,9 +1037,11 @@ Pod::Spec.new do |s| 'src/core/ext/filters/client_channel/http_connect_handshaker.h', 'src/core/ext/filters/client_channel/http_proxy.h', 'src/core/ext/filters/client_channel/lb_policy.h', + 'src/core/ext/filters/client_channel/lb_policy/address_filtering.h', 'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h', 'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h', 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h', + 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h', 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h', 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h', 'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h', diff --git a/grpc.gemspec b/grpc.gemspec index cf0b389f50a..29e5d3d436b 100644 --- a/grpc.gemspec +++ b/grpc.gemspec @@ -128,12 +128,16 @@ Gem::Specification.new do |s| s.files += %w( src/core/ext/filters/client_channel/http_proxy.h ) s.files += %w( src/core/ext/filters/client_channel/lb_policy.cc ) s.files += %w( src/core/ext/filters/client_channel/lb_policy.h ) + s.files += %w( src/core/ext/filters/client_channel/lb_policy/address_filtering.cc ) + s.files += %w( src/core/ext/filters/client_channel/lb_policy/address_filtering.h ) s.files += %w( src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc ) s.files += %w( src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h ) s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc ) s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h ) s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc ) s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h ) + s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc ) + s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h ) s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h ) s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc ) s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc ) @@ -141,10 +145,13 @@ Gem::Specification.new do |s| s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc ) s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h ) s.files += %w( src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc ) + s.files += %w( 
src/core/ext/filters/client_channel/lb_policy/priority/priority.cc ) s.files += %w( src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc ) s.files += %w( src/core/ext/filters/client_channel/lb_policy/subchannel_list.h ) + s.files += %w( src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc ) s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/cds.cc ) - s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/xds.cc ) + s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/eds.cc ) + s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc ) s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/xds.h ) s.files += %w( src/core/ext/filters/client_channel/lb_policy_factory.h ) s.files += %w( src/core/ext/filters/client_channel/lb_policy_registry.cc ) diff --git a/grpc.gyp b/grpc.gyp index c32c0dca814..a62c034d65a 100644 --- a/grpc.gyp +++ b/grpc.gyp @@ -426,6 +426,7 @@ 'address_sorting', 'upb', 'absl/types:optional', + 'absl/strings:strings', 'absl/container:inlined_vector', ], 'sources': [ @@ -442,16 +443,21 @@ 'src/core/ext/filters/client_channel/http_connect_handshaker.cc', 'src/core/ext/filters/client_channel/http_proxy.cc', 'src/core/ext/filters/client_channel/lb_policy.cc', + 'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc', 'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc', 'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc', 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc', + 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc', 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc', 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc', 'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc', 'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc', + 'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc', 'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc', + 'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc', 'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc', - 'src/core/ext/filters/client_channel/lb_policy/xds/xds.cc', + 'src/core/ext/filters/client_channel/lb_policy/xds/eds.cc', + 'src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc', 'src/core/ext/filters/client_channel/lb_policy_registry.cc', 'src/core/ext/filters/client_channel/local_subchannel_pool.cc', 'src/core/ext/filters/client_channel/parse_address.cc', @@ -915,6 +921,7 @@ 'address_sorting', 'upb', 'absl/types:optional', + 'absl/strings:strings', 'absl/container:inlined_vector', ], 'sources': [ @@ -931,16 +938,21 @@ 'src/core/ext/filters/client_channel/http_connect_handshaker.cc', 'src/core/ext/filters/client_channel/http_proxy.cc', 'src/core/ext/filters/client_channel/lb_policy.cc', + 'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc', 'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc', 'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc', 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc', + 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc', 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc', 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc', 
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc', 'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc', + 'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc', 'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc', + 'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc', 'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc', - 'src/core/ext/filters/client_channel/lb_policy/xds/xds.cc', + 'src/core/ext/filters/client_channel/lb_policy/xds/eds.cc', + 'src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc', 'src/core/ext/filters/client_channel/lb_policy_registry.cc', 'src/core/ext/filters/client_channel/local_subchannel_pool.cc', 'src/core/ext/filters/client_channel/parse_address.cc', diff --git a/include/grpc/impl/codegen/grpc_types.h b/include/grpc/impl/codegen/grpc_types.h index ab4c39f9310..ff45450f3a3 100644 --- a/include/grpc/impl/codegen/grpc_types.h +++ b/include/grpc/impl/codegen/grpc_types.h @@ -348,18 +348,11 @@ typedef struct { balancer before using fallback backend addresses from the resolver. If 0, enter fallback mode immediately. Default value is 10000. */ #define GRPC_ARG_XDS_FALLBACK_TIMEOUT_MS "grpc.xds_fallback_timeout_ms" -/* Time in milliseconds to wait before a locality is deleted after it's removed - from the received EDS update. If 0, delete the locality immediately. Default - value is 15 minutes. */ -#define GRPC_ARG_LOCALITY_RETENTION_INTERVAL_MS \ - "grpc.xds_locality_retention_interval_ms" -/* Timeout in milliseconds to wait for the localities of a specific priority to - complete their initial connection attempt before xDS fails over to the next - priority. Specifically, the connection attempt of a priority is considered - completed when any locality of that priority is ready or all the localities - of that priority fail to connect. If 0, failover happens immediately. Default - value is 10 seconds. */ -#define GRPC_ARG_XDS_FAILOVER_TIMEOUT_MS "grpc.xds_failover_timeout_ms" +/* Timeout in milliseconds to wait for the child of a specific priority to + complete its initial connection attempt before the priority LB policy fails + over to the next priority. Default value is 10 seconds. */ +#define GRPC_ARG_PRIORITY_FAILOVER_TIMEOUT_MS \ + "grpc.priority_failover_timeout_ms" /* Timeout in milliseconds to wait for a resource to be returned from * the xds server before assuming that it does not exist. * The default is 15 seconds. */ diff --git a/include/grpcpp/impl/codegen/client_callback_impl.h b/include/grpcpp/impl/codegen/client_callback_impl.h index 83b183e4a5b..8e683743e06 100644 --- a/include/grpcpp/impl/codegen/client_callback_impl.h +++ b/include/grpcpp/impl/codegen/client_callback_impl.h @@ -267,8 +267,12 @@ class ClientBidiReactor { /// StartWritesDone that indicates that there will be no more write ops. /// The number of RemoveHold calls must match the total number of AddHold /// calls plus the number of holds added by AddMultipleHolds. + /// The argument to AddMultipleHolds must be positive. 
void AddHold() { AddMultipleHolds(1); } - void AddMultipleHolds(int holds) { stream_->AddHold(holds); } + void AddMultipleHolds(int holds) { + GPR_CODEGEN_DEBUG_ASSERT(holds > 0); + stream_->AddHold(holds); + } void RemoveHold() { stream_->RemoveHold(); } /// Notifies the application that all operations associated with this RPC @@ -331,7 +335,10 @@ class ClientReadReactor { void StartRead(Response* resp) { reader_->Read(resp); } void AddHold() { AddMultipleHolds(1); } - void AddMultipleHolds(int holds) { reader_->AddHold(holds); } + void AddMultipleHolds(int holds) { + GPR_CODEGEN_DEBUG_ASSERT(holds > 0); + reader_->AddHold(holds); + } void RemoveHold() { reader_->RemoveHold(); } virtual void OnDone(const ::grpc::Status& /*s*/) {} @@ -364,7 +371,10 @@ class ClientWriteReactor { void StartWritesDone() { writer_->WritesDone(); } void AddHold() { AddMultipleHolds(1); } - void AddMultipleHolds(int holds) { writer_->AddHold(holds); } + void AddMultipleHolds(int holds) { + GPR_CODEGEN_DEBUG_ASSERT(holds > 0); + writer_->AddHold(holds); + } void RemoveHold() { writer_->RemoveHold(); } virtual void OnDone(const ::grpc::Status& /*s*/) {} diff --git a/include/grpcpp/impl/codegen/method_handler_impl.h b/include/grpcpp/impl/codegen/method_handler_impl.h index 6368b63e1ab..2de193457a1 100644 --- a/include/grpcpp/impl/codegen/method_handler_impl.h +++ b/include/grpcpp/impl/codegen/method_handler_impl.h @@ -303,10 +303,13 @@ class BidiStreamingHandler ::grpc_impl::ServerReaderWriter*)> func, ServiceType* service) + // TODO(vjpai): When gRPC supports C++14, move-capture func in the below : TemplatedBidiStreamingHandler< ::grpc_impl::ServerReaderWriter, false>( - std::bind(func, service, std::placeholders::_1, - std::placeholders::_2)) {} + [func, service]( + ::grpc_impl::ServerContext* ctx, + ::grpc_impl::ServerReaderWriter* + streamer) { return func(service, ctx, streamer); }) {} }; template @@ -321,7 +324,7 @@ class StreamedUnaryHandler func) : TemplatedBidiStreamingHandler< ::grpc_impl::ServerUnaryStreamer, true>( - func) {} + std::move(func)) {} }; template @@ -336,7 +339,7 @@ class SplitServerStreamingHandler func) : TemplatedBidiStreamingHandler< ::grpc_impl::ServerSplitStreamer, false>( - func) {} + std::move(func)) {} }; /// General method handler class for errors that prevent real method use diff --git a/include/grpcpp/impl/codegen/sync_stream_impl.h b/include/grpcpp/impl/codegen/sync_stream_impl.h index 80b0bd73d2a..1a67467ebbc 100644 --- a/include/grpcpp/impl/codegen/sync_stream_impl.h +++ b/include/grpcpp/impl/codegen/sync_stream_impl.h @@ -419,7 +419,7 @@ class ClientReaderWriterInterface : public internal::ClientStreamingInterface, virtual void WaitForInitialMetadata() = 0; /// Half close writing from the client. (signal that the stream of messages - /// coming from the clinet is complete). + /// coming from the client is complete). /// Blocks until currently-pending writes are completed. 
/// Thread-safe with respect to \a ReaderInterface::Read /// diff --git a/package.xml b/package.xml index d9d5d1516d8..06d1669ec48 100644 --- a/package.xml +++ b/package.xml @@ -108,12 +108,16 @@ + + + + @@ -121,10 +125,13 @@ + + - + + diff --git a/src/compiler/cpp_generator.cc b/src/compiler/cpp_generator.cc index 882e3d60c67..21cf1d69ea7 100644 --- a/src/compiler/cpp_generator.cc +++ b/src/compiler/cpp_generator.cc @@ -1343,11 +1343,14 @@ void PrintHeaderServerMethodStreamedUnary( printer->Print(*vars, "WithStreamedUnaryMethod_$Method$() {\n" " ::grpc::Service::MarkMethodStreamed($Idx$,\n" - " new ::grpc::internal::StreamedUnaryHandler< $Request$, " - "$Response$>(std::bind" - "(&WithStreamedUnaryMethod_$Method$::" - "Streamed$Method$, this, std::placeholders::_1, " - "std::placeholders::_2)));\n" + " new ::grpc::internal::StreamedUnaryHandler<\n" + " $Request$, $Response$>(\n" + " [this](::grpc_impl::ServerContext* context,\n" + " ::grpc_impl::ServerUnaryStreamer<\n" + " $Request$, $Response$>* streamer) {\n" + " return this->Streamed$Method$(context,\n" + " streamer);\n" + " }));\n" "}\n"); printer->Print(*vars, "~WithStreamedUnaryMethod_$Method$() override {\n" @@ -1391,16 +1394,18 @@ void PrintHeaderServerMethodSplitStreaming( "{}\n"); printer->Print(" public:\n"); printer->Indent(); - printer->Print( - *vars, - "WithSplitStreamingMethod_$Method$() {\n" - " ::grpc::Service::MarkMethodStreamed($Idx$,\n" - " new ::grpc::internal::SplitServerStreamingHandler< $Request$, " - "$Response$>(std::bind" - "(&WithSplitStreamingMethod_$Method$::" - "Streamed$Method$, this, std::placeholders::_1, " - "std::placeholders::_2)));\n" - "}\n"); + printer->Print(*vars, + "WithSplitStreamingMethod_$Method$() {\n" + " ::grpc::Service::MarkMethodStreamed($Idx$,\n" + " new ::grpc::internal::SplitServerStreamingHandler<\n" + " $Request$, $Response$>(\n" + " [this](::grpc_impl::ServerContext* context,\n" + " ::grpc_impl::ServerSplitStreamer<\n" + " $Request$, $Response$>* streamer) {\n" + " return this->Streamed$Method$(context,\n" + " streamer);\n" + " }));\n" + "}\n"); printer->Print(*vars, "~WithSplitStreamingMethod_$Method$() override {\n" " BaseClassMustBeDerivedFromService(this);\n" @@ -2251,7 +2256,12 @@ void PrintSourceService(grpc_generator::Printer* printer, " new ::grpc::internal::RpcMethodHandler< $ns$$Service$::Service, " "$Request$, " "$Response$>(\n" - " std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n"); + " []($ns$$Service$::Service* service,\n" + " ::grpc_impl::ServerContext* ctx,\n" + " const $Request$* req,\n" + " $Response$* resp) {\n" + " return service->$Method$(ctx, req, resp);\n" + " }, this)));\n"); } else if (ClientOnlyStreaming(method.get())) { printer->Print( *vars, @@ -2260,7 +2270,12 @@ void PrintSourceService(grpc_generator::Printer* printer, " ::grpc::internal::RpcMethod::CLIENT_STREAMING,\n" " new ::grpc::internal::ClientStreamingHandler< " "$ns$$Service$::Service, $Request$, $Response$>(\n" - " std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n"); + " []($ns$$Service$::Service* service,\n" + " ::grpc_impl::ServerContext* ctx,\n" + " ::grpc_impl::ServerReader<$Request$>* reader,\n" + " $Response$* resp) {\n" + " return service->$Method$(ctx, reader, resp);\n" + " }, this)));\n"); } else if (ServerOnlyStreaming(method.get())) { printer->Print( *vars, @@ -2269,16 +2284,25 @@ void PrintSourceService(grpc_generator::Printer* printer, " ::grpc::internal::RpcMethod::SERVER_STREAMING,\n" " new ::grpc::internal::ServerStreamingHandler< " "$ns$$Service$::Service, 
$Request$, $Response$>(\n" - " std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n"); + " []($ns$$Service$::Service* service,\n" + " ::grpc_impl::ServerContext* ctx,\n" + " const $Request$* req,\n" + " ::grpc_impl::ServerWriter<$Response$>* writer) {\n" + " return service->$Method$(ctx, req, writer);\n" + " }, this)));\n"); } else if (method->BidiStreaming()) { - printer->Print( - *vars, - "AddMethod(new ::grpc::internal::RpcServiceMethod(\n" - " $prefix$$Service$_method_names[$Idx$],\n" - " ::grpc::internal::RpcMethod::BIDI_STREAMING,\n" - " new ::grpc::internal::BidiStreamingHandler< " - "$ns$$Service$::Service, $Request$, $Response$>(\n" - " std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n"); + printer->Print(*vars, + "AddMethod(new ::grpc::internal::RpcServiceMethod(\n" + " $prefix$$Service$_method_names[$Idx$],\n" + " ::grpc::internal::RpcMethod::BIDI_STREAMING,\n" + " new ::grpc::internal::BidiStreamingHandler< " + "$ns$$Service$::Service, $Request$, $Response$>(\n" + " []($ns$$Service$::Service* service,\n" + " ::grpc_impl::ServerContext* ctx,\n" + " ::grpc_impl::ServerReaderWriter<$Response$,\n" + " $Request$>* stream) {\n" + " return service->$Method$(ctx, stream);\n" + " }, this)));\n"); } } printer->Outdent(); diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc index 343026c8bd8..1fcc5302760 100644 --- a/src/core/ext/filters/client_channel/client_channel.cc +++ b/src/core/ext/filters/client_channel/client_channel.cc @@ -251,7 +251,7 @@ class ChannelData { grpc_error* DoPingLocked(grpc_transport_op* op); - static void StartTransportOpLocked(grpc_transport_op* op); + void StartTransportOpLocked(grpc_transport_op* op); void TryToConnectLocked(); @@ -1012,7 +1012,7 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface { last_seen_state_(initial_state) {} ~WatcherWrapper() { - auto* parent = parent_.release(); /* ref owned by lambda */ + auto* parent = parent_.release(); // ref owned by lambda parent->chand_->work_serializer_->Run( [parent]() { parent->Unref(DEBUG_LOCATION, "WatcherWrapper"); }, DEBUG_LOCATION); @@ -1025,8 +1025,13 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface { "subchannel %p; hopping into work_serializer", parent_->chand_, parent_.get(), parent_->subchannel_); } - // Will delete itself. 
- new Updater(Ref()); + Ref(); // ref owned by lambda + parent_->chand_->work_serializer_->Run( + [this]() { + ApplyUpdateInControlPlaneWorkSerializer(); + Unref(); + }, + DEBUG_LOCATION); } grpc_pollset_set* interested_parties() override { @@ -1046,45 +1051,25 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface { grpc_connectivity_state last_seen_state() const { return last_seen_state_; } private: - class Updater { - public: - Updater(RefCountedPtr parent) - : parent_(std::move(parent)) { - parent_->parent_->chand_->work_serializer_->Run( - [this]() { ApplyUpdateInControlPlaneWorkSerializer(); }, - DEBUG_LOCATION); + void ApplyUpdateInControlPlaneWorkSerializer() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) { + gpr_log(GPR_INFO, + "chand=%p: processing connectivity change in work serializer " + "for subchannel wrapper %p subchannel %p " + "watcher=%p", + parent_->chand_, parent_.get(), parent_->subchannel_, + watcher_.get()); } - - private: - void ApplyUpdateInControlPlaneWorkSerializer() { - if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) { - gpr_log(GPR_INFO, - "chand=%p: processing connectivity change in work serializer " - "for subchannel wrapper %p subchannel %p " - "watcher=%p", - parent_->parent_->chand_, parent_->parent_.get(), - parent_->parent_->subchannel_, parent_->watcher_.get()); - } - grpc_connectivity_state state; - RefCountedPtr connected_subchannel; - if (!parent_->PopConnectivityStateChange(&state, - &connected_subchannel)) { - // There should be atleast one connectivity change in the queue. - GPR_DEBUG_ASSERT(false); - } - // Ignore update if the parent WatcherWrapper has been replaced - // since this callback was scheduled. - if (parent_->watcher_ != nullptr) { - parent_->last_seen_state_ = state; - parent_->parent_->MaybeUpdateConnectedSubchannel( - std::move(connected_subchannel)); - parent_->watcher_->OnConnectivityStateChange(state); - } - delete this; + ConnectivityStateChange state_change = PopConnectivityStateChange(); + // Ignore update if the parent WatcherWrapper has been replaced + // since this callback was scheduled. + if (watcher_ != nullptr) { + last_seen_state_ = state_change.state; + parent_->MaybeUpdateConnectedSubchannel( + std::move(state_change.connected_subchannel)); + watcher_->OnConnectivityStateChange(state_change.state); } - - RefCountedPtr parent_; - }; + } std::unique_ptr watcher_; @@ -1616,25 +1601,6 @@ void ChannelData::ProcessLbPolicy( grpc_channel_args_find(resolver_result.args, GRPC_ARG_LB_POLICY_NAME); policy_name = grpc_channel_arg_get_string(channel_arg); } - // Special case: If at least one balancer address is present, we use - // the grpclb policy, regardless of what the resolver has returned. - bool found_balancer_address = false; - for (size_t i = 0; i < resolver_result.addresses.size(); ++i) { - const ServerAddress& address = resolver_result.addresses[i]; - if (address.IsBalancer()) { - found_balancer_address = true; - break; - } - } - if (found_balancer_address) { - if (policy_name != nullptr && strcmp(policy_name, "grpclb") != 0) { - gpr_log(GPR_INFO, - "resolver requested LB policy %s but provided at least one " - "balancer address -- forcing use of grpclb LB policy", - policy_name); - } - policy_name = "grpclb"; - } // Use pick_first if nothing was specified and we didn't select grpclb // above. 
if (policy_name == nullptr) policy_name = "pick_first"; @@ -1812,20 +1778,17 @@ grpc_error* ChannelData::DoPingLocked(grpc_transport_op* op) { } void ChannelData::StartTransportOpLocked(grpc_transport_op* op) { - grpc_channel_element* elem = - static_cast(op->handler_private.extra_arg); - ChannelData* chand = static_cast(elem->channel_data); // Connectivity watch. if (op->start_connectivity_watch != nullptr) { - chand->state_tracker_.AddWatcher(op->start_connectivity_watch_state, - std::move(op->start_connectivity_watch)); + state_tracker_.AddWatcher(op->start_connectivity_watch_state, + std::move(op->start_connectivity_watch)); } if (op->stop_connectivity_watch != nullptr) { - chand->state_tracker_.RemoveWatcher(op->stop_connectivity_watch); + state_tracker_.RemoveWatcher(op->stop_connectivity_watch); } // Ping. if (op->send_ping.on_initiate != nullptr || op->send_ping.on_ack != nullptr) { - grpc_error* error = chand->DoPingLocked(op); + grpc_error* error = DoPingLocked(op); if (error != GRPC_ERROR_NONE) { ExecCtx::Run(DEBUG_LOCATION, op->send_ping.on_initiate, GRPC_ERROR_REF(error)); @@ -1837,40 +1800,39 @@ void ChannelData::StartTransportOpLocked(grpc_transport_op* op) { } // Reset backoff. if (op->reset_connect_backoff) { - if (chand->resolving_lb_policy_ != nullptr) { - chand->resolving_lb_policy_->ResetBackoffLocked(); + if (resolving_lb_policy_ != nullptr) { + resolving_lb_policy_->ResetBackoffLocked(); } } // Disconnect or enter IDLE. if (op->disconnect_with_error != GRPC_ERROR_NONE) { if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) { - gpr_log(GPR_INFO, "chand=%p: disconnect_with_error: %s", chand, + gpr_log(GPR_INFO, "chand=%p: disconnect_with_error: %s", this, grpc_error_string(op->disconnect_with_error)); } - chand->DestroyResolvingLoadBalancingPolicyLocked(); + DestroyResolvingLoadBalancingPolicyLocked(); intptr_t value; if (grpc_error_get_int(op->disconnect_with_error, GRPC_ERROR_INT_CHANNEL_CONNECTIVITY_STATE, &value) && static_cast(value) == GRPC_CHANNEL_IDLE) { - if (chand->disconnect_error() == GRPC_ERROR_NONE) { + if (disconnect_error() == GRPC_ERROR_NONE) { // Enter IDLE state. - chand->UpdateStateAndPickerLocked(GRPC_CHANNEL_IDLE, - "channel entering IDLE", nullptr); + UpdateStateAndPickerLocked(GRPC_CHANNEL_IDLE, "channel entering IDLE", + nullptr); } GRPC_ERROR_UNREF(op->disconnect_with_error); } else { // Disconnect. - GPR_ASSERT(chand->disconnect_error_.Load(MemoryOrder::RELAXED) == + GPR_ASSERT(disconnect_error_.Load(MemoryOrder::RELAXED) == GRPC_ERROR_NONE); - chand->disconnect_error_.Store(op->disconnect_with_error, - MemoryOrder::RELEASE); - chand->UpdateStateAndPickerLocked( + disconnect_error_.Store(op->disconnect_with_error, MemoryOrder::RELEASE); + UpdateStateAndPickerLocked( GRPC_CHANNEL_SHUTDOWN, "shutdown from API", absl::make_unique( GRPC_ERROR_REF(op->disconnect_with_error))); } } - GRPC_CHANNEL_STACK_UNREF(chand->owning_stack_, "start_transport_op"); + GRPC_CHANNEL_STACK_UNREF(owning_stack_, "start_transport_op"); ExecCtx::Run(DEBUG_LOCATION, op->on_consumed, GRPC_ERROR_NONE); } @@ -1883,10 +1845,9 @@ void ChannelData::StartTransportOp(grpc_channel_element* elem, grpc_pollset_set_add_pollset(chand->interested_parties_, op->bind_pollset); } // Pop into control plane work_serializer for remaining ops. 
- op->handler_private.extra_arg = elem; GRPC_CHANNEL_STACK_REF(chand->owning_stack_, "start_transport_op"); chand->work_serializer_->Run( - [op]() { ChannelData::StartTransportOpLocked(op); }, DEBUG_LOCATION); + [chand, op]() { chand->StartTransportOpLocked(op); }, DEBUG_LOCATION); } void ChannelData::GetChannelInfo(grpc_channel_element* elem, @@ -2257,10 +2218,32 @@ void CallData::FreeCachedSendOpDataForCompletedBatch( void CallData::RecvTrailingMetadataReadyForLoadBalancingPolicy( void* arg, grpc_error* error) { CallData* calld = static_cast(arg); + // Set error if call did not succeed. + grpc_error* error_for_lb = GRPC_ERROR_NONE; + if (error != GRPC_ERROR_NONE) { + error_for_lb = error; + } else { + const auto& fields = calld->recv_trailing_metadata_->idx.named; + GPR_ASSERT(fields.grpc_status != nullptr); + grpc_status_code status = + grpc_get_status_code_from_metadata(fields.grpc_status->md); + std::string msg; + if (status != GRPC_STATUS_OK) { + error_for_lb = grpc_error_set_int( + GRPC_ERROR_CREATE_FROM_STATIC_STRING("call failed"), + GRPC_ERROR_INT_GRPC_STATUS, status); + if (fields.grpc_message != nullptr) { + error_for_lb = grpc_error_set_str( + error_for_lb, GRPC_ERROR_STR_GRPC_MESSAGE, + grpc_slice_ref_internal(GRPC_MDVALUE(fields.grpc_message->md))); + } + } + } // Invoke callback to LB policy. Metadata trailing_metadata(calld, calld->recv_trailing_metadata_); - calld->lb_recv_trailing_metadata_ready_(error, &trailing_metadata, + calld->lb_recv_trailing_metadata_ready_(error_for_lb, &trailing_metadata, &calld->lb_call_state_); + if (error == GRPC_ERROR_NONE) GRPC_ERROR_UNREF(error_for_lb); // Chain to original callback. Closure::Run(DEBUG_LOCATION, calld->original_recv_trailing_metadata_ready_, GRPC_ERROR_REF(error)); @@ -3864,7 +3847,9 @@ bool CallData::PickSubchannelLocked(grpc_call_element* elem, // The incoming call will make the channel exit IDLE. if (chand->picker() == nullptr) { GRPC_CHANNEL_STACK_REF(chand->owning_stack(), "PickSubchannelLocked"); - // Bounce into the control plane work serializer to exit IDLE. + // Bounce into the control plane work serializer to exit IDLE. Since we are + // holding on to the data plane mutex here, we offload it on the ExecCtx so + // that we don't deadlock with ourselves. ExecCtx::Run( DEBUG_LOCATION, GRPC_CLOSURE_CREATE( @@ -3964,8 +3949,10 @@ bool CallData::PickSubchannelLocked(grpc_call_element* elem, if (pick_queued_) RemoveCallFromQueuedPicksLocked(elem); // Handle drops. if (GPR_UNLIKELY(result.subchannel == nullptr)) { - result.error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Call dropped by load balancing policy"); + result.error = grpc_error_set_int( + GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Call dropped by load balancing policy"), + GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE); } else { // Grab a ref to the connected subchannel while we're still // holding the data plane mutex. diff --git a/src/core/ext/filters/client_channel/lb_policy/address_filtering.cc b/src/core/ext/filters/client_channel/lb_policy/address_filtering.cc new file mode 100644 index 00000000000..67843df78e5 --- /dev/null +++ b/src/core/ext/filters/client_channel/lb_policy/address_filtering.cc @@ -0,0 +1,83 @@ +// +// Copyright 2020 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include <grpc/support/port_platform.h> + +#include "src/core/ext/filters/client_channel/lb_policy/address_filtering.h" + +#include "src/core/lib/channel/channel_args.h" + +#define GRPC_ARG_HIERARCHICAL_PATH "grpc.internal.address.hierarchical_path" + +namespace grpc_core { + +namespace { + +void* HierarchicalPathCopy(void* p) { + std::vector<std::string>* path = static_cast<std::vector<std::string>*>(p); + return static_cast<void*>(new std::vector<std::string>(*path)); +} + +void HierarchicalPathDestroy(void* p) { + std::vector<std::string>* path = static_cast<std::vector<std::string>*>(p); + delete path; +} + +int HierarchicalPathCompare(void* p1, void* p2) { + std::vector<std::string>* path1 = static_cast<std::vector<std::string>*>(p1); + std::vector<std::string>* path2 = static_cast<std::vector<std::string>*>(p2); + for (size_t i = 0; i < path1->size(); ++i) { + if (path2->size() == i) return 1; + int r = (*path1)[i].compare((*path2)[i]); + if (r != 0) return r; + } + if (path2->size() > path1->size()) return -1; + return 0; +} + +const grpc_arg_pointer_vtable hierarchical_path_arg_vtable = { + HierarchicalPathCopy, HierarchicalPathDestroy, HierarchicalPathCompare}; + +} // namespace + +grpc_arg MakeHierarchicalPathArg(const std::vector<std::string>& path) { + return grpc_channel_arg_pointer_create( + const_cast<char*>(GRPC_ARG_HIERARCHICAL_PATH), + const_cast<std::vector<std::string>*>(&path), + &hierarchical_path_arg_vtable); +} + +HierarchicalAddressMap MakeHierarchicalAddressMap( + const ServerAddressList& addresses) { + HierarchicalAddressMap result; + for (const ServerAddress& address : addresses) { + auto* path = grpc_channel_args_find_pointer<std::vector<std::string>>( + address.args(), GRPC_ARG_HIERARCHICAL_PATH); + if (path == nullptr || path->empty()) continue; + auto it = path->begin(); + ServerAddressList& target_list = result[*it]; + ++it; + std::vector<std::string> remaining_path(it, path->end()); + const char* name_to_remove = GRPC_ARG_HIERARCHICAL_PATH; + grpc_arg new_arg = MakeHierarchicalPathArg(remaining_path); + grpc_channel_args* new_args = grpc_channel_args_copy_and_add_and_remove( + address.args(), &name_to_remove, 1, &new_arg, 1); + target_list.emplace_back(address.address(), new_args); + } + return result; +} + +} // namespace grpc_core diff --git a/src/core/ext/filters/client_channel/lb_policy/address_filtering.h b/src/core/ext/filters/client_channel/lb_policy/address_filtering.h new file mode 100644 index 00000000000..03a1c228e7a --- /dev/null +++ b/src/core/ext/filters/client_channel/lb_policy/address_filtering.h @@ -0,0 +1,99 @@ +// +// Copyright 2020 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+// + +#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_ADDRESS_FILTERING_H +#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_ADDRESS_FILTERING_H + +#include <grpc/support/port_platform.h> + +#include <map> +#include <string> +#include <vector> + +#include "absl/strings/string_view.h" + +#include "src/core/ext/filters/client_channel/server_address.h" + +// The resolver returns a flat list of addresses. When a hierarchy of +// LB policies is in use, each leaf of the hierarchy will need a +// different subset of those addresses. This library provides a +// mechanism for determining which address is passed to which leaf +// policy. +// +// Each address will have an associated path that indicates which child +// it should be sent to at each level of the hierarchy to wind up at the +// right leaf policy. Each LB policy will look at the first element of +// the path of each address to determine which child to send the address +// to. It will then remove that first element when passing the address +// down to its child. +// +// For example, consider the following LB policy hierarchy: +// +// - priority +// - child0 (weighted_target) +// - localityA (round_robin) +// - localityB (round_robin) +// - child1 (weighted_target) +// - localityC (round_robin) +// - localityD (round_robin) +// +// Now consider the following addresses: +// - 10.0.0.1:80 path=["child0", "localityA"] +// - 10.0.0.2:80 path=["child0", "localityB"] +// - 10.0.0.3:80 path=["child1", "localityC"] +// - 10.0.0.4:80 path=["child1", "localityD"] +// +// The priority policy will split this up into two lists, one for each +// of its children: +// - child0: +// - 10.0.0.1:80 path=["localityA"] +// - 10.0.0.2:80 path=["localityB"] +// - child1: +// - 10.0.0.3:80 path=["localityC"] +// - 10.0.0.4:80 path=["localityD"] +// +// The weighted_target policy for child0 will split its list up into two +// lists, one for each of its children: +// - localityA: +// - 10.0.0.1:80 path=[] +// - localityB: +// - 10.0.0.2:80 path=[] +// +// Similarly, the weighted_target policy for child1 will split its list +// up into two lists, one for each of its children: +// - localityC: +// - 10.0.0.3:80 path=[] +// - localityD: +// - 10.0.0.4:80 path=[] + +namespace grpc_core { + +// Constructs a channel arg containing the hierarchical path +// to be associated with an address. +grpc_arg MakeHierarchicalPathArg(const std::vector<std::string>& path); + +// A map from the next path element to the addresses that fall under +// that path element. +using HierarchicalAddressMap = std::map<std::string, ServerAddressList>; + +// Splits up the addresses into a separate list for each child.
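A minimal sketch of how these two helpers fit together; the resolved address, channel args, and child names below are placeholders, not part of this header:

// Parent side: tag an address with the path down to its leaf policy.
grpc_core::ServerAddressList MakeTaggedList(const grpc_resolved_address& addr) {
  std::vector<std::string> path = {"child0", "localityA"};
  grpc_arg path_arg = grpc_core::MakeHierarchicalPathArg(path);
  grpc_channel_args* args = grpc_channel_args_copy_and_add(nullptr, &path_arg, 1);
  grpc_core::ServerAddressList addresses;
  addresses.emplace_back(addr, args);  // ServerAddress takes ownership of args
  return addresses;
}

// Child side: split the list by the first path element before handing each
// sub-list (with that element stripped) to the corresponding child policy.
void SplitForChildren(const grpc_core::ServerAddressList& addresses) {
  grpc_core::HierarchicalAddressMap by_child =
      grpc_core::MakeHierarchicalAddressMap(addresses);
  grpc_core::ServerAddressList& child0 = by_child["child0"];
  (void)child0;  // addresses here now carry path=["localityA"]
}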
+HierarchicalAddressMap MakeHierarchicalAddressMap( + const ServerAddressList& addresses); + +} // namespace grpc_core + +#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_ADDRESS_FILTERING_H \ + */ diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc index 9e126f8e7ae..9f30e76c9a1 100644 --- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc @@ -74,6 +74,7 @@ #include "src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h" #include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h" #include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h" +#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h" #include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h" #include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h" #include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h" @@ -1224,25 +1225,11 @@ void GrpcLb::BalancerCallState::OnBalancerStatusReceivedLocked( // helper code for creating balancer channel // -ServerAddressList ExtractBalancerAddresses(const ServerAddressList& addresses) { - ServerAddressList balancer_addresses; - for (size_t i = 0; i < addresses.size(); ++i) { - if (addresses[i].IsBalancer()) { - // Strip out the is_balancer channel arg, since we don't want to - // recursively use the grpclb policy in the channel used to talk to - // the balancers. Note that we do NOT strip out the balancer_name - // channel arg, since we need that to set the authority correctly - // to talk to the balancers. - static const char* args_to_remove[] = { - GRPC_ARG_ADDRESS_IS_BALANCER, - }; - balancer_addresses.emplace_back( - addresses[i].address(), - grpc_channel_args_copy_and_remove(addresses[i].args(), args_to_remove, - GPR_ARRAY_SIZE(args_to_remove))); - } - } - return balancer_addresses; +ServerAddressList ExtractBalancerAddresses(const grpc_channel_args& args) { + const ServerAddressList* addresses = + FindGrpclbBalancerAddressesInChannelArgs(args); + if (addresses != nullptr) return *addresses; + return ServerAddressList(); } /* Returns the channel args for the LB channel, used to create a bidirectional @@ -1438,27 +1425,25 @@ void GrpcLb::UpdateLocked(UpdateArgs args) { // helpers for UpdateLocked() // -// Returns the backend addresses extracted from the given addresses. -ServerAddressList ExtractBackendAddresses(const ServerAddressList& addresses) { +ServerAddressList AddNullLbTokenToAddresses( + const ServerAddressList& addresses) { static const char* lb_token = ""; grpc_arg arg = grpc_channel_arg_pointer_create( const_cast(GRPC_ARG_GRPCLB_ADDRESS_LB_TOKEN), const_cast(lb_token), &lb_token_arg_vtable); - ServerAddressList backend_addresses; + ServerAddressList addresses_out; for (size_t i = 0; i < addresses.size(); ++i) { - if (!addresses[i].IsBalancer()) { - backend_addresses.emplace_back( - addresses[i].address(), - grpc_channel_args_copy_and_add(addresses[i].args(), &arg, 1)); - } + addresses_out.emplace_back( + addresses[i].address(), + grpc_channel_args_copy_and_add(addresses[i].args(), &arg, 1)); } - return backend_addresses; + return addresses_out; } void GrpcLb::ProcessAddressesAndChannelArgsLocked( const ServerAddressList& addresses, const grpc_channel_args& args) { // Update fallback address list. 
- fallback_backend_addresses_ = ExtractBackendAddresses(addresses); + fallback_backend_addresses_ = AddNullLbTokenToAddresses(addresses); // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args, // since we use this to trigger the client_load_reporting filter. static const char* args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME}; @@ -1468,7 +1453,7 @@ void GrpcLb::ProcessAddressesAndChannelArgsLocked( args_ = grpc_channel_args_copy_and_add_and_remove( &args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1); // Construct args for balancer channel. - ServerAddressList balancer_addresses = ExtractBalancerAddresses(addresses); + ServerAddressList balancer_addresses = ExtractBalancerAddresses(args); grpc_channel_args* lb_channel_args = BuildBalancerChannelArgs( balancer_addresses, response_generator_.get(), &args); // Create balancer channel if needed. diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc new file mode 100644 index 00000000000..2888c3b94ae --- /dev/null +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc @@ -0,0 +1,89 @@ +// +// Copyright 2019 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include + +#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h" + +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gpr/useful.h" + +// Channel arg key for the list of balancer addresses. +#define GRPC_ARG_GRPCLB_BALANCER_ADDRESSES "grpc.grpclb_balancer_addresses" +// Channel arg key for a string indicating an address's balancer name. 
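The file that starts here provides the channel arg that replaces the per-address is_balancer flag stripped out of grpclb above. A minimal sketch of how a resolver might attach its balancer list; the helper name is an illustration, not part of this patch:

grpc_channel_args* AddBalancerAddressesArg(
    const grpc_channel_args* base,
    const grpc_core::ServerAddressList& balancers) {
  grpc_arg arg = grpc_core::CreateGrpclbBalancerAddressesArg(&balancers);
  // grpc_channel_args_copy_and_add() deep-copies the list through the vtable
  // defined below, so the caller's list does not need to outlive the result.
  return grpc_channel_args_copy_and_add(base, &arg, 1);
}

// grpclb later recovers the list with:
//   const grpc_core::ServerAddressList* balancers =
//       grpc_core::FindGrpclbBalancerAddressesInChannelArgs(*args);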
+#define GRPC_ARG_ADDRESS_BALANCER_NAME "grpc.address_balancer_name" + +namespace grpc_core { + +namespace { + +void* BalancerAddressesArgCopy(void* p) { + ServerAddressList* address_list = static_cast(p); + return new ServerAddressList(*address_list); +} + +void BalancerAddressesArgDestroy(void* p) { + ServerAddressList* address_list = static_cast(p); + delete address_list; +} + +int BalancerAddressesArgCmp(void* p, void* q) { + ServerAddressList* address_list1 = static_cast(p); + ServerAddressList* address_list2 = static_cast(q); + if (address_list1 == nullptr || address_list2 == nullptr) { + return GPR_ICMP(address_list1, address_list2); + } + if (address_list1->size() > address_list2->size()) return 1; + if (address_list1->size() < address_list2->size()) return -1; + for (size_t i = 0; i < address_list1->size(); ++i) { + int retval = (*address_list1)[i].Cmp((*address_list2)[i]); + if (retval != 0) return retval; + } + return 0; +} + +const grpc_arg_pointer_vtable kBalancerAddressesArgVtable = { + BalancerAddressesArgCopy, BalancerAddressesArgDestroy, + BalancerAddressesArgCmp}; + +} // namespace + +grpc_arg CreateGrpclbBalancerAddressesArg( + const ServerAddressList* address_list) { + return grpc_channel_arg_pointer_create( + const_cast(GRPC_ARG_GRPCLB_BALANCER_ADDRESSES), + const_cast(address_list), + &kBalancerAddressesArgVtable); +} + +const ServerAddressList* FindGrpclbBalancerAddressesInChannelArgs( + const grpc_channel_args& args) { + return grpc_channel_args_find_pointer( + &args, const_cast(GRPC_ARG_GRPCLB_BALANCER_ADDRESSES)); +} + +grpc_arg CreateGrpclbBalancerNameArg(const char* balancer_name) { + return grpc_channel_arg_string_create( + const_cast(GRPC_ARG_ADDRESS_BALANCER_NAME), + const_cast(balancer_name)); +} + +const char* FindGrpclbBalancerNameInChannelArgs(const grpc_channel_args& args) { + return grpc_channel_args_find_string( + &args, const_cast(GRPC_ARG_ADDRESS_BALANCER_NAME)); +} + +} // namespace grpc_core diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h new file mode 100644 index 00000000000..9b6b259deca --- /dev/null +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h @@ -0,0 +1,40 @@ +// +// Copyright 2019 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_BALANCER_ADDRESSES_H +#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_BALANCER_ADDRESSES_H + +#include + +#include + +#include "src/core/ext/filters/client_channel/server_address.h" + +namespace grpc_core { + +grpc_arg CreateGrpclbBalancerAddressesArg( + const ServerAddressList* address_list); +const ServerAddressList* FindGrpclbBalancerAddressesInChannelArgs( + const grpc_channel_args& args); + +grpc_arg CreateGrpclbBalancerNameArg(const char* balancer_name); +const char* FindGrpclbBalancerNameInChannelArgs(const grpc_channel_args& args); + +} // namespace grpc_core + +#endif /* \ +GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_BALANCER_ADDRESSES_H \ + */ diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc index 5bc4f5157ad..414f8274317 100644 --- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc @@ -27,6 +27,7 @@ #include #include "src/core/ext/filters/client_channel/client_channel.h" +#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h" #include "src/core/ext/filters/client_channel/server_address.h" #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/gpr/string.h" @@ -55,8 +56,8 @@ RefCountedPtr CreateTargetAuthorityTable( grpc_sockaddr_to_string(&addr_str, &addresses[i].address(), true) > 0); target_authority_entries[i].key = grpc_slice_from_copied_string(addr_str); gpr_free(addr_str); - char* balancer_name = grpc_channel_arg_get_string(grpc_channel_args_find( - addresses[i].args(), GRPC_ARG_ADDRESS_BALANCER_NAME)); + const char* balancer_name = + FindGrpclbBalancerNameInChannelArgs(*addresses[i].args()); target_authority_entries[i].value.reset(gpr_strdup(balancer_name)); } RefCountedPtr target_authority_table = diff --git a/src/core/ext/filters/client_channel/lb_policy/priority/priority.cc b/src/core/ext/filters/client_channel/lb_policy/priority/priority.cc new file mode 100644 index 00000000000..6bcb9bf2231 --- /dev/null +++ b/src/core/ext/filters/client_channel/lb_policy/priority/priority.cc @@ -0,0 +1,870 @@ +// +// Copyright 2018 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#include + +#include +#include + +#include "absl/strings/str_cat.h" + +#include + +#include "src/core/ext/filters/client_channel/lb_policy.h" +#include "src/core/ext/filters/client_channel/lb_policy/address_filtering.h" +#include "src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h" +#include "src/core/ext/filters/client_channel/lb_policy_factory.h" +#include "src/core/ext/filters/client_channel/lb_policy_registry.h" +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gprpp/orphanable.h" +#include "src/core/lib/gprpp/ref_counted_ptr.h" +#include "src/core/lib/iomgr/timer.h" +#include "src/core/lib/iomgr/work_serializer.h" + +namespace grpc_core { + +TraceFlag grpc_lb_priority_trace(false, "priority_lb"); + +namespace { + +constexpr char kPriority[] = "priority_experimental"; + +// How long we keep a child around for after it is no longer being used +// (either because it has been removed from the config or because we +// have switched to a higher-priority child). +constexpr int kChildRetentionIntervalMs = 15 * 60 * 1000; + +// Default for how long we wait for a newly created child to get connected +// before starting to attempt the next priority. Overridable via channel arg. +constexpr int kDefaultChildFailoverTimeoutMs = 10000; + +// Config for priority LB policy. +class PriorityLbConfig : public LoadBalancingPolicy::Config { + public: + PriorityLbConfig( + std::map> + children, + std::vector priorities) + : children_(std::move(children)), priorities_(std::move(priorities)) {} + + const char* name() const override { return kPriority; } + + const std::map>& + children() const { + return children_; + } + const std::vector& priorities() const { return priorities_; } + + private: + const std::map> + children_; + const std::vector priorities_; +}; + +// priority LB policy. +class PriorityLb : public LoadBalancingPolicy { + public: + explicit PriorityLb(Args args); + + const char* name() const override { return kPriority; } + + void UpdateLocked(UpdateArgs args) override; + void ExitIdleLocked() override; + void ResetBackoffLocked() override; + + private: + // Each ChildPriority holds a ref to the PriorityLb. + class ChildPriority : public InternallyRefCounted { + public: + ChildPriority(RefCountedPtr priority_policy, std::string name); + + ~ChildPriority() { + priority_policy_.reset(DEBUG_LOCATION, "ChildPriority"); + } + + const std::string& name() const { return name_; } + + void UpdateLocked(RefCountedPtr config); + void ExitIdleLocked(); + void ResetBackoffLocked(); + void DeactivateLocked(); + void MaybeReactivateLocked(); + void MaybeCancelFailoverTimerLocked(); + + void Orphan() override; + + std::unique_ptr GetPicker() { + return absl::make_unique(picker_wrapper_); + } + + grpc_connectivity_state connectivity_state() const { + return connectivity_state_; + } + bool failover_timer_callback_pending() const { + return failover_timer_callback_pending_; + } + + private: + // A simple wrapper for ref-counting a picker from the child policy. + class RefCountedPicker : public RefCounted { + public: + explicit RefCountedPicker(std::unique_ptr picker) + : picker_(std::move(picker)) {} + PickResult Pick(PickArgs args) { return picker_->Pick(args); } + + private: + std::unique_ptr picker_; + }; + + // A non-ref-counted wrapper for RefCountedPicker. 
+ class RefCountedPickerWrapper : public SubchannelPicker { + public: + explicit RefCountedPickerWrapper(RefCountedPtr picker) + : picker_(std::move(picker)) {} + PickResult Pick(PickArgs args) override { return picker_->Pick(args); } + + private: + RefCountedPtr picker_; + }; + + class Helper : public ChannelControlHelper { + public: + explicit Helper(RefCountedPtr priority) + : priority_(std::move(priority)) {} + + ~Helper() { priority_.reset(DEBUG_LOCATION, "Helper"); } + + RefCountedPtr CreateSubchannel( + const grpc_channel_args& args) override; + void UpdateState(grpc_connectivity_state state, + std::unique_ptr picker) override; + void RequestReresolution() override; + void AddTraceEvent(TraceSeverity severity, StringView message) override; + + private: + RefCountedPtr priority_; + }; + + // Methods for dealing with the child policy. + OrphanablePtr CreateChildPolicyLocked( + const grpc_channel_args* args); + + void OnConnectivityStateUpdateLocked( + grpc_connectivity_state state, + std::unique_ptr picker); + + void StartFailoverTimerLocked(); + + static void OnFailoverTimer(void* arg, grpc_error* error); + void OnFailoverTimerLocked(grpc_error* error); + static void OnDeactivationTimer(void* arg, grpc_error* error); + void OnDeactivationTimerLocked(grpc_error* error); + + RefCountedPtr priority_policy_; + const std::string name_; + + OrphanablePtr child_policy_; + + grpc_connectivity_state connectivity_state_ = GRPC_CHANNEL_CONNECTING; + RefCountedPtr picker_wrapper_; + + // States for delayed removal. + grpc_timer deactivation_timer_; + grpc_closure on_deactivation_timer_; + bool deactivation_timer_callback_pending_ = false; + + // States of failover. + grpc_timer failover_timer_; + grpc_closure on_failover_timer_; + bool failover_timer_callback_pending_ = false; + }; + + ~PriorityLb(); + + void ShutdownLocked() override; + + // Returns UINT32_MAX if child is not in current priority list. + uint32_t GetChildPriorityLocked(const std::string& child_name) const; + + void HandleChildConnectivityStateChangeLocked(ChildPriority* child); + void DeleteChild(ChildPriority* child); + + void TryNextPriorityLocked(bool report_connecting); + void SelectPriorityLocked(uint32_t priority); + + const int child_failover_timeout_ms_; + + // Current channel args and config from the resolver. + const grpc_channel_args* args_ = nullptr; + RefCountedPtr config_; + HierarchicalAddressMap addresses_; + + // Internal state. + bool shutting_down_ = false; + + std::map> children_; + // The priority that is being used. + uint32_t current_priority_ = UINT32_MAX; + // Points to the current child from before the most recent update. + // We will continue to use this child until we decide which of the new + // children to use. 
+ ChildPriority* current_child_from_before_update_ = nullptr; +}; + +// +// PriorityLb +// + +PriorityLb::PriorityLb(Args args) + : LoadBalancingPolicy(std::move(args)), + child_failover_timeout_ms_(grpc_channel_args_find_integer( + args.args, GRPC_ARG_PRIORITY_FAILOVER_TIMEOUT_MS, + {kDefaultChildFailoverTimeoutMs, 0, INT_MAX})) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { + gpr_log(GPR_INFO, "[priority_lb %p] created", this); + } +} + +PriorityLb::~PriorityLb() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { + gpr_log(GPR_INFO, "[priority_lb %p] destroying priority LB policy", this); + } + grpc_channel_args_destroy(args_); +} + +void PriorityLb::ShutdownLocked() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { + gpr_log(GPR_INFO, "[priority_lb %p] shutting down", this); + } + shutting_down_ = true; + children_.clear(); +} + +void PriorityLb::ExitIdleLocked() { + if (current_priority_ != UINT32_MAX) { + const std::string& child_name = config_->priorities()[current_priority_]; + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { + gpr_log(GPR_INFO, + "[priority_lb %p] exiting IDLE for current priority %d child %s", + this, current_priority_, child_name.c_str()); + } + children_[child_name]->ExitIdleLocked(); + } +} + +void PriorityLb::ResetBackoffLocked() { + for (const auto& p : children_) p.second->ResetBackoffLocked(); +} + +void PriorityLb::UpdateLocked(UpdateArgs args) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { + gpr_log(GPR_INFO, "[priority_lb %p] received update", this); + } + // Save current child. + if (current_priority_ != UINT32_MAX) { + const std::string& child_name = config_->priorities()[current_priority_]; + current_child_from_before_update_ = children_[child_name].get(); + // Unset current_priority_, since it was an index into the old + // config's priority list and may no longer be valid. It will be + // reset later by TryNextPriorityLocked(), but we unset it here in + // case updating any of our children triggers a state update. + current_priority_ = UINT32_MAX; + } + // Update config. + config_ = std::move(args.config); + // Update args. + grpc_channel_args_destroy(args_); + args_ = args.args; + args.args = nullptr; + // Update addresses. + addresses_ = MakeHierarchicalAddressMap(args.addresses); + // Check all existing children against the new config. + for (const auto& p : children_) { + const std::string& child_name = p.first; + auto& child = p.second; + auto config_it = config_->children().find(child_name); + if (config_it == config_->children().end()) { + // Existing child not found in new config. Deactivate it. + child->DeactivateLocked(); + } else { + // Existing child found in new config. Update it. + child->UpdateLocked(config_it->second); + } + } + // Try to get connected. + TryNextPriorityLocked(/*report_connecting=*/children_.empty()); +} + +uint32_t PriorityLb::GetChildPriorityLocked( + const std::string& child_name) const { + for (uint32_t priority = 0; priority < config_->priorities().size(); + ++priority) { + if (config_->priorities()[priority] == child_name) return priority; + } + return UINT32_MAX; +} + +void PriorityLb::HandleChildConnectivityStateChangeLocked( + ChildPriority* child) { + // Special case for the child that was the current child before the + // most recent update. 
+ if (child == current_child_from_before_update_) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { + gpr_log(GPR_INFO, + "[priority_lb %p] state update for current child from before " + "config update", + this); + } + if (child->connectivity_state() == GRPC_CHANNEL_READY || + child->connectivity_state() == GRPC_CHANNEL_IDLE) { + // If it's still READY or IDLE, we stick with this child, so pass + // the new picker up to our parent. + channel_control_helper()->UpdateState(child->connectivity_state(), + child->GetPicker()); + } else { + // If it's no longer READY or IDLE, we should stop using it. + // We already started trying other priorities as a result of the + // update, but calling TryNextPriorityLocked() ensures that we will + // properly select between CONNECTING and TRANSIENT_FAILURE as the + // new state to report to our parent. + current_child_from_before_update_ = nullptr; + TryNextPriorityLocked(/*report_connecting=*/true); + } + return; + } + // Otherwise, find the child's priority. + uint32_t child_priority = GetChildPriorityLocked(child->name()); + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { + gpr_log(GPR_INFO, "[priority_lb %p] state update for priority %d, child %s", + this, child_priority, child->name().c_str()); + } + // Ignore priorities not in the current config. + if (child_priority == UINT32_MAX) return; + // Ignore lower-than-current priorities. + if (child_priority > current_priority_) return; + // If a child reports TRANSIENT_FAILURE, start trying the next priority. + // Note that even if this is for a higher-than-current priority, we + // may still need to create some children between this priority and + // the current one (e.g., if we got an update that inserted new + // priorities ahead of the current one). + if (child->connectivity_state() == GRPC_CHANNEL_TRANSIENT_FAILURE) { + TryNextPriorityLocked( + /*report_connecting=*/child_priority == current_priority_); + return; + } + // The update is for a higher-than-current priority (or for any + // priority if we don't have any current priority). + if (child_priority < current_priority_) { + // If the child reports READY or IDLE, switch to that priority. + // Otherwise, ignore the update. + if (child->connectivity_state() == GRPC_CHANNEL_READY || + child->connectivity_state() == GRPC_CHANNEL_IDLE) { + SelectPriorityLocked(child_priority); + } + return; + } + // The current priority has returned a new picker, so pass it up to + // our parent. + channel_control_helper()->UpdateState(child->connectivity_state(), + child->GetPicker()); +} + +void PriorityLb::DeleteChild(ChildPriority* child) { + // If this was the current child from before the most recent update, + // stop using it. We already started trying other priorities as a + // result of the update, but calling TryNextPriorityLocked() ensures that + // we will properly select between CONNECTING and TRANSIENT_FAILURE as the + // new state to report to our parent. + if (current_child_from_before_update_ == child) { + current_child_from_before_update_ = nullptr; + TryNextPriorityLocked(/*report_connecting=*/true); + } + children_.erase(child->name()); +} + +void PriorityLb::TryNextPriorityLocked(bool report_connecting) { + for (uint32_t priority = 0; priority < config_->priorities().size(); + ++priority) { + // If the child for the priority does not exist yet, create it. 
+ const std::string& child_name = config_->priorities()[priority]; + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { + gpr_log(GPR_INFO, "[priority_lb %p] trying priority %d, child %s", this, + priority, child_name.c_str()); + } + auto& child = children_[child_name]; + if (child == nullptr) { + if (report_connecting) { + channel_control_helper()->UpdateState( + GRPC_CHANNEL_CONNECTING, + absl::make_unique(Ref(DEBUG_LOCATION, "QueuePicker"))); + } + child = MakeOrphanable( + Ref(DEBUG_LOCATION, "ChildPriority"), child_name); + child->UpdateLocked(config_->children().find(child_name)->second); + return; + } + // The child already exists. + child->MaybeReactivateLocked(); + // If the child is in state READY or IDLE, switch to it. + if (child->connectivity_state() == GRPC_CHANNEL_READY || + child->connectivity_state() == GRPC_CHANNEL_IDLE) { + SelectPriorityLocked(priority); + return; + } + // Child is not READY or IDLE. + // If its failover timer is still pending, give it time to fire. + if (child->failover_timer_callback_pending()) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { + gpr_log(GPR_INFO, + "[priority_lb %p] priority %d, child %s: child still " + "attempting to connect, will wait", + this, priority, child_name.c_str()); + } + if (report_connecting) { + channel_control_helper()->UpdateState( + GRPC_CHANNEL_CONNECTING, + absl::make_unique(Ref(DEBUG_LOCATION, "QueuePicker"))); + } + return; + } + // Child has been failing for a while. Move on to the next priority. + } + // If there are no more priorities to try, report TRANSIENT_FAILURE. + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { + gpr_log(GPR_INFO, + "[priority_lb %p] no priority reachable, putting channel in " + "TRANSIENT_FAILURE", + this); + } + current_priority_ = UINT32_MAX; + current_child_from_before_update_ = nullptr; + grpc_error* error = grpc_error_set_int( + GRPC_ERROR_CREATE_FROM_STATIC_STRING("no ready priority"), + GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE); + channel_control_helper()->UpdateState( + GRPC_CHANNEL_TRANSIENT_FAILURE, + absl::make_unique(error)); +} + +void PriorityLb::SelectPriorityLocked(uint32_t priority) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { + gpr_log(GPR_INFO, "[priority_lb %p] selected priority %d, child %s", this, + priority, config_->priorities()[priority].c_str()); + } + current_priority_ = priority; + current_child_from_before_update_ = nullptr; + // Deactivate lower priorities. + for (uint32_t p = priority + 1; p < config_->priorities().size(); ++p) { + const std::string& child_name = config_->priorities()[p]; + auto it = children_.find(child_name); + if (it != children_.end()) it->second->DeactivateLocked(); + } + // Update picker. + auto& child = children_[config_->priorities()[priority]]; + channel_control_helper()->UpdateState(child->connectivity_state(), + child->GetPicker()); +} + +// +// PriorityLb::ChildPriority +// + +PriorityLb::ChildPriority::ChildPriority( + RefCountedPtr priority_policy, std::string name) + : priority_policy_(std::move(priority_policy)), name_(std::move(name)) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { + gpr_log(GPR_INFO, "[priority_lb %p] creating child %s (%p)", + priority_policy_.get(), name_.c_str(), this); + } + GRPC_CLOSURE_INIT(&on_failover_timer_, OnFailoverTimer, this, + grpc_schedule_on_exec_ctx); + GRPC_CLOSURE_INIT(&on_deactivation_timer_, OnDeactivationTimer, this, + grpc_schedule_on_exec_ctx); + // Start the failover timer. 
+ StartFailoverTimerLocked(); +} + +void PriorityLb::ChildPriority::Orphan() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { + gpr_log(GPR_INFO, "[priority_lb %p] child %s (%p): orphaned", + priority_policy_.get(), name_.c_str(), this); + } + MaybeCancelFailoverTimerLocked(); + if (deactivation_timer_callback_pending_) { + grpc_timer_cancel(&deactivation_timer_); + } + // Remove the child policy's interested_parties pollset_set from the + // xDS policy. + grpc_pollset_set_del_pollset_set(child_policy_->interested_parties(), + priority_policy_->interested_parties()); + child_policy_.reset(); + // Drop our ref to the child's picker, in case it's holding a ref to + // the child. + picker_wrapper_.reset(); + if (deactivation_timer_callback_pending_) { + grpc_timer_cancel(&deactivation_timer_); + } + Unref(DEBUG_LOCATION, "ChildPriority+Orphan"); +} + +void PriorityLb::ChildPriority::UpdateLocked( + RefCountedPtr config) { + if (priority_policy_->shutting_down_) return; + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { + gpr_log(GPR_INFO, "[priority_lb %p] child %s (%p): start update", + priority_policy_.get(), name_.c_str(), this); + } + // Create policy if needed. + if (child_policy_ == nullptr) { + child_policy_ = CreateChildPolicyLocked(priority_policy_->args_); + } + // Construct update args. + UpdateArgs update_args; + update_args.config = std::move(config); + update_args.addresses = priority_policy_->addresses_[name_]; + update_args.args = grpc_channel_args_copy(priority_policy_->args_); + // Update the policy. + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { + gpr_log(GPR_INFO, + "[priority_lb %p] child %s (%p): updating child policy handler %p", + priority_policy_.get(), name_.c_str(), this, child_policy_.get()); + } + child_policy_->UpdateLocked(std::move(update_args)); +} + +OrphanablePtr +PriorityLb::ChildPriority::CreateChildPolicyLocked( + const grpc_channel_args* args) { + LoadBalancingPolicy::Args lb_policy_args; + lb_policy_args.work_serializer = priority_policy_->work_serializer(); + lb_policy_args.args = args; + lb_policy_args.channel_control_helper = + absl::make_unique(this->Ref(DEBUG_LOCATION, "Helper")); + OrphanablePtr lb_policy = + MakeOrphanable(std::move(lb_policy_args), + &grpc_lb_priority_trace); + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { + gpr_log(GPR_INFO, + "[priority_lb %p] child %s (%p): created new child policy " + "handler %p", + priority_policy_.get(), name_.c_str(), this, lb_policy.get()); + } + // Add the parent's interested_parties pollset_set to that of the newly + // created child policy. This will make the child policy progress upon + // activity on the parent LB, which in turn is tied to the application's call. 
+ grpc_pollset_set_add_pollset_set(lb_policy->interested_parties(), + priority_policy_->interested_parties()); + return lb_policy; +} + +void PriorityLb::ChildPriority::ExitIdleLocked() { + if (connectivity_state_ == GRPC_CHANNEL_IDLE && + !failover_timer_callback_pending_) { + StartFailoverTimerLocked(); + } + child_policy_->ExitIdleLocked(); +} + +void PriorityLb::ChildPriority::ResetBackoffLocked() { + child_policy_->ResetBackoffLocked(); +} + +void PriorityLb::ChildPriority::OnConnectivityStateUpdateLocked( + grpc_connectivity_state state, std::unique_ptr picker) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { + gpr_log(GPR_INFO, + "[priority_lb %p] child %s (%p): state update: %s, picker %p", + priority_policy_.get(), name_.c_str(), this, + ConnectivityStateName(state), picker.get()); + } + // Store the state and picker. + connectivity_state_ = state; + picker_wrapper_ = MakeRefCounted(std::move(picker)); + // If READY or TRANSIENT_FAILURE, cancel failover timer. + if (state == GRPC_CHANNEL_READY || state == GRPC_CHANNEL_TRANSIENT_FAILURE) { + MaybeCancelFailoverTimerLocked(); + } + // Notify the parent policy. + priority_policy_->HandleChildConnectivityStateChangeLocked(this); +} + +void PriorityLb::ChildPriority::StartFailoverTimerLocked() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { + gpr_log(GPR_INFO, + "[priority_lb %p] child %s (%p): starting failover timer for %d ms", + priority_policy_.get(), name_.c_str(), this, + priority_policy_->child_failover_timeout_ms_); + } + Ref(DEBUG_LOCATION, "ChildPriority+OnFailoverTimerLocked").release(); + grpc_timer_init( + &failover_timer_, + ExecCtx::Get()->Now() + priority_policy_->child_failover_timeout_ms_, + &on_failover_timer_); + failover_timer_callback_pending_ = true; +} + +void PriorityLb::ChildPriority::MaybeCancelFailoverTimerLocked() { + if (failover_timer_callback_pending_) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { + gpr_log(GPR_INFO, + "[priority_lb %p] child %s (%p): cancelling failover timer", + priority_policy_.get(), name_.c_str(), this); + } + grpc_timer_cancel(&failover_timer_); + failover_timer_callback_pending_ = false; + } +} + +void PriorityLb::ChildPriority::OnFailoverTimer(void* arg, grpc_error* error) { + ChildPriority* self = static_cast(arg); + GRPC_ERROR_REF(error); // ref owned by lambda + self->priority_policy_->work_serializer()->Run( + [self, error]() { self->OnFailoverTimerLocked(error); }, DEBUG_LOCATION); +} + +void PriorityLb::ChildPriority::OnFailoverTimerLocked(grpc_error* error) { + if (error == GRPC_ERROR_NONE && failover_timer_callback_pending_ && + !priority_policy_->shutting_down_) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { + gpr_log(GPR_INFO, + "[priority_lb %p] child %s (%p): failover timer fired, " + "reporting TRANSIENT_FAILURE", + priority_policy_.get(), name_.c_str(), this); + } + failover_timer_callback_pending_ = false; + OnConnectivityStateUpdateLocked(GRPC_CHANNEL_TRANSIENT_FAILURE, nullptr); + } + Unref(DEBUG_LOCATION, "ChildPriority+OnFailoverTimerLocked"); + GRPC_ERROR_UNREF(error); +} + +void PriorityLb::ChildPriority::DeactivateLocked() { + // If already deactivated, don't do it again. 
+ if (deactivation_timer_callback_pending_) return; + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { + gpr_log(GPR_INFO, + "[priority_lb %p] child %s (%p): deactivating -- will remove in %d " + "ms.", + priority_policy_.get(), name_.c_str(), this, + kChildRetentionIntervalMs); + } + MaybeCancelFailoverTimerLocked(); + // Start a timer to delete the child. + Ref(DEBUG_LOCATION, "ChildPriority+timer").release(); + grpc_timer_init(&deactivation_timer_, + ExecCtx::Get()->Now() + kChildRetentionIntervalMs, + &on_deactivation_timer_); + deactivation_timer_callback_pending_ = true; +} + +void PriorityLb::ChildPriority::MaybeReactivateLocked() { + if (deactivation_timer_callback_pending_) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { + gpr_log(GPR_INFO, "[priority_lb %p] child %s (%p): reactivating", + priority_policy_.get(), name_.c_str(), this); + } + deactivation_timer_callback_pending_ = false; + grpc_timer_cancel(&deactivation_timer_); + } +} + +void PriorityLb::ChildPriority::OnDeactivationTimer(void* arg, + grpc_error* error) { + ChildPriority* self = static_cast(arg); + GRPC_ERROR_REF(error); // ref owned by lambda + self->priority_policy_->work_serializer()->Run( + [self, error]() { self->OnDeactivationTimerLocked(error); }, + DEBUG_LOCATION); +} + +void PriorityLb::ChildPriority::OnDeactivationTimerLocked(grpc_error* error) { + if (error == GRPC_ERROR_NONE && deactivation_timer_callback_pending_ && + !priority_policy_->shutting_down_) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) { + gpr_log(GPR_INFO, + "[priority_lb %p] child %s (%p): deactivation timer fired, " + "deleting child", + priority_policy_.get(), name_.c_str(), this); + } + deactivation_timer_callback_pending_ = false; + priority_policy_->DeleteChild(this); + } + Unref(DEBUG_LOCATION, "ChildPriority+timer"); + GRPC_ERROR_UNREF(error); +} + +// +// PriorityLb::ChildPriority::Helper +// + +void PriorityLb::ChildPriority::Helper::RequestReresolution() { + if (priority_->priority_policy_->shutting_down_) return; + priority_->priority_policy_->channel_control_helper()->RequestReresolution(); +} + +RefCountedPtr +PriorityLb::ChildPriority::Helper::CreateSubchannel( + const grpc_channel_args& args) { + if (priority_->priority_policy_->shutting_down_) return nullptr; + return priority_->priority_policy_->channel_control_helper() + ->CreateSubchannel(args); +} + +void PriorityLb::ChildPriority::Helper::UpdateState( + grpc_connectivity_state state, std::unique_ptr picker) { + if (priority_->priority_policy_->shutting_down_) return; + // Notify the priority. + priority_->OnConnectivityStateUpdateLocked(state, std::move(picker)); +} + +void PriorityLb::ChildPriority::Helper::AddTraceEvent(TraceSeverity severity, + StringView message) { + if (priority_->priority_policy_->shutting_down_) return; + priority_->priority_policy_->channel_control_helper()->AddTraceEvent(severity, + message); +} + +// +// factory +// + +class PriorityLbFactory : public LoadBalancingPolicyFactory { + public: + OrphanablePtr CreateLoadBalancingPolicy( + LoadBalancingPolicy::Args args) const override { + return MakeOrphanable(std::move(args)); + } + + const char* name() const override { return kPriority; } + + RefCountedPtr ParseLoadBalancingConfig( + const Json& json, grpc_error** error) const override { + GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE); + if (json.type() == Json::Type::JSON_NULL) { + // priority was mentioned as a policy in the deprecated + // loadBalancingPolicy field or in the client API. 
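Since the error emitted just below points users at the loadBalancingConfig field, a concrete example of a configuration this parser accepts may help; the child names and the round_robin leaf configs are placeholders:

// Illustrative only: a service config fragment selecting the new policy.
constexpr char kExamplePriorityServiceConfig[] = R"json({
  "loadBalancingConfig": [ {
    "priority_experimental": {
      "children": {
        "child0": { "config": [ { "round_robin": {} } ] },
        "child1": { "config": [ { "round_robin": {} } ] }
      },
      "priorities": [ "child0", "child1" ]
    }
  } ]
})json";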
+      *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+          "field:loadBalancingPolicy error:priority policy requires "
+          "configuration. Please use loadBalancingConfig field of service "
+          "config instead.");
+      return nullptr;
+    }
+    std::vector<grpc_error*> error_list;
+    // Children.
+    std::map<std::string, RefCountedPtr<LoadBalancingPolicy::Config>> children;
+    auto it = json.object_value().find("children");
+    if (it == json.object_value().end()) {
+      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+          "field:children error:required field missing"));
+    } else if (it->second.type() != Json::Type::OBJECT) {
+      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+          "field:children error:type should be object"));
+    } else {
+      const Json::Object& object = it->second.object_value();
+      for (const auto& p : object) {
+        const std::string& child_name = p.first;
+        const Json& element = p.second;
+        if (element.type() != Json::Type::OBJECT) {
+          error_list.push_back(GRPC_ERROR_CREATE_FROM_COPIED_STRING(
+              absl::StrCat("field:children key:", child_name,
+                           " error:should be type object")
+                  .c_str()));
+        } else {
+          auto it2 = element.object_value().find("config");
+          if (it2 == element.object_value().end()) {
+            error_list.push_back(GRPC_ERROR_CREATE_FROM_COPIED_STRING(
+                absl::StrCat("field:children key:", child_name,
+                             " error:missing 'config' field")
+                    .c_str()));
+          } else {
+            grpc_error* parse_error = GRPC_ERROR_NONE;
+            auto config = LoadBalancingPolicyRegistry::ParseLoadBalancingConfig(
+                it2->second, &parse_error);
+            if (config == nullptr) {
+              GPR_DEBUG_ASSERT(parse_error != GRPC_ERROR_NONE);
+              error_list.push_back(
+                  GRPC_ERROR_CREATE_REFERENCING_FROM_COPIED_STRING(
+                      absl::StrCat("field:children key:", child_name).c_str(),
+                      &parse_error, 1));
+              GRPC_ERROR_UNREF(parse_error);
+            }
+            children[child_name] = std::move(config);
+          }
+        }
+      }
+    }
+    // Priorities.
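// ----------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): a priority_experimental config
// that the parser in this factory accepts. The child names and the round_robin
// child configs are hypothetical; what matters is the shape -- a "children"
// object whose entries carry a "config" LB config, and an ordered "priorities"
// array naming those children, as parsed above and below.
// ----------------------------------------------------------------------------
constexpr char kExamplePriorityLbConfig[] = R"json({
  "children": {
    "child0": { "config": [ { "round_robin": {} } ] },
    "child1": { "config": [ { "round_robin": {} } ] }
  },
  "priorities": [ "child0", "child1" ]
})json";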
+ std::vector priorities; + it = json.object_value().find("priorities"); + if (it == json.object_value().end()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:priorities error:required field missing")); + } else if (it->second.type() != Json::Type::ARRAY) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:priorities error:type should be array")); + } else { + const Json::Array& array = it->second.array_value(); + for (size_t i = 0; i < array.size(); ++i) { + const Json& element = array[i]; + if (element.type() != Json::Type::STRING) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_COPIED_STRING( + absl::StrCat("field:priorities element:", i, + " error:should be type string") + .c_str())); + } else if (children.find(element.string_value()) == children.end()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_COPIED_STRING( + absl::StrCat("field:priorities element:", i, + " error:unknown child '", element.string_value(), + "'") + .c_str())); + } else { + priorities.emplace_back(element.string_value()); + } + } + if (priorities.size() != children.size()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_COPIED_STRING( + absl::StrCat("field:priorities error:priorities size (", + priorities.size(), ") != children size (", + children.size(), ")") + .c_str())); + } + } + if (error_list.empty()) { + return MakeRefCounted(std::move(children), + std::move(priorities)); + } else { + *error = GRPC_ERROR_CREATE_FROM_VECTOR( + "priority_experimental LB policy config", &error_list); + return nullptr; + } + } +}; + +} // namespace + +} // namespace grpc_core + +// +// Plugin registration +// + +void grpc_lb_policy_priority_init() { + grpc_core::LoadBalancingPolicyRegistry::Builder:: + RegisterLoadBalancingPolicyFactory( + absl::make_unique()); +} + +void grpc_lb_policy_priority_shutdown() {} diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h index 33ce8a5c5fd..0fa4871b707 100644 --- a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h @@ -370,13 +370,6 @@ SubchannelList::SubchannelList( GRPC_ARG_SERVICE_CONFIG}; // Create a subchannel for each address. for (size_t i = 0; i < addresses.size(); i++) { - // TODO(roth): we should ideally hide this from the LB policy code. In - // principle, if we're dealing with this special case in the client_channel - // code for selecting grpclb, then we should also strip out these addresses - // there if we're not using grpclb. - if (addresses[i].IsBalancer()) { - continue; - } InlinedVector args_to_add; const size_t subchannel_address_arg_index = args_to_add.size(); args_to_add.emplace_back( diff --git a/src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc b/src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc new file mode 100644 index 00000000000..09e72ae72e0 --- /dev/null +++ b/src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc @@ -0,0 +1,721 @@ +// +// Copyright 2018 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include + +#include +#include +#include + +#include "absl/strings/str_cat.h" + +#include + +#include "src/core/ext/filters/client_channel/lb_policy.h" +#include "src/core/ext/filters/client_channel/lb_policy/address_filtering.h" +#include "src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h" +#include "src/core/ext/filters/client_channel/lb_policy_factory.h" +#include "src/core/ext/filters/client_channel/lb_policy_registry.h" +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gprpp/orphanable.h" +#include "src/core/lib/gprpp/ref_counted_ptr.h" +#include "src/core/lib/iomgr/timer.h" +#include "src/core/lib/iomgr/work_serializer.h" + +namespace grpc_core { + +TraceFlag grpc_lb_weighted_target_trace(false, "weighted_target_lb"); + +namespace { + +constexpr char kWeightedTarget[] = "weighted_target_experimental"; + +// How long we keep a child around for after it has been removed from +// the config. +constexpr int kChildRetentionIntervalMs = 15 * 60 * 1000; + +// Config for weighted_target LB policy. +class WeightedTargetLbConfig : public LoadBalancingPolicy::Config { + public: + struct ChildConfig { + uint32_t weight; + RefCountedPtr config; + }; + + using TargetMap = std::map; + + explicit WeightedTargetLbConfig(TargetMap target_map) + : target_map_(std::move(target_map)) {} + + const char* name() const override { return kWeightedTarget; } + + const TargetMap& target_map() const { return target_map_; } + + private: + TargetMap target_map_; +}; + +// weighted_target LB policy. +class WeightedTargetLb : public LoadBalancingPolicy { + public: + explicit WeightedTargetLb(Args args); + + const char* name() const override { return kWeightedTarget; } + + void UpdateLocked(UpdateArgs args) override; + void ResetBackoffLocked() override; + + private: + // A simple wrapper for ref-counting a picker from the child policy. + class ChildPickerWrapper : public RefCounted { + public: + explicit ChildPickerWrapper(std::unique_ptr picker) + : picker_(std::move(picker)) {} + PickResult Pick(PickArgs args) { return picker_->Pick(args); } + + private: + std::unique_ptr picker_; + }; + + // Picks a child using stateless WRR and then delegates to that + // child's picker. + class WeightedPicker : public SubchannelPicker { + public: + // Maintains a weighted list of pickers from each child that is in + // ready state. The first element in the pair represents the end of a + // range proportional to the child's weight. The start of the range + // is the previous value in the vector and is 0 for the first element. + using PickerList = + InlinedVector>, + 1>; + + explicit WeightedPicker(PickerList pickers) + : pickers_(std::move(pickers)) {} + + PickResult Pick(PickArgs args) override; + + private: + PickerList pickers_; + }; + + // Each WeightedChild holds a ref to its parent WeightedTargetLb. 
+ class WeightedChild : public InternallyRefCounted { + public: + WeightedChild(RefCountedPtr weighted_target_policy, + const std::string& name); + ~WeightedChild(); + + void Orphan() override; + + void UpdateLocked(const WeightedTargetLbConfig::ChildConfig& config, + ServerAddressList addresses, + const grpc_channel_args* args); + void ResetBackoffLocked(); + void DeactivateLocked(); + + uint32_t weight() const { return weight_; } + grpc_connectivity_state connectivity_state() const { + return connectivity_state_; + } + RefCountedPtr picker_wrapper() const { + return picker_wrapper_; + } + + private: + class Helper : public ChannelControlHelper { + public: + explicit Helper(RefCountedPtr weighted_child) + : weighted_child_(std::move(weighted_child)) {} + + ~Helper() { weighted_child_.reset(DEBUG_LOCATION, "Helper"); } + + RefCountedPtr CreateSubchannel( + const grpc_channel_args& args) override; + void UpdateState(grpc_connectivity_state state, + std::unique_ptr picker) override; + void RequestReresolution() override; + void AddTraceEvent(TraceSeverity severity, StringView message) override; + + private: + RefCountedPtr weighted_child_; + }; + + // Methods for dealing with the child policy. + OrphanablePtr CreateChildPolicyLocked( + const grpc_channel_args* args); + + void OnConnectivityStateUpdateLocked( + grpc_connectivity_state state, + std::unique_ptr picker); + + static void OnDelayedRemovalTimer(void* arg, grpc_error* error); + void OnDelayedRemovalTimerLocked(grpc_error* error); + + // The owning LB policy. + RefCountedPtr weighted_target_policy_; + + const std::string& name_; + + uint32_t weight_; + + OrphanablePtr child_policy_; + + RefCountedPtr picker_wrapper_; + grpc_connectivity_state connectivity_state_ = GRPC_CHANNEL_CONNECTING; + bool seen_failure_since_ready_ = false; + + // States for delayed removal. + grpc_timer delayed_removal_timer_; + grpc_closure on_delayed_removal_timer_; + bool delayed_removal_timer_callback_pending_ = false; + bool shutdown_ = false; + }; + + ~WeightedTargetLb(); + + void ShutdownLocked() override; + + void UpdateStateLocked(); + + // Current config from the resolver. + RefCountedPtr config_; + + // Internal state. + bool shutting_down_ = false; + + // Children. + std::map> targets_; +}; + +// +// WeightedTargetLb::WeightedPicker +// + +WeightedTargetLb::PickResult WeightedTargetLb::WeightedPicker::Pick( + PickArgs args) { + // Generate a random number in [0, total weight). + const uint32_t key = rand() % pickers_[pickers_.size() - 1].first; + // Find the index in pickers_ corresponding to key. + size_t mid = 0; + size_t start_index = 0; + size_t end_index = pickers_.size() - 1; + size_t index = 0; + while (end_index > start_index) { + mid = (start_index + end_index) / 2; + if (pickers_[mid].first > key) { + end_index = mid; + } else if (pickers_[mid].first < key) { + start_index = mid + 1; + } else { + index = mid + 1; + break; + } + } + if (index == 0) index = start_index; + GPR_ASSERT(pickers_[index].first > key); + // Delegate to the child picker. 
+ return pickers_[index].second->Pick(args); +} + +// +// WeightedTargetLb +// + +WeightedTargetLb::WeightedTargetLb(Args args) + : LoadBalancingPolicy(std::move(args)) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + gpr_log(GPR_INFO, "[weighted_target_lb %p] created", this); + } +} + +WeightedTargetLb::~WeightedTargetLb() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + gpr_log(GPR_INFO, + "[weighted_target_lb %p] destroying weighted_target LB policy", + this); + } +} + +void WeightedTargetLb::ShutdownLocked() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + gpr_log(GPR_INFO, "[weighted_target_lb %p] shutting down", this); + } + shutting_down_ = true; + targets_.clear(); +} + +void WeightedTargetLb::ResetBackoffLocked() { + for (auto& p : targets_) p.second->ResetBackoffLocked(); +} + +void WeightedTargetLb::UpdateLocked(UpdateArgs args) { + if (shutting_down_) return; + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + gpr_log(GPR_INFO, "[weighted_target_lb %p] Received update", this); + } + // Update config. + config_ = std::move(args.config); + // Deactivate the targets not in the new config. + for (const auto& p : targets_) { + const std::string& name = p.first; + WeightedChild* child = p.second.get(); + if (config_->target_map().find(name) == config_->target_map().end()) { + child->DeactivateLocked(); + } + } + // Add or update the targets in the new config. + HierarchicalAddressMap address_map = + MakeHierarchicalAddressMap(args.addresses); + for (const auto& p : config_->target_map()) { + const std::string& name = p.first; + const WeightedTargetLbConfig::ChildConfig& config = p.second; + auto it = targets_.find(name); + if (it == targets_.end()) { + it = targets_.emplace(std::make_pair(name, nullptr)).first; + it->second = MakeOrphanable( + Ref(DEBUG_LOCATION, "WeightedChild"), it->first); + } + it->second->UpdateLocked(config, std::move(address_map[name]), args.args); + } +} + +void WeightedTargetLb::UpdateStateLocked() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + gpr_log(GPR_INFO, + "[weighted_target_lb %p] scanning children to determine " + "connectivity state", + this); + } + // Construct a new picker which maintains a map of all child pickers + // that are ready. Each child is represented by a portion of the range + // proportional to its weight, such that the total range is the sum of the + // weights of all children. + WeightedPicker::PickerList picker_list; + uint32_t end = 0; + // Also count the number of children in each state, to determine the + // overall state. + size_t num_connecting = 0; + size_t num_idle = 0; + size_t num_transient_failures = 0; + for (const auto& p : targets_) { + const std::string& child_name = p.first; + const WeightedChild* child = p.second.get(); + // Skip the targets that are not in the latest update. 
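// ----------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): how the cumulative-weight list
// consumed by WeightedPicker::Pick() above (and built a few lines below in
// UpdateStateLocked()) maps a random key to a child. This standalone version
// uses a linear scan instead of the binary search, and the weights and target
// names are hypothetical.
// ----------------------------------------------------------------------------
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <utility>
#include <vector>

// Each entry stores the *end* of its target's range; the start is the previous
// entry's end (0 for the first). A key in [0, total weight) selects the first
// entry whose end is greater than the key.
static std::string PickByWeight(
    const std::vector<std::pair<uint32_t, std::string>>& ranges, uint32_t key) {
  for (const auto& range : ranges) {
    if (key < range.first) return range.second;
  }
  return ranges.back().second;  // Not reached when key < total weight.
}

int main() {
  // Targets with weights 1, 2, and 3 occupy [0,1), [1,3), and [3,6).
  std::vector<std::pair<uint32_t, std::string>> ranges = {
      {1, "locality_a"}, {3, "locality_b"}, {6, "locality_c"}};
  const uint32_t key = rand() % ranges.back().first;
  std::printf("key=%u -> %s\n", key, PickByWeight(ranges, key).c_str());
  return 0;
}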
+ if (config_->target_map().find(child_name) == config_->target_map().end()) { + continue; + } + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + gpr_log(GPR_INFO, + "[weighted_target_lb %p] child=%s state=%s weight=%d picker=%p", + this, child_name.c_str(), + ConnectivityStateName(child->connectivity_state()), + child->weight(), child->picker_wrapper().get()); + } + switch (child->connectivity_state()) { + case GRPC_CHANNEL_READY: { + end += child->weight(); + picker_list.push_back(std::make_pair(end, child->picker_wrapper())); + break; + } + case GRPC_CHANNEL_CONNECTING: { + ++num_connecting; + break; + } + case GRPC_CHANNEL_IDLE: { + ++num_idle; + break; + } + case GRPC_CHANNEL_TRANSIENT_FAILURE: { + ++num_transient_failures; + break; + } + default: + GPR_UNREACHABLE_CODE(return ); + } + } + // Determine aggregated connectivity state. + grpc_connectivity_state connectivity_state; + if (!picker_list.empty()) { + connectivity_state = GRPC_CHANNEL_READY; + } else if (num_connecting > 0) { + connectivity_state = GRPC_CHANNEL_CONNECTING; + } else if (num_idle > 0) { + connectivity_state = GRPC_CHANNEL_IDLE; + } else { + connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE; + } + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + gpr_log(GPR_INFO, "[weighted_target_lb %p] connectivity changed to %s", + this, ConnectivityStateName(connectivity_state)); + } + std::unique_ptr picker; + switch (connectivity_state) { + case GRPC_CHANNEL_READY: + picker = absl::make_unique(std::move(picker_list)); + break; + case GRPC_CHANNEL_CONNECTING: + case GRPC_CHANNEL_IDLE: + picker = + absl::make_unique(Ref(DEBUG_LOCATION, "QueuePicker")); + break; + default: + picker = absl::make_unique( + GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "weighted_target: all children report state TRANSIENT_FAILURE")); + } + channel_control_helper()->UpdateState(connectivity_state, std::move(picker)); +} + +// +// WeightedTargetLb::WeightedChild +// + +WeightedTargetLb::WeightedChild::WeightedChild( + RefCountedPtr weighted_target_policy, + const std::string& name) + : weighted_target_policy_(std::move(weighted_target_policy)), name_(name) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + gpr_log(GPR_INFO, "[weighted_target_lb %p] created WeightedChild %p for %s", + weighted_target_policy_.get(), this, name_.c_str()); + } + GRPC_CLOSURE_INIT(&on_delayed_removal_timer_, OnDelayedRemovalTimer, this, + grpc_schedule_on_exec_ctx); +} + +WeightedTargetLb::WeightedChild::~WeightedChild() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + gpr_log(GPR_INFO, + "[weighted_target_lb %p] WeightedChild %p %s: destroying child", + weighted_target_policy_.get(), this, name_.c_str()); + } + weighted_target_policy_.reset(DEBUG_LOCATION, "WeightedChild"); +} + +void WeightedTargetLb::WeightedChild::Orphan() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + gpr_log(GPR_INFO, + "[weighted_target_lb %p] WeightedChild %p %s: shutting down child", + weighted_target_policy_.get(), this, name_.c_str()); + } + // Remove the child policy's interested_parties pollset_set from the + // xDS policy. + grpc_pollset_set_del_pollset_set( + child_policy_->interested_parties(), + weighted_target_policy_->interested_parties()); + child_policy_.reset(); + // Drop our ref to the child's picker, in case it's holding a ref to + // the child. 
+ picker_wrapper_.reset(); + if (delayed_removal_timer_callback_pending_) { + delayed_removal_timer_callback_pending_ = false; + grpc_timer_cancel(&delayed_removal_timer_); + } + shutdown_ = true; + Unref(); +} + +OrphanablePtr +WeightedTargetLb::WeightedChild::CreateChildPolicyLocked( + const grpc_channel_args* args) { + LoadBalancingPolicy::Args lb_policy_args; + lb_policy_args.work_serializer = weighted_target_policy_->work_serializer(); + lb_policy_args.args = args; + lb_policy_args.channel_control_helper = + absl::make_unique(this->Ref(DEBUG_LOCATION, "Helper")); + OrphanablePtr lb_policy = + MakeOrphanable(std::move(lb_policy_args), + &grpc_lb_weighted_target_trace); + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + gpr_log(GPR_INFO, + "[weighted_target_lb %p] WeightedChild %p %s: Created new child " + "policy handler %p", + weighted_target_policy_.get(), this, name_.c_str(), + lb_policy.get()); + } + // Add the xDS's interested_parties pollset_set to that of the newly created + // child policy. This will make the child policy progress upon activity on + // xDS LB, which in turn is tied to the application's call. + grpc_pollset_set_add_pollset_set( + lb_policy->interested_parties(), + weighted_target_policy_->interested_parties()); + return lb_policy; +} + +void WeightedTargetLb::WeightedChild::UpdateLocked( + const WeightedTargetLbConfig::ChildConfig& config, + ServerAddressList addresses, const grpc_channel_args* args) { + if (weighted_target_policy_->shutting_down_) return; + // Update child weight. + weight_ = config.weight; + // Reactivate if needed. + if (delayed_removal_timer_callback_pending_) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + gpr_log(GPR_INFO, + "[weighted_target_lb %p] WeightedChild %p %s: reactivating", + weighted_target_policy_.get(), this, name_.c_str()); + } + delayed_removal_timer_callback_pending_ = false; + grpc_timer_cancel(&delayed_removal_timer_); + } + // Create child policy if needed. + if (child_policy_ == nullptr) { + child_policy_ = CreateChildPolicyLocked(args); + } + // Construct update args. + UpdateArgs update_args; + update_args.config = config.config; + update_args.addresses = std::move(addresses); + update_args.args = grpc_channel_args_copy(args); + // Update the policy. + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + gpr_log(GPR_INFO, + "[weighted_target_lb %p] WeightedChild %p %s: Updating child " + "policy handler %p", + weighted_target_policy_.get(), this, name_.c_str(), + child_policy_.get()); + } + child_policy_->UpdateLocked(std::move(update_args)); +} + +void WeightedTargetLb::WeightedChild::ResetBackoffLocked() { + child_policy_->ResetBackoffLocked(); +} + +void WeightedTargetLb::WeightedChild::OnConnectivityStateUpdateLocked( + grpc_connectivity_state state, std::unique_ptr picker) { + // Cache the picker in the WeightedChild. + picker_wrapper_ = MakeRefCounted(std::move(picker)); + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + gpr_log(GPR_INFO, + "[weighted_target_lb %p] WeightedChild %p %s: connectivity " + "state update: state=%s picker_wrapper=%p", + weighted_target_policy_.get(), this, name_.c_str(), + ConnectivityStateName(state), picker_wrapper_.get()); + } + // If the child reports IDLE, immediately tell it to exit idle. + if (state == GRPC_CHANNEL_IDLE) child_policy_->ExitIdleLocked(); + // Decide what state to report for aggregation purposes. 
+ // If we haven't seen a failure since the last time we were in state + // READY, then we report the state change as-is. However, once we do see + // a failure, we report TRANSIENT_FAILURE and ignore any subsequent state + // changes until we go back into state READY. + if (!seen_failure_since_ready_) { + if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) { + seen_failure_since_ready_ = true; + } + } else { + if (state != GRPC_CHANNEL_READY) return; + seen_failure_since_ready_ = false; + } + connectivity_state_ = state; + // Notify the LB policy. + weighted_target_policy_->UpdateStateLocked(); +} + +void WeightedTargetLb::WeightedChild::DeactivateLocked() { + // If already deactivated, don't do that again. + if (weight_ == 0) return; + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + gpr_log(GPR_INFO, + "[weighted_target_lb %p] WeightedChild %p %s: deactivating", + weighted_target_policy_.get(), this, name_.c_str()); + } + // Set the child weight to 0 so that future picker won't contain this child. + weight_ = 0; + // Start a timer to delete the child. + Ref(DEBUG_LOCATION, "WeightedChild+timer").release(); + delayed_removal_timer_callback_pending_ = true; + grpc_timer_init(&delayed_removal_timer_, + ExecCtx::Get()->Now() + kChildRetentionIntervalMs, + &on_delayed_removal_timer_); +} + +void WeightedTargetLb::WeightedChild::OnDelayedRemovalTimer(void* arg, + grpc_error* error) { + WeightedChild* self = static_cast(arg); + GRPC_ERROR_REF(error); // ref owned by lambda + self->weighted_target_policy_->work_serializer()->Run( + [self, error]() { self->OnDelayedRemovalTimerLocked(error); }, + DEBUG_LOCATION); +} + +void WeightedTargetLb::WeightedChild::OnDelayedRemovalTimerLocked( + grpc_error* error) { + if (error == GRPC_ERROR_NONE && delayed_removal_timer_callback_pending_ && + !shutdown_ && weight_ == 0) { + delayed_removal_timer_callback_pending_ = false; + weighted_target_policy_->targets_.erase(name_); + } + Unref(DEBUG_LOCATION, "WeightedChild+timer"); + GRPC_ERROR_UNREF(error); +} + +// +// WeightedTargetLb::WeightedChild::Helper +// + +RefCountedPtr +WeightedTargetLb::WeightedChild::Helper::CreateSubchannel( + const grpc_channel_args& args) { + if (weighted_child_->weighted_target_policy_->shutting_down_) return nullptr; + return weighted_child_->weighted_target_policy_->channel_control_helper() + ->CreateSubchannel(args); +} + +void WeightedTargetLb::WeightedChild::Helper::UpdateState( + grpc_connectivity_state state, std::unique_ptr picker) { + if (weighted_child_->weighted_target_policy_->shutting_down_) return; + weighted_child_->OnConnectivityStateUpdateLocked(state, std::move(picker)); +} + +void WeightedTargetLb::WeightedChild::Helper::RequestReresolution() { + if (weighted_child_->weighted_target_policy_->shutting_down_) return; + weighted_child_->weighted_target_policy_->channel_control_helper() + ->RequestReresolution(); +} + +void WeightedTargetLb::WeightedChild::Helper::AddTraceEvent( + TraceSeverity severity, StringView message) { + if (weighted_child_->weighted_target_policy_->shutting_down_) return; + weighted_child_->weighted_target_policy_->channel_control_helper() + ->AddTraceEvent(severity, message); +} + +// +// factory +// + +class WeightedTargetLbFactory : public LoadBalancingPolicyFactory { + public: + OrphanablePtr CreateLoadBalancingPolicy( + LoadBalancingPolicy::Args args) const override { + return MakeOrphanable(std::move(args)); + } + + const char* name() const override { return kWeightedTarget; } + + RefCountedPtr ParseLoadBalancingConfig( + 
+      const Json& json, grpc_error** error) const override {
+    GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE);
+    if (json.type() == Json::Type::JSON_NULL) {
+      // weighted_target was mentioned as a policy in the deprecated
+      // loadBalancingPolicy field or in the client API.
+      *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+          "field:loadBalancingPolicy error:weighted_target policy requires "
+          "configuration. Please use loadBalancingConfig field of service "
+          "config instead.");
+      return nullptr;
+    }
+    std::vector<grpc_error*> error_list;
+    // Weight map.
+    WeightedTargetLbConfig::TargetMap target_map;
+    auto it = json.object_value().find("targets");
+    if (it == json.object_value().end()) {
+      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+          "field:targets error:required field not present"));
+    } else if (it->second.type() != Json::Type::OBJECT) {
+      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+          "field:targets error:type should be object"));
+    } else {
+      for (const auto& p : it->second.object_value()) {
+        WeightedTargetLbConfig::ChildConfig child_config;
+        std::vector<grpc_error*> child_errors =
+            ParseChildConfig(p.second, &child_config);
+        if (!child_errors.empty()) {
+          // Can't use GRPC_ERROR_CREATE_FROM_VECTOR() here, because the error
+          // string is not static in this case.
+          grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(
+              absl::StrCat("field:targets key:", p.first).c_str());
+          for (grpc_error* child_error : child_errors) {
+            error = grpc_error_add_child(error, child_error);
+          }
+          error_list.push_back(error);
+        } else {
+          target_map[p.first] = std::move(child_config);
+        }
+      }
+    }
+    if (!error_list.empty()) {
+      *error = GRPC_ERROR_CREATE_FROM_VECTOR(
+          "weighted_target_experimental LB policy config", &error_list);
+      return nullptr;
+    }
+    return MakeRefCounted<WeightedTargetLbConfig>(std::move(target_map));
+  }
+
+ private:
+  static std::vector<grpc_error*> ParseChildConfig(
+      const Json& json, WeightedTargetLbConfig::ChildConfig* child_config) {
+    std::vector<grpc_error*> error_list;
+    if (json.type() != Json::Type::OBJECT) {
+      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+          "value should be of type object"));
+      return error_list;
+    }
+    // Weight.
+    auto it = json.object_value().find("weight");
+    if (it == json.object_value().end()) {
+      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+          "required field \"weight\" not specified"));
+    } else if (it->second.type() != Json::Type::NUMBER) {
+      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+          "field:weight error:must be of type number"));
+    } else {
+      child_config->weight =
+          gpr_parse_nonnegative_int(it->second.string_value().c_str());
+      if (child_config->weight == -1) {
+        error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+            "field:weight error:unparseable value"));
+      } else if (child_config->weight == 0) {
+        error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+            "field:weight error:value must be greater than zero"));
+      }
+    }
+    // Child policy.
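// ----------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): a weighted_target_experimental
// config that this factory accepts. Target names, weights, and the round_robin
// child policies are hypothetical; each target needs a positive "weight" and
// may carry a "childPolicy", matching the parsing above and below.
// ----------------------------------------------------------------------------
constexpr char kExampleWeightedTargetLbConfig[] = R"json({
  "targets": {
    "locality_a": { "weight": 3, "childPolicy": [ { "round_robin": {} } ] },
    "locality_b": { "weight": 7, "childPolicy": [ { "round_robin": {} } ] }
  }
})json";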
+ it = json.object_value().find("childPolicy"); + if (it != json.object_value().end()) { + grpc_error* parse_error = GRPC_ERROR_NONE; + child_config->config = + LoadBalancingPolicyRegistry::ParseLoadBalancingConfig(it->second, + &parse_error); + if (child_config->config == nullptr) { + GPR_DEBUG_ASSERT(parse_error != GRPC_ERROR_NONE); + std::vector child_errors; + child_errors.push_back(parse_error); + error_list.push_back( + GRPC_ERROR_CREATE_FROM_VECTOR("field:childPolicy", &child_errors)); + } + } + return error_list; + } +}; + +} // namespace + +} // namespace grpc_core + +// +// Plugin registration +// + +void grpc_lb_policy_weighted_target_init() { + grpc_core::LoadBalancingPolicyRegistry::Builder:: + RegisterLoadBalancingPolicyFactory( + absl::make_unique()); +} + +void grpc_lb_policy_weighted_target_shutdown() {} diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/cds.cc b/src/core/ext/filters/client_channel/lb_policy/xds/cds.cc index 9001fb7fac1..90e36d1320f 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/cds.cc +++ b/src/core/ext/filters/client_channel/lb_policy/xds/cds.cc @@ -37,9 +37,9 @@ namespace { constexpr char kCds[] = "cds_experimental"; // Config for this LB policy. -class CdsConfig : public LoadBalancingPolicy::Config { +class CdsLbConfig : public LoadBalancingPolicy::Config { public: - explicit CdsConfig(std::string cluster) : cluster_(std::move(cluster)) {} + explicit CdsLbConfig(std::string cluster) : cluster_(std::move(cluster)) {} const std::string& cluster() const { return cluster_; } const char* name() const override { return kCds; } @@ -50,7 +50,7 @@ class CdsConfig : public LoadBalancingPolicy::Config { // CDS LB policy. class CdsLb : public LoadBalancingPolicy { public: - explicit CdsLb(Args args); + CdsLb(RefCountedPtr xds_client, Args args); const char* name() const override { return kCds; } @@ -89,7 +89,7 @@ class CdsLb : public LoadBalancingPolicy { void ShutdownLocked() override; - RefCountedPtr config_; + RefCountedPtr config_; // Current channel args from the resolver. const grpc_channel_args* args_ = nullptr; @@ -124,21 +124,37 @@ void CdsLb::ClusterWatcher::OnClusterChanged(XdsApi::CdsUpdate cluster_data) { } // Construct config for child policy. Json::Object child_config = { - {"edsServiceName", - (cluster_data.eds_service_name.empty() ? 
parent_->config_->cluster() - : cluster_data.eds_service_name)}, + {"clusterName", parent_->config_->cluster()}, + {"localityPickingPolicy", + Json::Array{ + Json::Object{ + {"weighted_target_experimental", + Json::Object{ + {"targets", Json::Object()}, + }}, + }, + }}, + {"endpointPickingPolicy", + Json::Array{ + Json::Object{ + {"round_robin", Json::Object()}, + }, + }}, }; + if (!cluster_data.eds_service_name.empty()) { + child_config["edsServiceName"] = cluster_data.eds_service_name; + } if (cluster_data.lrs_load_reporting_server_name.has_value()) { child_config["lrsLoadReportingServerName"] = cluster_data.lrs_load_reporting_server_name.value(); } Json json = Json::Array{ Json::Object{ - {"xds_experimental", std::move(child_config)}, + {"eds_experimental", std::move(child_config)}, }, }; if (GRPC_TRACE_FLAG_ENABLED(grpc_cds_lb_trace)) { - std::string json_str = json.Dump(); + std::string json_str = json.Dump(/*indent=*/1); gpr_log(GPR_INFO, "[cdslb %p] generated config for child policy: %s", parent_.get(), json_str.c_str()); } @@ -156,19 +172,19 @@ void CdsLb::ClusterWatcher::OnClusterChanged(XdsApi::CdsUpdate cluster_data) { args.args = parent_->args_; args.channel_control_helper = absl::make_unique(parent_->Ref()); parent_->child_policy_ = - LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy( - "xds_experimental", std::move(args)); + LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(config->name(), + std::move(args)); if (parent_->child_policy_ == nullptr) { OnError(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "failed to create xds_experimental child policy")); + "failed to create child policy")); return; } grpc_pollset_set_add_pollset_set( parent_->child_policy_->interested_parties(), parent_->interested_parties()); if (GRPC_TRACE_FLAG_ENABLED(grpc_cds_lb_trace)) { - gpr_log(GPR_INFO, "[cdslb %p] created child policy xds_experimental (%p)", - parent_.get(), parent_->child_policy_.get()); + gpr_log(GPR_INFO, "[cdslb %p] created child policy %s (%p)", + parent_.get(), config->name(), parent_->child_policy_.get()); } } // Update child policy. 
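// ----------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): the JSON that the
// OnClusterChanged() code in the hunk above now generates for its
// eds_experimental child, for a hypothetical cluster that has both an EDS
// service name and an LRS server configured. All values are made up; the
// shape follows the Json::Object construction above.
// ----------------------------------------------------------------------------
constexpr char kExampleGeneratedEdsConfig[] = R"json([
  {
    "eds_experimental": {
      "clusterName": "example_cluster",
      "edsServiceName": "example_eds_service",
      "lrsLoadReportingServerName": "lrs.example.com",
      "localityPickingPolicy": [
        { "weighted_target_experimental": { "targets": {} } }
      ],
      "endpointPickingPolicy": [
        { "round_robin": {} }
      ]
    }
  }
])json";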
@@ -232,9 +248,8 @@ void CdsLb::Helper::AddTraceEvent(TraceSeverity severity, StringView message) { // CdsLb // -CdsLb::CdsLb(Args args) - : LoadBalancingPolicy(std::move(args)), - xds_client_(XdsClient::GetFromChannelArgs(*args.args)) { +CdsLb::CdsLb(RefCountedPtr xds_client, Args args) + : LoadBalancingPolicy(std::move(args)), xds_client_(std::move(xds_client)) { if (GRPC_TRACE_FLAG_ENABLED(grpc_cds_lb_trace)) { gpr_log(GPR_INFO, "[cdslb %p] created -- using xds client %p from channel", this, xds_client_.get()); @@ -313,11 +328,19 @@ void CdsLb::UpdateLocked(UpdateArgs args) { // factory // -class CdsFactory : public LoadBalancingPolicyFactory { +class CdsLbFactory : public LoadBalancingPolicyFactory { public: OrphanablePtr CreateLoadBalancingPolicy( LoadBalancingPolicy::Args args) const override { - return MakeOrphanable(std::move(args)); + RefCountedPtr xds_client = + XdsClient::GetFromChannelArgs(*args.args); + if (xds_client == nullptr) { + gpr_log(GPR_ERROR, + "XdsClient not present in channel args -- cannot instantiate " + "cds LB policy"); + return nullptr; + } + return MakeOrphanable(std::move(xds_client), std::move(args)); } const char* name() const override { return kCds; } @@ -349,7 +372,7 @@ class CdsFactory : public LoadBalancingPolicyFactory { *error = GRPC_ERROR_CREATE_FROM_VECTOR("Cds Parser", &error_list); return nullptr; } - return MakeRefCounted(std::move(cluster)); + return MakeRefCounted(std::move(cluster)); } }; @@ -364,7 +387,7 @@ class CdsFactory : public LoadBalancingPolicyFactory { void grpc_lb_policy_cds_init() { grpc_core::LoadBalancingPolicyRegistry::Builder:: RegisterLoadBalancingPolicyFactory( - absl::make_unique()); + absl::make_unique()); } void grpc_lb_policy_cds_shutdown() {} diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/eds.cc b/src/core/ext/filters/client_channel/lb_policy/xds/eds.cc new file mode 100644 index 00000000000..da60dc5998b --- /dev/null +++ b/src/core/ext/filters/client_channel/lb_policy/xds/eds.cc @@ -0,0 +1,1175 @@ +// +// Copyright 2018 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#include + +#include +#include + +#include "absl/strings/str_cat.h" +#include "absl/types/optional.h" + +#include + +#include "src/core/ext/filters/client_channel/client_channel.h" +#include "src/core/ext/filters/client_channel/lb_policy.h" +#include "src/core/ext/filters/client_channel/lb_policy/address_filtering.h" +#include "src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h" +#include "src/core/ext/filters/client_channel/lb_policy/xds/xds.h" +#include "src/core/ext/filters/client_channel/lb_policy_factory.h" +#include "src/core/ext/filters/client_channel/lb_policy_registry.h" +#include "src/core/ext/filters/client_channel/server_address.h" +#include "src/core/ext/filters/client_channel/xds/xds_client.h" +#include "src/core/ext/filters/client_channel/xds/xds_client_stats.h" +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gprpp/orphanable.h" +#include "src/core/lib/gprpp/ref_counted_ptr.h" +#include "src/core/lib/iomgr/timer.h" +#include "src/core/lib/iomgr/work_serializer.h" +#include "src/core/lib/uri/uri_parser.h" + +#define GRPC_EDS_DEFAULT_FALLBACK_TIMEOUT 10000 + +namespace grpc_core { + +TraceFlag grpc_lb_eds_trace(false, "eds_lb"); + +namespace { + +constexpr char kXds[] = "xds_experimental"; +constexpr char kEds[] = "eds_experimental"; + +// Config for EDS LB policy. +class EdsLbConfig : public LoadBalancingPolicy::Config { + public: + EdsLbConfig(const char* name, std::string cluster_name, + std::string eds_service_name, + absl::optional lrs_load_reporting_server_name, + Json locality_picking_policy, Json endpoint_picking_policy, + RefCountedPtr fallback_policy) + : name_(name), + cluster_name_(std::move(cluster_name)), + eds_service_name_(std::move(eds_service_name)), + lrs_load_reporting_server_name_( + std::move(lrs_load_reporting_server_name)), + locality_picking_policy_(std::move(locality_picking_policy)), + endpoint_picking_policy_(std::move(endpoint_picking_policy)), + fallback_policy_(std::move(fallback_policy)) {} + + const char* name() const override { return name_; } + + const std::string& cluster_name() const { return cluster_name_; } + const std::string& eds_service_name() const { return eds_service_name_; } + const absl::optional& lrs_load_reporting_server_name() const { + return lrs_load_reporting_server_name_; + }; + const Json& locality_picking_policy() const { + return locality_picking_policy_; + } + const Json& endpoint_picking_policy() const { + return endpoint_picking_policy_; + } + RefCountedPtr fallback_policy() const { + return fallback_policy_; + } + + private: + const char* name_; + std::string cluster_name_; + std::string eds_service_name_; + absl::optional lrs_load_reporting_server_name_; + Json locality_picking_policy_; + Json endpoint_picking_policy_; + RefCountedPtr fallback_policy_; +}; + +// EDS LB policy. +class EdsLb : public LoadBalancingPolicy { + public: + EdsLb(const char* name, Args args); + + const char* name() const override { return name_; } + + void UpdateLocked(UpdateArgs args) override; + void ResetBackoffLocked() override; + + private: + class EndpointWatcher; + + // A simple wrapper for ref-counting a picker from the child policy. + class ChildPickerWrapper : public RefCounted { + public: + explicit ChildPickerWrapper(std::unique_ptr picker) + : picker_(std::move(picker)) {} + PickResult Pick(PickArgs args) { return picker_->Pick(args); } + + private: + std::unique_ptr picker_; + }; + + // A picker that handles drops. 
+ class DropPicker : public SubchannelPicker { + public: + explicit DropPicker(EdsLb* eds_policy); + + PickResult Pick(PickArgs args) override; + + private: + RefCountedPtr drop_config_; + RefCountedPtr drop_stats_; + RefCountedPtr child_picker_; + }; + + class Helper : public ChannelControlHelper { + public: + explicit Helper(RefCountedPtr eds_policy) + : eds_policy_(std::move(eds_policy)) {} + + ~Helper() { eds_policy_.reset(DEBUG_LOCATION, "Helper"); } + + RefCountedPtr CreateSubchannel( + const grpc_channel_args& args) override; + void UpdateState(grpc_connectivity_state state, + std::unique_ptr picker) override; + // This is a no-op, because we get the addresses from the xds + // client, which is a watch-based API. + void RequestReresolution() override {} + void AddTraceEvent(TraceSeverity severity, StringView message) override; + + private: + RefCountedPtr eds_policy_; + }; + + class FallbackHelper : public ChannelControlHelper { + public: + explicit FallbackHelper(RefCountedPtr parent) + : parent_(std::move(parent)) {} + + ~FallbackHelper() { parent_.reset(DEBUG_LOCATION, "FallbackHelper"); } + + RefCountedPtr CreateSubchannel( + const grpc_channel_args& args) override; + void UpdateState(grpc_connectivity_state state, + std::unique_ptr picker) override; + void RequestReresolution() override; + void AddTraceEvent(TraceSeverity severity, StringView message) override; + + private: + RefCountedPtr parent_; + }; + + ~EdsLb(); + + void ShutdownLocked() override; + + void UpdatePriorityList(XdsApi::PriorityListUpdate priority_list_update); + void UpdateChildPolicyLocked(); + OrphanablePtr CreateChildPolicyLocked( + const grpc_channel_args* args); + ServerAddressList CreateChildPolicyAddressesLocked(); + RefCountedPtr CreateChildPolicyConfigLocked(); + grpc_channel_args* CreateChildPolicyArgsLocked( + const grpc_channel_args* args_in); + void MaybeUpdateDropPickerLocked(); + + // Methods for dealing with fallback state. + void MaybeCancelFallbackAtStartupChecks(); + static void OnFallbackTimer(void* arg, grpc_error* error); + void OnFallbackTimerLocked(grpc_error* error); + void UpdateFallbackPolicyLocked(); + OrphanablePtr CreateFallbackPolicyLocked( + const grpc_channel_args* args); + void MaybeExitFallbackMode(); + + // Caller must ensure that config_ is set before calling. + const StringView GetEdsResourceName() const { + if (xds_client_from_channel_ == nullptr) return server_name_; + if (!config_->eds_service_name().empty()) { + return config_->eds_service_name(); + } + return config_->cluster_name(); + } + + // Returns a pair containing the cluster and eds_service_name to use + // for LRS load reporting. + // Caller must ensure that config_ is set before calling. + std::pair GetLrsClusterKey() const { + if (xds_client_from_channel_ == nullptr) return {server_name_, nullptr}; + return {config_->cluster_name(), config_->eds_service_name()}; + } + + XdsClient* xds_client() const { + return xds_client_from_channel_ != nullptr ? xds_client_from_channel_.get() + : xds_client_.get(); + } + + // Policy name (kXds or kEds). + const char* name_; + + // Server name from target URI. + std::string server_name_; + + // Current channel args and config from the resolver. + const grpc_channel_args* args_ = nullptr; + RefCountedPtr config_; + + // Internal state. + bool shutting_down_ = false; + + // The xds client and endpoint watcher. + // If we get the XdsClient from the channel, we store it in + // xds_client_from_channel_; if we create it ourselves, we store it in + // xds_client_. 
+ RefCountedPtr xds_client_from_channel_; + OrphanablePtr xds_client_; + // A pointer to the endpoint watcher, to be used when cancelling the watch. + // Note that this is not owned, so this pointer must never be derefernced. + EndpointWatcher* endpoint_watcher_ = nullptr; + // The latest data from the endpoint watcher. + XdsApi::PriorityListUpdate priority_list_update_; + // State used to retain child policy names for priority policy. + std::vector priority_child_numbers_; + + RefCountedPtr drop_config_; + RefCountedPtr drop_stats_; + + OrphanablePtr child_policy_; + + // The latest state and picker returned from the child policy. + grpc_connectivity_state child_state_; + RefCountedPtr child_picker_; + + // Non-null iff we are in fallback mode. + OrphanablePtr fallback_policy_; + + // Whether the checks for fallback at startup are ALL pending. There are + // several cases where this can be reset: + // 1. The fallback timer fires, we enter fallback mode. + // 2. Before the fallback timer fires, the endpoint watcher reports an + // error, we enter fallback mode. + // 3. Before the fallback timer fires, if any child policy in the locality map + // becomes READY, we cancel the fallback timer. + bool fallback_at_startup_checks_pending_ = false; + // Timeout in milliseconds for before using fallback backend addresses. + // 0 means not using fallback. + const grpc_millis lb_fallback_timeout_ms_; + // The backend addresses from the resolver. + ServerAddressList fallback_backend_addresses_; + // Fallback timer. + grpc_timer lb_fallback_timer_; + grpc_closure lb_on_fallback_; +}; + +// +// EdsLb::DropPicker +// + +EdsLb::DropPicker::DropPicker(EdsLb* eds_policy) + : drop_config_(eds_policy->drop_config_), + drop_stats_(eds_policy->drop_stats_), + child_picker_(eds_policy->child_picker_) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) { + gpr_log(GPR_INFO, "[edslb %p] constructed new drop picker %p", eds_policy, + this); + } +} + +EdsLb::PickResult EdsLb::DropPicker::Pick(PickArgs args) { + // Handle drop. + const std::string* drop_category; + if (drop_config_->ShouldDrop(&drop_category)) { + if (drop_stats_ != nullptr) drop_stats_->AddCallDropped(*drop_category); + PickResult result; + result.type = PickResult::PICK_COMPLETE; + return result; + } + // If we're not dropping all calls, we should always have a child picker. + if (child_picker_ == nullptr) { // Should never happen. + PickResult result; + result.type = PickResult::PICK_FAILED; + result.error = + grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "eds drop picker not given any child picker"), + GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_INTERNAL); + return result; + } + // Not dropping, so delegate to child's picker. + return child_picker_->Pick(args); +} + +// +// EdsLb::Helper +// + +RefCountedPtr EdsLb::Helper::CreateSubchannel( + const grpc_channel_args& args) { + if (eds_policy_->shutting_down_) return nullptr; + return eds_policy_->channel_control_helper()->CreateSubchannel(args); +} + +void EdsLb::Helper::UpdateState(grpc_connectivity_state state, + std::unique_ptr picker) { + if (eds_policy_->shutting_down_) return; + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) { + gpr_log(GPR_INFO, "[edslb %p] child policy updated state=%s picker=%p", + eds_policy_.get(), ConnectivityStateName(state), picker.get()); + } + // Save the state and picker. + eds_policy_->child_state_ = state; + eds_policy_->child_picker_ = + MakeRefCounted(std::move(picker)); + // If the new state is READY, cancel the fallback-at-startup checks. 
+ if (state == GRPC_CHANNEL_READY) { + eds_policy_->MaybeCancelFallbackAtStartupChecks(); + eds_policy_->MaybeExitFallbackMode(); + } + // TODO(roth): If the child reports TRANSIENT_FAILURE and the + // fallback-at-startup checks are pending, we should probably go into + // fallback mode immediately (cancelling the fallback-at-startup timer + // if needed). + // Wrap the picker in a DropPicker and pass it up. + eds_policy_->MaybeUpdateDropPickerLocked(); +} + +void EdsLb::Helper::AddTraceEvent(TraceSeverity severity, StringView message) { + if (eds_policy_->shutting_down_) return; + eds_policy_->channel_control_helper()->AddTraceEvent(severity, message); +} + +// +// EdsLb::FallbackHelper +// + +RefCountedPtr EdsLb::FallbackHelper::CreateSubchannel( + const grpc_channel_args& args) { + if (parent_->shutting_down_) return nullptr; + return parent_->channel_control_helper()->CreateSubchannel(args); +} + +void EdsLb::FallbackHelper::UpdateState( + grpc_connectivity_state state, std::unique_ptr picker) { + if (parent_->shutting_down_) return; + parent_->channel_control_helper()->UpdateState(state, std::move(picker)); +} + +void EdsLb::FallbackHelper::RequestReresolution() { + if (parent_->shutting_down_) return; + parent_->channel_control_helper()->RequestReresolution(); +} + +void EdsLb::FallbackHelper::AddTraceEvent(TraceSeverity severity, + StringView message) { + if (parent_->shutting_down_) return; + parent_->channel_control_helper()->AddTraceEvent(severity, message); +} + +// +// EdsLb::EndpointWatcher +// + +class EdsLb::EndpointWatcher : public XdsClient::EndpointWatcherInterface { + public: + explicit EndpointWatcher(RefCountedPtr eds_policy) + : eds_policy_(std::move(eds_policy)) {} + + ~EndpointWatcher() { eds_policy_.reset(DEBUG_LOCATION, "EndpointWatcher"); } + + void OnEndpointChanged(XdsApi::EdsUpdate update) override { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) { + gpr_log(GPR_INFO, "[edslb %p] Received EDS update from xds client", + eds_policy_.get()); + } + // If the balancer tells us to drop all the calls, we should exit fallback + // mode immediately. + if (update.drop_config->drop_all()) eds_policy_->MaybeExitFallbackMode(); + // Update the drop config. + const bool drop_config_changed = + eds_policy_->drop_config_ == nullptr || + *eds_policy_->drop_config_ != *update.drop_config; + if (drop_config_changed) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) { + gpr_log(GPR_INFO, "[edslb %p] Updating drop config", eds_policy_.get()); + } + eds_policy_->drop_config_ = std::move(update.drop_config); + eds_policy_->MaybeUpdateDropPickerLocked(); + } else if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) { + gpr_log(GPR_INFO, "[edslb %p] Drop config unchanged, ignoring", + eds_policy_.get()); + } + // Update priority and locality info. + if (eds_policy_->child_policy_ == nullptr || + eds_policy_->priority_list_update_ != update.priority_list_update) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) { + gpr_log(GPR_INFO, "[edslb %p] Updating priority list", + eds_policy_.get()); + } + eds_policy_->UpdatePriorityList(std::move(update.priority_list_update)); + } else if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) { + gpr_log(GPR_INFO, "[edslb %p] Priority list unchanged, ignoring", + eds_policy_.get()); + } + } + + void OnError(grpc_error* error) override { + // If the fallback-at-startup checks are pending, go into fallback mode + // immediately. This short-circuits the timeout for the + // fallback-at-startup case. 
+ if (eds_policy_->fallback_at_startup_checks_pending_) { + gpr_log(GPR_ERROR, + "[edslb %p] xds watcher reported error; entering fallback " + "mode: %s", + eds_policy_.get(), grpc_error_string(error)); + eds_policy_->fallback_at_startup_checks_pending_ = false; + grpc_timer_cancel(&eds_policy_->lb_fallback_timer_); + eds_policy_->UpdateFallbackPolicyLocked(); + // If the xds call failed, request re-resolution. + // TODO(roth): We check the error string contents here to + // differentiate between the xds call failing and the xds channel + // going into TRANSIENT_FAILURE. This is a pretty ugly hack, + // but it's okay for now, since we're not yet sure whether we will + // continue to support the current fallback functionality. If we + // decide to keep the fallback approach, then we should either + // find a cleaner way to expose the difference between these two + // cases or decide that we're okay re-resolving in both cases. + // Note that even if we do keep the current fallback functionality, + // this re-resolution will only be necessary if we are going to be + // using this LB policy with resolvers other than the xds resolver. + if (strstr(grpc_error_string(error), "xds call failed")) { + eds_policy_->channel_control_helper()->RequestReresolution(); + } + } + GRPC_ERROR_UNREF(error); + } + + private: + RefCountedPtr eds_policy_; +}; + +// +// EdsLb public methods +// + +EdsLb::EdsLb(const char* name, Args args) + : LoadBalancingPolicy(std::move(args)), + name_(name), + xds_client_from_channel_(XdsClient::GetFromChannelArgs(*args.args)), + lb_fallback_timeout_ms_(grpc_channel_args_find_integer( + args.args, GRPC_ARG_XDS_FALLBACK_TIMEOUT_MS, + {GRPC_EDS_DEFAULT_FALLBACK_TIMEOUT, 0, INT_MAX})) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) { + gpr_log(GPR_INFO, "[edslb %p] created -- xds client from channel: %p", this, + xds_client_from_channel_.get()); + } + // Record server name. + const grpc_arg* arg = grpc_channel_args_find(args.args, GRPC_ARG_SERVER_URI); + const char* server_uri = grpc_channel_arg_get_string(arg); + GPR_ASSERT(server_uri != nullptr); + grpc_uri* uri = grpc_uri_parse(server_uri, true); + GPR_ASSERT(uri->path[0] != '\0'); + server_name_ = uri->path[0] == '/' ? uri->path + 1 : uri->path; + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) { + gpr_log(GPR_INFO, "[edslb %p] server name from channel: %s", this, + server_name_.c_str()); + } + grpc_uri_destroy(uri); +} + +EdsLb::~EdsLb() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) { + gpr_log(GPR_INFO, "[edslb %p] destroying xds LB policy", this); + } + grpc_channel_args_destroy(args_); +} + +void EdsLb::ShutdownLocked() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) { + gpr_log(GPR_INFO, "[edslb %p] shutting down", this); + } + shutting_down_ = true; + MaybeCancelFallbackAtStartupChecks(); + // Drop our ref to the child's picker, in case it's holding a ref to + // the child. + child_picker_.reset(); + if (child_policy_ != nullptr) { + grpc_pollset_set_del_pollset_set(child_policy_->interested_parties(), + interested_parties()); + child_policy_.reset(); + } + if (fallback_policy_ != nullptr) { + grpc_pollset_set_del_pollset_set(fallback_policy_->interested_parties(), + interested_parties()); + fallback_policy_.reset(); + } + drop_stats_.reset(); + // Cancel the endpoint watch here instead of in our dtor if we are using the + // xds resolver, because the watcher holds a ref to us and we might not be + // destroying the XdsClient, leading to a situation where this LB policy is + // never destroyed. 
+ if (xds_client_from_channel_ != nullptr) { + if (config_ != nullptr) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) { + gpr_log(GPR_INFO, "[edslb %p] cancelling xds watch for %s", this, + std::string(GetEdsResourceName()).c_str()); + } + xds_client()->CancelEndpointDataWatch(GetEdsResourceName(), + endpoint_watcher_); + } + xds_client_from_channel_.reset(); + } + xds_client_.reset(); +} + +void EdsLb::UpdateLocked(UpdateArgs args) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) { + gpr_log(GPR_INFO, "[edslb %p] Received update", this); + } + const bool is_initial_update = args_ == nullptr; + // Update config. + auto old_config = std::move(config_); + config_ = std::move(args.config); + // Update fallback address list. + fallback_backend_addresses_ = std::move(args.addresses); + // Update args. + grpc_channel_args_destroy(args_); + args_ = args.args; + args.args = nullptr; + // Update the existing fallback policy. The fallback policy config and/or the + // fallback addresses may be new. + if (fallback_policy_ != nullptr) UpdateFallbackPolicyLocked(); + if (is_initial_update) { + // Initialize XdsClient. + if (xds_client_from_channel_ == nullptr) { + grpc_error* error = GRPC_ERROR_NONE; + xds_client_ = MakeOrphanable( + work_serializer(), interested_parties(), GetEdsResourceName(), + nullptr /* service config watcher */, *args_, &error); + // TODO(roth): If we decide that we care about fallback mode, add + // proper error handling here. + GPR_ASSERT(error == GRPC_ERROR_NONE); + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) { + gpr_log(GPR_INFO, "[edslb %p] Created xds client %p", this, + xds_client_.get()); + } + } + // Start fallback-at-startup checks. + grpc_millis deadline = ExecCtx::Get()->Now() + lb_fallback_timeout_ms_; + Ref(DEBUG_LOCATION, "on_fallback_timer").release(); // Held by closure + GRPC_CLOSURE_INIT(&lb_on_fallback_, &EdsLb::OnFallbackTimer, this, + grpc_schedule_on_exec_ctx); + fallback_at_startup_checks_pending_ = true; + grpc_timer_init(&lb_fallback_timer_, deadline, &lb_on_fallback_); + } + // Update drop stats for load reporting if needed. + if (is_initial_update || config_->lrs_load_reporting_server_name() != + old_config->lrs_load_reporting_server_name()) { + drop_stats_.reset(); + if (config_->lrs_load_reporting_server_name().has_value()) { + const auto key = GetLrsClusterKey(); + drop_stats_ = xds_client()->AddClusterDropStats( + config_->lrs_load_reporting_server_name().value(), + key.first /*cluster_name*/, key.second /*eds_service_name*/); + } + MaybeUpdateDropPickerLocked(); + } + // Update child policy if needed. + // Note that this comes after updating drop_stats_, since we want that + // to be used by any new picker we create here. + if (child_policy_ != nullptr) UpdateChildPolicyLocked(); + // Create endpoint watcher if needed. + if (is_initial_update) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) { + gpr_log(GPR_INFO, "[edslb %p] starting xds watch for %s", this, + std::string(GetEdsResourceName()).c_str()); + } + auto watcher = absl::make_unique( + Ref(DEBUG_LOCATION, "EndpointWatcher")); + endpoint_watcher_ = watcher.get(); + xds_client()->WatchEndpointData(GetEdsResourceName(), std::move(watcher)); + } +} + +void EdsLb::ResetBackoffLocked() { + // When the XdsClient is instantiated in the resolver instead of in this + // LB policy, this is done via the resolver, so we don't need to do it + // for xds_client_from_channel_ here. 
+ if (xds_client_ != nullptr) xds_client_->ResetBackoff(); + if (child_policy_ != nullptr) { + child_policy_->ResetBackoffLocked(); + } + if (fallback_policy_ != nullptr) { + fallback_policy_->ResetBackoffLocked(); + } +} + +// +// child policy-related methods +// + +void EdsLb::UpdatePriorityList( + XdsApi::PriorityListUpdate priority_list_update) { + // Build some maps from locality to child number and the reverse from + // the old data in priority_list_update_ and priority_child_numbers_. + std::map + locality_child_map; + std::map> child_locality_map; + for (uint32_t priority = 0; priority < priority_list_update_.size(); + ++priority) { + auto* locality_map = priority_list_update_.Find(priority); + GPR_ASSERT(locality_map != nullptr); + size_t child_number = priority_child_numbers_[priority]; + for (const auto& p : locality_map->localities) { + XdsLocalityName* locality_name = p.first.get(); + locality_child_map[locality_name] = child_number; + child_locality_map[child_number].insert(locality_name); + } + } + // Construct new list of children. + std::vector priority_child_numbers; + for (uint32_t priority = 0; priority < priority_list_update.size(); + ++priority) { + auto* locality_map = priority_list_update.Find(priority); + GPR_ASSERT(locality_map != nullptr); + absl::optional child_number; + // If one of the localities in this priority already existed, reuse its + // child number. + for (const auto& p : locality_map->localities) { + XdsLocalityName* locality_name = p.first.get(); + if (!child_number.has_value()) { + auto it = locality_child_map.find(locality_name); + if (it != locality_child_map.end()) { + child_number = it->second; + locality_child_map.erase(it); + // Remove localities that *used* to be in this child number, so + // that we don't incorrectly reuse this child number for a + // subsequent priority. + for (XdsLocalityName* old_locality : + child_locality_map[*child_number]) { + locality_child_map.erase(old_locality); + } + } + } else { + // Remove all localities that are now in this child number, so + // that we don't accidentally reuse this child number for a + // subsequent priority. + locality_child_map.erase(locality_name); + } + } + // If we didn't find an existing child number, assign a new one. + if (!child_number.has_value()) { + for (child_number = 0; + child_locality_map.find(*child_number) != child_locality_map.end(); + ++(*child_number)) + ; + // Add entry so we know that the child number is in use. + // (Don't need to add the list of localities, since we won't use them.) + child_locality_map[*child_number]; + } + priority_child_numbers.push_back(*child_number); + } + // Save update. + priority_list_update_ = std::move(priority_list_update); + priority_child_numbers_ = std::move(priority_child_numbers); + // Update child policy. 
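// ----------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): a simplified model of the
// child-number reuse rule in UpdatePriorityList() above, reduced to plain
// strings. A priority in the new update that shares a locality with an old
// priority keeps that priority's child number (so its child policy is reused);
// otherwise it gets the lowest unused number. This assumes old child numbers
// equal old priority indexes; the real code also remaps arbitrary numbers from
// priority_child_numbers_ and prunes its maps as it goes.
// ----------------------------------------------------------------------------
#include <cstddef>
#include <cstdint>
#include <map>
#include <set>
#include <string>
#include <vector>

static std::vector<size_t> AssignChildNumbers(
    const std::vector<std::set<std::string>>& old_priorities,
    const std::vector<std::set<std::string>>& new_priorities) {
  // Map each old locality to the child number of the priority it was in.
  std::map<std::string, size_t> locality_to_child;
  for (size_t i = 0; i < old_priorities.size(); ++i) {
    for (const std::string& locality : old_priorities[i]) {
      locality_to_child[locality] = i;
    }
  }
  std::set<size_t> used;
  std::vector<size_t> result;
  for (const auto& localities : new_priorities) {
    size_t child_number = SIZE_MAX;
    // Reuse the child number of an old priority that shared a locality, as
    // long as an earlier new priority has not already claimed that number.
    for (const std::string& locality : localities) {
      auto it = locality_to_child.find(locality);
      if (it != locality_to_child.end() && used.count(it->second) == 0) {
        child_number = it->second;
        break;
      }
    }
    // No reusable number: take the lowest one not yet in use.
    if (child_number == SIZE_MAX) {
      for (child_number = 0; used.count(child_number) > 0; ++child_number) {
      }
    }
    used.insert(child_number);
    result.push_back(child_number);
  }
  return result;
}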
+ UpdateChildPolicyLocked(); +} + +ServerAddressList EdsLb::CreateChildPolicyAddressesLocked() { + ServerAddressList addresses; + for (uint32_t priority = 0; priority < priority_list_update_.size(); + ++priority) { + std::string priority_child_name = + absl::StrCat("child", priority_child_numbers_[priority]); + const auto* locality_map = priority_list_update_.Find(priority); + GPR_ASSERT(locality_map != nullptr); + for (const auto& p : locality_map->localities) { + const auto& locality_name = p.first; + const auto& locality = p.second; + std::vector hierarchical_path = { + priority_child_name, locality_name->AsHumanReadableString()}; + for (size_t i = 0; i < locality.serverlist.size(); ++i) { + const ServerAddress& address = locality.serverlist[i]; + grpc_arg new_arg = MakeHierarchicalPathArg(hierarchical_path); + grpc_channel_args* args = + grpc_channel_args_copy_and_add(address.args(), &new_arg, 1); + addresses.emplace_back(address.address(), args); + } + } + } + return addresses; +} + +RefCountedPtr +EdsLb::CreateChildPolicyConfigLocked() { + Json::Object priority_children; + Json::Array priority_priorities; + for (uint32_t priority = 0; priority < priority_list_update_.size(); + ++priority) { + const auto* locality_map = priority_list_update_.Find(priority); + GPR_ASSERT(locality_map != nullptr); + Json::Object weighted_targets; + for (const auto& p : locality_map->localities) { + XdsLocalityName* locality_name = p.first.get(); + const auto& locality = p.second; + // Construct JSON object containing locality name. + Json::Object locality_name_json; + if (!locality_name->region().empty()) { + locality_name_json["region"] = locality_name->region(); + } + if (!locality_name->zone().empty()) { + locality_name_json["zone"] = locality_name->zone(); + } + if (!locality_name->sub_zone().empty()) { + locality_name_json["subzone"] = locality_name->sub_zone(); + } + // Construct endpoint-picking policy. + // Wrap it in the LRS policy if load reporting is enabled. + Json endpoint_picking_policy; + if (config_->lrs_load_reporting_server_name().has_value()) { + const auto key = GetLrsClusterKey(); + Json::Object lrs_config = { + {"clusterName", std::string(key.first)}, + {"locality", std::move(locality_name_json)}, + {"lrsLoadReportingServerName", + config_->lrs_load_reporting_server_name().value()}, + {"childPolicy", config_->endpoint_picking_policy()}, + }; + if (!key.second.empty()) { + lrs_config["edsServiceName"] = std::string(key.second); + } + endpoint_picking_policy = Json::Array{Json::Object{ + {"lrs_experimental", std::move(lrs_config)}, + }}; + } else { + endpoint_picking_policy = config_->endpoint_picking_policy(); + } + // Add weighted target entry. + weighted_targets[locality_name->AsHumanReadableString()] = Json::Object{ + {"weight", locality.lb_weight}, + {"childPolicy", std::move(endpoint_picking_policy)}, + }; + } + // Add priority entry. 
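    // A hedged sketch of one entry in the weighted_targets object assembled
    // above, as it would appear in the generated JSON (locality name, weight,
    // and server names are hypothetical, not taken from this change):
    //
    //   "<locality human-readable name>": {
    //     "weight": 3,
    //     "childPolicy": [{
    //       "lrs_experimental": {
    //         "clusterName": "my_cluster",
    //         "edsServiceName": "my_eds_service",
    //         "locality": {"region": "us-east1", "zone": "b", "subzone": "pod"},
    //         "lrsLoadReportingServerName": "lrs.example.com",
    //         "childPolicy": [{"round_robin": {}}]
    //       }
    //     }]
    //   }
    //
    // The whole map becomes the "targets" field of this priority's
    // locality-picking config in the code below.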
+ const size_t child_number = priority_child_numbers_[priority]; + std::string child_name = absl::StrCat("child", child_number); + priority_priorities.emplace_back(child_name); + Json locality_picking_config = config_->locality_picking_policy(); + Json::Object& config = + *(*locality_picking_config.mutable_array())[0].mutable_object(); + auto it = config.begin(); + GPR_ASSERT(it != config.end()); + (*it->second.mutable_object())["targets"] = std::move(weighted_targets); + priority_children[child_name] = Json::Object{ + {"config", std::move(locality_picking_config)}, + }; + } + Json json = Json::Array{Json::Object{ + {"priority_experimental", + Json::Object{ + {"children", std::move(priority_children)}, + {"priorities", std::move(priority_priorities)}, + }}, + }}; + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) { + std::string json_str = json.Dump(/*indent=*/1); + gpr_log(GPR_INFO, "[edslb %p] generated config for child policy: %s", this, + json_str.c_str()); + } + grpc_error* error = GRPC_ERROR_NONE; + RefCountedPtr config = + LoadBalancingPolicyRegistry::ParseLoadBalancingConfig(json, &error); + if (error != GRPC_ERROR_NONE) { + // This should never happen, but if it does, we basically have no + // way to fix it, so we put the channel in TRANSIENT_FAILURE. + gpr_log(GPR_ERROR, + "[edslb %p] error parsing generated child policy config -- " + "will put channel in TRANSIENT_FAILURE: %s", + this, grpc_error_string(error)); + error = grpc_error_set_int( + grpc_error_add_child( + GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "eds LB policy: error parsing generated child policy config"), + error), + GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_INTERNAL); + channel_control_helper()->UpdateState( + GRPC_CHANNEL_TRANSIENT_FAILURE, + absl::make_unique(error)); + return nullptr; + } + return config; +} + +void EdsLb::UpdateChildPolicyLocked() { + if (shutting_down_) return; + UpdateArgs update_args; + update_args.config = CreateChildPolicyConfigLocked(); + if (update_args.config == nullptr) return; + update_args.addresses = CreateChildPolicyAddressesLocked(); + update_args.args = CreateChildPolicyArgsLocked(args_); + if (child_policy_ == nullptr) { + child_policy_ = CreateChildPolicyLocked(update_args.args); + } + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) { + gpr_log(GPR_INFO, "[edslb %p] Updating child policy %p", this, + child_policy_.get()); + } + child_policy_->UpdateLocked(std::move(update_args)); +} + +grpc_channel_args* EdsLb::CreateChildPolicyArgsLocked( + const grpc_channel_args* args) { + absl::InlinedVector args_to_add = { + // A channel arg indicating if the target is a backend inferred from an + // xds load balancer. + grpc_channel_arg_integer_create( + const_cast(GRPC_ARG_ADDRESS_IS_BACKEND_FROM_XDS_LOAD_BALANCER), + 1), + // Inhibit client-side health checking, since the balancer does + // this for us. 
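      // (Illustrative note: both entries in args_to_add are plain integer
      // channel args, so a policy further down the tree could read them back
      // with grpc_channel_args_find_integer(), the same helper the old xds
      // policy used for its timeout args.)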
+ grpc_channel_arg_integer_create( + const_cast(GRPC_ARG_INHIBIT_HEALTH_CHECKING), 1), + }; + if (xds_client_from_channel_ == nullptr) { + args_to_add.emplace_back(xds_client_->MakeChannelArg()); + } + return grpc_channel_args_copy_and_add(args, args_to_add.data(), + args_to_add.size()); +} + +OrphanablePtr EdsLb::CreateChildPolicyLocked( + const grpc_channel_args* args) { + LoadBalancingPolicy::Args lb_policy_args; + lb_policy_args.work_serializer = work_serializer(); + lb_policy_args.args = args; + lb_policy_args.channel_control_helper = + absl::make_unique(Ref(DEBUG_LOCATION, "Helper")); + OrphanablePtr lb_policy = + LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy( + "priority_experimental", std::move(lb_policy_args)); + if (GPR_UNLIKELY(lb_policy == nullptr)) { + gpr_log(GPR_ERROR, "[edslb %p] failure creating child policy", this); + return nullptr; + } + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) { + gpr_log(GPR_INFO, "[edslb %p]: Created new child policy %p", this, + lb_policy.get()); + } + // Add our interested_parties pollset_set to that of the newly created + // child policy. This will make the child policy progress upon activity on + // this policy, which in turn is tied to the application's call. + grpc_pollset_set_add_pollset_set(lb_policy->interested_parties(), + interested_parties()); + return lb_policy; +} + +void EdsLb::MaybeUpdateDropPickerLocked() { + // If we are in fallback mode, don't override the picker. + if (fallback_policy_ != nullptr) return; + // If we're dropping all calls, report READY, regardless of what (or + // whether) the child has reported. + if (drop_config_ != nullptr && drop_config_->drop_all()) { + channel_control_helper()->UpdateState(GRPC_CHANNEL_READY, + absl::make_unique(this)); + return; + } + // Update only if we have a child picker. + if (child_picker_ != nullptr) { + channel_control_helper()->UpdateState(child_state_, + absl::make_unique(this)); + } +} + +// +// fallback-related methods +// + +void EdsLb::MaybeCancelFallbackAtStartupChecks() { + if (!fallback_at_startup_checks_pending_) return; + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) { + gpr_log(GPR_INFO, "[edslb %p] Cancelling fallback timer", this); + } + grpc_timer_cancel(&lb_fallback_timer_); + fallback_at_startup_checks_pending_ = false; +} + +void EdsLb::OnFallbackTimer(void* arg, grpc_error* error) { + EdsLb* edslb_policy = static_cast(arg); + GRPC_ERROR_REF(error); // ref owned by lambda + edslb_policy->work_serializer()->Run( + [edslb_policy, error]() { edslb_policy->OnFallbackTimerLocked(error); }, + DEBUG_LOCATION); +} + +void EdsLb::OnFallbackTimerLocked(grpc_error* error) { + // If some fallback-at-startup check is done after the timer fires but before + // this callback actually runs, don't fall back. + if (fallback_at_startup_checks_pending_ && !shutting_down_ && + error == GRPC_ERROR_NONE) { + gpr_log(GPR_INFO, + "[edslb %p] Child policy not ready after fallback timeout; " + "entering fallback mode", + this); + fallback_at_startup_checks_pending_ = false; + UpdateFallbackPolicyLocked(); + } + Unref(DEBUG_LOCATION, "on_fallback_timer"); + GRPC_ERROR_UNREF(error); +} + +void EdsLb::UpdateFallbackPolicyLocked() { + if (shutting_down_) return; + // Create policy if needed. + if (fallback_policy_ == nullptr) { + fallback_policy_ = CreateFallbackPolicyLocked(args_); + } + // Construct update args. 
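  // (For illustration: the update assembled below reuses the resolver-provided
  // fallback_backend_addresses_ and the parsed "fallbackPolicy" config, which
  // defaults to [{"round_robin": {}}] when the field is omitted -- see the
  // factory's ParseLoadBalancingConfig() below.)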
+ UpdateArgs update_args; + update_args.addresses = fallback_backend_addresses_; + update_args.config = config_->fallback_policy(); + update_args.args = grpc_channel_args_copy(args_); + // Update the policy. + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) { + gpr_log(GPR_INFO, "[edslb %p] Updating fallback child policy handler %p", + this, fallback_policy_.get()); + } + fallback_policy_->UpdateLocked(std::move(update_args)); +} + +OrphanablePtr EdsLb::CreateFallbackPolicyLocked( + const grpc_channel_args* args) { + LoadBalancingPolicy::Args lb_policy_args; + lb_policy_args.work_serializer = work_serializer(); + lb_policy_args.args = args; + lb_policy_args.channel_control_helper = + absl::make_unique(Ref(DEBUG_LOCATION, "FallbackHelper")); + OrphanablePtr lb_policy = + MakeOrphanable(std::move(lb_policy_args), + &grpc_lb_eds_trace); + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_eds_trace)) { + gpr_log(GPR_INFO, "[edslb %p] Created new fallback child policy handler %p", + this, lb_policy.get()); + } + // Add our interested_parties pollset_set to that of the newly created + // child policy. This will make the child policy progress upon activity on + // this policy, which in turn is tied to the application's call. + grpc_pollset_set_add_pollset_set(lb_policy->interested_parties(), + interested_parties()); + return lb_policy; +} + +void EdsLb::MaybeExitFallbackMode() { + if (fallback_policy_ == nullptr) return; + gpr_log(GPR_INFO, "[edslb %p] Exiting fallback mode", this); + fallback_policy_.reset(); +} + +// +// factory +// + +class EdsLbFactory : public LoadBalancingPolicyFactory { + public: + explicit EdsLbFactory(const char* name) : name_(name) {} + + OrphanablePtr CreateLoadBalancingPolicy( + LoadBalancingPolicy::Args args) const override { + return MakeOrphanable(std::move(args), &grpc_lb_eds_trace, + name_); + } + + const char* name() const override { return name_; } + + RefCountedPtr ParseLoadBalancingConfig( + const Json& json, grpc_error** error) const override { + GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE); + if (json.type() == Json::Type::JSON_NULL) { + // xds was mentioned as a policy in the deprecated loadBalancingPolicy + // field or in the client API. + *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:loadBalancingPolicy error:eds policy requires configuration. " + "Please use loadBalancingConfig field of service config instead."); + return nullptr; + } + std::vector error_list; + // EDS service name. + std::string eds_service_name; + auto it = json.object_value().find("edsServiceName"); + if (it != json.object_value().end()) { + if (it->second.type() != Json::Type::STRING) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:edsServiceName error:type should be string")); + } else { + eds_service_name = it->second.string_value(); + } + } + // Cluster name. + std::string cluster_name; + if (name_ == kEds) { + it = json.object_value().find("clusterName"); + if (it == json.object_value().end()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:clusterName error:required field missing")); + } else if (it->second.type() != Json::Type::STRING) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:clusterName error:type should be string")); + } else { + cluster_name = it->second.string_value(); + } + } else { + // For xds policy, this field does not exist in the config, so it + // will always be set to the same value as edsServiceName. 
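      // (That is, when this factory is registered under the legacy
      // "xds_experimental" name, which is kept below only for backward
      // compatibility, the config carries no "clusterName" field, so the
      // cluster is assumed to share the EDS service name.)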
+ cluster_name = eds_service_name; + } + // LRS load reporting server name. + absl::optional lrs_load_reporting_server_name; + it = json.object_value().find("lrsLoadReportingServerName"); + if (it != json.object_value().end()) { + if (it->second.type() != Json::Type::STRING) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:lrsLoadReportingServerName error:type should be string")); + } else { + lrs_load_reporting_server_name.emplace(it->second.string_value()); + } + } + // Locality-picking policy. Not supported for xds policy. + Json locality_picking_policy = Json::Array{ + Json::Object{ + {"weighted_target_experimental", + Json::Object{ + {"targets", Json::Object()}, + }}, + }, + }; + if (name_ == kEds) { + it = json.object_value().find("localityPickingPolicy"); + if (it != json.object_value().end()) { + locality_picking_policy = it->second; + } + } + grpc_error* parse_error = GRPC_ERROR_NONE; + if (LoadBalancingPolicyRegistry::ParseLoadBalancingConfig( + locality_picking_policy, &parse_error) == nullptr) { + GPR_DEBUG_ASSERT(parse_error != GRPC_ERROR_NONE); + error_list.push_back(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "localityPickingPolicy", &parse_error, 1)); + GRPC_ERROR_UNREF(parse_error); + } + // Endpoint-picking policy. Called "childPolicy" for xds policy. + const char* field_name = + name_ == kEds ? "endpointPickingPolicy" : "childPolicy"; + Json endpoint_picking_policy; + it = json.object_value().find(field_name); + if (it == json.object_value().end()) { + endpoint_picking_policy = Json::Array{ + Json::Object{ + {"round_robin", Json::Object()}, + }, + }; + } else { + endpoint_picking_policy = it->second; + } + parse_error = GRPC_ERROR_NONE; + if (LoadBalancingPolicyRegistry::ParseLoadBalancingConfig( + endpoint_picking_policy, &parse_error) == nullptr) { + GPR_DEBUG_ASSERT(parse_error != GRPC_ERROR_NONE); + error_list.push_back(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + field_name, &parse_error, 1)); + GRPC_ERROR_UNREF(parse_error); + } + // Fallback policy. 
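    // A hedged example of a complete eds_experimental config accepted by this
    // parser (all values hypothetical, shown only to illustrate the fields
    // handled above and below):
    //
    //   { "eds_experimental": {
    //       "clusterName": "my_cluster",
    //       "edsServiceName": "my_eds_service",
    //       "lrsLoadReportingServerName": "lrs.example.com",
    //       "localityPickingPolicy": [
    //         {"weighted_target_experimental": {"targets": {}}}
    //       ],
    //       "endpointPickingPolicy": [{"round_robin": {}}],
    //       "fallbackPolicy": [{"round_robin": {}}]
    //   } }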
+    Json fallback_policy_config;
+    it = json.object_value().find("fallbackPolicy");
+    if (it == json.object_value().end()) {
+      fallback_policy_config = Json::Array{Json::Object{
+          {"round_robin", Json::Object()},
+      }};
+    } else {
+      fallback_policy_config = it->second;
+    }
+    parse_error = GRPC_ERROR_NONE;
+    RefCountedPtr<LoadBalancingPolicy::Config> fallback_policy =
+        LoadBalancingPolicyRegistry::ParseLoadBalancingConfig(
+            fallback_policy_config, &parse_error);
+    if (fallback_policy == nullptr) {
+      GPR_DEBUG_ASSERT(parse_error != GRPC_ERROR_NONE);
+      error_list.push_back(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+          "fallbackPolicy", &parse_error, 1));
+      GRPC_ERROR_UNREF(parse_error);
+    }
+    if (error_list.empty()) {
+      return MakeRefCounted<EdsLbConfig>(
+          name_, std::move(cluster_name), std::move(eds_service_name),
+          std::move(lrs_load_reporting_server_name),
+          std::move(locality_picking_policy),
+          std::move(endpoint_picking_policy), std::move(fallback_policy));
+    } else {
+      *error = GRPC_ERROR_CREATE_FROM_VECTOR(
+          "eds_experimental LB policy config", &error_list);
+      return nullptr;
+    }
+  }
+
+ private:
+  class EdsChildHandler : public ChildPolicyHandler {
+   public:
+    EdsChildHandler(Args args, TraceFlag* tracer, const char* name)
+        : ChildPolicyHandler(std::move(args), tracer), name_(name) {}
+
+    bool ConfigChangeRequiresNewPolicyInstance(
+        LoadBalancingPolicy::Config* old_config,
+        LoadBalancingPolicy::Config* new_config) const override {
+      GPR_ASSERT(old_config->name() == name_);
+      GPR_ASSERT(new_config->name() == name_);
+      EdsLbConfig* old_eds_config = static_cast<EdsLbConfig*>(old_config);
+      EdsLbConfig* new_eds_config = static_cast<EdsLbConfig*>(new_config);
+      return old_eds_config->cluster_name() != new_eds_config->cluster_name() ||
+             old_eds_config->eds_service_name() !=
+                 new_eds_config->eds_service_name();
+    }
+
+    OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
+        const char* name, LoadBalancingPolicy::Args args) const override {
+      return MakeOrphanable<EdsLb>(name_, std::move(args));
+    }
+
+   private:
+    const char* name_;
+  };
+
+  const char* name_;
+};
+
+}  // namespace
+
+}  // namespace grpc_core
+
+//
+// Plugin registration
+//
+
+void grpc_lb_policy_eds_init() {
+  grpc_core::LoadBalancingPolicyRegistry::Builder::
+      RegisterLoadBalancingPolicyFactory(
+          absl::make_unique<grpc_core::EdsLbFactory>(grpc_core::kEds));
+  // TODO(roth): This is here just for backward compatibility with some
+  // old tests we have internally. Remove this once they are upgraded
+  // to use the new policy name and config.
+  grpc_core::LoadBalancingPolicyRegistry::Builder::
+      RegisterLoadBalancingPolicyFactory(
+          absl::make_unique<grpc_core::EdsLbFactory>(grpc_core::kXds));
+}
+
+void grpc_lb_policy_eds_shutdown() {}
diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc b/src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc
new file mode 100644
index 00000000000..30dbd20b9ad
--- /dev/null
+++ b/src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc
@@ -0,0 +1,524 @@
+//
+// Copyright 2018 gRPC authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// + +#include + +#include + +#include "src/core/ext/filters/client_channel/lb_policy.h" +#include "src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h" +#include "src/core/ext/filters/client_channel/lb_policy_factory.h" +#include "src/core/ext/filters/client_channel/lb_policy_registry.h" +#include "src/core/ext/filters/client_channel/xds/xds_client.h" +#include "src/core/ext/filters/client_channel/xds/xds_client_stats.h" +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gprpp/orphanable.h" +#include "src/core/lib/gprpp/ref_counted_ptr.h" +#include "src/core/lib/iomgr/work_serializer.h" + +namespace grpc_core { + +TraceFlag grpc_lb_lrs_trace(false, "lrs_lb"); + +namespace { + +constexpr char kLrs[] = "lrs_experimental"; + +// Config for LRS LB policy. +class LrsLbConfig : public LoadBalancingPolicy::Config { + public: + LrsLbConfig(RefCountedPtr child_policy, + std::string cluster_name, std::string eds_service_name, + std::string lrs_load_reporting_server_name, + RefCountedPtr locality_name) + : child_policy_(std::move(child_policy)), + cluster_name_(std::move(cluster_name)), + eds_service_name_(std::move(eds_service_name)), + lrs_load_reporting_server_name_( + std::move(lrs_load_reporting_server_name)), + locality_name_(std::move(locality_name)) {} + + const char* name() const override { return kLrs; } + + RefCountedPtr child_policy() const { + return child_policy_; + } + const std::string& cluster_name() const { return cluster_name_; } + const std::string& eds_service_name() const { return eds_service_name_; } + const std::string& lrs_load_reporting_server_name() const { + return lrs_load_reporting_server_name_; + }; + RefCountedPtr locality_name() const { + return locality_name_; + } + + private: + RefCountedPtr child_policy_; + std::string cluster_name_; + std::string eds_service_name_; + std::string lrs_load_reporting_server_name_; + RefCountedPtr locality_name_; +}; + +// LRS LB policy. +class LrsLb : public LoadBalancingPolicy { + public: + LrsLb(RefCountedPtr xds_client, Args args); + + const char* name() const override { return kLrs; } + + void UpdateLocked(UpdateArgs args) override; + void ExitIdleLocked() override; + void ResetBackoffLocked() override; + + private: + // A simple wrapper for ref-counting a picker from the child policy. + class RefCountedPicker : public RefCounted { + public: + explicit RefCountedPicker(std::unique_ptr picker) + : picker_(std::move(picker)) {} + PickResult Pick(PickArgs args) { return picker_->Pick(args); } + + private: + std::unique_ptr picker_; + }; + + // A picker that wraps the picker from the child to perform load reporting. 
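  // (Sketch of the flow for orientation: each Pick() is forwarded to the
  // wrapped child picker; on a completed pick the locality's
  // XdsClusterLocalityStats records a call started, and a
  // recv_trailing_metadata callback records success or failure when the call
  // finishes.  See LoadReportingPicker::Pick() below.)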
+ class LoadReportingPicker : public SubchannelPicker { + public: + LoadReportingPicker(RefCountedPtr picker, + RefCountedPtr locality_stats) + : picker_(std::move(picker)), + locality_stats_(std::move(locality_stats)) {} + + PickResult Pick(PickArgs args); + + private: + RefCountedPtr picker_; + RefCountedPtr locality_stats_; + }; + + class Helper : public ChannelControlHelper { + public: + explicit Helper(RefCountedPtr lrs_policy) + : lrs_policy_(std::move(lrs_policy)) {} + + ~Helper() { lrs_policy_.reset(DEBUG_LOCATION, "Helper"); } + + RefCountedPtr CreateSubchannel( + const grpc_channel_args& args) override; + void UpdateState(grpc_connectivity_state state, + std::unique_ptr picker) override; + void RequestReresolution() override; + void AddTraceEvent(TraceSeverity severity, StringView message) override; + + private: + RefCountedPtr lrs_policy_; + }; + + ~LrsLb(); + + void ShutdownLocked() override; + + OrphanablePtr CreateChildPolicyLocked( + const grpc_channel_args* args); + void UpdateChildPolicyLocked(ServerAddressList addresses, + const grpc_channel_args* args); + + void MaybeUpdatePickerLocked(); + + // Current config from the resolver. + RefCountedPtr config_; + + // Internal state. + bool shutting_down_ = false; + + // The xds client. + RefCountedPtr xds_client_; + + // The stats for client-side load reporting. + RefCountedPtr locality_stats_; + + OrphanablePtr child_policy_; + + // Latest state and picker reported by the child policy. + grpc_connectivity_state state_ = GRPC_CHANNEL_IDLE; + RefCountedPtr picker_; +}; + +// +// LrsLb::LoadReportingPicker +// + +LoadBalancingPolicy::PickResult LrsLb::LoadReportingPicker::Pick( + LoadBalancingPolicy::PickArgs args) { + // Forward the pick to the picker returned from the child policy. + PickResult result = picker_->Pick(args); + if (result.type == PickResult::PICK_COMPLETE && + result.subchannel != nullptr) { + // Record a call started. + locality_stats_->AddCallStarted(); + // Intercept the recv_trailing_metadata op to record call completion. + XdsClusterLocalityStats* locality_stats = + locality_stats_->Ref(DEBUG_LOCATION, "LocalityStats+call").release(); + result.recv_trailing_metadata_ready = + // Note: This callback does not run in either the control plane + // work serializer or in the data plane mutex. + [locality_stats](grpc_error* error, MetadataInterface* /*metadata*/, + CallState* /*call_state*/) { + const bool call_failed = error != GRPC_ERROR_NONE; + locality_stats->AddCallFinished(call_failed); + locality_stats->Unref(DEBUG_LOCATION, "LocalityStats+call"); + }; + } + return result; +} + +// +// LrsLb +// + +LrsLb::LrsLb(RefCountedPtr xds_client, Args args) + : LoadBalancingPolicy(std::move(args)), xds_client_(std::move(xds_client)) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_lrs_trace)) { + gpr_log(GPR_INFO, "[lrs_lb %p] created -- using xds client %p from channel", + this, xds_client_.get()); + } +} + +LrsLb::~LrsLb() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_lrs_trace)) { + gpr_log(GPR_INFO, "[lrs_lb %p] destroying xds LB policy", this); + } +} + +void LrsLb::ShutdownLocked() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_lrs_trace)) { + gpr_log(GPR_INFO, "[lrs_lb %p] shutting down", this); + } + shutting_down_ = true; + // Remove the child policy's interested_parties pollset_set from the + // xDS policy. 
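  // (interested_parties() is the pollset_set that ties this policy's I/O to
  // the application's calls; the removal below mirrors the
  // grpc_pollset_set_add_pollset_set() call made when the child policy was
  // created in CreateChildPolicyLocked().)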
+ if (child_policy_ != nullptr) { + grpc_pollset_set_del_pollset_set(child_policy_->interested_parties(), + interested_parties()); + child_policy_.reset(); + } + // Drop our ref to the child's picker, in case it's holding a ref to + // the child. + picker_.reset(); + locality_stats_.reset(); + xds_client_.reset(); +} + +void LrsLb::ExitIdleLocked() { + if (child_policy_ != nullptr) child_policy_->ExitIdleLocked(); +} + +void LrsLb::ResetBackoffLocked() { + // The XdsClient will have its backoff reset by the xds resolver, so we + // don't need to do it here. + if (child_policy_ != nullptr) child_policy_->ResetBackoffLocked(); +} + +void LrsLb::UpdateLocked(UpdateArgs args) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_lrs_trace)) { + gpr_log(GPR_INFO, "[lrs_lb %p] Received update", this); + } + // Update config. + auto old_config = std::move(config_); + config_ = std::move(args.config); + // Update load reporting if needed. + if (old_config == nullptr || + config_->lrs_load_reporting_server_name() != + old_config->lrs_load_reporting_server_name() || + config_->cluster_name() != old_config->cluster_name() || + config_->eds_service_name() != old_config->eds_service_name() || + *config_->locality_name() != *old_config->locality_name()) { + locality_stats_ = xds_client_->AddClusterLocalityStats( + config_->lrs_load_reporting_server_name(), config_->cluster_name(), + config_->eds_service_name(), config_->locality_name()); + MaybeUpdatePickerLocked(); + } + // Update child policy. + UpdateChildPolicyLocked(std::move(args.addresses), args.args); + args.args = nullptr; // Ownership passed to UpdateChildPolicyLocked(). +} + +void LrsLb::MaybeUpdatePickerLocked() { + if (picker_ != nullptr) { + auto lrs_picker = + absl::make_unique(picker_, locality_stats_); + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_lrs_trace)) { + gpr_log(GPR_INFO, "[lrs_lb %p] updating connectivity: state=%s picker=%p", + this, ConnectivityStateName(state_), lrs_picker.get()); + } + channel_control_helper()->UpdateState(state_, std::move(lrs_picker)); + } +} + +OrphanablePtr LrsLb::CreateChildPolicyLocked( + const grpc_channel_args* args) { + LoadBalancingPolicy::Args lb_policy_args; + lb_policy_args.work_serializer = work_serializer(); + lb_policy_args.args = args; + lb_policy_args.channel_control_helper = + absl::make_unique(Ref(DEBUG_LOCATION, "Helper")); + OrphanablePtr lb_policy = + MakeOrphanable(std::move(lb_policy_args), + &grpc_lb_lrs_trace); + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_lrs_trace)) { + gpr_log(GPR_INFO, "[lrs_lb %p] Created new child policy handler %p", this, + lb_policy.get()); + } + // Add our interested_parties pollset_set to that of the newly created + // child policy. This will make the child policy progress upon activity on + // this policy, which in turn is tied to the application's call. + grpc_pollset_set_add_pollset_set(lb_policy->interested_parties(), + interested_parties()); + return lb_policy; +} + +void LrsLb::UpdateChildPolicyLocked(ServerAddressList addresses, + const grpc_channel_args* args) { + // Create policy if needed. + if (child_policy_ == nullptr) { + child_policy_ = CreateChildPolicyLocked(args); + } + // Construct update args. + UpdateArgs update_args; + update_args.addresses = std::move(addresses); + update_args.config = config_->child_policy(); + update_args.args = args; + // Update the policy. 
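  // (Ownership note: update_args.args takes the channel args passed in here;
  // UpdateLocked() above nulls out args.args after this call to reflect that
  // handoff.)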
+ if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_lrs_trace)) { + gpr_log(GPR_INFO, "[lrs_lb %p] Updating child policy handler %p", this, + child_policy_.get()); + } + child_policy_->UpdateLocked(std::move(update_args)); +} + +// +// LrsLb::Helper +// + +RefCountedPtr LrsLb::Helper::CreateSubchannel( + const grpc_channel_args& args) { + if (lrs_policy_->shutting_down_) return nullptr; + return lrs_policy_->channel_control_helper()->CreateSubchannel(args); +} + +void LrsLb::Helper::UpdateState(grpc_connectivity_state state, + std::unique_ptr picker) { + if (lrs_policy_->shutting_down_) return; + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_lrs_trace)) { + gpr_log(GPR_INFO, + "[lrs_lb %p] child connectivity state update: state=%s picker=%p", + lrs_policy_.get(), ConnectivityStateName(state), picker.get()); + } + // Save the state and picker. + lrs_policy_->state_ = state; + lrs_policy_->picker_ = MakeRefCounted(std::move(picker)); + // Wrap the picker and return it to the channel. + lrs_policy_->MaybeUpdatePickerLocked(); +} + +void LrsLb::Helper::RequestReresolution() { + if (lrs_policy_->shutting_down_) return; + lrs_policy_->channel_control_helper()->RequestReresolution(); +} + +void LrsLb::Helper::AddTraceEvent(TraceSeverity severity, StringView message) { + if (lrs_policy_->shutting_down_) return; + lrs_policy_->channel_control_helper()->AddTraceEvent(severity, message); +} + +// +// factory +// + +class LrsLbFactory : public LoadBalancingPolicyFactory { + public: + OrphanablePtr CreateLoadBalancingPolicy( + LoadBalancingPolicy::Args args) const override { + RefCountedPtr xds_client = + XdsClient::GetFromChannelArgs(*args.args); + if (xds_client == nullptr) { + gpr_log(GPR_ERROR, + "XdsClient not present in channel args -- cannot instantiate " + "lrs LB policy"); + return nullptr; + } + return MakeOrphanable(std::move(xds_client), std::move(args)); + } + + const char* name() const override { return kLrs; } + + RefCountedPtr ParseLoadBalancingConfig( + const Json& json, grpc_error** error) const override { + GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE); + if (json.type() == Json::Type::JSON_NULL) { + // lrs was mentioned as a policy in the deprecated loadBalancingPolicy + // field or in the client API. + *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:loadBalancingPolicy error:lrs policy requires configuration. " + "Please use loadBalancingConfig field of service config instead."); + return nullptr; + } + std::vector error_list; + // Child policy. + RefCountedPtr child_policy; + auto it = json.object_value().find("childPolicy"); + if (it == json.object_value().end()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:childPolicy error:required field missing")); + } else { + grpc_error* parse_error = GRPC_ERROR_NONE; + child_policy = LoadBalancingPolicyRegistry::ParseLoadBalancingConfig( + it->second, &parse_error); + if (child_policy == nullptr) { + GPR_DEBUG_ASSERT(parse_error != GRPC_ERROR_NONE); + std::vector child_errors; + child_errors.push_back(parse_error); + error_list.push_back( + GRPC_ERROR_CREATE_FROM_VECTOR("field:childPolicy", &child_errors)); + } + } + // Cluster name. 
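    // A hedged example of an lrs_experimental config accepted by this parser
    // (values hypothetical; this is the shape the eds policy generates for
    // each locality):
    //
    //   { "lrs_experimental": {
    //       "clusterName": "my_cluster",
    //       "edsServiceName": "my_eds_service",
    //       "lrsLoadReportingServerName": "lrs.example.com",
    //       "locality": {"region": "us-east1", "zone": "b", "subzone": "pod"},
    //       "childPolicy": [{"round_robin": {}}]
    //   } }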
+ std::string cluster_name; + it = json.object_value().find("clusterName"); + if (it == json.object_value().end()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:clusterName error:required field missing")); + } else if (it->second.type() != Json::Type::STRING) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:clusterName error:type should be string")); + } else { + cluster_name = it->second.string_value(); + } + // EDS service name. + std::string eds_service_name; + it = json.object_value().find("edsServiceName"); + if (it != json.object_value().end()) { + if (it->second.type() != Json::Type::STRING) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:edsServiceName error:type should be string")); + } else { + eds_service_name = it->second.string_value(); + } + } + // Locality. + RefCountedPtr locality_name; + it = json.object_value().find("locality"); + if (it == json.object_value().end()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:locality error:required field missing")); + } else { + std::vector child_errors = + ParseLocality(it->second, &locality_name); + if (!child_errors.empty()) { + error_list.push_back( + GRPC_ERROR_CREATE_FROM_VECTOR("field:locality", &child_errors)); + } + } + // LRS load reporting server name. + std::string lrs_load_reporting_server_name; + it = json.object_value().find("lrsLoadReportingServerName"); + if (it == json.object_value().end()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:lrsLoadReportingServerName error:required field missing")); + } else if (it->second.type() != Json::Type::STRING) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:lrsLoadReportingServerName error:type should be string")); + } else { + lrs_load_reporting_server_name = it->second.string_value(); + } + if (!error_list.empty()) { + *error = GRPC_ERROR_CREATE_FROM_VECTOR( + "lrs_experimental LB policy config", &error_list); + return nullptr; + } + return MakeRefCounted( + std::move(child_policy), std::move(cluster_name), + std::move(eds_service_name), std::move(lrs_load_reporting_server_name), + std::move(locality_name)); + } + + private: + static std::vector ParseLocality( + const Json& json, RefCountedPtr* name) { + std::vector error_list; + if (json.type() != Json::Type::OBJECT) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "locality field is not an object")); + return error_list; + } + std::string region; + auto it = json.object_value().find("region"); + if (it != json.object_value().end()) { + if (it->second.type() != Json::Type::STRING) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "\"region\" field is not a string")); + } else { + region = it->second.string_value(); + } + } + std::string zone; + it = json.object_value().find("zone"); + if (it != json.object_value().end()) { + if (it->second.type() != Json::Type::STRING) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "\"zone\" field is not a string")); + } else { + zone = it->second.string_value(); + } + } + std::string subzone; + it = json.object_value().find("subzone"); + if (it != json.object_value().end()) { + if (it->second.type() != Json::Type::STRING) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "\"subzone\" field is not a string")); + } else { + subzone = it->second.string_value(); + } + } + if (region.empty() && zone.empty() && subzone.empty()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( 
+ "at least one of region, zone, or subzone must be set")); + } + if (error_list.empty()) { + *name = MakeRefCounted(region, zone, subzone); + } + return error_list; + } +}; + +} // namespace + +} // namespace grpc_core + +// +// Plugin registration +// + +void grpc_lb_policy_lrs_init() { + grpc_core::LoadBalancingPolicyRegistry::Builder:: + RegisterLoadBalancingPolicyFactory( + absl::make_unique()); +} + +void grpc_lb_policy_lrs_shutdown() {} diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc deleted file mode 100644 index a9c3b236cdb..00000000000 --- a/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc +++ /dev/null @@ -1,1736 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include - -#include "src/core/lib/iomgr/sockaddr.h" -#include "src/core/lib/iomgr/socket_utils.h" - -#include -#include -#include - -#include "absl/types/optional.h" - -#include -#include -#include -#include - -#include "src/core/ext/filters/client_channel/client_channel.h" -#include "src/core/ext/filters/client_channel/lb_policy.h" -#include "src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h" -#include "src/core/ext/filters/client_channel/lb_policy/xds/xds.h" -#include "src/core/ext/filters/client_channel/lb_policy_factory.h" -#include "src/core/ext/filters/client_channel/lb_policy_registry.h" -#include "src/core/ext/filters/client_channel/parse_address.h" -#include "src/core/ext/filters/client_channel/server_address.h" -#include "src/core/ext/filters/client_channel/service_config.h" -#include "src/core/ext/filters/client_channel/xds/xds_client.h" -#include "src/core/ext/filters/client_channel/xds/xds_client_stats.h" -#include "src/core/lib/backoff/backoff.h" -#include "src/core/lib/channel/channel_args.h" -#include "src/core/lib/channel/channel_stack.h" -#include "src/core/lib/gpr/string.h" -#include "src/core/lib/gprpp/manual_constructor.h" -#include "src/core/lib/gprpp/map.h" -#include "src/core/lib/gprpp/memory.h" -#include "src/core/lib/gprpp/orphanable.h" -#include "src/core/lib/gprpp/ref_counted_ptr.h" -#include "src/core/lib/gprpp/sync.h" -#include "src/core/lib/iomgr/sockaddr.h" -#include "src/core/lib/iomgr/sockaddr_utils.h" -#include "src/core/lib/iomgr/timer.h" -#include "src/core/lib/slice/slice_hash_table.h" -#include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/slice/slice_string_helpers.h" -#include "src/core/lib/surface/call.h" -#include "src/core/lib/surface/channel.h" -#include "src/core/lib/surface/channel_init.h" -#include "src/core/lib/transport/static_metadata.h" - -#define GRPC_XDS_DEFAULT_FALLBACK_TIMEOUT_MS 10000 -#define GRPC_XDS_DEFAULT_LOCALITY_RETENTION_INTERVAL_MS (15 * 60 * 1000) -#define GRPC_XDS_DEFAULT_FAILOVER_TIMEOUT_MS 10000 - -namespace grpc_core { - -TraceFlag grpc_lb_xds_trace(false, "xds_lb"); - -namespace { - -constexpr char kXds[] = "xds_experimental"; - -class XdsConfig : public 
LoadBalancingPolicy::Config { - public: - XdsConfig(RefCountedPtr child_policy, - RefCountedPtr fallback_policy, - std::string eds_service_name, - absl::optional lrs_load_reporting_server_name) - : child_policy_(std::move(child_policy)), - fallback_policy_(std::move(fallback_policy)), - eds_service_name_(std::move(eds_service_name)), - lrs_load_reporting_server_name_( - std::move(lrs_load_reporting_server_name)) {} - - const char* name() const override { return kXds; } - - RefCountedPtr child_policy() const { - return child_policy_; - } - - RefCountedPtr fallback_policy() const { - return fallback_policy_; - } - - const char* eds_service_name() const { - return eds_service_name_.empty() ? nullptr : eds_service_name_.c_str(); - }; - - const absl::optional& lrs_load_reporting_server_name() const { - return lrs_load_reporting_server_name_; - }; - - private: - RefCountedPtr child_policy_; - RefCountedPtr fallback_policy_; - std::string eds_service_name_; - absl::optional lrs_load_reporting_server_name_; -}; - -class XdsLb : public LoadBalancingPolicy { - public: - explicit XdsLb(Args args); - - const char* name() const override { return kXds; } - - void UpdateLocked(UpdateArgs args) override; - void ResetBackoffLocked() override; - - private: - class EndpointWatcher; - - // A simple wrapper to convert the picker returned from a locality's child - // policy as a unique_ptr<> to a RefCountedPtr<>. This allows it to be - // referenced by both the picker and the locality. - class RefCountedEndpointPicker : public RefCounted { - public: - explicit RefCountedEndpointPicker(std::unique_ptr picker) - : picker_(std::move(picker)) {} - PickResult Pick(PickArgs args) { return picker_->Pick(args); } - - private: - std::unique_ptr picker_; - }; - - // A picker that wraps the RefCountedEndpointPicker and performs load - // reporting for the locality. - class LoadReportingPicker : public RefCounted { - public: - LoadReportingPicker(RefCountedPtr picker, - RefCountedPtr locality_stats) - : picker_(std::move(picker)), - locality_stats_(std::move(locality_stats)) {} - - PickResult Pick(PickArgs args); - - RefCountedEndpointPicker* picker() const { return picker_.get(); } - XdsClusterLocalityStats* locality_stats() const { - return locality_stats_.get(); - } - - private: - RefCountedPtr picker_; - RefCountedPtr locality_stats_; - }; - - // A picker that uses a stateless weighting algorithm to pick the locality - // to use for each request. - class LocalityPicker : public SubchannelPicker { - public: - // Maintains a weighted list of pickers from each locality that is in ready - // state. The first element in the pair represents the end of a range - // proportional to the locality's weight. The start of the range is the - // previous value in the vector and is 0 for the first element. - using PickerList = - InlinedVector>, - 1>; - LocalityPicker(XdsLb* xds_policy, PickerList pickers) - : drop_stats_(xds_policy->drop_stats_), - drop_config_(xds_policy->drop_config_), - pickers_(std::move(pickers)) {} - - PickResult Pick(PickArgs args) override; - - private: - // Calls the picker of the locality that the key falls within. 
- PickResult PickFromLocality(const uint32_t key, PickArgs args); - - RefCountedPtr drop_stats_; - RefCountedPtr drop_config_; - PickerList pickers_; - }; - - class FallbackHelper : public ChannelControlHelper { - public: - explicit FallbackHelper(RefCountedPtr parent) - : parent_(std::move(parent)) {} - - ~FallbackHelper() { parent_.reset(DEBUG_LOCATION, "FallbackHelper"); } - - RefCountedPtr CreateSubchannel( - const grpc_channel_args& args) override; - void UpdateState(grpc_connectivity_state state, - std::unique_ptr picker) override; - void RequestReresolution() override; - void AddTraceEvent(TraceSeverity severity, StringView message) override; - - private: - RefCountedPtr parent_; - }; - - // Each LocalityMap holds a ref to the XdsLb. - class LocalityMap : public InternallyRefCounted { - public: - // Each Locality holds a ref to the LocalityMap it is in. - class Locality : public InternallyRefCounted { - public: - Locality(RefCountedPtr locality_map, - RefCountedPtr name); - ~Locality(); - - void UpdateLocked(uint32_t locality_weight, ServerAddressList serverlist, - bool update_locality_stats); - void ShutdownLocked(); - void ResetBackoffLocked(); - void DeactivateLocked(); - void Orphan() override; - - uint32_t weight() const { return weight_; } - - grpc_connectivity_state connectivity_state() const { - return connectivity_state_; - } - - RefCountedPtr GetLoadReportingPicker() { - // Recreate load reporting picker if stats object has changed. - if (load_reporting_picker_ == nullptr || - load_reporting_picker_->picker() != picker_wrapper_.get() || - load_reporting_picker_->locality_stats() != stats_.get()) { - load_reporting_picker_ = - MakeRefCounted(picker_wrapper_, stats_); - } - return load_reporting_picker_; - } - - void set_locality_map(RefCountedPtr locality_map) { - locality_map_ = std::move(locality_map); - } - - private: - class Helper : public ChannelControlHelper { - public: - explicit Helper(RefCountedPtr locality) - : locality_(std::move(locality)) {} - - ~Helper() { locality_.reset(DEBUG_LOCATION, "Helper"); } - - RefCountedPtr CreateSubchannel( - const grpc_channel_args& args) override; - void UpdateState(grpc_connectivity_state state, - std::unique_ptr picker) override; - // This is a no-op, because we get the addresses from the xds - // client, which is a watch-based API. - void RequestReresolution() override {} - void AddTraceEvent(TraceSeverity severity, StringView message) override; - - private: - RefCountedPtr locality_; - }; - - // Methods for dealing with the child policy. - OrphanablePtr CreateChildPolicyLocked( - const grpc_channel_args* args); - grpc_channel_args* CreateChildPolicyArgsLocked( - const grpc_channel_args* args); - - void UpdateLocalityStats(); - - static void OnDelayedRemovalTimer(void* arg, grpc_error* error); - void OnDelayedRemovalTimerLocked(grpc_error* error); - - XdsLb* xds_policy() const { return locality_map_->xds_policy(); } - - // The owning locality map. - RefCountedPtr locality_map_; - - RefCountedPtr name_; - RefCountedPtr stats_; - OrphanablePtr child_policy_; - RefCountedPtr picker_wrapper_; - RefCountedPtr load_reporting_picker_; - grpc_connectivity_state connectivity_state_ = GRPC_CHANNEL_IDLE; - uint32_t weight_; - - // States for delayed removal. 
- grpc_timer delayed_removal_timer_; - grpc_closure on_delayed_removal_timer_; - bool delayed_removal_timer_callback_pending_ = false; - bool shutdown_ = false; - }; - - LocalityMap(RefCountedPtr xds_policy, uint32_t priority); - - ~LocalityMap() { xds_policy_.reset(DEBUG_LOCATION, "LocalityMap"); } - - void UpdateLocked( - const XdsApi::PriorityListUpdate::LocalityMap& priority_update, - bool update_locality_stats); - void ResetBackoffLocked(); - void UpdateXdsPickerLocked(); - OrphanablePtr ExtractLocalityLocked( - const RefCountedPtr& name); - void DeactivateLocked(); - // Returns true if this locality map becomes the currently used one (i.e., - // its priority is selected) after reactivation. - bool MaybeReactivateLocked(); - void MaybeCancelFailoverTimerLocked(); - - void Orphan() override; - - XdsLb* xds_policy() const { return xds_policy_.get(); } - uint32_t priority() const { return priority_; } - grpc_connectivity_state connectivity_state() const { - return connectivity_state_; - } - bool failover_timer_callback_pending() const { - return failover_timer_callback_pending_; - } - - private: - void OnLocalityStateUpdateLocked(); - void UpdateConnectivityStateLocked(); - static void OnDelayedRemovalTimer(void* arg, grpc_error* error); - static void OnFailoverTimer(void* arg, grpc_error* error); - void OnDelayedRemovalTimerLocked(grpc_error* error); - void OnFailoverTimerLocked(grpc_error* error); - - const XdsApi::PriorityListUpdate& priority_list_update() const { - return xds_policy_->priority_list_update_; - } - const XdsApi::PriorityListUpdate::LocalityMap* locality_map_update() const { - return xds_policy_->priority_list_update_.Find(priority_); - } - - RefCountedPtr xds_policy_; - - std::map, OrphanablePtr, - XdsLocalityName::Less> - localities_; - const uint32_t priority_; - grpc_connectivity_state connectivity_state_ = GRPC_CHANNEL_IDLE; - - // States for delayed removal. - grpc_timer delayed_removal_timer_; - grpc_closure on_delayed_removal_timer_; - bool delayed_removal_timer_callback_pending_ = false; - - // States of failover. - grpc_timer failover_timer_; - grpc_closure on_failover_timer_; - bool failover_timer_callback_pending_ = false; - }; - - ~XdsLb(); - - void ShutdownLocked() override; - - const char* eds_service_name() const { - if (config_ != nullptr && config_->eds_service_name() != nullptr) { - return config_->eds_service_name(); - } - return server_name_.c_str(); - } - - XdsClient* xds_client() const { - return xds_client_from_channel_ != nullptr ? xds_client_from_channel_.get() - : xds_client_.get(); - } - - void UpdatePrioritiesLocked(bool update_locality_stats); - void UpdateXdsPickerLocked(); - void MaybeCreateLocalityMapLocked(uint32_t priority); - void FailoverOnConnectionFailureLocked(); - void FailoverOnDisconnectionLocked(uint32_t failed_priority); - void SwitchToHigherPriorityLocked(uint32_t priority); - void DeactivatePrioritiesLowerThan(uint32_t priority); - OrphanablePtr ExtractLocalityLocked( - const RefCountedPtr& name, uint32_t exclude_priority); - // Callers should make sure the priority list is non-empty. - uint32_t LowestPriority() const { - return static_cast(priorities_.size()) - 1; - } - bool Contains(uint32_t priority) { return priority < priorities_.size(); } - - // Methods for dealing with fallback state. 
- void MaybeCancelFallbackAtStartupChecks(); - static void OnFallbackTimer(void* arg, grpc_error* error); - void OnFallbackTimerLocked(grpc_error* error); - void UpdateFallbackPolicyLocked(); - OrphanablePtr CreateFallbackPolicyLocked( - const grpc_channel_args* args); - void MaybeExitFallbackMode(); - - // Server name from target URI. - std::string server_name_; - - // Current channel args and config from the resolver. - const grpc_channel_args* args_ = nullptr; - RefCountedPtr config_; - - // Internal state. - bool shutting_down_ = false; - - // The xds client and endpoint watcher. - // If we get the XdsClient from the channel, we store it in - // xds_client_from_channel_; if we create it ourselves, we store it in - // xds_client_. - RefCountedPtr xds_client_from_channel_; - OrphanablePtr xds_client_; - // A pointer to the endpoint watcher, to be used when cancelling the watch. - // Note that this is not owned, so this pointer must never be derefernced. - EndpointWatcher* endpoint_watcher_ = nullptr; - - // Whether the checks for fallback at startup are ALL pending. There are - // several cases where this can be reset: - // 1. The fallback timer fires, we enter fallback mode. - // 2. Before the fallback timer fires, the endpoint watcher reports an - // error, we enter fallback mode. - // 3. Before the fallback timer fires, if any child policy in the locality map - // becomes READY, we cancel the fallback timer. - bool fallback_at_startup_checks_pending_ = false; - // Timeout in milliseconds for before using fallback backend addresses. - // 0 means not using fallback. - const grpc_millis lb_fallback_timeout_ms_; - // The backend addresses from the resolver. - ServerAddressList fallback_backend_addresses_; - // Fallback timer. - grpc_timer lb_fallback_timer_; - grpc_closure lb_on_fallback_; - - // Non-null iff we are in fallback mode. - OrphanablePtr fallback_policy_; - - const grpc_millis locality_retention_interval_ms_; - const grpc_millis locality_map_failover_timeout_ms_; - // The list of locality maps, indexed by priority. P0 is the highest - // priority. - InlinedVector, 2> priorities_; - // The priority that is being used. - uint32_t current_priority_ = UINT32_MAX; - // The update for priority_list_. - XdsApi::PriorityListUpdate priority_list_update_; - - // The config for dropping calls. - RefCountedPtr drop_config_; - - // Drop stats for client-side load reporting. - RefCountedPtr drop_stats_; -}; - -// -// XdsLb::LoadReportingPicker -// - -LoadBalancingPolicy::PickResult XdsLb::LoadReportingPicker::Pick( - LoadBalancingPolicy::PickArgs args) { - // Forward the pick to the picker returned from the child policy. - PickResult result = picker_->Pick(args); - if (result.type != PickResult::PICK_COMPLETE || - result.subchannel == nullptr || locality_stats_ == nullptr) { - return result; - } - // Record a call started. - locality_stats_->AddCallStarted(); - // Intercept the recv_trailing_metadata op to record call completion. - XdsClusterLocalityStats* locality_stats = - locality_stats_->Ref(DEBUG_LOCATION, "LocalityStats+call").release(); - result.recv_trailing_metadata_ready = - // Note: This callback does not run in either the control plane - // combiner or in the data plane mutex. 
- [locality_stats](grpc_error* error, MetadataInterface* /*metadata*/, - CallState* /*call_state*/) { - const bool call_failed = error != GRPC_ERROR_NONE; - locality_stats->AddCallFinished(call_failed); - locality_stats->Unref(DEBUG_LOCATION, "LocalityStats+call"); - }; - return result; -} - -// -// XdsLb::LocalityPicker -// - -XdsLb::PickResult XdsLb::LocalityPicker::Pick(PickArgs args) { - // Handle drop. - const std::string* drop_category; - if (drop_config_->ShouldDrop(&drop_category)) { - if (drop_stats_ != nullptr) drop_stats_->AddCallDropped(*drop_category); - PickResult result; - result.type = PickResult::PICK_COMPLETE; - return result; - } - // Generate a random number in [0, total weight). - const uint32_t key = rand() % pickers_[pickers_.size() - 1].first; - // Forward pick to whichever locality maps to the range in which the - // random number falls in. - return PickFromLocality(key, args); -} - -XdsLb::PickResult XdsLb::LocalityPicker::PickFromLocality(const uint32_t key, - PickArgs args) { - size_t mid = 0; - size_t start_index = 0; - size_t end_index = pickers_.size() - 1; - size_t index = 0; - while (end_index > start_index) { - mid = (start_index + end_index) / 2; - if (pickers_[mid].first > key) { - end_index = mid; - } else if (pickers_[mid].first < key) { - start_index = mid + 1; - } else { - index = mid + 1; - break; - } - } - if (index == 0) index = start_index; - GPR_ASSERT(pickers_[index].first > key); - return pickers_[index].second->Pick(args); -} - -// -// XdsLb::FallbackHelper -// - -RefCountedPtr XdsLb::FallbackHelper::CreateSubchannel( - const grpc_channel_args& args) { - if (parent_->shutting_down_) return nullptr; - return parent_->channel_control_helper()->CreateSubchannel(args); -} - -void XdsLb::FallbackHelper::UpdateState( - grpc_connectivity_state state, std::unique_ptr picker) { - if (parent_->shutting_down_) return; - parent_->channel_control_helper()->UpdateState(state, std::move(picker)); -} - -void XdsLb::FallbackHelper::RequestReresolution() { - if (parent_->shutting_down_) return; - parent_->channel_control_helper()->RequestReresolution(); -} - -void XdsLb::FallbackHelper::AddTraceEvent(TraceSeverity severity, - StringView message) { - if (parent_->shutting_down_) return; - parent_->channel_control_helper()->AddTraceEvent(severity, message); -} - -// -// XdsLb::EndpointWatcher -// - -class XdsLb::EndpointWatcher : public XdsClient::EndpointWatcherInterface { - public: - explicit EndpointWatcher(RefCountedPtr xds_policy) - : xds_policy_(std::move(xds_policy)) {} - - ~EndpointWatcher() { xds_policy_.reset(DEBUG_LOCATION, "EndpointWatcher"); } - - void OnEndpointChanged(XdsApi::EdsUpdate update) override { - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, "[xdslb %p] Received EDS update from xds client", - xds_policy_.get()); - } - // If the balancer tells us to drop all the calls, we should exit fallback - // mode immediately. - if (update.drop_all) xds_policy_->MaybeExitFallbackMode(); - // Update the drop config. - const bool drop_config_changed = - xds_policy_->drop_config_ == nullptr || - *xds_policy_->drop_config_ != *update.drop_config; - xds_policy_->drop_config_ = std::move(update.drop_config); - // Ignore identical locality update. - if (xds_policy_->priority_list_update_ == update.priority_list_update) { - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, - "[xdslb %p] Incoming locality update identical to current, " - "ignoring. 
(drop_config_changed=%d)", - xds_policy_.get(), drop_config_changed); - } - if (drop_config_changed) { - xds_policy_->UpdateXdsPickerLocked(); - } - return; - } - // Update the priority list. - xds_policy_->priority_list_update_ = std::move(update.priority_list_update); - xds_policy_->UpdatePrioritiesLocked(false /*update_locality_stats*/); - } - - void OnError(grpc_error* error) override { - // If the fallback-at-startup checks are pending, go into fallback mode - // immediately. This short-circuits the timeout for the - // fallback-at-startup case. - if (xds_policy_->fallback_at_startup_checks_pending_) { - gpr_log(GPR_INFO, - "[xdslb %p] xds watcher reported error; entering fallback " - "mode: %s", - xds_policy_.get(), grpc_error_string(error)); - xds_policy_->fallback_at_startup_checks_pending_ = false; - grpc_timer_cancel(&xds_policy_->lb_fallback_timer_); - xds_policy_->UpdateFallbackPolicyLocked(); - // If the xds call failed, request re-resolution. - // TODO(roth): We check the error string contents here to - // differentiate between the xds call failing and the xds channel - // going into TRANSIENT_FAILURE. This is a pretty ugly hack, - // but it's okay for now, since we're not yet sure whether we will - // continue to support the current fallback functionality. If we - // decide to keep the fallback approach, then we should either - // find a cleaner way to expose the difference between these two - // cases or decide that we're okay re-resolving in both cases. - // Note that even if we do keep the current fallback functionality, - // this re-resolution will only be necessary if we are going to be - // using this LB policy with resolvers other than the xds resolver. - if (strstr(grpc_error_string(error), "xds call failed")) { - xds_policy_->channel_control_helper()->RequestReresolution(); - } - } else if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, "[xdslb %p] xds watcher reported error (ignoring): %s", - xds_policy_.get(), grpc_error_string(error)); - } - GRPC_ERROR_UNREF(error); - } - - private: - RefCountedPtr xds_policy_; -}; - -// -// ctor and dtor -// - -XdsLb::XdsLb(Args args) - : LoadBalancingPolicy(std::move(args)), - xds_client_from_channel_(XdsClient::GetFromChannelArgs(*args.args)), - lb_fallback_timeout_ms_(grpc_channel_args_find_integer( - args.args, GRPC_ARG_XDS_FALLBACK_TIMEOUT_MS, - {GRPC_XDS_DEFAULT_FALLBACK_TIMEOUT_MS, 0, INT_MAX})), - locality_retention_interval_ms_(grpc_channel_args_find_integer( - args.args, GRPC_ARG_LOCALITY_RETENTION_INTERVAL_MS, - {GRPC_XDS_DEFAULT_LOCALITY_RETENTION_INTERVAL_MS, 0, INT_MAX})), - locality_map_failover_timeout_ms_(grpc_channel_args_find_integer( - args.args, GRPC_ARG_XDS_FAILOVER_TIMEOUT_MS, - {GRPC_XDS_DEFAULT_FAILOVER_TIMEOUT_MS, 0, INT_MAX})) { - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, "[xdslb %p] created -- xds client from channel: %p", this, - xds_client_from_channel_.get()); - } - // Closure Initialization - GRPC_CLOSURE_INIT(&lb_on_fallback_, &XdsLb::OnFallbackTimer, this, - grpc_schedule_on_exec_ctx); - // Record server name. - const grpc_arg* arg = grpc_channel_args_find(args.args, GRPC_ARG_SERVER_URI); - const char* server_uri = grpc_channel_arg_get_string(arg); - GPR_ASSERT(server_uri != nullptr); - grpc_uri* uri = grpc_uri_parse(server_uri, true); - GPR_ASSERT(uri->path[0] != '\0'); - server_name_ = uri->path[0] == '/' ? 
uri->path + 1 : uri->path; - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, "[xdslb %p] server name from channel: %s", this, - server_name_.c_str()); - } - grpc_uri_destroy(uri); -} - -XdsLb::~XdsLb() { - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, "[xdslb %p] destroying xds LB policy", this); - } - grpc_channel_args_destroy(args_); -} - -void XdsLb::ShutdownLocked() { - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, "[xdslb %p] shutting down", this); - } - shutting_down_ = true; - MaybeCancelFallbackAtStartupChecks(); - priorities_.clear(); - drop_stats_.reset(); - if (fallback_policy_ != nullptr) { - grpc_pollset_set_del_pollset_set(fallback_policy_->interested_parties(), - interested_parties()); - fallback_policy_.reset(); - } - // Cancel the endpoint watch here instead of in our dtor if we are using the - // XdsResolver, because the watcher holds a ref to us and we might not be - // destroying the Xds client leading to a situation where the Xds lb policy is - // never destroyed. - if (xds_client_from_channel_ != nullptr) { - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, "[xdslb %p] cancelling watch for %s", this, - eds_service_name()); - } - xds_client()->CancelEndpointDataWatch(StringView(eds_service_name()), - endpoint_watcher_); - xds_client_from_channel_.reset(); - } - xds_client_.reset(); -} - -// -// public methods -// - -void XdsLb::ResetBackoffLocked() { - // When the XdsClient is instantiated in the resolver instead of in this - // LB policy, this is done via the resolver, so we don't need to do it - // for xds_client_from_channel_ here. - if (xds_client_ != nullptr) xds_client_->ResetBackoff(); - for (size_t i = 0; i < priorities_.size(); ++i) { - priorities_[i]->ResetBackoffLocked(); - } - if (fallback_policy_ != nullptr) { - fallback_policy_->ResetBackoffLocked(); - } -} - -void XdsLb::UpdateLocked(UpdateArgs args) { - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, "[xdslb %p] Received update", this); - } - const bool is_initial_update = args_ == nullptr; - // Update config. - auto old_config = std::move(config_); - config_ = std::move(args.config); - // Update fallback address list. - fallback_backend_addresses_ = std::move(args.addresses); - // Update args. - grpc_channel_args_destroy(args_); - args_ = args.args; - args.args = nullptr; - // Update the existing fallback policy. The fallback policy config and/or the - // fallback addresses may be new. - if (fallback_policy_ != nullptr) UpdateFallbackPolicyLocked(); - if (is_initial_update) { - // Initialize XdsClient. - if (xds_client_from_channel_ == nullptr) { - grpc_error* error = GRPC_ERROR_NONE; - xds_client_ = MakeOrphanable( - work_serializer(), interested_parties(), - StringView(eds_service_name()), nullptr /* service config watcher */, - *args_, &error); - // TODO(roth): If we decide that we care about fallback mode, add - // proper error handling here. - GPR_ASSERT(error == GRPC_ERROR_NONE); - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, "[xdslb %p] Created xds client %p", this, - xds_client_.get()); - } - } - // Start fallback-at-startup checks. - grpc_millis deadline = ExecCtx::Get()->Now() + lb_fallback_timeout_ms_; - Ref(DEBUG_LOCATION, "on_fallback_timer").release(); // Held by closure - fallback_at_startup_checks_pending_ = true; - grpc_timer_init(&lb_fallback_timer_, deadline, &lb_on_fallback_); - } - // Update drop stats for load reporting if needed. 
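// Editorial sketch (not part of this diff): LocalityPicker::Pick and
// PickFromLocality, removed earlier in this hunk, choose a locality by
// drawing a random key in [0, total_weight) and finding the first
// cumulative-weight boundary strictly greater than the key. The standalone
// fragment below mirrors that range lookup with hypothetical names, using
// std::upper_bound instead of the hand-rolled binary search; it illustrates
// the idea only and is not the policy's actual code.
#include <algorithm>
#include <cstdint>
#include <string>
#include <utility>
#include <vector>

// Each entry pairs a cumulative weight boundary with a locality label
// (the real picker stores a child picker rather than a label).
using WeightedEntry = std::pair<uint32_t, std::string>;

const std::string& PickLocality(const std::vector<WeightedEntry>& entries,
                                uint32_t key) {
  // First entry whose boundary is > key; valid as long as
  // key < entries.back().first, which Pick() guarantees by construction.
  auto it = std::upper_bound(
      entries.begin(), entries.end(), key,
      [](uint32_t k, const WeightedEntry& e) { return k < e.first; });
  return it->second;
}

// Usage: weights 30/20/50 become boundaries {30, "a"}, {50, "b"}, {100, "c"};
// a key of 42 selects "b" because 42 falls in the range [30, 50).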
- if (is_initial_update || config_->lrs_load_reporting_server_name() != - old_config->lrs_load_reporting_server_name()) { - drop_stats_.reset(); - if (config_->lrs_load_reporting_server_name().has_value()) { - drop_stats_ = xds_client()->AddClusterDropStats( - config_->lrs_load_reporting_server_name().value(), - // TODO(roth): We currently hard-code the assumption that - // cluster name and EDS service name are the same. Fix this - // as part of refectoring this LB policy. - eds_service_name(), eds_service_name()); - } - } - // On the initial update, create the endpoint watcher. - if (is_initial_update) { - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, "[xdslb %p] starting watch for %s", this, - eds_service_name()); - } - auto watcher = absl::make_unique( - Ref(DEBUG_LOCATION, "EndpointWatcher")); - endpoint_watcher_ = watcher.get(); - xds_client()->WatchEndpointData(StringView(eds_service_name()), - std::move(watcher)); - } else { - // Update priority list. - // Note that this comes after updating drop_stats_, since we want that - // to be used by any new picker we create here. - // No need to do this on the initial update, since there won't be any - // priorities to update yet. - const bool update_locality_stats = - config_->lrs_load_reporting_server_name() != - old_config->lrs_load_reporting_server_name(); - UpdatePrioritiesLocked(update_locality_stats); - } -} - -// -// fallback-related methods -// - -void XdsLb::MaybeCancelFallbackAtStartupChecks() { - if (!fallback_at_startup_checks_pending_) return; - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, "[xdslb %p] Cancelling fallback timer", this); - } - grpc_timer_cancel(&lb_fallback_timer_); - fallback_at_startup_checks_pending_ = false; -} - -void XdsLb::OnFallbackTimer(void* arg, grpc_error* error) { - XdsLb* xdslb_policy = static_cast(arg); - GRPC_ERROR_REF(error); // ref owned by lambda - xdslb_policy->work_serializer()->Run( - [xdslb_policy, error]() { xdslb_policy->OnFallbackTimerLocked(error); }, - DEBUG_LOCATION); -} - -void XdsLb::OnFallbackTimerLocked(grpc_error* error) { - // If some fallback-at-startup check is done after the timer fires but before - // this callback actually runs, don't fall back. - if (fallback_at_startup_checks_pending_ && !shutting_down_ && - error == GRPC_ERROR_NONE) { - gpr_log(GPR_INFO, - "[xdslb %p] Child policy not ready after fallback timeout; " - "entering fallback mode", - this); - fallback_at_startup_checks_pending_ = false; - UpdateFallbackPolicyLocked(); - } - Unref(DEBUG_LOCATION, "on_fallback_timer"); - GRPC_ERROR_UNREF(error); -} - -void XdsLb::UpdateFallbackPolicyLocked() { - if (shutting_down_) return; - // Create policy if needed. - if (fallback_policy_ == nullptr) { - fallback_policy_ = CreateFallbackPolicyLocked(args_); - GPR_ASSERT(fallback_policy_ != nullptr); - } - // Perform update. 
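// Editorial sketch (not part of this diff): OnFallbackTimer above shows the
// pattern this file uses for timer callbacks: a static, C-style callback
// takes ownership of the error ref and immediately re-schedules the real
// work onto the policy's WorkSerializer, so the *Locked methods never run
// concurrently. The standalone analogue below uses hypothetical types and a
// trivial mutex-based serializer purely to illustrate the shape of that hop.
#include <functional>
#include <mutex>

class SerializerSketch {  // stand-in for grpc_core::WorkSerializer
 public:
  void Run(std::function<void()> fn) {
    std::lock_guard<std::mutex> lock(mu_);
    fn();  // the real class also preserves ordering across threads
  }

 private:
  std::mutex mu_;
};

struct PolicySketch {
  SerializerSketch serializer;
  bool fallback_pending = true;

  // C-style trampoline, as a timer library would invoke it.
  static void OnTimer(void* arg) {
    auto* self = static_cast<PolicySketch*>(arg);
    self->serializer.Run([self]() { self->OnTimerLocked(); });
  }

  // Safe to touch policy state here: all *Locked work is serialized.
  void OnTimerLocked() { fallback_pending = false; }
};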
- UpdateArgs update_args; - update_args.addresses = fallback_backend_addresses_; - update_args.config = config_->fallback_policy(); - update_args.args = grpc_channel_args_copy(args_); - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, "[xdslb %p] Updating fallback child policy handler %p", - this, fallback_policy_.get()); - } - fallback_policy_->UpdateLocked(std::move(update_args)); -} - -OrphanablePtr XdsLb::CreateFallbackPolicyLocked( - const grpc_channel_args* args) { - LoadBalancingPolicy::Args lb_policy_args; - lb_policy_args.work_serializer = work_serializer(); - lb_policy_args.args = args; - lb_policy_args.channel_control_helper = - absl::make_unique(Ref(DEBUG_LOCATION, "FallbackHelper")); - OrphanablePtr lb_policy = - MakeOrphanable(std::move(lb_policy_args), - &grpc_lb_xds_trace); - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, - "[xdslb %p] Created new fallback child policy handler (%p)", this, - lb_policy.get()); - } - // Add the xDS's interested_parties pollset_set to that of the newly created - // child policy. This will make the child policy progress upon activity on xDS - // LB, which in turn is tied to the application's call. - grpc_pollset_set_add_pollset_set(lb_policy->interested_parties(), - interested_parties()); - return lb_policy; -} - -void XdsLb::MaybeExitFallbackMode() { - if (fallback_policy_ == nullptr) return; - gpr_log(GPR_INFO, "[xdslb %p] Exiting fallback mode", this); - fallback_policy_.reset(); -} - -// -// priority list-related methods -// - -void XdsLb::UpdatePrioritiesLocked(bool update_locality_stats) { - // 1. Remove from the priority list the priorities that are not in the update. - DeactivatePrioritiesLowerThan(priority_list_update_.LowestPriority()); - // 2. Update all the existing priorities. - for (uint32_t priority = 0; priority < priorities_.size(); ++priority) { - LocalityMap* locality_map = priorities_[priority].get(); - const auto* locality_map_update = priority_list_update_.Find(priority); - // If we have more current priorities than exist in the update, stop here. - if (locality_map_update == nullptr) break; - // Propagate locality_map_update. - // TODO(juanlishen): Find a clean way to skip duplicate update for a - // priority. - locality_map->UpdateLocked(*locality_map_update, update_locality_stats); - } - // 3. Only create a new locality map if all the existing ones have failed. - if (priorities_.empty() || - !priorities_[priorities_.size() - 1]->failover_timer_callback_pending()) { - const uint32_t new_priority = static_cast(priorities_.size()); - // Create a new locality map. Note that in some rare cases (e.g., the - // locality map reports TRANSIENT_FAILURE synchronously due to subchannel - // sharing), the following invocation may result in multiple locality maps - // to be created. - MaybeCreateLocalityMapLocked(new_priority); - } - // 4. If we updated locality stats and we already have at least one - // priority, update the picker to start using the new stats object(s). - if (update_locality_stats && !priorities_.empty()) { - UpdateXdsPickerLocked(); - } -} - -void XdsLb::UpdateXdsPickerLocked() { - // If we are in fallback mode, don't generate an xds picker from localities. 
- if (fallback_policy_ != nullptr) return; - if (current_priority_ == UINT32_MAX) { - if (fallback_policy_ == nullptr) { - grpc_error* error = grpc_error_set_int( - GRPC_ERROR_CREATE_FROM_STATIC_STRING("no ready locality map"), - GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE); - channel_control_helper()->UpdateState( - GRPC_CHANNEL_TRANSIENT_FAILURE, - absl::make_unique(error)); - } - return; - } - priorities_[current_priority_]->UpdateXdsPickerLocked(); -} - -void XdsLb::MaybeCreateLocalityMapLocked(uint32_t priority) { - // Exhausted priorities in the update. - if (!priority_list_update_.Contains(priority)) return; - auto new_locality_map = - new LocalityMap(Ref(DEBUG_LOCATION, "LocalityMap"), priority); - priorities_.emplace_back(OrphanablePtr(new_locality_map)); - new_locality_map->UpdateLocked(*priority_list_update_.Find(priority), - false /*update_locality_stats*/); -} - -void XdsLb::FailoverOnConnectionFailureLocked() { - const uint32_t failed_priority = LowestPriority(); - // If we're failing over from the lowest priority, report TRANSIENT_FAILURE. - if (failed_priority == priority_list_update_.LowestPriority()) { - UpdateXdsPickerLocked(); - } - MaybeCreateLocalityMapLocked(failed_priority + 1); -} - -void XdsLb::FailoverOnDisconnectionLocked(uint32_t failed_priority) { - current_priority_ = UINT32_MAX; - for (uint32_t next_priority = failed_priority + 1; - next_priority <= priority_list_update_.LowestPriority(); - ++next_priority) { - if (!Contains(next_priority)) { - MaybeCreateLocalityMapLocked(next_priority); - return; - } - if (priorities_[next_priority]->MaybeReactivateLocked()) return; - } -} - -void XdsLb::SwitchToHigherPriorityLocked(uint32_t priority) { - current_priority_ = priority; - DeactivatePrioritiesLowerThan(current_priority_); - UpdateXdsPickerLocked(); -} - -void XdsLb::DeactivatePrioritiesLowerThan(uint32_t priority) { - if (priorities_.empty()) return; - // Deactivate the locality maps from the lowest priority. - for (uint32_t p = LowestPriority(); p > priority; --p) { - if (locality_retention_interval_ms_ == 0) { - priorities_.pop_back(); - } else { - priorities_[p]->DeactivateLocked(); - } - } -} - -OrphanablePtr XdsLb::ExtractLocalityLocked( - const RefCountedPtr& name, uint32_t exclude_priority) { - for (uint32_t priority = 0; priority < priorities_.size(); ++priority) { - if (priority == exclude_priority) continue; - LocalityMap* locality_map = priorities_[priority].get(); - auto locality = locality_map->ExtractLocalityLocked(name); - if (locality != nullptr) { - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, - "[xdslb %p] moving locality %p %s to new priority (%" PRIu32 - " -> %" PRIu32 ")", - this, locality.get(), name->AsHumanReadableString(), - exclude_priority, priority); - } - return locality; - } - } - return nullptr; -} - -// -// XdsLb::LocalityMap -// - -XdsLb::LocalityMap::LocalityMap(RefCountedPtr xds_policy, - uint32_t priority) - : xds_policy_(std::move(xds_policy)), priority_(priority) { - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, "[xdslb %p] Creating priority %" PRIu32, - xds_policy_.get(), priority_); - } - // Closure Initialization - GRPC_CLOSURE_INIT(&on_delayed_removal_timer_, OnDelayedRemovalTimer, this, - grpc_schedule_on_exec_ctx); - GRPC_CLOSURE_INIT(&on_failover_timer_, OnFailoverTimer, this, - grpc_schedule_on_exec_ctx); - // Start the failover timer. 
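// Editorial sketch (not part of this diff): the failover methods above treat
// lower indices as higher priorities (priority 0 is preferred) and, when the
// current priority disconnects, walk toward the lowest priority, creating or
// reactivating each candidate until one can serve. The fragment below is a
// much simplified selection over already-known child states, with
// hypothetical names; the real code also manages failover timers and the
// deactivation of lower priorities.
#include <cstdint>
#include <vector>

enum class PriorityState { kReady, kConnecting, kTransientFailure };

// Returns the index of the first usable priority after `failed_priority`,
// or -1 if none is usable (the policy then reports TRANSIENT_FAILURE).
int NextUsablePriority(const std::vector<PriorityState>& priorities,
                       uint32_t failed_priority) {
  for (uint32_t p = failed_priority + 1; p < priorities.size(); ++p) {
    if (priorities[p] == PriorityState::kReady) return static_cast<int>(p);
  }
  return -1;
}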
- Ref(DEBUG_LOCATION, "LocalityMap+OnFailoverTimerLocked").release(); - grpc_timer_init( - &failover_timer_, - ExecCtx::Get()->Now() + xds_policy_->locality_map_failover_timeout_ms_, - &on_failover_timer_); - failover_timer_callback_pending_ = true; - // This is the first locality map ever created, report CONNECTING. - if (priority_ == 0 && xds_policy_->fallback_policy_ == nullptr) { - xds_policy_->channel_control_helper()->UpdateState( - GRPC_CHANNEL_CONNECTING, - absl::make_unique( - xds_policy_->Ref(DEBUG_LOCATION, "QueuePicker"))); - } -} - -void XdsLb::LocalityMap::UpdateLocked( - const XdsApi::PriorityListUpdate::LocalityMap& priority_update, - bool update_locality_stats) { - if (xds_policy_->shutting_down_) return; - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, "[xdslb %p] Start Updating priority %" PRIu32, - xds_policy(), priority_); - } - // Maybe reactivate the locality map in case all the active locality maps have - // failed. - MaybeReactivateLocked(); - // Remove (later) the localities not in priority_update. - for (auto iter = localities_.begin(); iter != localities_.end();) { - const auto& name = iter->first; - Locality* locality = iter->second.get(); - if (priority_update.Contains(name)) { - ++iter; - continue; - } - if (xds_policy()->locality_retention_interval_ms_ == 0) { - iter = localities_.erase(iter); - } else { - locality->DeactivateLocked(); - ++iter; - } - } - // Add or update the localities in priority_update. - for (const auto& p : priority_update.localities) { - const auto& name = p.first; - const auto& locality_update = p.second; - OrphanablePtr& locality = localities_[name]; - if (locality == nullptr) { - // Move from another locality map if possible. - locality = xds_policy_->ExtractLocalityLocked(name, priority_); - if (locality != nullptr) { - locality->set_locality_map( - Ref(DEBUG_LOCATION, "LocalityMap+Locality_move")); - } else { - locality = MakeOrphanable( - Ref(DEBUG_LOCATION, "LocalityMap+Locality"), name); - } - } - // Keep a copy of serverlist in the update so that we can compare it - // with the future ones. - locality->UpdateLocked(locality_update.lb_weight, - locality_update.serverlist, update_locality_stats); - } - // If this is the current priority and we removed all of the READY - // localities, go into state CONNECTING. - // TODO(roth): Ideally, we should model this as a graceful policy - // switch: we should keep using the old localities for a short period - // of time, long enough to give the new localities a chance to get - // connected. As part of refactoring this policy, we should try to - // fix that. 
- if (priority_ == xds_policy()->current_priority_) { - bool found_ready = false; - for (auto& p : localities_) { - const auto& locality_name = p.first; - Locality* locality = p.second.get(); - if (!locality_map_update()->Contains(locality_name)) continue; - if (locality->connectivity_state() == GRPC_CHANNEL_READY) { - found_ready = true; - break; - } - } - if (!found_ready) { - xds_policy_->channel_control_helper()->UpdateState( - GRPC_CHANNEL_CONNECTING, - absl::make_unique( - xds_policy_->Ref(DEBUG_LOCATION, "QueuePicker"))); - xds_policy_->current_priority_ = UINT32_MAX; - } - } -} - -void XdsLb::LocalityMap::ResetBackoffLocked() { - for (auto& p : localities_) p.second->ResetBackoffLocked(); -} - -void XdsLb::LocalityMap::UpdateXdsPickerLocked() { - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, "[xdslb %p] constructing new picker", xds_policy()); - } - // Construct a new xds picker which maintains a map of all locality pickers - // that are ready. Each locality is represented by a portion of the range - // proportional to its weight, such that the total range is the sum of the - // weights of all localities. - LocalityPicker::PickerList picker_list; - uint32_t end = 0; - for (auto& p : localities_) { - const auto& locality_name = p.first; - Locality* locality = p.second.get(); - // Skip the localities that are not in the latest locality map update. - const auto* locality_update = locality_map_update(); - if (locality_update == nullptr) continue; - if (!locality_update->Contains(locality_name)) continue; - if (locality->connectivity_state() != GRPC_CHANNEL_READY) continue; - end += locality->weight(); - picker_list.push_back( - std::make_pair(end, locality->GetLoadReportingPicker())); - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, "[xdslb %p] locality=%s weight=%d picker=%p", - xds_policy(), locality_name->AsHumanReadableString(), - locality->weight(), picker_list.back().second.get()); - } - } - xds_policy()->channel_control_helper()->UpdateState( - GRPC_CHANNEL_READY, - absl::make_unique(xds_policy(), std::move(picker_list))); -} - -OrphanablePtr -XdsLb::LocalityMap::ExtractLocalityLocked( - const RefCountedPtr& name) { - for (auto iter = localities_.begin(); iter != localities_.end(); ++iter) { - const auto& name_in_map = iter->first; - if (*name_in_map == *name) { - auto locality = std::move(iter->second); - localities_.erase(iter); - return locality; - } - } - return nullptr; -} - -void XdsLb::LocalityMap::DeactivateLocked() { - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, "[xdslb %p] deactivating priority %" PRIu32, xds_policy(), - priority_); - } - // If already deactivated, don't do it again. - if (delayed_removal_timer_callback_pending_) return; - MaybeCancelFailoverTimerLocked(); - // Start a timer to delete the locality. - Ref(DEBUG_LOCATION, "LocalityMap+timer").release(); - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, - "[xdslb %p] Will remove priority %" PRIu32 " in %" PRId64 " ms.", - xds_policy(), priority_, - xds_policy()->locality_retention_interval_ms_); - } - grpc_timer_init( - &delayed_removal_timer_, - ExecCtx::Get()->Now() + xds_policy()->locality_retention_interval_ms_, - &on_delayed_removal_timer_); - delayed_removal_timer_callback_pending_ = true; -} - -bool XdsLb::LocalityMap::MaybeReactivateLocked() { - // Don't reactivate a priority that is not higher than the current one. 
- if (priority_ >= xds_policy_->current_priority_) return false; - // Reactivate this priority by cancelling deletion timer. - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, "[xdslb %p] reactivating priority %" PRIu32, xds_policy(), - priority_); - } - if (delayed_removal_timer_callback_pending_) { - grpc_timer_cancel(&delayed_removal_timer_); - } - // Switch to this higher priority if it's READY. - if (connectivity_state_ != GRPC_CHANNEL_READY) return false; - xds_policy_->SwitchToHigherPriorityLocked(priority_); - return true; -} - -void XdsLb::LocalityMap::MaybeCancelFailoverTimerLocked() { - if (failover_timer_callback_pending_) grpc_timer_cancel(&failover_timer_); -} - -void XdsLb::LocalityMap::Orphan() { - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, "[xdslb %p] Priority %" PRIu32 " orphaned.", xds_policy(), - priority_); - } - MaybeCancelFailoverTimerLocked(); - if (delayed_removal_timer_callback_pending_) { - grpc_timer_cancel(&delayed_removal_timer_); - } - localities_.clear(); - Unref(DEBUG_LOCATION, "LocalityMap+Orphan"); -} - -void XdsLb::LocalityMap::OnLocalityStateUpdateLocked() { - UpdateConnectivityStateLocked(); - // Ignore priorities not in priority_list_update. - if (!priority_list_update().Contains(priority_)) return; - const uint32_t current_priority = xds_policy_->current_priority_; - // Ignore lower-than-current priorities. - if (priority_ > current_priority) return; - // Maybe update fallback state. - if (connectivity_state_ == GRPC_CHANNEL_READY) { - xds_policy_->MaybeCancelFallbackAtStartupChecks(); - xds_policy_->MaybeExitFallbackMode(); - } - // Update is for a higher-than-current priority. (Special case: update is for - // any active priority if there is no current priority.) - if (priority_ < current_priority) { - if (connectivity_state_ == GRPC_CHANNEL_READY) { - MaybeCancelFailoverTimerLocked(); - // If a higher-than-current priority becomes READY, switch to use it. - xds_policy_->SwitchToHigherPriorityLocked(priority_); - } else if (connectivity_state_ == GRPC_CHANNEL_TRANSIENT_FAILURE) { - // If a higher-than-current priority becomes TRANSIENT_FAILURE, only - // handle it if it's the priority that is still in failover timeout. - if (failover_timer_callback_pending_) { - MaybeCancelFailoverTimerLocked(); - xds_policy_->FailoverOnConnectionFailureLocked(); - } - } - return; - } - // Update is for current priority. - if (connectivity_state_ != GRPC_CHANNEL_READY) { - // Fail over if it's no longer READY. - xds_policy_->FailoverOnDisconnectionLocked(priority_); - } - // At this point, one of the following things has happened to the current - // priority. - // 1. It remained the same (but received picker update from its localities). - // 2. It changed to a lower priority due to failover. - // 3. It became invalid because failover didn't yield a READY priority. - // In any case, update the xds picker. - xds_policy_->UpdateXdsPickerLocked(); -} - -void XdsLb::LocalityMap::UpdateConnectivityStateLocked() { - size_t num_ready = 0; - size_t num_connecting = 0; - size_t num_idle = 0; - size_t num_transient_failures = 0; - for (const auto& p : localities_) { - const auto& locality_name = p.first; - const Locality* locality = p.second.get(); - // Skip the localities that are not in the latest locality map update. 
- if (!locality_map_update()->Contains(locality_name)) continue; - switch (locality->connectivity_state()) { - case GRPC_CHANNEL_READY: { - ++num_ready; - break; - } - case GRPC_CHANNEL_CONNECTING: { - ++num_connecting; - break; - } - case GRPC_CHANNEL_IDLE: { - ++num_idle; - break; - } - case GRPC_CHANNEL_TRANSIENT_FAILURE: { - ++num_transient_failures; - break; - } - default: - GPR_UNREACHABLE_CODE(return ); - } - } - if (num_ready > 0) { - connectivity_state_ = GRPC_CHANNEL_READY; - } else if (num_connecting > 0) { - connectivity_state_ = GRPC_CHANNEL_CONNECTING; - } else if (num_idle > 0) { - connectivity_state_ = GRPC_CHANNEL_IDLE; - } else { - connectivity_state_ = GRPC_CHANNEL_TRANSIENT_FAILURE; - } - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, - "[xdslb %p] Priority %" PRIu32 " (%p) connectivity changed to %s", - xds_policy(), priority_, this, - ConnectivityStateName(connectivity_state_)); - } -} - -void XdsLb::LocalityMap::OnDelayedRemovalTimer(void* arg, grpc_error* error) { - LocalityMap* self = static_cast(arg); - GRPC_ERROR_REF(error); // ref owned by lambda - self->xds_policy_->work_serializer()->Run( - [self, error]() { self->OnDelayedRemovalTimerLocked(error); }, - DEBUG_LOCATION); -} - -void XdsLb::LocalityMap::OnDelayedRemovalTimerLocked(grpc_error* error) { - delayed_removal_timer_callback_pending_ = false; - if (error == GRPC_ERROR_NONE && !xds_policy_->shutting_down_) { - const bool keep = priority_list_update().Contains(priority_) && - priority_ <= xds_policy_->current_priority_; - if (!keep) { - // This check is to make sure we always delete the locality maps from - // the lowest priority even if the closures of the back-to-back timers - // are not run in FIFO order. - // TODO(juanlishen): Eliminate unnecessary maintenance overhead for some - // deactivated locality maps when out-of-order closures are run. - // TODO(juanlishen): Check the timer implementation to see if this - // defense is necessary. - if (priority_ == xds_policy_->LowestPriority()) { - xds_policy_->priorities_.pop_back(); - } else { - gpr_log(GPR_ERROR, - "[xdslb %p] Priority %" PRIu32 - " is not the lowest priority (highest numeric value) but is " - "attempted to be deleted.", - xds_policy(), priority_); - } - } - } - Unref(DEBUG_LOCATION, "LocalityMap+timer"); - GRPC_ERROR_UNREF(error); -} - -void XdsLb::LocalityMap::OnFailoverTimer(void* arg, grpc_error* error) { - LocalityMap* self = static_cast(arg); - GRPC_ERROR_REF(error); // ref owned by lambda - self->xds_policy_->work_serializer()->Run( - [self, error]() { self->OnFailoverTimerLocked(error); }, DEBUG_LOCATION); -} - -void XdsLb::LocalityMap::OnFailoverTimerLocked(grpc_error* error) { - failover_timer_callback_pending_ = false; - if (error == GRPC_ERROR_NONE && !xds_policy_->shutting_down_) { - xds_policy_->FailoverOnConnectionFailureLocked(); - } - Unref(DEBUG_LOCATION, "LocalityMap+OnFailoverTimerLocked"); - GRPC_ERROR_UNREF(error); -} - -// -// XdsLb::LocalityMap::Locality -// - -XdsLb::LocalityMap::Locality::Locality(RefCountedPtr locality_map, - RefCountedPtr name) - : locality_map_(std::move(locality_map)), name_(std::move(name)) { - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, "[xdslb %p] created Locality %p for %s", xds_policy(), - this, name_->AsHumanReadableString()); - } - // Closure Initialization - GRPC_CLOSURE_INIT(&on_delayed_removal_timer_, OnDelayedRemovalTimer, this, - grpc_schedule_on_exec_ctx); - // Initialize locality stats if load reporting is enabled. 
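// Editorial sketch (not part of this diff): UpdateConnectivityStateLocked
// above aggregates the per-locality states with a fixed precedence -- READY
// wins over CONNECTING, which wins over IDLE, which wins over
// TRANSIENT_FAILURE. The standalone function below restates exactly that
// precedence over a plain vector, using a hypothetical enum in place of
// grpc_connectivity_state.
#include <vector>

enum class ChildState { kReady, kConnecting, kIdle, kTransientFailure };

ChildState AggregateState(const std::vector<ChildState>& children) {
  bool connecting = false;
  bool idle = false;
  for (ChildState s : children) {
    if (s == ChildState::kReady) return ChildState::kReady;
    if (s == ChildState::kConnecting) connecting = true;
    if (s == ChildState::kIdle) idle = true;
  }
  if (connecting) return ChildState::kConnecting;
  if (idle) return ChildState::kIdle;
  return ChildState::kTransientFailure;
}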
- UpdateLocalityStats(); -} - -XdsLb::LocalityMap::Locality::~Locality() { - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, "[xdslb %p] Locality %p %s: destroying locality", - xds_policy(), this, name_->AsHumanReadableString()); - } - locality_map_.reset(DEBUG_LOCATION, "Locality"); -} - -void XdsLb::LocalityMap::Locality::UpdateLocalityStats() { - stats_.reset(); - if (xds_policy()->config_->lrs_load_reporting_server_name().has_value()) { - stats_ = xds_policy()->xds_client()->AddClusterLocalityStats( - xds_policy()->config_->lrs_load_reporting_server_name().value(), - // TODO(roth): We currently hard-code the assumption that - // cluster name and EDS service name are the same. Fix this - // as part of refectoring this LB policy. - xds_policy()->eds_service_name(), xds_policy()->eds_service_name(), - name_); - } -} - -grpc_channel_args* XdsLb::LocalityMap::Locality::CreateChildPolicyArgsLocked( - const grpc_channel_args* args_in) { - const grpc_arg args_to_add[] = { - // A channel arg indicating if the target is a backend inferred from a - // grpclb load balancer. - grpc_channel_arg_integer_create( - const_cast(GRPC_ARG_ADDRESS_IS_BACKEND_FROM_XDS_LOAD_BALANCER), - 1), - // Inhibit client-side health checking, since the balancer does - // this for us. - grpc_channel_arg_integer_create( - const_cast(GRPC_ARG_INHIBIT_HEALTH_CHECKING), 1), - }; - return grpc_channel_args_copy_and_add(args_in, args_to_add, - GPR_ARRAY_SIZE(args_to_add)); -} - -OrphanablePtr -XdsLb::LocalityMap::Locality::CreateChildPolicyLocked( - const grpc_channel_args* args) { - LoadBalancingPolicy::Args lb_policy_args; - lb_policy_args.work_serializer = xds_policy()->work_serializer(); - lb_policy_args.args = args; - lb_policy_args.channel_control_helper = - absl::make_unique(this->Ref(DEBUG_LOCATION, "Helper")); - OrphanablePtr lb_policy = - MakeOrphanable(std::move(lb_policy_args), - &grpc_lb_xds_trace); - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, - "[xdslb %p] Locality %p %s: Created new child policy handler (%p)", - xds_policy(), this, name_->AsHumanReadableString(), - lb_policy.get()); - } - // Add the xDS's interested_parties pollset_set to that of the newly created - // child policy. This will make the child policy progress upon activity on - // xDS LB, which in turn is tied to the application's call. - grpc_pollset_set_add_pollset_set(lb_policy->interested_parties(), - xds_policy()->interested_parties()); - return lb_policy; -} - -void XdsLb::LocalityMap::Locality::UpdateLocked(uint32_t locality_weight, - ServerAddressList serverlist, - bool update_locality_stats) { - if (xds_policy()->shutting_down_) return; - // Update locality weight. - weight_ = locality_weight; - if (delayed_removal_timer_callback_pending_) { - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, "[xdslb %p] Locality %p %s: reactivating", xds_policy(), - this, name_->AsHumanReadableString()); - } - grpc_timer_cancel(&delayed_removal_timer_); - } - // Update locality stats. - if (update_locality_stats) UpdateLocalityStats(); - // Construct update args. - UpdateArgs update_args; - update_args.addresses = std::move(serverlist); - update_args.config = xds_policy()->config_->child_policy(); - update_args.args = CreateChildPolicyArgsLocked(xds_policy()->args_); - // Create child policy if needed. - if (child_policy_ == nullptr) { - child_policy_ = CreateChildPolicyLocked(update_args.args); - GPR_ASSERT(child_policy_ != nullptr); - } - // Update the policy. 
- if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, - "[xdslb %p] Locality %p %s: Updating child policy handler %p", - xds_policy(), this, name_->AsHumanReadableString(), - child_policy_.get()); - } - child_policy_->UpdateLocked(std::move(update_args)); -} - -void XdsLb::LocalityMap::Locality::ShutdownLocked() { - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, "[xdslb %p] Locality %p %s: shutting down locality", - xds_policy(), this, name_->AsHumanReadableString()); - } - stats_.reset(); - // Remove the child policy's interested_parties pollset_set from the - // xDS policy. - grpc_pollset_set_del_pollset_set(child_policy_->interested_parties(), - xds_policy()->interested_parties()); - child_policy_.reset(); - // Drop our ref to the child's picker, in case it's holding a ref to - // the child. - load_reporting_picker_.reset(); - picker_wrapper_.reset(); - if (delayed_removal_timer_callback_pending_) { - grpc_timer_cancel(&delayed_removal_timer_); - } - shutdown_ = true; -} - -void XdsLb::LocalityMap::Locality::ResetBackoffLocked() { - child_policy_->ResetBackoffLocked(); -} - -void XdsLb::LocalityMap::Locality::Orphan() { - ShutdownLocked(); - Unref(); -} - -void XdsLb::LocalityMap::Locality::DeactivateLocked() { - // If already deactivated, don't do that again. - if (weight_ == 0) return; - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, "[xdslb %p] Locality %p %s: deactivating", xds_policy(), - this, name_->AsHumanReadableString()); - } - // Set the locality weight to 0 so that future xds picker won't contain this - // locality. - weight_ = 0; - // Start a timer to delete the locality. - Ref(DEBUG_LOCATION, "Locality+timer").release(); - grpc_timer_init( - &delayed_removal_timer_, - ExecCtx::Get()->Now() + xds_policy()->locality_retention_interval_ms_, - &on_delayed_removal_timer_); - delayed_removal_timer_callback_pending_ = true; -} - -void XdsLb::LocalityMap::Locality::OnDelayedRemovalTimer(void* arg, - grpc_error* error) { - Locality* self = static_cast(arg); - GRPC_ERROR_REF(error); // ref owned by lambda - self->xds_policy()->work_serializer()->Run( - [self, error]() { self->OnDelayedRemovalTimerLocked(error); }, - DEBUG_LOCATION); -} - -void XdsLb::LocalityMap::Locality::OnDelayedRemovalTimerLocked( - grpc_error* error) { - delayed_removal_timer_callback_pending_ = false; - if (error == GRPC_ERROR_NONE && !shutdown_ && weight_ == 0) { - locality_map_->localities_.erase(name_); - } - Unref(DEBUG_LOCATION, "Locality+timer"); - GRPC_ERROR_UNREF(error); -} - -// -// XdsLb::LocalityMap::Locality::Helper -// - -RefCountedPtr -XdsLb::LocalityMap::Locality::Helper::CreateSubchannel( - const grpc_channel_args& args) { - if (locality_->xds_policy()->shutting_down_) return nullptr; - return locality_->xds_policy()->channel_control_helper()->CreateSubchannel( - args); -} - -void XdsLb::LocalityMap::Locality::Helper::UpdateState( - grpc_connectivity_state state, std::unique_ptr picker) { - if (locality_->xds_policy()->shutting_down_) return; - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { - gpr_log(GPR_INFO, - "[xdslb %p helper %p] child policy handler %p reports state=%s", - locality_->xds_policy(), this, locality_->child_policy_.get(), - ConnectivityStateName(state)); - } - // Cache the state and picker in the locality. - locality_->connectivity_state_ = state; - locality_->picker_wrapper_ = - MakeRefCounted(std::move(picker)); - // Notify the locality map. 
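// Editorial sketch (not part of this diff): Locality::DeactivateLocked and
// OnDelayedRemovalTimerLocked above implement a retention scheme -- a
// deactivated locality drops to weight 0 and is erased only once the
// retention interval has elapsed and it is still unused, so a quickly
// returning locality can be reactivated cheaply. The sketch below mirrors
// that idea with std::chrono and hypothetical types; the real code drives
// this with grpc_timer and cancels the timer on reactivation.
#include <chrono>
#include <cstdint>
#include <map>
#include <string>

using Clock = std::chrono::steady_clock;

struct LocalityEntrySketch {
  uint32_t weight = 0;
  bool deactivated = false;
  Clock::time_point removal_deadline{};
};

void Deactivate(LocalityEntrySketch* entry,
                std::chrono::milliseconds retention_interval) {
  if (entry->deactivated) return;  // already scheduled for removal
  entry->weight = 0;               // future pickers will skip it
  entry->deactivated = true;
  entry->removal_deadline = Clock::now() + retention_interval;
}

void MaybeRemove(std::map<std::string, LocalityEntrySketch>* localities,
                 const std::string& name) {
  auto it = localities->find(name);
  if (it == localities->end()) return;
  const LocalityEntrySketch& entry = it->second;
  // Erase only if still deactivated (weight 0) once the deadline passes,
  // mirroring the weight_ == 0 check in OnDelayedRemovalTimerLocked.
  if (entry.deactivated && entry.weight == 0 &&
      Clock::now() >= entry.removal_deadline) {
    localities->erase(it);
  }
}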
- locality_->locality_map_->OnLocalityStateUpdateLocked(); -} - -void XdsLb::LocalityMap::Locality::Helper::AddTraceEvent(TraceSeverity severity, - StringView message) { - if (locality_->xds_policy()->shutting_down_) return; - locality_->xds_policy()->channel_control_helper()->AddTraceEvent(severity, - message); -} - -// -// factory -// - -class XdsFactory : public LoadBalancingPolicyFactory { - public: - OrphanablePtr CreateLoadBalancingPolicy( - LoadBalancingPolicy::Args args) const override { - return MakeOrphanable(std::move(args), &grpc_lb_xds_trace); - } - - const char* name() const override { return kXds; } - - RefCountedPtr ParseLoadBalancingConfig( - const Json& json, grpc_error** error) const override { - GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE); - if (json.type() == Json::Type::JSON_NULL) { - // xds was mentioned as a policy in the deprecated loadBalancingPolicy - // field or in the client API. - *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:loadBalancingPolicy error:xds policy requires configuration. " - "Please use loadBalancingConfig field of service config instead."); - return nullptr; - } - std::vector error_list; - // Child policy. - Json json_tmp; - const Json* child_policy_json; - auto it = json.object_value().find("childPolicy"); - if (it == json.object_value().end()) { - json_tmp = Json::Array{Json::Object{ - {"round_robin", Json::Object()}, - }}; - child_policy_json = &json_tmp; - } else { - child_policy_json = &it->second; - } - grpc_error* parse_error = GRPC_ERROR_NONE; - RefCountedPtr child_policy = - LoadBalancingPolicyRegistry::ParseLoadBalancingConfig( - *child_policy_json, &parse_error); - if (child_policy == nullptr) { - GPR_DEBUG_ASSERT(parse_error != GRPC_ERROR_NONE); - std::vector child_errors; - child_errors.push_back(parse_error); - error_list.push_back( - GRPC_ERROR_CREATE_FROM_VECTOR("field:childPolicy", &child_errors)); - } - // Fallback policy. - const Json* fallback_policy_json; - it = json.object_value().find("fallbackPolicy"); - if (it == json.object_value().end()) { - json_tmp = Json::Array{Json::Object{ - {"round_robin", Json::Object()}, - }}; - fallback_policy_json = &json_tmp; - } else { - fallback_policy_json = &it->second; - } - RefCountedPtr fallback_policy = - LoadBalancingPolicyRegistry::ParseLoadBalancingConfig( - *fallback_policy_json, &parse_error); - if (fallback_policy == nullptr) { - GPR_DEBUG_ASSERT(parse_error != GRPC_ERROR_NONE); - std::vector child_errors; - child_errors.push_back(parse_error); - error_list.push_back( - GRPC_ERROR_CREATE_FROM_VECTOR("field:fallbackPolicy", &child_errors)); - } - // EDS service name. - const char* eds_service_name = nullptr; - it = json.object_value().find("edsServiceName"); - if (it != json.object_value().end()) { - if (it->second.type() != Json::Type::STRING) { - error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:edsServiceName error:type should be string")); - } else { - eds_service_name = it->second.string_value().c_str(); - } - } - // LRS load reporting server name. 
- const char* lrs_load_reporting_server_name = nullptr; - it = json.object_value().find("lrsLoadReportingServerName"); - if (it != json.object_value().end()) { - if (it->second.type() != Json::Type::STRING) { - error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:lrsLoadReportingServerName error:type should be string")); - } else { - lrs_load_reporting_server_name = it->second.string_value().c_str(); - } - } - if (error_list.empty()) { - absl::optional optional_lrs_load_reporting_server_name; - if (lrs_load_reporting_server_name != nullptr) { - optional_lrs_load_reporting_server_name.emplace( - std::string(lrs_load_reporting_server_name)); - } - return MakeRefCounted( - std::move(child_policy), std::move(fallback_policy), - eds_service_name == nullptr ? "" : eds_service_name, - std::move(optional_lrs_load_reporting_server_name)); - } else { - *error = GRPC_ERROR_CREATE_FROM_VECTOR("Xds Parser", &error_list); - return nullptr; - } - } - - private: - class XdsChildHandler : public ChildPolicyHandler { - public: - XdsChildHandler(Args args, TraceFlag* tracer) - : ChildPolicyHandler(std::move(args), tracer) {} - - bool ConfigChangeRequiresNewPolicyInstance( - LoadBalancingPolicy::Config* old_config, - LoadBalancingPolicy::Config* new_config) const override { - GPR_ASSERT(old_config->name() == kXds); - GPR_ASSERT(new_config->name() == kXds); - XdsConfig* old_xds_config = static_cast(old_config); - XdsConfig* new_xds_config = static_cast(new_config); - const char* old_eds_service_name = - old_xds_config->eds_service_name() == nullptr - ? "" - : old_xds_config->eds_service_name(); - const char* new_eds_service_name = - new_xds_config->eds_service_name() == nullptr - ? "" - : new_xds_config->eds_service_name(); - return strcmp(old_eds_service_name, new_eds_service_name) != 0; - } - - OrphanablePtr CreateLoadBalancingPolicy( - const char* name, LoadBalancingPolicy::Args args) const override { - return MakeOrphanable(std::move(args)); - } - }; -}; - -} // namespace - -} // namespace grpc_core - -// -// Plugin registration -// - -void grpc_lb_policy_xds_init() { - grpc_core::LoadBalancingPolicyRegistry::Builder:: - RegisterLoadBalancingPolicyFactory( - absl::make_unique()); -} - -void grpc_lb_policy_xds_shutdown() {} diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds.h b/src/core/ext/filters/client_channel/lb_policy/xds/xds.h index 13d3435da34..1de3472a3a0 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/xds.h +++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds.h @@ -29,5 +29,4 @@ #define GRPC_ARG_ADDRESS_IS_BACKEND_FROM_XDS_LOAD_BALANCER \ "grpc.address_is_backend_from_xds_load_balancer" -#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_H \ - */ +#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_H */ diff --git a/src/core/ext/filters/client_channel/lb_policy_registry.cc b/src/core/ext/filters/client_channel/lb_policy_registry.cc index 79e5f2f4926..498b158900e 100644 --- a/src/core/ext/filters/client_channel/lb_policy_registry.cc +++ b/src/core/ext/filters/client_channel/lb_policy_registry.cc @@ -35,6 +35,8 @@ class RegistryState { void RegisterLoadBalancingPolicyFactory( std::unique_ptr factory) { + gpr_log(GPR_DEBUG, "registering LB policy factory for \"%s\"", + factory->name()); for (size_t i = 0; i < factories_.size(); ++i) { GPR_ASSERT(strcmp(factories_[i]->name(), factory->name()) != 0); } diff --git a/src/core/ext/filters/client_channel/resolver.h b/src/core/ext/filters/client_channel/resolver.h index 
c537796718e..de941a063b4 100644 --- a/src/core/ext/filters/client_channel/resolver.h +++ b/src/core/ext/filters/client_channel/resolver.h @@ -135,8 +135,8 @@ class Resolver : public InternallyRefCounted { ResultHandler* result_handler() const { return result_handler_.get(); } private: - std::unique_ptr result_handler_; std::shared_ptr work_serializer_; + std::unique_ptr result_handler_; }; } // namespace grpc_core diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc index 032df041297..de49ac76f6d 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc @@ -30,6 +30,7 @@ #include #include "src/core/ext/filters/client_channel/http_connect_handshaker.h" +#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h" #include "src/core/ext/filters/client_channel/lb_policy_registry.h" #include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" #include "src/core/ext/filters/client_channel/resolver/dns/dns_resolver_selection.h" @@ -107,8 +108,10 @@ class AresDnsResolver : public Resolver { grpc_millis last_resolution_timestamp_ = -1; /// retry backoff state BackOff backoff_; - /// currently resolving addresses + /// currently resolving backend addresses std::unique_ptr addresses_; + /// currently resolving balancer addresses + std::unique_ptr balancer_addresses_; /// currently resolving service config char* service_config_json_ = nullptr; // has shutdown been initiated @@ -332,9 +335,11 @@ void AresDnsResolver::OnResolvedLocked(grpc_error* error) { GRPC_ERROR_UNREF(error); return; } - if (addresses_ != nullptr) { + if (addresses_ != nullptr || balancer_addresses_ != nullptr) { Result result; - result.addresses = std::move(*addresses_); + if (addresses_ != nullptr) { + result.addresses = std::move(*addresses_); + } if (service_config_json_ != nullptr) { std::string service_config_string = ChooseServiceConfig( service_config_json_, &result.service_config_error); @@ -347,9 +352,16 @@ void AresDnsResolver::OnResolvedLocked(grpc_error* error) { service_config_string, &result.service_config_error); } } - result.args = grpc_channel_args_copy(channel_args_); + InlinedVector new_args; + if (balancer_addresses_ != nullptr) { + new_args.push_back( + CreateGrpclbBalancerAddressesArg(balancer_addresses_.get())); + } + result.args = grpc_channel_args_copy_and_add(channel_args_, new_args.data(), + new_args.size()); result_handler()->ReturnResult(std::move(result)); addresses_.reset(); + balancer_addresses_.reset(); // Reset backoff state so that we start from the beginning when the // next request gets triggered. backoff_.Reset(); @@ -423,7 +435,8 @@ void AresDnsResolver::StartResolvingLocked() { service_config_json_ = nullptr; pending_request_ = grpc_dns_lookup_ares_locked( dns_server_, name_to_resolve_, kDefaultPort, interested_parties_, - &on_resolved_, &addresses_, enable_srv_queries_ /* check_grpclb */, + &on_resolved_, &addresses_, + enable_srv_queries_ ? &balancer_addresses_ : nullptr, request_service_config_ ? 
&service_config_json_ : nullptr, query_timeout_ms_, work_serializer()); last_resolution_timestamp_ = grpc_core::ExecCtx::Get()->Now(); diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc index 426f5748f5b..d4987f5ad20 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc @@ -699,7 +699,7 @@ struct SockToPolledFdEntry { * with a GrpcPolledFdWindows factory and event driver */ class SockToPolledFdMap { public: - SockToPolledFdMap(std::shared_ptr work_serializer) + explicit SockToPolledFdMap(std::shared_ptr work_serializer) : work_serializer_(std::move(work_serializer)) {} ~SockToPolledFdMap() { GPR_ASSERT(head_ == nullptr); } @@ -830,7 +830,7 @@ const struct ares_socket_functions custom_ares_sock_funcs = { so that c-ares can close it via usual socket teardown. */ class GrpcPolledFdWindowsWrapper : public GrpcPolledFd { public: - GrpcPolledFdWindowsWrapper(GrpcPolledFdWindows* wrapped) + explicit GrpcPolledFdWindowsWrapper(GrpcPolledFdWindows* wrapped) : wrapped_(wrapped) {} ~GrpcPolledFdWindowsWrapper() {} @@ -863,8 +863,9 @@ class GrpcPolledFdWindowsWrapper : public GrpcPolledFd { class GrpcPolledFdFactoryWindows : public GrpcPolledFdFactory { public: - GrpcPolledFdFactoryWindows(std::shared_ptr work_serializer) - : sock_to_polled_fd_map_(work_serializer) {} + explicit GrpcPolledFdFactoryWindows( + std::shared_ptr work_serializer) + : sock_to_polled_fd_map_(std::move(work_serializer)) {} GrpcPolledFd* NewGrpcPolledFdLocked( ares_socket_t as, grpc_pollset_set* driver_pollset_set, diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc index c167981effa..54c60072654 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc @@ -33,6 +33,7 @@ #include #include +#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h" #include "src/core/ext/filters/client_channel/parse_address.h" #include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h" #include "src/core/lib/gpr/string.h" @@ -59,6 +60,8 @@ struct grpc_ares_request { grpc_closure* on_done; /** the pointer to receive the resolved addresses */ std::unique_ptr* addresses_out; + /** the pointer to receive the resolved balancer addresses */ + std::unique_ptr* balancer_addresses_out; /** the pointer to receive the service config in JSON */ char** service_config_json_out; /** the evernt driver used by this request */ @@ -83,25 +86,32 @@ typedef struct grpc_ares_hostbyname_request { bool is_balancer; } grpc_ares_hostbyname_request; -static void log_address_sorting_list(const ServerAddressList& addresses, +static void log_address_sorting_list(const grpc_ares_request* r, + const ServerAddressList& addresses, const char* input_output_str) { for (size_t i = 0; i < addresses.size(); i++) { char* addr_str; if (grpc_sockaddr_to_string(&addr_str, &addresses[i].address(), true)) { - gpr_log(GPR_INFO, "c-ares address sorting: %s[%" PRIuPTR "]=%s", - input_output_str, i, addr_str); + gpr_log( + GPR_INFO, + "(c-ares resolver) request:%p c-ares address sorting: %s[%" PRIuPTR + "]=%s", + r, input_output_str, 
i, addr_str); gpr_free(addr_str); } else { - gpr_log(GPR_INFO, - "c-ares address sorting: %s[%" PRIuPTR "]=", - input_output_str, i); + gpr_log( + GPR_INFO, + "(c-ares resolver) request:%p c-ares address sorting: %s[%" PRIuPTR + "]=", + r, input_output_str, i); } } } -void grpc_cares_wrapper_address_sorting_sort(ServerAddressList* addresses) { +void grpc_cares_wrapper_address_sorting_sort(const grpc_ares_request* r, + ServerAddressList* addresses) { if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_cares_address_sorting)) { - log_address_sorting_list(*addresses, "input"); + log_address_sorting_list(r, *addresses, "input"); } address_sorting_sortable* sortables = (address_sorting_sortable*)gpr_zalloc( sizeof(address_sorting_sortable) * addresses->size()); @@ -120,7 +130,7 @@ void grpc_cares_wrapper_address_sorting_sort(ServerAddressList* addresses) { gpr_free(sortables); *addresses = std::move(sorted); if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_cares_address_sorting)) { - log_address_sorting_list(*addresses, "output"); + log_address_sorting_list(r, *addresses, "output"); } } @@ -141,7 +151,7 @@ void grpc_ares_complete_request_locked(grpc_ares_request* r) { r->ev_driver = nullptr; ServerAddressList* addresses = r->addresses_out->get(); if (addresses != nullptr) { - grpc_cares_wrapper_address_sorting_sort(addresses); + grpc_cares_wrapper_address_sorting_sort(r, addresses); GRPC_ERROR_UNREF(r->error); r->error = GRPC_ERROR_NONE; // TODO(apolcyn): allow c-ares to return a service config @@ -183,17 +193,17 @@ static void on_hostbyname_done_locked(void* arg, int status, int /*timeouts*/, GRPC_CARES_TRACE_LOG( "request:%p on_hostbyname_done_locked host=%s ARES_SUCCESS", r, hr->host); - if (*r->addresses_out == nullptr) { - *r->addresses_out = absl::make_unique(); + std::unique_ptr* address_list_ptr = + hr->is_balancer ? 
r->balancer_addresses_out : r->addresses_out; + if (*address_list_ptr == nullptr) { + *address_list_ptr = absl::make_unique(); } - ServerAddressList& addresses = **r->addresses_out; + ServerAddressList& addresses = **address_list_ptr; for (size_t i = 0; hostent->h_addr_list[i] != nullptr; ++i) { - grpc_core::InlinedVector args_to_add; + grpc_core::InlinedVector args_to_add; if (hr->is_balancer) { - args_to_add.emplace_back(grpc_channel_arg_integer_create( - const_cast(GRPC_ARG_ADDRESS_IS_BALANCER), 1)); - args_to_add.emplace_back(grpc_channel_arg_string_create( - const_cast(GRPC_ARG_ADDRESS_BALANCER_NAME), hr->host)); + args_to_add.emplace_back( + grpc_core::CreateGrpclbBalancerNameArg(hr->host)); } grpc_channel_args* args = grpc_channel_args_copy_and_add( nullptr, args_to_add.data(), args_to_add.size()); @@ -349,7 +359,7 @@ done: void grpc_dns_lookup_ares_continue_after_check_localhost_and_ip_literals_locked( grpc_ares_request* r, const char* dns_server, const char* name, const char* default_port, grpc_pollset_set* interested_parties, - bool check_grpclb, int query_timeout_ms, + int query_timeout_ms, std::shared_ptr work_serializer) { grpc_error* error = GRPC_ERROR_NONE; grpc_ares_hostbyname_request* hr = nullptr; @@ -426,7 +436,7 @@ void grpc_dns_lookup_ares_continue_after_check_localhost_and_ip_literals_locked( /*is_balancer=*/false); ares_gethostbyname(*channel, hr->host, AF_INET, on_hostbyname_done_locked, hr); - if (check_grpclb) { + if (r->balancer_addresses_out != nullptr) { /* Query the SRV record */ grpc_ares_request_ref_locked(r); char* service_name; @@ -521,7 +531,7 @@ static bool target_matches_localhost(const char* name) { #ifdef GRPC_ARES_RESOLVE_LOCALHOST_MANUALLY static bool inner_maybe_resolve_localhost_manually_locked( - const char* name, const char* default_port, + const grpc_ares_request* r, const char* name, const char* default_port, std::unique_ptr* addrs, grpc_core::UniquePtr* host, grpc_core::UniquePtr* port) { grpc_core::SplitHostPort(name, host, port); @@ -564,23 +574,24 @@ static bool inner_maybe_resolve_localhost_manually_locked( (*addrs)->emplace_back(&ipv4_loopback_addr, sizeof(ipv4_loopback_addr), nullptr /* args */); // Let the address sorter figure out which one should be tried first. 
- grpc_cares_wrapper_address_sorting_sort(addrs->get()); + grpc_cares_wrapper_address_sorting_sort(r, addrs->get()); return true; } return false; } static bool grpc_ares_maybe_resolve_localhost_manually_locked( - const char* name, const char* default_port, + const grpc_ares_request* r, const char* name, const char* default_port, std::unique_ptr* addrs) { grpc_core::UniquePtr host; grpc_core::UniquePtr port; - return inner_maybe_resolve_localhost_manually_locked(name, default_port, + return inner_maybe_resolve_localhost_manually_locked(r, name, default_port, addrs, &host, &port); } #else /* GRPC_ARES_RESOLVE_LOCALHOST_MANUALLY */ static bool grpc_ares_maybe_resolve_localhost_manually_locked( - const char* /*name*/, const char* /*default_port*/, + const grpc_ares_request* r, const char* /*name*/, + const char* /*default_port*/, std::unique_ptr* /*addrs*/) { return false; } @@ -589,7 +600,8 @@ static bool grpc_ares_maybe_resolve_localhost_manually_locked( static grpc_ares_request* grpc_dns_lookup_ares_locked_impl( const char* dns_server, const char* name, const char* default_port, grpc_pollset_set* interested_parties, grpc_closure* on_done, - std::unique_ptr* addrs, bool check_grpclb, + std::unique_ptr* addrs, + std::unique_ptr* balancer_addrs, char** service_config_json, int query_timeout_ms, std::shared_ptr work_serializer) { grpc_ares_request* r = @@ -597,6 +609,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl( r->ev_driver = nullptr; r->on_done = on_done; r->addresses_out = addrs; + r->balancer_addresses_out = balancer_addrs; r->service_config_json_out = service_config_json; r->error = GRPC_ERROR_NONE; r->pending_queries = 0; @@ -610,7 +623,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl( return r; } // Early out if the target is localhost and we're on Windows. - if (grpc_ares_maybe_resolve_localhost_manually_locked(name, default_port, + if (grpc_ares_maybe_resolve_localhost_manually_locked(r, name, default_port, addrs)) { grpc_ares_complete_request_locked(r); return r; @@ -619,20 +632,21 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl( // as to cut down on lookups over the network, especially in tests: // https://github.com/grpc/proposal/pull/79 if (target_matches_localhost(name)) { - check_grpclb = false; + r->balancer_addresses_out = nullptr; r->service_config_json_out = nullptr; } // Look up name using c-ares lib. 
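// Editorial sketch (not part of this diff): the c-ares changes above replace
// the boolean check_grpclb parameter with an optional balancer-address
// out-parameter -- a caller opts into the SRV/balancer lookup by passing a
// non-null pointer (as dns_resolver_ares.cc now does when SRV queries are
// enabled) and passes nullptr otherwise. The fragment below illustrates that
// calling convention with hypothetical, std-only types; it is not the actual
// resolver API.
#include <memory>
#include <string>
#include <vector>

struct AddressSketch {  // stand-in for grpc_core::ServerAddress
  std::string addr;
};
using AddressListSketch = std::vector<AddressSketch>;

void LookupSketch(const std::string& name,
                  std::unique_ptr<AddressListSketch>* addresses_out,
                  std::unique_ptr<AddressListSketch>* balancer_addresses_out) {
  *addresses_out = std::make_unique<AddressListSketch>();
  (*addresses_out)->push_back({name + ":443"});
  if (balancer_addresses_out != nullptr) {
    // Only do the extra (SRV-style) lookup when the caller asked for it.
    *balancer_addresses_out = std::make_unique<AddressListSketch>();
  }
}

// Usage: pass &balancer_addresses to request balancer results, or nullptr to
// skip them -- the same choice the resolver makes with enable_srv_queries_.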
grpc_dns_lookup_ares_continue_after_check_localhost_and_ip_literals_locked( - r, dns_server, name, default_port, interested_parties, check_grpclb, - query_timeout_ms, std::move(work_serializer)); + r, dns_server, name, default_port, interested_parties, query_timeout_ms, + std::move(work_serializer)); return r; } grpc_ares_request* (*grpc_dns_lookup_ares_locked)( const char* dns_server, const char* name, const char* default_port, grpc_pollset_set* interested_parties, grpc_closure* on_done, - std::unique_ptr* addrs, bool check_grpclb, + std::unique_ptr* addrs, + std::unique_ptr* balancer_addrs, char** service_config_json, int query_timeout_ms, std::shared_ptr work_serializer) = grpc_dns_lookup_ares_locked_impl; @@ -710,15 +724,12 @@ static void on_dns_lookup_done_locked(grpc_resolve_address_ares_request* r, static_cast(gpr_zalloc( sizeof(grpc_resolved_address) * (*resolved_addresses)->naddrs)); for (size_t i = 0; i < (*resolved_addresses)->naddrs; ++i) { - GPR_ASSERT(!(*r->addresses)[i].IsBalancer()); memcpy(&(*resolved_addresses)->addrs[i], &(*r->addresses)[i].address(), sizeof(grpc_resolved_address)); } } - grpc_core::ExecCtx::Run(DEBUG_LOCATION, r->on_resolve_address_done, - GRPC_ERROR_REF(error)); + grpc_core::ExecCtx::Run(DEBUG_LOCATION, r->on_resolve_address_done, error); delete r; - GRPC_ERROR_UNREF(error); } static void on_dns_lookup_done(void* arg, grpc_error* error) { @@ -736,9 +747,9 @@ static void grpc_resolve_address_invoke_dns_lookup_ares_locked(void* arg) { grpc_schedule_on_exec_ctx); r->ares_request = grpc_dns_lookup_ares_locked( nullptr /* dns_server */, r->name, r->default_port, r->interested_parties, - &r->on_dns_lookup_done_locked, &r->addresses, false /* check_grpclb */, - nullptr /* service_config_json */, GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS, - r->work_serializer); + &r->on_dns_lookup_done_locked, &r->addresses, + nullptr /* balancer_addresses */, nullptr /* service_config_json */, + GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS, r->work_serializer); } static void grpc_resolve_address_ares_impl(const char* name, diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h index 07bd88fb413..ddce754934f 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h @@ -64,7 +64,8 @@ extern void (*grpc_resolve_address_ares)(const char* name, extern grpc_ares_request* (*grpc_dns_lookup_ares_locked)( const char* dns_server, const char* name, const char* default_port, grpc_pollset_set* interested_parties, grpc_closure* on_done, - std::unique_ptr* addresses, bool check_grpclb, + std::unique_ptr* addresses, + std::unique_ptr* balancer_addresses, char** service_config_json, int query_timeout_ms, std::shared_ptr work_serializer); @@ -90,7 +91,7 @@ bool grpc_ares_query_ipv6(); /* Sorts destinations in lb_addrs according to RFC 6724. 
*/ void grpc_cares_wrapper_address_sorting_sort( - grpc_core::ServerAddressList* addresses); + const grpc_ares_request* request, grpc_core::ServerAddressList* addresses); #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_WRAPPER_H \ */ diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc index bf93f86f4dd..2049f371184 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc @@ -29,7 +29,8 @@ struct grpc_ares_request { static grpc_ares_request* grpc_dns_lookup_ares_locked_impl( const char* dns_server, const char* name, const char* default_port, grpc_pollset_set* interested_parties, grpc_closure* on_done, - std::unique_ptr* addrs, bool check_grpclb, + std::unique_ptr* addrs, + std::unique_ptr* balancer_addrs, char** service_config_json, int query_timeout_ms, std::shared_ptr work_serializer) { return NULL; @@ -38,7 +39,8 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl( grpc_ares_request* (*grpc_dns_lookup_ares_locked)( const char* dns_server, const char* name, const char* default_port, grpc_pollset_set* interested_parties, grpc_closure* on_done, - std::unique_ptr* addrs, bool check_grpclb, + std::unique_ptr* addrs, + std::unique_ptr* balancer_addrs, char** service_config_json, int query_timeout_ms, std::shared_ptr work_serializer) = grpc_dns_lookup_ares_locked_impl; diff --git a/src/core/ext/filters/client_channel/server_address.cc b/src/core/ext/filters/client_channel/server_address.cc index d46896b754b..93d361a8154 100644 --- a/src/core/ext/filters/client_channel/server_address.cc +++ b/src/core/ext/filters/client_channel/server_address.cc @@ -37,15 +37,12 @@ ServerAddress::ServerAddress(const void* address, size_t address_len, address_.len = static_cast(address_len); } -bool ServerAddress::operator==(const ServerAddress& other) const { - return address_.len == other.address_.len && - memcmp(address_.addr, other.address_.addr, address_.len) == 0 && - grpc_channel_args_compare(args_, other.args_) == 0; -} - -bool ServerAddress::IsBalancer() const { - return grpc_channel_arg_get_bool( - grpc_channel_args_find(args_, GRPC_ARG_ADDRESS_IS_BALANCER), false); +int ServerAddress::Cmp(const ServerAddress& other) const { + if (address_.len > other.address_.len) return 1; + if (address_.len < other.address_.len) return -1; + int retval = memcmp(address_.addr, other.address_.addr, address_.len); + if (retval != 0) return retval; + return grpc_channel_args_compare(args_, other.args_); } } // namespace grpc_core diff --git a/src/core/ext/filters/client_channel/server_address.h b/src/core/ext/filters/client_channel/server_address.h index acd71358810..10f49f2f344 100644 --- a/src/core/ext/filters/client_channel/server_address.h +++ b/src/core/ext/filters/client_channel/server_address.h @@ -25,13 +25,6 @@ #include "src/core/lib/gprpp/inlined_vector.h" #include "src/core/lib/iomgr/resolve_address.h" -// Channel arg key for a bool indicating whether an address is a grpclb -// load balancer (as opposed to a backend). -#define GRPC_ARG_ADDRESS_IS_BALANCER "grpc.address_is_balancer" - -// Channel arg key for a string indicating an address's balancer name. 
-#define GRPC_ARG_ADDRESS_BALANCER_NAME "grpc.address_balancer_name" - namespace grpc_core { // @@ -73,13 +66,13 @@ class ServerAddress { return *this; } - bool operator==(const ServerAddress& other) const; + bool operator==(const ServerAddress& other) const { return Cmp(other) == 0; } + + int Cmp(const ServerAddress& other) const; const grpc_resolved_address& address() const { return address_; } const grpc_channel_args* args() const { return args_; } - bool IsBalancer() const; - private: grpc_resolved_address address_; grpc_channel_args* args_; diff --git a/src/core/ext/filters/client_channel/subchannel.cc b/src/core/ext/filters/client_channel/subchannel.cc index 231c333422d..f350b2ec36a 100644 --- a/src/core/ext/filters/client_channel/subchannel.cc +++ b/src/core/ext/filters/client_channel/subchannel.cc @@ -374,8 +374,8 @@ class Subchannel::AsyncWatcherNotifierLocked { if (state == GRPC_CHANNEL_READY) { connected_subchannel = subchannel->connected_subchannel_; } - watcher_->PushConnectivityStateChange(state, - std::move(connected_subchannel)); + watcher_->PushConnectivityStateChange( + {state, std::move(connected_subchannel)}); ExecCtx::Run( DEBUG_LOCATION, GRPC_CLOSURE_INIT(&closure_, @@ -628,6 +628,21 @@ BackOff::Options ParseArgsForBackoffValues( } // namespace +void Subchannel::ConnectivityStateWatcherInterface::PushConnectivityStateChange( + ConnectivityStateChange state_change) { + MutexLock lock(&mu_); + connectivity_state_queue_.push_back(std::move(state_change)); +} + +Subchannel::ConnectivityStateWatcherInterface::ConnectivityStateChange +Subchannel::ConnectivityStateWatcherInterface::PopConnectivityStateChange() { + MutexLock lock(&mu_); + GPR_ASSERT(!connectivity_state_queue_.empty()); + ConnectivityStateChange state_change = connectivity_state_queue_.front(); + connectivity_state_queue_.pop_front(); + return state_change; +} + Subchannel::Subchannel(SubchannelKey* key, OrphanablePtr connector, const grpc_channel_args* args) diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h index 16926c4818e..9478fa7340c 100644 --- a/src/core/ext/filters/client_channel/subchannel.h +++ b/src/core/ext/filters/client_channel/subchannel.h @@ -180,6 +180,11 @@ class Subchannel { class ConnectivityStateWatcherInterface : public RefCounted { public: + struct ConnectivityStateChange { + grpc_connectivity_state state; + RefCountedPtr connected_subchannel; + }; + virtual ~ConnectivityStateWatcherInterface() = default; // Will be invoked whenever the subchannel's connectivity state @@ -199,40 +204,17 @@ class Subchannel { // TODO(yashkt): This is currently needed to send the state updates in the // right order when asynchronously notifying. This will no longer be // necessary when we have access to EventManager. - void PushConnectivityStateChange( - grpc_connectivity_state state, - RefCountedPtr connected_subchannel) { - MutexLock lock(&mu_); - connectivity_state_queue_.push_back( - std::make_pair(state, std::move(connected_subchannel))); - } - - // Dequeues connectivity state change notifications. If the queue is empty, - // it returns false, otherwise returns true and sets \a state and \a - // connected_subchannel to the popped state change and connected subchannel. 
- bool PopConnectivityStateChange( - grpc_connectivity_state* state, - RefCountedPtr* connected_subchannel) { - MutexLock lock(&mu_); - if (connectivity_state_queue_.empty()) { - return false; - } else { - *state = connectivity_state_queue_.front().first; - *connected_subchannel = - std::move(connectivity_state_queue_.front().second); - connectivity_state_queue_.pop_front(); - return true; - } - } + void PushConnectivityStateChange(ConnectivityStateChange state_change); + + // Dequeues connectivity state change notifications. + ConnectivityStateChange PopConnectivityStateChange(); private: // Keeps track of the updates that the watcher instance must be notified of. // TODO(yashkt): This is currently needed to send the state updates in the // right order when asynchronously notifying. This will no longer be // necessary when we have access to EventManager. - std::deque< - std::pair>> - connectivity_state_queue_; + std::deque connectivity_state_queue_; Mutex mu_; // protects the queue }; diff --git a/src/core/ext/filters/client_channel/xds/xds_api.cc b/src/core/ext/filters/client_channel/xds/xds_api.cc index 5a41d0af32a..4cf5d8c26a2 100644 --- a/src/core/ext/filters/client_channel/xds/xds_api.cc +++ b/src/core/ext/filters/client_channel/xds/xds_api.cc @@ -1310,7 +1310,7 @@ grpc_error* LocalityParse( grpc_error* DropParseAndAppend( const envoy_api_v2_ClusterLoadAssignment_Policy_DropOverload* drop_overload, - XdsApi::DropConfig* drop_config, bool* drop_all) { + XdsApi::DropConfig* drop_config) { // Get the category. upb_strview category = envoy_api_v2_ClusterLoadAssignment_Policy_DropOverload_category( @@ -1341,7 +1341,6 @@ grpc_error* DropParseAndAppend( } // Cap numerator to 1000000. numerator = GPR_MIN(numerator, 1000000); - if (numerator == 1000000) *drop_all = true; drop_config->AddCategory(std::string(category.data, category.size), numerator); return GRPC_ERROR_NONE; @@ -1417,17 +1416,10 @@ grpc_error* EdsResponseParse( policy, &drop_size); for (size_t j = 0; j < drop_size; ++j) { grpc_error* error = - DropParseAndAppend(drop_overload[j], eds_update.drop_config.get(), - &eds_update.drop_all); + DropParseAndAppend(drop_overload[j], eds_update.drop_config.get()); if (error != GRPC_ERROR_NONE) return error; } } - // Validate the update content. - if (eds_update.priority_list_update.empty() && !eds_update.drop_all) { - return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "EDS response doesn't contain any valid " - "locality but doesn't require to drop all calls."); - } eds_update_map->emplace(std::string(cluster_name.data, cluster_name.size), std::move(eds_update)); } diff --git a/src/core/ext/filters/client_channel/xds/xds_api.h b/src/core/ext/filters/client_channel/xds/xds_api.h index ee45f85dbaf..0fc4a186542 100644 --- a/src/core/ext/filters/client_channel/xds/xds_api.h +++ b/src/core/ext/filters/client_channel/xds/xds_api.h @@ -164,6 +164,7 @@ class XdsApi { void AddCategory(std::string name, uint32_t parts_per_million) { drop_category_list_.emplace_back( DropCategory{std::move(name), parts_per_million}); + if (parts_per_million == 1000000) drop_all_ = true; } // The only method invoked from the data plane combiner. 
@@ -173,6 +174,8 @@ class XdsApi { return drop_category_list_; } + bool drop_all() const { return drop_all_; } + bool operator==(const DropConfig& other) const { return drop_category_list_ == other.drop_category_list_; } @@ -180,12 +183,12 @@ class XdsApi { private: DropCategoryList drop_category_list_; + bool drop_all_ = false; }; struct EdsUpdate { PriorityListUpdate priority_list_update; RefCountedPtr drop_config; - bool drop_all = false; }; using EdsUpdateMap = std::map; diff --git a/src/core/ext/filters/client_channel/xds/xds_client.cc b/src/core/ext/filters/client_channel/xds/xds_client.cc index 2e8da882d9e..da9be9c3482 100644 --- a/src/core/ext/filters/client_channel/xds/xds_client.cc +++ b/src/core/ext/filters/client_channel/xds/xds_client.cc @@ -1076,7 +1076,7 @@ void XdsClient::ChannelState::AdsCallState::AcceptEdsUpdate( " drop categories received (drop_all=%d)", xds_client(), eds_update.priority_list_update.size(), eds_update.drop_config->drop_category_list().size(), - eds_update.drop_all); + eds_update.drop_config->drop_all()); for (size_t priority = 0; priority < eds_update.priority_list_update.size(); ++priority) { const auto* locality_map_update = eds_update.priority_list_update.Find( @@ -1381,10 +1381,9 @@ void XdsClient::ChannelState::LrsCallState::Reporter::OnNextReportTimerLocked( next_report_timer_callback_pending_ = false; if (error != GRPC_ERROR_NONE || !IsCurrentReporterOnCall()) { Unref(DEBUG_LOCATION, "Reporter+timer"); - GRPC_ERROR_UNREF(error); - return; + } else { + SendReportLocked(); } - SendReportLocked(); GRPC_ERROR_UNREF(error); } @@ -1464,10 +1463,9 @@ void XdsClient::ChannelState::LrsCallState::Reporter::OnReportDoneLocked( parent_->MaybeStartReportingLocked(); } Unref(DEBUG_LOCATION, "Reporter+report_done"); - GRPC_ERROR_UNREF(error); - return; + } else { + ScheduleNextReportLocked(); } - ScheduleNextReportLocked(); GRPC_ERROR_UNREF(error); } diff --git a/src/core/ext/filters/client_channel/xds/xds_client_stats.h b/src/core/ext/filters/client_channel/xds/xds_client_stats.h index 7c69c7c3675..cbeccd2279d 100644 --- a/src/core/ext/filters/client_channel/xds/xds_client_stats.h +++ b/src/core/ext/filters/client_channel/xds/xds_client_stats.h @@ -42,11 +42,7 @@ class XdsLocalityName : public RefCounted { struct Less { bool operator()(const XdsLocalityName* lhs, const XdsLocalityName* rhs) const { - int cmp_result = lhs->region_.compare(rhs->region_); - if (cmp_result != 0) return cmp_result < 0; - cmp_result = lhs->zone_.compare(rhs->zone_); - if (cmp_result != 0) return cmp_result < 0; - return lhs->sub_zone_.compare(rhs->sub_zone_) < 0; + return lhs->Compare(*rhs) < 0; } bool operator()(const RefCountedPtr& lhs, @@ -65,6 +61,18 @@ class XdsLocalityName : public RefCounted { sub_zone_ == other.sub_zone_; } + bool operator!=(const XdsLocalityName& other) const { + return !(*this == other); + } + + int Compare(const XdsLocalityName& other) const { + int cmp_result = region_.compare(other.region_); + if (cmp_result != 0) return cmp_result; + cmp_result = zone_.compare(other.zone_); + if (cmp_result != 0) return cmp_result; + return sub_zone_.compare(other.sub_zone_); + } + const std::string& region() const { return region_; } const std::string& zone() const { return zone_; } const std::string& sub_zone() const { return sub_zone_; } diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.cc b/src/core/ext/filters/http/message_compress/message_compress_filter.cc index 27f0333bee4..09c914fab07 100644 --- 
a/src/core/ext/filters/http/message_compress/message_compress_filter.cc +++ b/src/core/ext/filters/http/message_compress/message_compress_filter.cc @@ -21,6 +21,8 @@ #include #include +#include "absl/types/optional.h" + #include #include #include @@ -40,94 +42,156 @@ #include "src/core/lib/surface/call.h" #include "src/core/lib/transport/static_metadata.h" -static void start_send_message_batch(void* arg, grpc_error* unused); -static void send_message_on_complete(void* arg, grpc_error* error); -static void on_send_message_next_done(void* arg, grpc_error* error); - namespace { -struct channel_data { +class ChannelData { + public: + explicit ChannelData(grpc_channel_element_args* args) { + // Get the enabled and the default algorithms from channel args. + enabled_compression_algorithms_bitset_ = + grpc_channel_args_compression_algorithm_get_states(args->channel_args); + default_compression_algorithm_ = + grpc_channel_args_get_channel_default_compression_algorithm( + args->channel_args); + // Make sure the default is enabled. + if (!GPR_BITGET(enabled_compression_algorithms_bitset_, + default_compression_algorithm_)) { + const char* name; + GPR_ASSERT(grpc_compression_algorithm_name(default_compression_algorithm_, + &name) == 1); + gpr_log(GPR_ERROR, + "default compression algorithm %s not enabled: switching to none", + name); + default_compression_algorithm_ = GRPC_COMPRESS_NONE; + } + enabled_message_compression_algorithms_bitset_ = + grpc_compression_bitset_to_message_bitset( + enabled_compression_algorithms_bitset_); + enabled_stream_compression_algorithms_bitset_ = + grpc_compression_bitset_to_stream_bitset( + enabled_compression_algorithms_bitset_); + GPR_ASSERT(!args->is_last); + } + + grpc_compression_algorithm default_compression_algorithm() const { + return default_compression_algorithm_; + } + + uint32_t enabled_compression_algorithms_bitset() const { + return enabled_compression_algorithms_bitset_; + } + + uint32_t enabled_message_compression_algorithms_bitset() const { + return enabled_message_compression_algorithms_bitset_; + } + + uint32_t enabled_stream_compression_algorithms_bitset() const { + return enabled_stream_compression_algorithms_bitset_; + } + + private: /** The default, channel-level, compression algorithm */ - grpc_compression_algorithm default_compression_algorithm; + grpc_compression_algorithm default_compression_algorithm_; /** Bitset of enabled compression algorithms */ - uint32_t enabled_compression_algorithms_bitset; + uint32_t enabled_compression_algorithms_bitset_; /** Bitset of enabled message compression algorithms */ - uint32_t enabled_message_compression_algorithms_bitset; + uint32_t enabled_message_compression_algorithms_bitset_; /** Bitset of enabled stream compression algorithms */ - uint32_t enabled_stream_compression_algorithms_bitset; + uint32_t enabled_stream_compression_algorithms_bitset_; }; -struct call_data { - call_data(grpc_call_element* elem, const grpc_call_element_args& args) - : call_combiner(args.call_combiner) { - channel_data* channeld = static_cast(elem->channel_data); +class CallData { + public: + CallData(grpc_call_element* elem, const grpc_call_element_args& args) + : call_combiner_(args.call_combiner) { + ChannelData* channeld = static_cast(elem->channel_data); // The call's message compression algorithm is set to channel's default // setting. It can be overridden later by initial metadata. 
- if (GPR_LIKELY(GPR_BITGET(channeld->enabled_compression_algorithms_bitset, - channeld->default_compression_algorithm))) { - message_compression_algorithm = + if (GPR_LIKELY(GPR_BITGET(channeld->enabled_compression_algorithms_bitset(), + channeld->default_compression_algorithm()))) { + message_compression_algorithm_ = grpc_compression_algorithm_to_message_compression_algorithm( - channeld->default_compression_algorithm); + channeld->default_compression_algorithm()); } - GRPC_CLOSURE_INIT(&start_send_message_batch_in_call_combiner, - start_send_message_batch, elem, - grpc_schedule_on_exec_ctx); + GRPC_CLOSURE_INIT(&start_send_message_batch_in_call_combiner_, + StartSendMessageBatch, elem, grpc_schedule_on_exec_ctx); } - ~call_data() { - if (state_initialized) { - grpc_slice_buffer_destroy_internal(&slices); + ~CallData() { + if (state_initialized_) { + grpc_slice_buffer_destroy_internal(&slices_); } - GRPC_ERROR_UNREF(cancel_error); + GRPC_ERROR_UNREF(cancel_error_); } - grpc_core::CallCombiner* call_combiner; - grpc_message_compression_algorithm message_compression_algorithm = + void CompressStartTransportStreamOpBatch( + grpc_call_element* elem, grpc_transport_stream_op_batch* batch); + + private: + bool SkipMessageCompression(); + void InitializeState(grpc_call_element* elem); + + grpc_error* ProcessSendInitialMetadata(grpc_call_element* elem, + grpc_metadata_batch* initial_metadata); + + // Methods for processing a send_message batch + static void StartSendMessageBatch(void* elem_arg, grpc_error* unused); + static void OnSendMessageNextDone(void* elem_arg, grpc_error* error); + grpc_error* PullSliceFromSendMessage(); + void ContinueReadingSendMessage(grpc_call_element* elem); + void FinishSendMessage(grpc_call_element* elem); + void SendMessageBatchContinue(grpc_call_element* elem); + static void FailSendMessageBatchInCallCombiner(void* calld_arg, + grpc_error* error); + + static void SendMessageOnComplete(void* calld_arg, grpc_error* error); + + grpc_core::CallCombiner* call_combiner_; + grpc_message_compression_algorithm message_compression_algorithm_ = GRPC_MESSAGE_COMPRESS_NONE; - grpc_error* cancel_error = GRPC_ERROR_NONE; - grpc_transport_stream_op_batch* send_message_batch = nullptr; - bool seen_initial_metadata = false; + grpc_error* cancel_error_ = GRPC_ERROR_NONE; + grpc_transport_stream_op_batch* send_message_batch_ = nullptr; + bool seen_initial_metadata_ = false; /* Set to true, if the fields below are initialized. */ - bool state_initialized = false; - grpc_closure start_send_message_batch_in_call_combiner; + bool state_initialized_ = false; + grpc_closure start_send_message_batch_in_call_combiner_; /* The fields below are only initialized when we compress the payload. * Keep them at the bottom of the struct, so they don't pollute the * cache-lines. 
*/ - grpc_linked_mdelem message_compression_algorithm_storage; - grpc_linked_mdelem stream_compression_algorithm_storage; - grpc_linked_mdelem accept_encoding_storage; - grpc_linked_mdelem accept_stream_encoding_storage; - grpc_slice_buffer slices; /**< Buffers up input slices to be compressed */ - grpc_core::ManualConstructor - replacement_stream; - grpc_closure* original_send_message_on_complete; - grpc_closure send_message_on_complete; - grpc_closure on_send_message_next_done; + grpc_linked_mdelem message_compression_algorithm_storage_; + grpc_linked_mdelem stream_compression_algorithm_storage_; + grpc_linked_mdelem accept_encoding_storage_; + grpc_linked_mdelem accept_stream_encoding_storage_; + grpc_slice_buffer slices_; /**< Buffers up input slices to be compressed */ + // Allocate space for the replacement stream + std::aligned_storage::type + replacement_stream_; + grpc_closure* original_send_message_on_complete_ = nullptr; + grpc_closure send_message_on_complete_; + grpc_closure on_send_message_next_done_; }; -} // namespace - // Returns true if we should skip message compression for the current message. -static bool skip_message_compression(grpc_call_element* elem) { - call_data* calld = static_cast(elem->call_data); +bool CallData::SkipMessageCompression() { // If the flags of this message indicate that it shouldn't be compressed, we // skip message compression. uint32_t flags = - calld->send_message_batch->payload->send_message.send_message->flags(); + send_message_batch_->payload->send_message.send_message->flags(); if (flags & (GRPC_WRITE_NO_COMPRESS | GRPC_WRITE_INTERNAL_COMPRESS)) { return true; } // If this call doesn't have any message compression algorithm set, skip // message compression. - return calld->message_compression_algorithm == GRPC_MESSAGE_COMPRESS_NONE; + return message_compression_algorithm_ == GRPC_MESSAGE_COMPRESS_NONE; } // Determines the compression algorithm from the initial metadata and the // channel's default setting. -static grpc_compression_algorithm find_compression_algorithm( - grpc_metadata_batch* initial_metadata, channel_data* channeld) { +grpc_compression_algorithm FindCompressionAlgorithm( + grpc_metadata_batch* initial_metadata, ChannelData* channeld) { if (initial_metadata->idx.named.grpc_internal_encoding_request == nullptr) { - return channeld->default_compression_algorithm; + return channeld->default_compression_algorithm(); } grpc_compression_algorithm compression_algorithm; // Parse the compression algorithm from the initial metadata. @@ -143,7 +207,7 @@ static grpc_compression_algorithm find_compression_algorithm( // enabled. // TODO(juanlishen): Maybe use channel default or abort() if the algorithm // from the initial metadata is disabled. 
- if (GPR_LIKELY(GPR_BITGET(channeld->enabled_compression_algorithms_bitset, + if (GPR_LIKELY(GPR_BITGET(channeld->enabled_compression_algorithms_bitset(), compression_algorithm))) { return compression_algorithm; } @@ -158,30 +222,24 @@ static grpc_compression_algorithm find_compression_algorithm( return GRPC_COMPRESS_NONE; } -static void initialize_state(grpc_call_element* elem, call_data* calld) { - GPR_DEBUG_ASSERT(!calld->state_initialized); - calld->state_initialized = true; - grpc_slice_buffer_init(&calld->slices); - GRPC_CLOSURE_INIT(&calld->send_message_on_complete, - ::send_message_on_complete, elem, +void CallData::InitializeState(grpc_call_element* elem) { + GPR_DEBUG_ASSERT(!state_initialized_); + state_initialized_ = true; + grpc_slice_buffer_init(&slices_); + GRPC_CLOSURE_INIT(&send_message_on_complete_, SendMessageOnComplete, this, grpc_schedule_on_exec_ctx); - GRPC_CLOSURE_INIT(&calld->on_send_message_next_done, - ::on_send_message_next_done, elem, + GRPC_CLOSURE_INIT(&on_send_message_next_done_, OnSendMessageNextDone, elem, grpc_schedule_on_exec_ctx); } -static grpc_error* process_send_initial_metadata( - grpc_call_element* elem, - grpc_metadata_batch* initial_metadata) GRPC_MUST_USE_RESULT; -static grpc_error* process_send_initial_metadata( +grpc_error* CallData::ProcessSendInitialMetadata( grpc_call_element* elem, grpc_metadata_batch* initial_metadata) { - call_data* calld = static_cast(elem->call_data); - channel_data* channeld = static_cast(elem->channel_data); + ChannelData* channeld = static_cast(elem->channel_data); // Find the compression algorithm. grpc_compression_algorithm compression_algorithm = - find_compression_algorithm(initial_metadata, channeld); + FindCompressionAlgorithm(initial_metadata, channeld); // Note that at most one of the following algorithms can be set. - calld->message_compression_algorithm = + message_compression_algorithm_ = grpc_compression_algorithm_to_message_compression_algorithm( compression_algorithm); grpc_stream_compression_algorithm stream_compression_algorithm = @@ -189,321 +247,300 @@ static grpc_error* process_send_initial_metadata( compression_algorithm); // Hint compression algorithm. grpc_error* error = GRPC_ERROR_NONE; - if (calld->message_compression_algorithm != GRPC_MESSAGE_COMPRESS_NONE) { - initialize_state(elem, calld); + if (message_compression_algorithm_ != GRPC_MESSAGE_COMPRESS_NONE) { + InitializeState(elem); error = grpc_metadata_batch_add_tail( - initial_metadata, &calld->message_compression_algorithm_storage, + initial_metadata, &message_compression_algorithm_storage_, grpc_message_compression_encoding_mdelem( - calld->message_compression_algorithm), + message_compression_algorithm_), GRPC_BATCH_GRPC_ENCODING); } else if (stream_compression_algorithm != GRPC_STREAM_COMPRESS_NONE) { - initialize_state(elem, calld); + InitializeState(elem); error = grpc_metadata_batch_add_tail( - initial_metadata, &calld->stream_compression_algorithm_storage, + initial_metadata, &stream_compression_algorithm_storage_, grpc_stream_compression_encoding_mdelem(stream_compression_algorithm), GRPC_BATCH_CONTENT_ENCODING); } if (error != GRPC_ERROR_NONE) return error; // Convey supported compression algorithms. 
error = grpc_metadata_batch_add_tail( - initial_metadata, &calld->accept_encoding_storage, + initial_metadata, &accept_encoding_storage_, GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS( - channeld->enabled_message_compression_algorithms_bitset), + channeld->enabled_message_compression_algorithms_bitset()), GRPC_BATCH_GRPC_ACCEPT_ENCODING); if (error != GRPC_ERROR_NONE) return error; // Do not overwrite accept-encoding header if it already presents (e.g., added // by some proxy). if (!initial_metadata->idx.named.accept_encoding) { error = grpc_metadata_batch_add_tail( - initial_metadata, &calld->accept_stream_encoding_storage, + initial_metadata, &accept_stream_encoding_storage_, GRPC_MDELEM_ACCEPT_STREAM_ENCODING_FOR_ALGORITHMS( - channeld->enabled_stream_compression_algorithms_bitset), + channeld->enabled_stream_compression_algorithms_bitset()), GRPC_BATCH_ACCEPT_ENCODING); } return error; } -static void send_message_on_complete(void* arg, grpc_error* error) { - grpc_call_element* elem = static_cast(arg); - call_data* calld = static_cast(elem->call_data); - grpc_slice_buffer_reset_and_unref_internal(&calld->slices); +void CallData::SendMessageOnComplete(void* calld_arg, grpc_error* error) { + CallData* calld = static_cast(calld_arg); + grpc_slice_buffer_reset_and_unref_internal(&calld->slices_); grpc_core::Closure::Run(DEBUG_LOCATION, - calld->original_send_message_on_complete, + calld->original_send_message_on_complete_, GRPC_ERROR_REF(error)); } -static void send_message_batch_continue(grpc_call_element* elem) { - call_data* calld = static_cast(elem->call_data); +void CallData::SendMessageBatchContinue(grpc_call_element* elem) { // Note: The call to grpc_call_next_op() results in yielding the - // call combiner, so we need to clear calld->send_message_batch - // before we do that. - grpc_transport_stream_op_batch* send_message_batch = - calld->send_message_batch; - calld->send_message_batch = nullptr; + // call combiner, so we need to clear send_message_batch_ before we do that. + grpc_transport_stream_op_batch* send_message_batch = send_message_batch_; + send_message_batch_ = nullptr; grpc_call_next_op(elem, send_message_batch); } -static void finish_send_message(grpc_call_element* elem) { - call_data* calld = static_cast(elem->call_data); - GPR_DEBUG_ASSERT(calld->message_compression_algorithm != +void CallData::FinishSendMessage(grpc_call_element* elem) { + GPR_DEBUG_ASSERT(message_compression_algorithm_ != GRPC_MESSAGE_COMPRESS_NONE); // Compress the data if appropriate. grpc_slice_buffer tmp; grpc_slice_buffer_init(&tmp); uint32_t send_flags = - calld->send_message_batch->payload->send_message.send_message->flags(); - bool did_compress = grpc_msg_compress(calld->message_compression_algorithm, - &calld->slices, &tmp); + send_message_batch_->payload->send_message.send_message->flags(); + bool did_compress = + grpc_msg_compress(message_compression_algorithm_, &slices_, &tmp); if (did_compress) { if (GRPC_TRACE_FLAG_ENABLED(grpc_compression_trace)) { const char* algo_name; - const size_t before_size = calld->slices.length; + const size_t before_size = slices_.length; const size_t after_size = tmp.length; const float savings_ratio = 1.0f - static_cast(after_size) / static_cast(before_size); GPR_ASSERT(grpc_message_compression_algorithm_name( - calld->message_compression_algorithm, &algo_name)); + message_compression_algorithm_, &algo_name)); gpr_log(GPR_INFO, "Compressed[%s] %" PRIuPTR " bytes vs. 
%" PRIuPTR " bytes (%.2f%% savings)", algo_name, before_size, after_size, 100 * savings_ratio); } - grpc_slice_buffer_swap(&calld->slices, &tmp); + grpc_slice_buffer_swap(&slices_, &tmp); send_flags |= GRPC_WRITE_INTERNAL_COMPRESS; } else { if (GRPC_TRACE_FLAG_ENABLED(grpc_compression_trace)) { const char* algo_name; GPR_ASSERT(grpc_message_compression_algorithm_name( - calld->message_compression_algorithm, &algo_name)); + message_compression_algorithm_, &algo_name)); gpr_log(GPR_INFO, "Algorithm '%s' enabled but decided not to compress. Input size: " "%" PRIuPTR, - algo_name, calld->slices.length); + algo_name, slices_.length); } } grpc_slice_buffer_destroy_internal(&tmp); // Swap out the original byte stream with our new one and send the // batch down. - calld->replacement_stream.Init(&calld->slices, send_flags); - calld->send_message_batch->payload->send_message.send_message.reset( - calld->replacement_stream.get()); - calld->original_send_message_on_complete = - calld->send_message_batch->on_complete; - calld->send_message_batch->on_complete = &calld->send_message_on_complete; - send_message_batch_continue(elem); + new (&replacement_stream_) + grpc_core::SliceBufferByteStream(&slices_, send_flags); + send_message_batch_->payload->send_message.send_message.reset( + reinterpret_cast( + &replacement_stream_)); + original_send_message_on_complete_ = send_message_batch_->on_complete; + send_message_batch_->on_complete = &send_message_on_complete_; + SendMessageBatchContinue(elem); } -static void fail_send_message_batch_in_call_combiner(void* arg, - grpc_error* error) { - call_data* calld = static_cast(arg); - if (calld->send_message_batch != nullptr) { +void CallData::FailSendMessageBatchInCallCombiner(void* calld_arg, + grpc_error* error) { + CallData* calld = static_cast(calld_arg); + if (calld->send_message_batch_ != nullptr) { grpc_transport_stream_op_batch_finish_with_failure( - calld->send_message_batch, GRPC_ERROR_REF(error), calld->call_combiner); - calld->send_message_batch = nullptr; + calld->send_message_batch_, GRPC_ERROR_REF(error), + calld->call_combiner_); + calld->send_message_batch_ = nullptr; } } -// Pulls a slice from the send_message byte stream and adds it to calld->slices. -static grpc_error* pull_slice_from_send_message(call_data* calld) { +// Pulls a slice from the send_message byte stream and adds it to slices_. +grpc_error* CallData::PullSliceFromSendMessage() { grpc_slice incoming_slice; grpc_error* error = - calld->send_message_batch->payload->send_message.send_message->Pull( + send_message_batch_->payload->send_message.send_message->Pull( &incoming_slice); if (error == GRPC_ERROR_NONE) { - grpc_slice_buffer_add(&calld->slices, incoming_slice); + grpc_slice_buffer_add(&slices_, incoming_slice); } return error; } // Reads as many slices as possible from the send_message byte stream. -// If all data has been read, invokes finish_send_message(). Otherwise, +// If all data has been read, invokes FinishSendMessage(). Otherwise, // an async call to ByteStream::Next() has been started, which will -// eventually result in calling on_send_message_next_done(). -static void continue_reading_send_message(grpc_call_element* elem) { - call_data* calld = static_cast(elem->call_data); - if (calld->slices.length == - calld->send_message_batch->payload->send_message.send_message->length()) { - finish_send_message(elem); +// eventually result in calling OnSendMessageNextDone(). 
+void CallData::ContinueReadingSendMessage(grpc_call_element* elem) { + if (slices_.length == + send_message_batch_->payload->send_message.send_message->length()) { + FinishSendMessage(elem); return; } - while (calld->send_message_batch->payload->send_message.send_message->Next( - ~static_cast(0), &calld->on_send_message_next_done)) { - grpc_error* error = pull_slice_from_send_message(calld); + while (send_message_batch_->payload->send_message.send_message->Next( + ~static_cast(0), &on_send_message_next_done_)) { + grpc_error* error = PullSliceFromSendMessage(); if (error != GRPC_ERROR_NONE) { // Closure callback; does not take ownership of error. - fail_send_message_batch_in_call_combiner(calld, error); + FailSendMessageBatchInCallCombiner(this, error); GRPC_ERROR_UNREF(error); return; } - if (calld->slices.length == calld->send_message_batch->payload->send_message - .send_message->length()) { - finish_send_message(elem); + if (slices_.length == + send_message_batch_->payload->send_message.send_message->length()) { + FinishSendMessage(elem); break; } } } // Async callback for ByteStream::Next(). -static void on_send_message_next_done(void* arg, grpc_error* error) { - grpc_call_element* elem = static_cast(arg); - call_data* calld = static_cast(elem->call_data); +void CallData::OnSendMessageNextDone(void* elem_arg, grpc_error* error) { + grpc_call_element* elem = static_cast(elem_arg); + CallData* calld = static_cast(elem->call_data); if (error != GRPC_ERROR_NONE) { // Closure callback; does not take ownership of error. - fail_send_message_batch_in_call_combiner(calld, error); + FailSendMessageBatchInCallCombiner(calld, error); return; } - error = pull_slice_from_send_message(calld); + error = calld->PullSliceFromSendMessage(); if (error != GRPC_ERROR_NONE) { // Closure callback; does not take ownership of error. - fail_send_message_batch_in_call_combiner(calld, error); + FailSendMessageBatchInCallCombiner(calld, error); GRPC_ERROR_UNREF(error); return; } - if (calld->slices.length == - calld->send_message_batch->payload->send_message.send_message->length()) { - finish_send_message(elem); + if (calld->slices_.length == calld->send_message_batch_->payload->send_message + .send_message->length()) { + calld->FinishSendMessage(elem); } else { - continue_reading_send_message(elem); + calld->ContinueReadingSendMessage(elem); } } -static void start_send_message_batch(void* arg, grpc_error* /*unused*/) { - grpc_call_element* elem = static_cast(arg); - if (skip_message_compression(elem)) { - send_message_batch_continue(elem); +void CallData::StartSendMessageBatch(void* elem_arg, grpc_error* /*unused*/) { + grpc_call_element* elem = static_cast(elem_arg); + CallData* calld = static_cast(elem->call_data); + if (calld->SkipMessageCompression()) { + calld->SendMessageBatchContinue(elem); } else { - continue_reading_send_message(elem); + calld->ContinueReadingSendMessage(elem); } } -static void compress_start_transport_stream_op_batch( +void CallData::CompressStartTransportStreamOpBatch( grpc_call_element* elem, grpc_transport_stream_op_batch* batch) { GPR_TIMER_SCOPE("compress_start_transport_stream_op_batch", 0); - call_data* calld = static_cast(elem->call_data); // Handle cancel_stream. 
if (batch->cancel_stream) { - GRPC_ERROR_UNREF(calld->cancel_error); - calld->cancel_error = - GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error); - if (calld->send_message_batch != nullptr) { - if (!calld->seen_initial_metadata) { + GRPC_ERROR_UNREF(cancel_error_); + cancel_error_ = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error); + if (send_message_batch_ != nullptr) { + if (!seen_initial_metadata_) { GRPC_CALL_COMBINER_START( - calld->call_combiner, - GRPC_CLOSURE_CREATE(fail_send_message_batch_in_call_combiner, calld, + call_combiner_, + GRPC_CLOSURE_CREATE(FailSendMessageBatchInCallCombiner, this, grpc_schedule_on_exec_ctx), - GRPC_ERROR_REF(calld->cancel_error), "failing send_message op"); + GRPC_ERROR_REF(cancel_error_), "failing send_message op"); } else { - calld->send_message_batch->payload->send_message.send_message->Shutdown( - GRPC_ERROR_REF(calld->cancel_error)); + send_message_batch_->payload->send_message.send_message->Shutdown( + GRPC_ERROR_REF(cancel_error_)); } } - } else if (calld->cancel_error != GRPC_ERROR_NONE) { + } else if (cancel_error_ != GRPC_ERROR_NONE) { grpc_transport_stream_op_batch_finish_with_failure( - batch, GRPC_ERROR_REF(calld->cancel_error), calld->call_combiner); + batch, GRPC_ERROR_REF(cancel_error_), call_combiner_); return; } // Handle send_initial_metadata. if (batch->send_initial_metadata) { - GPR_ASSERT(!calld->seen_initial_metadata); - grpc_error* error = process_send_initial_metadata( + GPR_ASSERT(!seen_initial_metadata_); + grpc_error* error = ProcessSendInitialMetadata( elem, batch->payload->send_initial_metadata.send_initial_metadata); if (error != GRPC_ERROR_NONE) { grpc_transport_stream_op_batch_finish_with_failure(batch, error, - calld->call_combiner); + call_combiner_); return; } - calld->seen_initial_metadata = true; + seen_initial_metadata_ = true; // If we had previously received a batch containing a send_message op, // handle it now. Note that we need to re-enter the call combiner // for this, since we can't send two batches down while holding the // call combiner, since the connected_channel filter (at the bottom of // the call stack) will release the call combiner for each batch it sees. - if (calld->send_message_batch != nullptr) { + if (send_message_batch_ != nullptr) { GRPC_CALL_COMBINER_START( - calld->call_combiner, - &calld->start_send_message_batch_in_call_combiner, GRPC_ERROR_NONE, - "starting send_message after send_initial_metadata"); + call_combiner_, &start_send_message_batch_in_call_combiner_, + GRPC_ERROR_NONE, "starting send_message after send_initial_metadata"); } } // Handle send_message. if (batch->send_message) { - GPR_ASSERT(calld->send_message_batch == nullptr); - calld->send_message_batch = batch; + GPR_ASSERT(send_message_batch_ == nullptr); + send_message_batch_ = batch; // If we have not yet seen send_initial_metadata, then we have to - // wait. We save the batch in calld and then drop the call - // combiner, which we'll have to pick up again later when we get - // send_initial_metadata. - if (!calld->seen_initial_metadata) { + // wait. We save the batch and then drop the call combiner, which we'll + // have to pick up again later when we get send_initial_metadata. 
+ if (!seen_initial_metadata_) { GRPC_CALL_COMBINER_STOP( - calld->call_combiner, - "send_message batch pending send_initial_metadata"); + call_combiner_, "send_message batch pending send_initial_metadata"); return; } - start_send_message_batch(elem, GRPC_ERROR_NONE); + StartSendMessageBatch(elem, GRPC_ERROR_NONE); } else { // Pass control down the stack. grpc_call_next_op(elem, batch); } } +void CompressStartTransportStreamOpBatch( + grpc_call_element* elem, grpc_transport_stream_op_batch* batch) { + CallData* calld = static_cast(elem->call_data); + calld->CompressStartTransportStreamOpBatch(elem, batch); +} + /* Constructor for call_data */ -static grpc_error* compress_init_call_elem(grpc_call_element* elem, - const grpc_call_element_args* args) { - new (elem->call_data) call_data(elem, *args); +grpc_error* CompressInitCallElem(grpc_call_element* elem, + const grpc_call_element_args* args) { + new (elem->call_data) CallData(elem, *args); return GRPC_ERROR_NONE; } /* Destructor for call_data */ -static void compress_destroy_call_elem( - grpc_call_element* elem, const grpc_call_final_info* /*final_info*/, - grpc_closure* /*ignored*/) { - call_data* calld = static_cast(elem->call_data); - calld->~call_data(); +void CompressDestroyCallElem(grpc_call_element* elem, + const grpc_call_final_info* /*final_info*/, + grpc_closure* /*ignored*/) { + CallData* calld = static_cast(elem->call_data); + calld->~CallData(); } -/* Constructor for channel_data */ -static grpc_error* compress_init_channel_elem(grpc_channel_element* elem, - grpc_channel_element_args* args) { - channel_data* channeld = static_cast(elem->channel_data); - // Get the enabled and the default algorithms from channel args. - channeld->enabled_compression_algorithms_bitset = - grpc_channel_args_compression_algorithm_get_states(args->channel_args); - channeld->default_compression_algorithm = - grpc_channel_args_get_channel_default_compression_algorithm( - args->channel_args); - // Make sure the default is enabled. 
- if (!GPR_BITGET(channeld->enabled_compression_algorithms_bitset, - channeld->default_compression_algorithm)) { - const char* name; - GPR_ASSERT(grpc_compression_algorithm_name( - channeld->default_compression_algorithm, &name) == 1); - gpr_log(GPR_ERROR, - "default compression algorithm %s not enabled: switching to none", - name); - channeld->default_compression_algorithm = GRPC_COMPRESS_NONE; - } - channeld->enabled_message_compression_algorithms_bitset = - grpc_compression_bitset_to_message_bitset( - channeld->enabled_compression_algorithms_bitset); - channeld->enabled_stream_compression_algorithms_bitset = - grpc_compression_bitset_to_stream_bitset( - channeld->enabled_compression_algorithms_bitset); - GPR_ASSERT(!args->is_last); +/* Constructor for ChannelData */ +grpc_error* CompressInitChannelElem(grpc_channel_element* elem, + grpc_channel_element_args* args) { + new (elem->channel_data) ChannelData(args); return GRPC_ERROR_NONE; } /* Destructor for channel data */ -static void compress_destroy_channel_elem(grpc_channel_element* /*elem*/) {} +void CompressDestroyChannelElem(grpc_channel_element* elem) { + ChannelData* channeld = static_cast(elem->channel_data); + channeld->~ChannelData(); +} + +} // namespace const grpc_channel_filter grpc_message_compress_filter = { - compress_start_transport_stream_op_batch, + CompressStartTransportStreamOpBatch, grpc_channel_next_op, - sizeof(call_data), - compress_init_call_elem, + sizeof(CallData), + CompressInitCallElem, grpc_call_stack_ignore_set_pollset_or_pollset_set, - compress_destroy_call_elem, - sizeof(channel_data), - compress_init_channel_elem, - compress_destroy_channel_elem, + CompressDestroyCallElem, + sizeof(ChannelData), + CompressInitChannelElem, + CompressDestroyChannelElem, grpc_channel_next_get_info, "message_compress"}; diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc index 3f27466611f..cc03a406903 100644 --- a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc @@ -99,6 +99,7 @@ static int g_default_max_ping_strikes = DEFAULT_MAX_PING_STRIKES; #define MAX_CLIENT_STREAM_ID 0x7fffffffu grpc_core::TraceFlag grpc_http_trace(false, "http"); +grpc_core::TraceFlag grpc_keepalive_trace(false, "http_keepalive"); grpc_core::DebugOnlyTraceFlag grpc_trace_chttp2_refcount(false, "chttp2_refcount"); @@ -2817,7 +2818,8 @@ static void start_keepalive_ping_locked(void* arg, grpc_error* error) { if (t->channelz_socket != nullptr) { t->channelz_socket->RecordKeepaliveSent(); } - if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) || + GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) { gpr_log(GPR_INFO, "%s: Start keepalive ping", t->peer_string); } GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive watchdog"); @@ -2840,7 +2842,8 @@ static void finish_keepalive_ping_locked(void* arg, grpc_error* error) { grpc_chttp2_transport* t = static_cast(arg); if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) { if (error == GRPC_ERROR_NONE) { - if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) || + GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) { gpr_log(GPR_INFO, "%s: Finish keepalive ping", t->peer_string); } if (!t->keepalive_ping_started) { diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.h b/src/core/ext/transport/chttp2/transport/chttp2_transport.h index 39574f93ec7..b04630bbe2b 
100644 --- a/src/core/ext/transport/chttp2/transport/chttp2_transport.h +++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.h @@ -27,6 +27,7 @@ #include "src/core/lib/transport/transport.h" extern grpc_core::TraceFlag grpc_http_trace; +extern grpc_core::TraceFlag grpc_keepalive_trace; extern grpc_core::TraceFlag grpc_trace_http2_stream_state; extern grpc_core::DebugOnlyTraceFlag grpc_trace_chttp2_refcount; extern grpc_core::DebugOnlyTraceFlag grpc_trace_chttp2_hpack_parser; diff --git a/src/core/ext/transport/chttp2/transport/flow_control.cc b/src/core/ext/transport/chttp2/transport/flow_control.cc index d53475a1b61..babe564d39d 100644 --- a/src/core/ext/transport/chttp2/transport/flow_control.cc +++ b/src/core/ext/transport/chttp2/transport/flow_control.cc @@ -284,8 +284,8 @@ void StreamFlowControl::IncomingByteStreamUpdate(size_t max_size_hint, [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]; /* clamp max recv hint to an allowable size */ - if (max_size_hint >= UINT32_MAX - sent_init_window) { - max_recv_bytes = UINT32_MAX - sent_init_window; + if (max_size_hint >= kMaxWindowUpdateSize - sent_init_window) { + max_recv_bytes = kMaxWindowUpdateSize - sent_init_window; } else { max_recv_bytes = static_cast(max_size_hint); } @@ -298,7 +298,7 @@ void StreamFlowControl::IncomingByteStreamUpdate(size_t max_size_hint, } /* add some small lookahead to keep pipelines flowing */ - GPR_ASSERT(max_recv_bytes <= UINT32_MAX - sent_init_window); + GPR_DEBUG_ASSERT(max_recv_bytes <= kMaxWindowUpdateSize - sent_init_window); if (local_window_delta_ < max_recv_bytes) { uint32_t add_max_recv_bytes = static_cast(max_recv_bytes - local_window_delta_); diff --git a/src/core/ext/transport/chttp2/transport/writing.cc b/src/core/ext/transport/chttp2/transport/writing.cc index c7613afcf6f..98d7f9f3edc 100644 --- a/src/core/ext/transport/chttp2/transport/writing.cc +++ b/src/core/ext/transport/chttp2/transport/writing.cc @@ -18,6 +18,7 @@ #include +#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h" #include "src/core/ext/transport/chttp2/transport/context_list.h" #include "src/core/ext/transport/chttp2/transport/internal.h" @@ -54,7 +55,8 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) { if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_INFLIGHT])) { /* ping already in-flight: wait */ if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) || - GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace)) { + GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace) || + GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) { gpr_log(GPR_INFO, "%s: Ping delayed [%p]: already pinging", t->is_client ? "CLIENT" : "SERVER", t->peer_string); } @@ -64,7 +66,8 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) { t->ping_policy.max_pings_without_data != 0) { /* need to receive something of substance before sending a ping again */ if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) || - GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace)) { + GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace) || + GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) { gpr_log(GPR_INFO, "%s: Ping delayed [%p]: too many recent pings: %d/%d", t->is_client ? 
"CLIENT" : "SERVER", t->peer_string, t->ping_state.pings_before_data_required, @@ -85,7 +88,8 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) { if (next_allowed_ping > now) { /* not enough elapsed time between successive pings */ if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) || - GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace)) { + GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace) || + GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) { gpr_log(GPR_INFO, "%s: Ping delayed [%p]: not enough time elapsed since last ping. " " Last ping %f: Next ping %f: Now %f", @@ -116,7 +120,8 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) { GRPC_STATS_INC_HTTP2_PINGS_SENT(); t->ping_state.last_ping_sent_time = now; if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) || - GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace)) { + GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace) || + GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) { gpr_log(GPR_INFO, "%s: Ping sent [%s]: %d/%d", t->is_client ? "CLIENT" : "SERVER", t->peer_string, t->ping_state.pings_before_data_required, diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.cc b/src/core/ext/transport/cronet/transport/cronet_transport.cc index acdc3fc1ff1..b3a0c401dbc 100644 --- a/src/core/ext/transport/cronet/transport/cronet_transport.cc +++ b/src/core/ext/transport/cronet/transport/cronet_transport.cc @@ -1072,9 +1072,11 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) { op_can_be_run(stream_op, s, &oas->state, OP_SEND_MESSAGE)) { CRONET_LOG(GPR_DEBUG, "running: %p OP_SEND_MESSAGE", oas); stream_state->pending_send_message = false; - if (stream_state->state_callback_received[OP_FAILED]) { + if (stream_state->state_op_done[OP_CANCEL_ERROR] || + stream_state->state_callback_received[OP_FAILED] || + stream_state->state_callback_received[OP_SUCCEEDED]) { result = NO_ACTION_POSSIBLE; - CRONET_LOG(GPR_DEBUG, "Stream is either cancelled or failed."); + CRONET_LOG(GPR_DEBUG, "Stream is either cancelled, failed or finished"); } else { grpc_slice_buffer write_slice_buffer; grpc_slice slice; @@ -1131,9 +1133,11 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) { op_can_be_run(stream_op, s, &oas->state, OP_SEND_TRAILING_METADATA)) { CRONET_LOG(GPR_DEBUG, "running: %p OP_SEND_TRAILING_METADATA", oas); - if (stream_state->state_callback_received[OP_FAILED]) { + if (stream_state->state_op_done[OP_CANCEL_ERROR] || + stream_state->state_callback_received[OP_FAILED] || + stream_state->state_callback_received[OP_SUCCEEDED]) { result = NO_ACTION_POSSIBLE; - CRONET_LOG(GPR_DEBUG, "Stream is either cancelled or failed."); + CRONET_LOG(GPR_DEBUG, "Stream is either cancelled, failed or finished"); } else { CRONET_LOG(GPR_DEBUG, "bidirectional_stream_write (%p, 0)", s->cbs); stream_state->state_callback_received[OP_SEND_MESSAGE] = false; diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h index f9272871931..87e675951b3 100644 --- a/src/core/lib/channel/channel_stack.h +++ b/src/core/lib/channel/channel_stack.h @@ -31,7 +31,18 @@ chains are linear, then channel stacks provide a mechanism to minimize allocations for that chain. Call stacks are created by channel stacks and represent the per-call data - for that stack. */ + for that stack. + + Implementations should take care of the following details for a batch - + 1. Synchronization is achieved with a CallCombiner. View + src/core/lib/iomgr/call_combiner.h for more details. + 2. 
If the filter wants to inject an error on the way down, it needs to call + grpc_transport_stream_op_batch_finish_with_failure from within the call + combiner. This will cause any batch callbacks to be called with that error. + 3. If the filter wants to inject an error on the way up (from a callback), it + should also inject that error in the recv_trailing_metadata callback so that + it can have an effect on the call status. +*/ #include diff --git a/src/core/lib/iomgr/socket_utils_common_posix.cc b/src/core/lib/iomgr/socket_utils_common_posix.cc index a5048d890d3..f368cb76f62 100644 --- a/src/core/lib/iomgr/socket_utils_common_posix.cc +++ b/src/core/lib/iomgr/socket_utils_common_posix.cc @@ -210,7 +210,6 @@ static gpr_once g_probe_so_reuesport_once = GPR_ONCE_INIT; static int g_support_so_reuseport = false; void probe_so_reuseport_once(void) { -#ifndef GPR_MANYLINUX1 int s = socket(AF_INET, SOCK_STREAM, 0); if (s < 0) { /* This might be an ipv6-only environment in which case 'socket(AF_INET,..)' @@ -222,7 +221,6 @@ void probe_so_reuseport_once(void) { "check for SO_REUSEPORT", grpc_set_socket_reuse_port(s, 1)); close(s); } -#endif } bool grpc_is_socket_reuse_port_supported() { diff --git a/src/core/lib/security/credentials/credentials.cc b/src/core/lib/security/credentials/credentials.cc index 8ef58d2a4b4..90452d68d61 100644 --- a/src/core/lib/security/credentials/credentials.cc +++ b/src/core/lib/security/credentials/credentials.cc @@ -45,90 +45,6 @@ void grpc_channel_credentials_release(grpc_channel_credentials* creds) { if (creds) creds->Unref(); } -static std::map, - grpc_core::RefCountedPtr, - grpc_core::StringLess>* g_grpc_control_plane_creds; -static gpr_mu g_control_plane_creds_mu; - -static void do_control_plane_creds_init() { - gpr_mu_init(&g_control_plane_creds_mu); - GPR_ASSERT(g_grpc_control_plane_creds == nullptr); - g_grpc_control_plane_creds = - new std::map, - grpc_core::RefCountedPtr, - grpc_core::StringLess>(); -} - -void grpc_control_plane_credentials_init() { - static gpr_once once_init_control_plane_creds = GPR_ONCE_INIT; - gpr_once_init(&once_init_control_plane_creds, do_control_plane_creds_init); -} - -void grpc_test_only_control_plane_credentials_destroy() { - delete g_grpc_control_plane_creds; - g_grpc_control_plane_creds = nullptr; - gpr_mu_destroy(&g_control_plane_creds_mu); -} - -void grpc_test_only_control_plane_credentials_force_init() { - if (g_grpc_control_plane_creds == nullptr) { - do_control_plane_creds_init(); - } -} - -bool grpc_channel_credentials_attach_credentials( - grpc_channel_credentials* credentials, const char* authority, - grpc_channel_credentials* control_plane_creds) { - grpc_core::ExecCtx exec_ctx; - return credentials->attach_credentials(authority, control_plane_creds->Ref()); -} - -bool grpc_control_plane_credentials_register( - const char* authority, grpc_channel_credentials* control_plane_creds) { - grpc_core::ExecCtx exec_ctx; - { - grpc_core::MutexLock lock(&g_control_plane_creds_mu); - auto key = grpc_core::UniquePtr(gpr_strdup(authority)); - if (g_grpc_control_plane_creds->find(key) != - g_grpc_control_plane_creds->end()) { - return false; - } - (*g_grpc_control_plane_creds)[std::move(key)] = control_plane_creds->Ref(); - } - return true; -} - -bool grpc_channel_credentials::attach_credentials( - const char* authority, - grpc_core::RefCountedPtr control_plane_creds) { - auto key = grpc_core::UniquePtr(gpr_strdup(authority)); - if (local_control_plane_creds_.find(key) != - local_control_plane_creds_.end()) { - return false; - } - 
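// Editorial sketch, not part of the patch: a minimal
// start_transport_stream_op_batch callback illustrating point 2 of the
// channel_stack.h comment added above, i.e. injecting an error on the way
// down by calling grpc_transport_stream_op_batch_finish_with_failure() from
// within the call combiner. The filter logic and the my_call_data struct are
// hypothetical.

#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/transport/transport.h"

namespace {

struct my_call_data {
  grpc_core::CallCombiner* call_combiner = nullptr;
  bool reject_all_batches = false;
};

void my_filter_start_transport_stream_op_batch(
    grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
  my_call_data* calld = static_cast<my_call_data*>(elem->call_data);
  if (calld->reject_all_batches) {
    // Fails the batch from inside the call combiner; every callback attached
    // to this batch is invoked with the given error.
    grpc_transport_stream_op_batch_finish_with_failure(
        batch, GRPC_ERROR_CREATE_FROM_STATIC_STRING("rejected by my_filter"),
        calld->call_combiner);
    return;
  }
  // Otherwise pass the batch down the stack unchanged.
  grpc_call_next_op(elem, batch);
}

}  // namespace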
local_control_plane_creds_[std::move(key)] = std::move(control_plane_creds); - return true; -} - -grpc_core::RefCountedPtr -grpc_channel_credentials::get_control_plane_credentials(const char* authority) { - { - auto key = grpc_core::UniquePtr(gpr_strdup(authority)); - auto local_lookup = local_control_plane_creds_.find(key); - if (local_lookup != local_control_plane_creds_.end()) { - return local_lookup->second; - } - { - grpc_core::MutexLock lock(&g_control_plane_creds_mu); - auto global_lookup = g_grpc_control_plane_creds->find(key); - if (global_lookup != g_grpc_control_plane_creds->end()) { - return global_lookup->second; - } - } - } - return duplicate_without_call_credentials(); -} - void grpc_call_credentials_release(grpc_call_credentials* creds) { GRPC_API_TRACE("grpc_call_credentials_release(creds=%p)", 1, (creds)); grpc_core::ExecCtx exec_ctx; diff --git a/src/core/lib/security/credentials/credentials.h b/src/core/lib/security/credentials/credentials.h index 4d1af0588f6..e7385537b0f 100644 --- a/src/core/lib/security/credentials/credentials.h +++ b/src/core/lib/security/credentials/credentials.h @@ -28,9 +28,7 @@ #include #include "src/core/lib/transport/metadata_batch.h" -#include "src/core/lib/gprpp/map.h" #include "src/core/lib/gprpp/ref_counted.h" -#include "src/core/lib/gprpp/sync.h" #include "src/core/lib/http/httpcli.h" #include "src/core/lib/http/parser.h" #include "src/core/lib/iomgr/polling_entity.h" @@ -131,29 +129,10 @@ struct grpc_channel_credentials return args; } - // Attaches control_plane_creds to the local registry, under authority, - // if no other creds are currently registered under authority. Returns - // true if registered successfully and false if not. - bool attach_credentials( - const char* authority, - grpc_core::RefCountedPtr control_plane_creds); - - // Gets the control plane credentials registered under authority. This - // prefers the local control plane creds registry but falls back to the - // global registry. Lastly, this returns self but with any attached - // call credentials stripped off, in the case that neither the local - // registry nor the global registry have an entry for authority. - grpc_core::RefCountedPtr - get_control_plane_credentials(const char* authority); - const char* type() const { return type_; } private: const char* type_; - std::map, - grpc_core::RefCountedPtr, - grpc_core::StringLess> - local_control_plane_creds_; }; /* Util to encapsulate the channel credentials in a channel arg. */ @@ -167,41 +146,6 @@ grpc_channel_credentials* grpc_channel_credentials_from_arg( grpc_channel_credentials* grpc_channel_credentials_find_in_args( const grpc_channel_args* args); -/** EXPERIMENTAL. API MAY CHANGE IN THE FUTURE. - Attaches \a control_plane_creds to \a credentials - under the key \a authority. Returns false if \a authority - is already present, in which case no changes are made. - Note that this API is not thread safe. Only one thread may - attach control plane creds to a given credentials object at - any one time, and new control plane creds must not be - attached after \a credentials has been used to create a channel. */ -bool grpc_channel_credentials_attach_credentials( - grpc_channel_credentials* credentials, const char* authority, - grpc_channel_credentials* control_plane_creds); - -/** EXPERIMENTAL. API MAY CHANGE IN THE FUTURE. - Registers \a control_plane_creds in the global registry - under the key \a authority. Returns false if \a authority - is already present, in which case no changes are made. 
*/ -bool grpc_control_plane_credentials_register( - const char* authority, grpc_channel_credentials* control_plane_creds); - -/* Initializes global control plane credentials data. */ -void grpc_control_plane_credentials_init(); - -/* Test only: destroy global control plane credentials data. - * This API is meant for use by a few tests that need to - * satisdy grpc_core::LeakDetector. */ -void grpc_test_only_control_plane_credentials_destroy(); - -/* Test only: force re-initialization of global control - * plane credentials data if it was previously destroyed. - * This API is meant to be used in - * tandem with the - * grpc_test_only_control_plane_credentials_destroy, for - * the few tests that need it. */ -void grpc_test_only_control_plane_credentials_force_init(); - /* --- grpc_credentials_mdelem_array. --- */ typedef struct { diff --git a/src/core/lib/security/security_connector/alts/alts_security_connector.cc b/src/core/lib/security/security_connector/alts/alts_security_connector.cc index 1274edb6e6e..6bfe6ea74c3 100644 --- a/src/core/lib/security/security_connector/alts/alts_security_connector.cc +++ b/src/core/lib/security/security_connector/alts/alts_security_connector.cc @@ -82,10 +82,17 @@ class grpc_alts_channel_security_connector final tsi_handshaker* handshaker = nullptr; const grpc_alts_credentials* creds = static_cast(channel_creds()); - GPR_ASSERT(alts_tsi_handshaker_create(creds->options(), target_name_, - creds->handshaker_service_url(), true, - interested_parties, - &handshaker) == TSI_OK); + size_t user_specified_max_frame_size = 0; + const grpc_arg* arg = + grpc_channel_args_find(args, GRPC_ARG_TSI_MAX_FRAME_SIZE); + if (arg != nullptr && arg->type == GRPC_ARG_INTEGER) { + user_specified_max_frame_size = grpc_channel_arg_get_integer( + arg, {0, 0, std::numeric_limits::max()}); + } + GPR_ASSERT(alts_tsi_handshaker_create( + creds->options(), target_name_, + creds->handshaker_service_url(), true, interested_parties, + &handshaker, user_specified_max_frame_size) == TSI_OK); handshake_manager->Add( grpc_core::SecurityHandshakerCreate(handshaker, this, args)); } @@ -140,9 +147,17 @@ class grpc_alts_server_security_connector final tsi_handshaker* handshaker = nullptr; const grpc_alts_server_credentials* creds = static_cast(server_creds()); + size_t user_specified_max_frame_size = 0; + const grpc_arg* arg = + grpc_channel_args_find(args, GRPC_ARG_TSI_MAX_FRAME_SIZE); + if (arg != nullptr && arg->type == GRPC_ARG_INTEGER) { + user_specified_max_frame_size = grpc_channel_arg_get_integer( + arg, {0, 0, std::numeric_limits::max()}); + } GPR_ASSERT(alts_tsi_handshaker_create( creds->options(), nullptr, creds->handshaker_service_url(), - false, interested_parties, &handshaker) == TSI_OK); + false, interested_parties, &handshaker, + user_specified_max_frame_size) == TSI_OK); handshake_manager->Add( grpc_core::SecurityHandshakerCreate(handshaker, this, args)); } diff --git a/src/core/lib/surface/init_secure.cc b/src/core/lib/surface/init_secure.cc index f2c236dd79e..428c5815c98 100644 --- a/src/core/lib/surface/init_secure.cc +++ b/src/core/lib/surface/init_secure.cc @@ -78,7 +78,4 @@ void grpc_register_security_filters(void) { maybe_prepend_server_auth_filter, nullptr); } -void grpc_security_init() { - grpc_core::SecurityRegisterHandshakerFactories(); - grpc_control_plane_credentials_init(); -} +void grpc_security_init() { grpc_core::SecurityRegisterHandshakerFactories(); } diff --git a/src/core/plugin_registry/grpc_plugin_registry.cc b/src/core/plugin_registry/grpc_plugin_registry.cc 
index 20ad526d837..b52c5be1719 100644 --- a/src/core/plugin_registry/grpc_plugin_registry.cc +++ b/src/core/plugin_registry/grpc_plugin_registry.cc @@ -36,8 +36,14 @@ void grpc_lb_policy_grpclb_init(void); void grpc_lb_policy_grpclb_shutdown(void); void grpc_lb_policy_cds_init(void); void grpc_lb_policy_cds_shutdown(void); -void grpc_lb_policy_xds_init(void); -void grpc_lb_policy_xds_shutdown(void); +void grpc_lb_policy_eds_init(void); +void grpc_lb_policy_eds_shutdown(void); +void grpc_lb_policy_lrs_init(void); +void grpc_lb_policy_lrs_shutdown(void); +void grpc_lb_policy_priority_init(void); +void grpc_lb_policy_priority_shutdown(void); +void grpc_lb_policy_weighted_target_init(void); +void grpc_lb_policy_weighted_target_shutdown(void); void grpc_lb_policy_pick_first_init(void); void grpc_lb_policy_pick_first_shutdown(void); void grpc_lb_policy_round_robin_init(void); @@ -78,8 +84,14 @@ void grpc_register_built_in_plugins(void) { grpc_lb_policy_grpclb_shutdown); grpc_register_plugin(grpc_lb_policy_cds_init, grpc_lb_policy_cds_shutdown); - grpc_register_plugin(grpc_lb_policy_xds_init, - grpc_lb_policy_xds_shutdown); + grpc_register_plugin(grpc_lb_policy_eds_init, + grpc_lb_policy_eds_shutdown); + grpc_register_plugin(grpc_lb_policy_lrs_init, + grpc_lb_policy_lrs_shutdown); + grpc_register_plugin(grpc_lb_policy_priority_init, + grpc_lb_policy_priority_shutdown); + grpc_register_plugin(grpc_lb_policy_weighted_target_init, + grpc_lb_policy_weighted_target_shutdown); grpc_register_plugin(grpc_lb_policy_pick_first_init, grpc_lb_policy_pick_first_shutdown); grpc_register_plugin(grpc_lb_policy_round_robin_init, diff --git a/src/core/plugin_registry/grpc_unsecure_plugin_registry.cc b/src/core/plugin_registry/grpc_unsecure_plugin_registry.cc index bfed2e22ddd..b1e442a07c0 100644 --- a/src/core/plugin_registry/grpc_unsecure_plugin_registry.cc +++ b/src/core/plugin_registry/grpc_unsecure_plugin_registry.cc @@ -44,8 +44,14 @@ void grpc_lb_policy_grpclb_init(void); void grpc_lb_policy_grpclb_shutdown(void); void grpc_lb_policy_cds_init(void); void grpc_lb_policy_cds_shutdown(void); -void grpc_lb_policy_xds_init(void); -void grpc_lb_policy_xds_shutdown(void); +void grpc_lb_policy_eds_init(void); +void grpc_lb_policy_eds_shutdown(void); +void grpc_lb_policy_lrs_init(void); +void grpc_lb_policy_lrs_shutdown(void); +void grpc_lb_policy_priority_init(void); +void grpc_lb_policy_priority_shutdown(void); +void grpc_lb_policy_weighted_target_init(void); +void grpc_lb_policy_weighted_target_shutdown(void); void grpc_lb_policy_pick_first_init(void); void grpc_lb_policy_pick_first_shutdown(void); void grpc_lb_policy_round_robin_init(void); @@ -86,8 +92,14 @@ void grpc_register_built_in_plugins(void) { grpc_lb_policy_grpclb_shutdown); grpc_register_plugin(grpc_lb_policy_cds_init, grpc_lb_policy_cds_shutdown); - grpc_register_plugin(grpc_lb_policy_xds_init, - grpc_lb_policy_xds_shutdown); + grpc_register_plugin(grpc_lb_policy_eds_init, + grpc_lb_policy_eds_shutdown); + grpc_register_plugin(grpc_lb_policy_lrs_init, + grpc_lb_policy_lrs_shutdown); + grpc_register_plugin(grpc_lb_policy_priority_init, + grpc_lb_policy_priority_shutdown); + grpc_register_plugin(grpc_lb_policy_weighted_target_init, + grpc_lb_policy_weighted_target_shutdown); grpc_register_plugin(grpc_lb_policy_pick_first_init, grpc_lb_policy_pick_first_shutdown); grpc_register_plugin(grpc_lb_policy_round_robin_init, diff --git a/src/core/tsi/alts/handshaker/alts_handshaker_client.cc b/src/core/tsi/alts/handshaker/alts_handshaker_client.cc index 
2592763e5a2..61927276195 100644 --- a/src/core/tsi/alts/handshaker/alts_handshaker_client.cc +++ b/src/core/tsi/alts/handshaker/alts_handshaker_client.cc @@ -102,6 +102,8 @@ typedef struct alts_grpc_handshaker_client { bool receive_status_finished; /* if non-null, contains arguments to complete a TSI next callback. */ recv_message_result* pending_recv_message_result; + /* Maximum frame size used by frame protector. */ + size_t max_frame_size; } alts_grpc_handshaker_client; static void handshaker_client_send_buffer_destroy( @@ -506,6 +508,8 @@ static grpc_byte_buffer* get_serialized_start_client( upb_strview_makez(ptr->data)); ptr = ptr->next; } + grpc_gcp_StartClientHandshakeReq_set_max_frame_size( + start_client, static_cast(client->max_frame_size)); return get_serialized_handshaker_req(req, arena.ptr()); } @@ -565,6 +569,8 @@ static grpc_byte_buffer* get_serialized_start_server( arena.ptr()); grpc_gcp_RpcProtocolVersions_assign_from_struct( server_version, arena.ptr(), &client->options->rpc_versions); + grpc_gcp_StartServerHandshakeReq_set_max_frame_size( + start_server, static_cast(client->max_frame_size)); return get_serialized_handshaker_req(req, arena.ptr()); } @@ -674,7 +680,7 @@ alts_handshaker_client* alts_grpc_handshaker_client_create( grpc_alts_credentials_options* options, const grpc_slice& target_name, grpc_iomgr_cb_func grpc_cb, tsi_handshaker_on_next_done_cb cb, void* user_data, alts_handshaker_client_vtable* vtable_for_testing, - bool is_client) { + bool is_client, size_t max_frame_size) { if (channel == nullptr || handshaker_service_url == nullptr) { gpr_log(GPR_ERROR, "Invalid arguments to alts_handshaker_client_create()"); return nullptr; @@ -694,6 +700,7 @@ alts_handshaker_client* alts_grpc_handshaker_client_create( client->recv_bytes = grpc_empty_slice(); grpc_metadata_array_init(&client->recv_initial_metadata); client->is_client = is_client; + client->max_frame_size = max_frame_size; client->buffer_size = TSI_ALTS_INITIAL_BUFFER_SIZE; client->buffer = static_cast(gpr_zalloc(client->buffer_size)); grpc_slice slice = grpc_slice_from_copied_string(handshaker_service_url); diff --git a/src/core/tsi/alts/handshaker/alts_handshaker_client.h b/src/core/tsi/alts/handshaker/alts_handshaker_client.h index 319a23c88c7..d8669da01cb 100644 --- a/src/core/tsi/alts/handshaker/alts_handshaker_client.h +++ b/src/core/tsi/alts/handshaker/alts_handshaker_client.h @@ -117,7 +117,7 @@ void alts_handshaker_client_destroy(alts_handshaker_client* client); * This method creates an ALTS handshaker client. * * - handshaker: ALTS TSI handshaker to which the created handshaker client - * belongs to. + * belongs to. * - channel: grpc channel to ALTS handshaker service. * - handshaker_service_url: address of ALTS handshaker service in the format of * "host:port". @@ -132,8 +132,12 @@ void alts_handshaker_client_destroy(alts_handshaker_client* client); * - vtable_for_testing: ALTS handshaker client vtable instance used for * testing purpose. * - is_client: a boolean value indicating if the created handshaker client is - * used at the client (is_client = true) or server (is_client = false) side. It - * returns the created ALTS handshaker client on success, and NULL on failure. + * used at the client (is_client = true) or server (is_client = false) side. + * - max_frame_size: Maximum frame size used by frame protector (User specified + * maximum frame size if present or default max frame size). + * + * It returns the created ALTS handshaker client on success, and NULL + * on failure. 
*/ alts_handshaker_client* alts_grpc_handshaker_client_create( alts_tsi_handshaker* handshaker, grpc_channel* channel, @@ -141,7 +145,7 @@ alts_handshaker_client* alts_grpc_handshaker_client_create( grpc_alts_credentials_options* options, const grpc_slice& target_name, grpc_iomgr_cb_func grpc_cb, tsi_handshaker_on_next_done_cb cb, void* user_data, alts_handshaker_client_vtable* vtable_for_testing, - bool is_client); + bool is_client, size_t max_frame_size); /** * This method handles handshaker response returned from ALTS handshaker diff --git a/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc b/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc index 0c700306d8f..2a925182d3f 100644 --- a/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc +++ b/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc @@ -63,6 +63,8 @@ struct alts_tsi_handshaker { // shutdown effectively follows base.handshake_shutdown, // but is synchronized by the mutex of this object. bool shutdown; + // Maximum frame size used by frame protector. + size_t max_frame_size; }; /* Main struct for ALTS TSI handshaker result. */ @@ -75,6 +77,8 @@ typedef struct alts_tsi_handshaker_result { grpc_slice rpc_versions; bool is_client; grpc_slice serialized_context; + // Peer's maximum frame size. + size_t max_frame_size; } alts_tsi_handshaker_result; static tsi_result handshaker_result_extract_peer( @@ -156,6 +160,26 @@ static tsi_result handshaker_result_create_zero_copy_grpc_protector( alts_tsi_handshaker_result* result = reinterpret_cast( const_cast(self)); + + // In case the peer does not send max frame size (e.g. peer is gRPC Go or + // peer uses an old binary), the negotiated frame size is set to + // kTsiAltsMinFrameSize (ignoring max_output_protected_frame_size value if + // present). Otherwise, it is based on peer and user specified max frame + // size (if present). + size_t max_frame_size = kTsiAltsMinFrameSize; + if (result->max_frame_size) { + size_t peer_max_frame_size = result->max_frame_size; + max_frame_size = std::min(peer_max_frame_size, + max_output_protected_frame_size == nullptr + ? 
kTsiAltsMaxFrameSize + : *max_output_protected_frame_size); + max_frame_size = std::max(max_frame_size, kTsiAltsMinFrameSize); + } + max_output_protected_frame_size = &max_frame_size; + gpr_log(GPR_DEBUG, + "After Frame Size Negotiation, maximum frame size used by frame " + "protector equals %zu", + *max_output_protected_frame_size); tsi_result ok = alts_zero_copy_grpc_protector_create( reinterpret_cast(result->key_data), kAltsAes128GcmRekeyKeyLength, /*is_rekey=*/true, result->is_client, @@ -288,6 +312,7 @@ tsi_result alts_tsi_handshaker_result_create(grpc_gcp_HandshakerResp* resp, static_cast(gpr_zalloc(peer_service_account.size + 1)); memcpy(result->peer_identity, peer_service_account.data, peer_service_account.size); + result->max_frame_size = grpc_gcp_HandshakerResult_max_frame_size(hresult); upb::Arena rpc_versions_arena; bool serialized = grpc_gcp_rpc_protocol_versions_encode( peer_rpc_version, rpc_versions_arena.ptr(), &result->rpc_versions); @@ -374,7 +399,8 @@ static tsi_result alts_tsi_handshaker_continue_handshaker_next( handshaker, channel, handshaker->handshaker_service_url, handshaker->interested_parties, handshaker->options, handshaker->target_name, grpc_cb, cb, user_data, - handshaker->client_vtable_for_testing, handshaker->is_client); + handshaker->client_vtable_for_testing, handshaker->is_client, + handshaker->max_frame_size); if (client == nullptr) { gpr_log(GPR_ERROR, "Failed to create ALTS handshaker client"); return TSI_FAILED_PRECONDITION; @@ -570,7 +596,8 @@ bool alts_tsi_handshaker_has_shutdown(alts_tsi_handshaker* handshaker) { tsi_result alts_tsi_handshaker_create( const grpc_alts_credentials_options* options, const char* target_name, const char* handshaker_service_url, bool is_client, - grpc_pollset_set* interested_parties, tsi_handshaker** self) { + grpc_pollset_set* interested_parties, tsi_handshaker** self, + size_t user_specified_max_frame_size) { if (handshaker_service_url == nullptr || self == nullptr || options == nullptr || (is_client && target_name == nullptr)) { gpr_log(GPR_ERROR, "Invalid arguments to alts_tsi_handshaker_create()"); @@ -590,6 +617,9 @@ tsi_result alts_tsi_handshaker_create( handshaker->has_created_handshaker_client = false; handshaker->handshaker_service_url = gpr_strdup(handshaker_service_url); handshaker->options = grpc_alts_credentials_options_copy(options); + handshaker->max_frame_size = user_specified_max_frame_size != 0 + ? user_specified_max_frame_size + : kTsiAltsMaxFrameSize; handshaker->base.vtable = handshaker->use_dedicated_cq ? &handshaker_vtable_dedicated : &handshaker_vtable; diff --git a/src/core/tsi/alts/handshaker/alts_tsi_handshaker.h b/src/core/tsi/alts/handshaker/alts_tsi_handshaker.h index 5bace9affa8..e1ae985a84d 100644 --- a/src/core/tsi/alts/handshaker/alts_tsi_handshaker.h +++ b/src/core/tsi/alts/handshaker/alts_tsi_handshaker.h @@ -38,6 +38,11 @@ const size_t kTsiAltsNumOfPeerProperties = 5; +// Frame size negotiation extends send frame size range to +// [kTsiAltsMinFrameSize, kTsiAltsMaxFrameSize]. +const size_t kTsiAltsMinFrameSize = 16 * 1024; +const size_t kTsiAltsMaxFrameSize = 128 * 1024; + typedef struct alts_tsi_handshaker alts_tsi_handshaker; /** @@ -54,6 +59,8 @@ typedef struct alts_tsi_handshaker alts_tsi_handshaker; * - interested_parties: set of pollsets interested in this connection. * - self: address of ALTS TSI handshaker instance to be returned from the * method. + * - user_specified_max_frame_size: Determines the maximum frame size used by + * frame protector that is specified via user. 
If unspecified, the value is 0. * * It returns TSI_OK on success and an error status code on failure. Note that * if interested_parties is nullptr, a dedicated TSI thread will be created and @@ -62,7 +69,8 @@ typedef struct alts_tsi_handshaker alts_tsi_handshaker; tsi_result alts_tsi_handshaker_create( const grpc_alts_credentials_options* options, const char* target_name, const char* handshaker_service_url, bool is_client, - grpc_pollset_set* interested_parties, tsi_handshaker** self); + grpc_pollset_set* interested_parties, tsi_handshaker** self, + size_t user_specified_max_frame_size); /** * This method creates an ALTS TSI handshaker result instance. diff --git a/src/csharp/Grpc.Tools/build/_protobuf/Google.Protobuf.Tools.targets b/src/csharp/Grpc.Tools/build/_protobuf/Google.Protobuf.Tools.targets index b1030ba1f8b..ad6725f684a 100644 --- a/src/csharp/Grpc.Tools/build/_protobuf/Google.Protobuf.Tools.targets +++ b/src/csharp/Grpc.Tools/build/_protobuf/Google.Protobuf.Tools.targets @@ -137,7 +137,7 @@ %(RelativeDir) - + %(Identity) diff --git a/src/php/README.md b/src/php/README.md index f064ff0d698..e589408b0c3 100644 --- a/src/php/README.md +++ b/src/php/README.md @@ -58,7 +58,7 @@ $ git clone -b RELEASE_TAG_HERE https://github.com/grpc/grpc ```sh $ cd grpc $ git submodule update --init -$ make +$ EXTRA_DEFINES=GRPC_POSIX_FORK_ALLOW_PTHREAD_ATFORK make $ [sudo] make install ``` diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/aio/callback_common.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/aio/callback_common.pyx.pxi index b8f04615297..86fc91e76a4 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/aio/callback_common.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/aio/callback_common.pyx.pxi @@ -70,7 +70,8 @@ cdef CallbackFailureHandler CQ_SHUTDOWN_FAILURE_HANDLER = CallbackFailureHandler InternalError) -class ExecuteBatchError(Exception): pass +class ExecuteBatchError(InternalError): + """Raised when execute batch returns a failure from Core.""" async def execute_batch(GrpcCallWrapper grpc_call_wrapper, @@ -128,7 +129,7 @@ async def _receive_message(GrpcCallWrapper grpc_call_wrapper, # the callback (e.g. cancelled). # # Since they all indicates finish, they are better be merged. 
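For reference, the frame-size negotiation added to alts_tsi_handshaker.cc earlier in this patch reduces to a simple clamp between kTsiAltsMinFrameSize (16 KiB) and kTsiAltsMaxFrameSize (128 KiB). A minimal Python restatement of that arithmetic follows; the helper and its names are illustrative only and are not part of this patch.

# Illustrative restatement of the C++ clamp above, not part of this patch.
TSI_ALTS_MIN_FRAME_SIZE = 16 * 1024   # mirrors kTsiAltsMinFrameSize
TSI_ALTS_MAX_FRAME_SIZE = 128 * 1024  # mirrors kTsiAltsMaxFrameSize

def negotiated_frame_size(peer_max_frame_size, local_cap=None):
    # Peer sent no max frame size (old binary / gRPC Go): use the minimum and
    # ignore any locally requested cap.
    if not peer_max_frame_size:
        return TSI_ALTS_MIN_FRAME_SIZE
    # Otherwise take the smaller of the peer's limit and the local cap (the
    # user-specified value, or the 128 KiB default), never going below 16 KiB.
    cap = TSI_ALTS_MAX_FRAME_SIZE if local_cap is None else local_cap
    return max(min(peer_max_frame_size, cap), TSI_ALTS_MIN_FRAME_SIZE)

For example, a peer limit of 1 MiB with no local override yields 128 KiB, while a peer limit of 4 KiB is raised to 16 KiB.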
- _LOGGER.debug(e) + _LOGGER.debug('Failed to receive any message from Core') return receive_op.message() diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/aio/grpc_aio.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/aio/grpc_aio.pyx.pxi index 4231ca7d8ab..eaddb3952d0 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/aio/grpc_aio.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/aio/grpc_aio.pyx.pxi @@ -105,7 +105,7 @@ cdef _actual_aio_shutdown(): ) future.add_done_callback(_grpc_shutdown_wrapper) elif _global_aio_state.engine is AsyncIOEngine.POLLER: - _global_aio_state.cq.shutdown() + (_global_aio_state.cq).shutdown() grpc_shutdown_blocking() else: raise ValueError('Unsupported engine type [%s]' % _global_aio_state.engine) diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/aio/server.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/aio/server.pxd.pxi index 3923244748a..52f491614f1 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/aio/server.pxd.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/aio/server.pxd.pxi @@ -41,6 +41,18 @@ cdef class RPCState(GrpcCallWrapper): cdef Operation create_send_initial_metadata_op_if_not_sent(self) +cdef class _ServicerContext: + cdef RPCState _rpc_state + cdef object _loop # asyncio.AbstractEventLoop + cdef object _request_deserializer # Callable[[bytes], Any] + cdef object _response_serializer # Callable[[Any], bytes] + + +cdef class _MessageReceiver: + cdef _ServicerContext _servicer_context + cdef object _agen + + cdef enum AioServerStatus: AIO_SERVER_STATUS_UNKNOWN AIO_SERVER_STATUS_READY diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/aio/server.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/aio/server.pyx.pxi index 668ecab0a85..b0c62f9b9f2 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/aio/server.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/aio/server.pyx.pxi @@ -109,10 +109,6 @@ cdef class RPCState: cdef class _ServicerContext: - cdef RPCState _rpc_state - cdef object _loop - cdef object _request_deserializer - cdef object _response_serializer def __cinit__(self, RPCState rpc_state, @@ -128,9 +124,9 @@ cdef class _ServicerContext: cdef bytes raw_message self._rpc_state.raise_for_termination() - if self._rpc_state.client_closed: - return EOF raw_message = await _receive_message(self._rpc_state, self._loop) + self._rpc_state.raise_for_termination() + if raw_message is None: return EOF else: @@ -414,15 +410,28 @@ async def _handle_unary_stream_rpc(object method_handler, ) -async def _message_receiver(_ServicerContext servicer_context): +cdef class _MessageReceiver: """Bridge between the async generator API and the reader-writer API.""" - cdef object message - while True: - message = await servicer_context.read() - if message is not EOF: - yield message - else: - break + + def __cinit__(self, _ServicerContext servicer_context): + self._servicer_context = servicer_context + self._agen = None + + async def _async_message_receiver(self): + """An async generator that receives messages.""" + cdef object message + while True: + message = await self._servicer_context.read() + if message is not EOF: + yield message + else: + break + + def __aiter__(self): + # Prevents never awaited warning if application never used the async generator + if self._agen is None: + self._agen = self._async_message_receiver() + return self._agen async def _handle_stream_unary_rpc(object method_handler, @@ -437,7 +446,7 @@ async def _handle_stream_unary_rpc(object method_handler, ) # Prepares the request generator - cdef object request_async_iterator = 
_message_receiver(servicer_context) + cdef object request_async_iterator = _MessageReceiver(servicer_context) # Finishes the application handler await _finish_handler_with_unary_response( @@ -462,7 +471,7 @@ async def _handle_stream_stream_rpc(object method_handler, ) # Prepares the request generator - cdef object request_async_iterator = _message_receiver(servicer_context) + cdef object request_async_iterator = _MessageReceiver(servicer_context) # Finishes the application handler await _finish_handler_with_stream_responses( @@ -495,6 +504,12 @@ async def _handle_exceptions(RPCState rpc_state, object rpc_coro, object loop): _LOGGER.debug('RPC cancelled for servicer method [%s]', _decode(rpc_state.method())) except _ServerStoppedError: _LOGGER.warning('Aborting method [%s] due to server stop.', _decode(rpc_state.method())) + except ExecuteBatchError: + # If client closed (aka. cancelled), ignore the failed batch operations. + if rpc_state.client_closed: + return + else: + raise except Exception as e: _LOGGER.exception('Unexpected [%s] raised by servicer method [%s]' % ( type(e).__name__, diff --git a/src/python/grpcio/grpc/experimental/__init__.py b/src/python/grpcio/grpc/experimental/__init__.py index 6364e36b631..2c63908fe6d 100644 --- a/src/python/grpcio/grpc/experimental/__init__.py +++ b/src/python/grpcio/grpc/experimental/__init__.py @@ -85,6 +85,6 @@ __all__ = ( 'insecure_channel_credentials', ) -if sys.version_info[0] >= 3: +if sys.version_info[0] == 3 and sys.version_info[1] >= 6: from grpc._simple_stubs import unary_unary, unary_stream, stream_unary, stream_stream __all__ = __all__ + (unary_unary, unary_stream, stream_unary, stream_stream) diff --git a/src/python/grpcio/grpc/experimental/aio/_base_call.py b/src/python/grpcio/grpc/experimental/aio/_base_call.py index d116982aa79..214e208c005 100644 --- a/src/python/grpcio/grpc/experimental/aio/_base_call.py +++ b/src/python/grpcio/grpc/experimental/aio/_base_call.py @@ -117,6 +117,19 @@ class Call(RpcContext, metaclass=ABCMeta): The details string of the RPC. """ + @abstractmethod + async def wait_for_connection(self) -> None: + """Waits until connected to peer and raises aio.AioRpcError if failed. + + This is an EXPERIMENTAL method. + + This method ensures the RPC has been successfully connected. Otherwise, + an AioRpcError will be raised to explain the reason of the connection + failure. + + This method is recommended for building retry mechanisms. + """ + class UnaryUnaryCall(Generic[RequestType, ResponseType], Call, diff --git a/src/python/grpcio/grpc/experimental/aio/_base_channel.py b/src/python/grpcio/grpc/experimental/aio/_base_channel.py index 1168c260e97..11744f0882a 100644 --- a/src/python/grpcio/grpc/experimental/aio/_base_channel.py +++ b/src/python/grpcio/grpc/experimental/aio/_base_channel.py @@ -14,12 +14,13 @@ """Abstract base classes for Channel objects and Multicallable objects.""" import abc -from typing import Any, AsyncIterable, Optional +from typing import Any, Optional import grpc from . 
import _base_call -from ._typing import DeserializingFunction, MetadataType, SerializingFunction +from ._typing import (DeserializingFunction, MetadataType, RequestIterableType, + SerializingFunction) _IMMUTABLE_EMPTY_TUPLE = tuple() @@ -105,7 +106,7 @@ class StreamUnaryMultiCallable(abc.ABC): @abc.abstractmethod def __call__(self, - request_async_iterator: Optional[AsyncIterable[Any]] = None, + request_iterator: Optional[RequestIterableType] = None, timeout: Optional[float] = None, metadata: Optional[MetadataType] = _IMMUTABLE_EMPTY_TUPLE, credentials: Optional[grpc.CallCredentials] = None, @@ -115,7 +116,8 @@ class StreamUnaryMultiCallable(abc.ABC): """Asynchronously invokes the underlying RPC. Args: - request: The request value for the RPC. + request_iterator: An optional async iterable or iterable of request + messages for the RPC. timeout: An optional duration of time in seconds to allow for the RPC. metadata: Optional :term:`metadata` to be transmitted to the @@ -142,7 +144,7 @@ class StreamStreamMultiCallable(abc.ABC): @abc.abstractmethod def __call__(self, - request_async_iterator: Optional[AsyncIterable[Any]] = None, + request_iterator: Optional[RequestIterableType] = None, timeout: Optional[float] = None, metadata: Optional[MetadataType] = _IMMUTABLE_EMPTY_TUPLE, credentials: Optional[grpc.CallCredentials] = None, @@ -152,7 +154,8 @@ class StreamStreamMultiCallable(abc.ABC): """Asynchronously invokes the underlying RPC. Args: - request: The request value for the RPC. + request_iterator: An optional async iterable or iterable of request + messages for the RPC. timeout: An optional duration of time in seconds to allow for the RPC. metadata: Optional :term:`metadata` to be transmitted to the diff --git a/src/python/grpcio/grpc/experimental/aio/_call.py b/src/python/grpcio/grpc/experimental/aio/_call.py index d06cc18d872..3d1d19fd3fa 100644 --- a/src/python/grpcio/grpc/experimental/aio/_call.py +++ b/src/python/grpcio/grpc/experimental/aio/_call.py @@ -14,10 +14,11 @@ """Invocation-side implementation of gRPC Asyncio Python.""" import asyncio -from functools import partial -import logging import enum -from typing import AsyncIterable, Awaitable, Dict, Optional +import inspect +import logging +from functools import partial +from typing import AsyncIterable, Optional, Tuple import grpc from grpc import _common @@ -25,7 +26,8 @@ from grpc._cython import cygrpc from . import _base_call from ._typing import (DeserializingFunction, DoneCallbackType, MetadataType, - RequestType, ResponseType, SerializingFunction) + MetadatumType, RequestIterableType, RequestType, + ResponseType, SerializingFunction) __all__ = 'AioRpcError', 'Call', 'UnaryUnaryCall', 'UnaryStreamCall' @@ -105,7 +107,7 @@ class AioRpcError(grpc.RpcError): """ return self._details - def initial_metadata(self) -> Optional[Dict]: + def initial_metadata(self) -> Optional[MetadataType]: """Accesses the initial metadata sent by the server. Returns: @@ -113,7 +115,7 @@ class AioRpcError(grpc.RpcError): """ return self._initial_metadata - def trailing_metadata(self) -> Optional[Dict]: + def trailing_metadata(self) -> Optional[MetadataType]: """Accesses the trailing metadata sent by the server. 
Returns: @@ -161,7 +163,7 @@ class Call: _loop: asyncio.AbstractEventLoop _code: grpc.StatusCode _cython_call: cygrpc._AioCall - _metadata: MetadataType + _metadata: Tuple[MetadatumType] _request_serializer: SerializingFunction _response_deserializer: DeserializingFunction @@ -171,7 +173,7 @@ class Call: loop: asyncio.AbstractEventLoop) -> None: self._loop = loop self._cython_call = cython_call - self._metadata = metadata + self._metadata = tuple(metadata) self._request_serializer = request_serializer self._response_deserializer = response_deserializer @@ -248,9 +250,8 @@ class _APIStyle(enum.IntEnum): class _UnaryResponseMixin(Call): _call_response: asyncio.Task - def _init_unary_response_mixin(self, - response_coro: Awaitable[ResponseType]): - self._call_response = self._loop.create_task(response_coro) + def _init_unary_response_mixin(self, response_task: asyncio.Task): + self._call_response = response_task def cancel(self) -> bool: if super().cancel(): @@ -362,14 +363,14 @@ class _StreamRequestMixin(Call): _request_style: _APIStyle def _init_stream_request_mixin( - self, request_async_iterator: Optional[AsyncIterable[RequestType]]): + self, request_iterator: Optional[RequestIterableType]): self._metadata_sent = asyncio.Event(loop=self._loop) self._done_writing_flag = False # If user passes in an async iterator, create a consumer Task. - if request_async_iterator is not None: + if request_iterator is not None: self._async_request_poller = self._loop.create_task( - self._consume_request_iterator(request_async_iterator)) + self._consume_request_iterator(request_iterator)) self._request_style = _APIStyle.ASYNC_GENERATOR else: self._async_request_poller = None @@ -391,11 +392,17 @@ class _StreamRequestMixin(Call): def _metadata_sent_observer(self): self._metadata_sent.set() - async def _consume_request_iterator( - self, request_async_iterator: AsyncIterable[RequestType]) -> None: + async def _consume_request_iterator(self, + request_iterator: RequestIterableType + ) -> None: try: - async for request in request_async_iterator: - await self._write(request) + if inspect.isasyncgen(request_iterator): + async for request in request_iterator: + await self._write(request) + else: + for request in request_iterator: + await self._write(request) + await self._done_writing() except AioRpcError as rpc_error: # Rpc status should be exposed through other API. Exceptions raised @@ -450,6 +457,11 @@ class _StreamRequestMixin(Call): self._raise_for_different_style(_APIStyle.READER_WRITER) await self._done_writing() + async def wait_for_connection(self) -> None: + await self._metadata_sent.wait() + if self.done(): + await self._raise_for_status() + class UnaryUnaryCall(_UnaryResponseMixin, Call, _base_call.UnaryUnaryCall): """Object for managing unary-unary RPC calls. @@ -457,6 +469,7 @@ class UnaryUnaryCall(_UnaryResponseMixin, Call, _base_call.UnaryUnaryCall): Returned when an instance of `UnaryUnaryMultiCallable` object is called. 
""" _request: RequestType + _invocation_task: asyncio.Task # pylint: disable=too-many-arguments def __init__(self, request: RequestType, deadline: Optional[float], @@ -470,7 +483,8 @@ class UnaryUnaryCall(_UnaryResponseMixin, Call, _base_call.UnaryUnaryCall): channel.call(method, deadline, credentials, wait_for_ready), metadata, request_serializer, response_deserializer, loop) self._request = request - self._init_unary_response_mixin(self._invoke()) + self._invocation_task = loop.create_task(self._invoke()) + self._init_unary_response_mixin(self._invocation_task) async def _invoke(self) -> ResponseType: serialized_request = _common.serialize(self._request, @@ -492,6 +506,11 @@ class UnaryUnaryCall(_UnaryResponseMixin, Call, _base_call.UnaryUnaryCall): else: return cygrpc.EOF + async def wait_for_connection(self) -> None: + await self._invocation_task + if self.done(): + await self._raise_for_status() + class UnaryStreamCall(_StreamResponseMixin, Call, _base_call.UnaryStreamCall): """Object for managing unary-stream RPC calls. @@ -528,6 +547,11 @@ class UnaryStreamCall(_StreamResponseMixin, Call, _base_call.UnaryStreamCall): self.cancel() raise + async def wait_for_connection(self) -> None: + await self._send_unary_request_task + if self.done(): + await self._raise_for_status() + class StreamUnaryCall(_StreamRequestMixin, _UnaryResponseMixin, Call, _base_call.StreamUnaryCall): @@ -537,8 +561,7 @@ class StreamUnaryCall(_StreamRequestMixin, _UnaryResponseMixin, Call, """ # pylint: disable=too-many-arguments - def __init__(self, - request_async_iterator: Optional[AsyncIterable[RequestType]], + def __init__(self, request_iterator: Optional[RequestIterableType], deadline: Optional[float], metadata: MetadataType, credentials: Optional[grpc.CallCredentials], wait_for_ready: Optional[bool], channel: cygrpc.AioChannel, @@ -549,8 +572,8 @@ class StreamUnaryCall(_StreamRequestMixin, _UnaryResponseMixin, Call, channel.call(method, deadline, credentials, wait_for_ready), metadata, request_serializer, response_deserializer, loop) - self._init_stream_request_mixin(request_async_iterator) - self._init_unary_response_mixin(self._conduct_rpc()) + self._init_stream_request_mixin(request_iterator) + self._init_unary_response_mixin(loop.create_task(self._conduct_rpc())) async def _conduct_rpc(self) -> ResponseType: try: @@ -576,8 +599,7 @@ class StreamStreamCall(_StreamRequestMixin, _StreamResponseMixin, Call, _initializer: asyncio.Task # pylint: disable=too-many-arguments - def __init__(self, - request_async_iterator: Optional[AsyncIterable[RequestType]], + def __init__(self, request_iterator: Optional[RequestIterableType], deadline: Optional[float], metadata: MetadataType, credentials: Optional[grpc.CallCredentials], wait_for_ready: Optional[bool], channel: cygrpc.AioChannel, @@ -588,7 +610,7 @@ class StreamStreamCall(_StreamRequestMixin, _StreamResponseMixin, Call, channel.call(method, deadline, credentials, wait_for_ready), metadata, request_serializer, response_deserializer, loop) self._initializer = self._loop.create_task(self._prepare_rpc()) - self._init_stream_request_mixin(request_async_iterator) + self._init_stream_request_mixin(request_iterator) self._init_stream_response_mixin(self._initializer) async def _prepare_rpc(self): diff --git a/src/python/grpcio/grpc/experimental/aio/_channel.py b/src/python/grpcio/grpc/experimental/aio/_channel.py index 24a38e1f3d0..5e669e1a3f5 100644 --- a/src/python/grpcio/grpc/experimental/aio/_channel.py +++ b/src/python/grpcio/grpc/experimental/aio/_channel.py @@ 
-15,7 +15,7 @@ import asyncio import sys -from typing import Any, AsyncIterable, Iterable, Optional, Sequence +from typing import Any, Iterable, Optional, Sequence import grpc from grpc import _common, _compression, _grpcio_metadata @@ -27,7 +27,7 @@ from ._call import (StreamStreamCall, StreamUnaryCall, UnaryStreamCall, from ._interceptor import (InterceptedUnaryUnaryCall, UnaryUnaryClientInterceptor) from ._typing import (ChannelArgumentType, DeserializingFunction, MetadataType, - SerializingFunction) + SerializingFunction, RequestIterableType) from ._utils import _timeout_to_deadline _IMMUTABLE_EMPTY_TUPLE = tuple() @@ -146,7 +146,7 @@ class StreamUnaryMultiCallable(_BaseMultiCallable, _base_channel.StreamUnaryMultiCallable): def __call__(self, - request_async_iterator: Optional[AsyncIterable[Any]] = None, + request_iterator: Optional[RequestIterableType] = None, timeout: Optional[float] = None, metadata: Optional[MetadataType] = _IMMUTABLE_EMPTY_TUPLE, credentials: Optional[grpc.CallCredentials] = None, @@ -158,7 +158,7 @@ class StreamUnaryMultiCallable(_BaseMultiCallable, deadline = _timeout_to_deadline(timeout) - call = StreamUnaryCall(request_async_iterator, deadline, metadata, + call = StreamUnaryCall(request_iterator, deadline, metadata, credentials, wait_for_ready, self._channel, self._method, self._request_serializer, self._response_deserializer, self._loop) @@ -170,7 +170,7 @@ class StreamStreamMultiCallable(_BaseMultiCallable, _base_channel.StreamStreamMultiCallable): def __call__(self, - request_async_iterator: Optional[AsyncIterable[Any]] = None, + request_iterator: Optional[RequestIterableType] = None, timeout: Optional[float] = None, metadata: Optional[MetadataType] = _IMMUTABLE_EMPTY_TUPLE, credentials: Optional[grpc.CallCredentials] = None, @@ -182,7 +182,7 @@ class StreamStreamMultiCallable(_BaseMultiCallable, deadline = _timeout_to_deadline(timeout) - call = StreamStreamCall(request_async_iterator, deadline, metadata, + call = StreamStreamCall(request_iterator, deadline, metadata, credentials, wait_for_ready, self._channel, self._method, self._request_serializer, self._response_deserializer, self._loop) diff --git a/src/python/grpcio/grpc/experimental/aio/_interceptor.py b/src/python/grpcio/grpc/experimental/aio/_interceptor.py index 9e99a1b125d..d4aca3ae0fc 100644 --- a/src/python/grpcio/grpc/experimental/aio/_interceptor.py +++ b/src/python/grpcio/grpc/experimental/aio/_interceptor.py @@ -330,6 +330,10 @@ class InterceptedUnaryUnaryCall(_base_call.UnaryUnaryCall): response = yield from call.__await__() return response + async def wait_for_connection(self) -> None: + call = await self._interceptors_task + return await call.wait_for_connection() + class UnaryUnaryCallResponse(_base_call.UnaryUnaryCall): """Final UnaryUnaryCall class finished with a response.""" @@ -374,3 +378,6 @@ class UnaryUnaryCallResponse(_base_call.UnaryUnaryCall): # for telling the interpreter that __await__ is a generator. yield None return self._response + + async def wait_for_connection(self) -> None: + pass diff --git a/src/python/grpcio/grpc/experimental/aio/_typing.py b/src/python/grpcio/grpc/experimental/aio/_typing.py index ccd2f529936..205f6dc6227 100644 --- a/src/python/grpcio/grpc/experimental/aio/_typing.py +++ b/src/python/grpcio/grpc/experimental/aio/_typing.py @@ -13,7 +13,9 @@ # limitations under the License. 
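The wait_for_connection() API added above (to _base_call.Call, each concrete call class, and InterceptedUnaryUnaryCall) lets a client confirm that the RPC actually reached the server without consuming any response message. A minimal usage sketch, reusing the test service protos that appear later in this patch; the helper itself is illustrative only.

from grpc.experimental import aio
from src.proto.grpc.testing import messages_pb2, test_pb2_grpc

async def unary_call_with_connection_check(target):
    async with aio.insecure_channel(target) as channel:
        stub = test_pb2_grpc.TestServiceStub(channel)
        call = stub.UnaryCall(messages_pb2.SimpleRequest())
        try:
            # Raises aio.AioRpcError (e.g. UNAVAILABLE) if the RPC never
            # connects; on success no response message is swallowed.
            await call.wait_for_connection()
        except aio.AioRpcError as rpc_error:
            return rpc_error.code()
        return await call  # Normal path: await the response as usual.

As the new docstring notes, this is the building block for retry logic: retry while wait_for_connection() raises, then fall through to awaiting the call.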
"""Common types for gRPC Async API""" -from typing import Any, AnyStr, Callable, Sequence, Tuple, TypeVar +from typing import (Any, AnyStr, AsyncIterable, Callable, Iterable, Sequence, + Tuple, TypeVar, Union) + from grpc._cython.cygrpc import EOF RequestType = TypeVar('RequestType') @@ -25,3 +27,4 @@ MetadataType = Sequence[MetadatumType] ChannelArgumentType = Sequence[Tuple[str, Any]] EOFType = type(EOF) DoneCallbackType = Callable[[Any], None] +RequestIterableType = Union[Iterable[Any], AsyncIterable[Any]] diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py index dd26915d14b..6c2dfed4ae3 100644 --- a/src/python/grpcio/grpc_core_dependencies.py +++ b/src/python/grpcio/grpc_core_dependencies.py @@ -28,16 +28,21 @@ CORE_SOURCE_FILES = [ 'src/core/ext/filters/client_channel/http_connect_handshaker.cc', 'src/core/ext/filters/client_channel/http_proxy.cc', 'src/core/ext/filters/client_channel/lb_policy.cc', + 'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc', 'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc', 'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc', 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc', + 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc', 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc', 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc', 'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc', 'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc', + 'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc', 'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc', + 'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc', 'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc', - 'src/core/ext/filters/client_channel/lb_policy/xds/xds.cc', + 'src/core/ext/filters/client_channel/lb_policy/xds/eds.cc', + 'src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc', 'src/core/ext/filters/client_channel/lb_policy_registry.cc', 'src/core/ext/filters/client_channel/local_subchannel_pool.cc', 'src/core/ext/filters/client_channel/parse_address.cc', diff --git a/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py b/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py index 94ffecc63f0..634117b1b07 100644 --- a/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py +++ b/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py @@ -504,7 +504,8 @@ class PythonPluginTest(unittest.TestCase): service.server.stop(None) -@unittest.skipIf(sys.version_info[0] < 3, "Unsupported on Python 2.") +@unittest.skipIf(sys.version_info[0] < 3 or sys.version_info[1] < 6, + "Unsupported on Python 2.") class SimpleStubsPluginTest(unittest.TestCase): servicer_methods = _ServicerMethods() diff --git a/src/python/grpcio_tests/tests_aio/interop/local_interop_test.py b/src/python/grpcio_tests/tests_aio/interop/local_interop_test.py index c8b6083ae39..0db15be3a94 100644 --- a/src/python/grpcio_tests/tests_aio/interop/local_interop_test.py +++ b/src/python/grpcio_tests/tests_aio/interop/local_interop_test.py @@ -64,7 +64,6 @@ class InteropTestCaseMixin: await methods.test_interoperability( methods.TestCase.CANCEL_AFTER_FIRST_RESPONSE, self._stub, None) - @unittest.skip('TODO(https://github.com/grpc/grpc/issues/21707)') async def 
test_timeout_on_sleeping_server(self): await methods.test_interoperability( methods.TestCase.TIMEOUT_ON_SLEEPING_SERVER, self._stub, None) diff --git a/src/python/grpcio_tests/tests_aio/interop/methods.py b/src/python/grpcio_tests/tests_aio/interop/methods.py index 7c5c1edfd2b..019b3bca894 100644 --- a/src/python/grpcio_tests/tests_aio/interop/methods.py +++ b/src/python/grpcio_tests/tests_aio/interop/methods.py @@ -15,8 +15,9 @@ import argparse import asyncio -import enum import collections +import datetime +import enum import inspect import json import os @@ -220,12 +221,15 @@ async def _cancel_after_first_response(stub: test_pb2_grpc.TestServiceStub): async def _timeout_on_sleeping_server(stub: test_pb2_grpc.TestServiceStub): request_payload_size = 27182 + time_limit = datetime.timedelta(seconds=1) - call = stub.FullDuplexCall(timeout=0.001) + call = stub.FullDuplexCall(timeout=time_limit.total_seconds()) request = messages_pb2.StreamingOutputCallRequest( response_type=messages_pb2.COMPRESSABLE, - payload=messages_pb2.Payload(body=b'\x00' * request_payload_size)) + payload=messages_pb2.Payload(body=b'\x00' * request_payload_size), + response_parameters=(messages_pb2.ResponseParameters( + interval_us=int(time_limit.total_seconds() * 2 * 10**6)),)) await call.write(request) await call.done_writing() try: diff --git a/src/python/grpcio_tests/tests_aio/tests.json b/src/python/grpcio_tests/tests_aio/tests.json index 84dbf02b937..71f8733f5f9 100644 --- a/src/python/grpcio_tests/tests_aio/tests.json +++ b/src/python/grpcio_tests/tests_aio/tests.json @@ -28,5 +28,6 @@ "unit.server_interceptor_test.TestServerInterceptor", "unit.server_test.TestServer", "unit.timeout_test.TestTimeout", + "unit.wait_for_connection_test.TestWaitForConnection", "unit.wait_for_ready_test.TestWaitForReady" ] diff --git a/src/python/grpcio_tests/tests_aio/unit/call_test.py b/src/python/grpcio_tests/tests_aio/unit/call_test.py index e23548eed71..2548e777783 100644 --- a/src/python/grpcio_tests/tests_aio/unit/call_test.py +++ b/src/python/grpcio_tests/tests_aio/unit/call_test.py @@ -16,23 +16,23 @@ import asyncio import logging import unittest +import datetime import grpc from grpc.experimental import aio from src.proto.grpc.testing import messages_pb2, test_pb2_grpc -from tests.unit.framework.common import test_constants from tests_aio.unit._test_base import AioTestBase -from tests.unit import resources - from tests_aio.unit._test_server import start_test_server +from tests_aio.unit._constants import UNREACHABLE_TARGET + +_SHORT_TIMEOUT_S = datetime.timedelta(seconds=1).total_seconds() _NUM_STREAM_RESPONSES = 5 _RESPONSE_PAYLOAD_SIZE = 42 _REQUEST_PAYLOAD_SIZE = 7 _LOCAL_CANCEL_DETAILS_EXPECTATION = 'Locally cancelled by application!' 
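With RequestIterableType now covering both Iterable and AsyncIterable, and _consume_request_iterator branching on inspect.isasyncgen, stream-request calls accept a plain list or iterator in addition to an async generator; the new test_normal_iterable_requests cases below exercise exactly that. A short sketch of the two calling styles (the helper is illustrative; the stub and messages are the test protos used throughout these tests):

from src.proto.grpc.testing import messages_pb2

async def send_requests_both_styles(stub):
    request = messages_pb2.StreamingInputCallRequest(
        payload=messages_pb2.Payload(body=b'\0' * 7))

    # New with this change: a plain iterable, consumed by a regular for-loop.
    response = await stub.StreamingInputCall([request] * 3)
    print(response.aggregated_payload_size)

    # Pre-existing style: an async generator, consumed with async-for.
    async def request_gen():
        for _ in range(3):
            yield request
    response = await stub.StreamingInputCall(request_gen())
    print(response.aggregated_payload_size)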
-_RESPONSE_INTERVAL_US = test_constants.SHORT_TIMEOUT * 1000 * 1000 -_UNREACHABLE_TARGET = '0.1:1111' +_RESPONSE_INTERVAL_US = int(_SHORT_TIMEOUT_S * 1000 * 1000) _INFINITE_INTERVAL_US = 2**31 - 1 @@ -78,7 +78,7 @@ class TestUnaryUnaryCall(_MulticallableTestMixin, AioTestBase): self.assertIs(response, response_retry) async def test_call_rpc_error(self): - async with aio.insecure_channel(_UNREACHABLE_TARGET) as channel: + async with aio.insecure_channel(UNREACHABLE_TARGET) as channel: stub = test_pb2_grpc.TestServiceStub(channel) call = stub.UnaryCall(messages_pb2.SimpleRequest()) @@ -434,24 +434,24 @@ class TestUnaryStreamCall(_MulticallableTestMixin, AioTestBase): interval_us=_RESPONSE_INTERVAL_US, )) - call = self._stub.StreamingOutputCall( - request, timeout=test_constants.SHORT_TIMEOUT * 2) + call = self._stub.StreamingOutputCall(request, + timeout=_SHORT_TIMEOUT_S * 2) response = await call.read() self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body)) # Should be around the same as the timeout remained_time = call.time_remaining() - self.assertGreater(remained_time, test_constants.SHORT_TIMEOUT * 3 / 2) - self.assertLess(remained_time, test_constants.SHORT_TIMEOUT * 5 / 2) + self.assertGreater(remained_time, _SHORT_TIMEOUT_S * 3 / 2) + self.assertLess(remained_time, _SHORT_TIMEOUT_S * 5 / 2) response = await call.read() self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body)) # Should be around the timeout minus a unit of wait time remained_time = call.time_remaining() - self.assertGreater(remained_time, test_constants.SHORT_TIMEOUT / 2) - self.assertLess(remained_time, test_constants.SHORT_TIMEOUT * 3 / 2) + self.assertGreater(remained_time, _SHORT_TIMEOUT_S / 2) + self.assertLess(remained_time, _SHORT_TIMEOUT_S * 3 / 2) self.assertEqual(grpc.StatusCode.OK, await call.code()) @@ -538,14 +538,14 @@ class TestStreamUnaryCall(_MulticallableTestMixin, AioTestBase): with self.assertRaises(asyncio.CancelledError): for _ in range(_NUM_STREAM_RESPONSES): yield request - await asyncio.sleep(test_constants.SHORT_TIMEOUT) + await asyncio.sleep(_SHORT_TIMEOUT_S) request_iterator_received_the_exception.set() call = self._stub.StreamingInputCall(request_iterator()) # Cancel the RPC after at least one response async def cancel_later(): - await asyncio.sleep(test_constants.SHORT_TIMEOUT * 2) + await asyncio.sleep(_SHORT_TIMEOUT_S * 2) call.cancel() cancel_later_task = self.loop.create_task(cancel_later()) @@ -559,6 +559,50 @@ class TestStreamUnaryCall(_MulticallableTestMixin, AioTestBase): # No failures in the cancel later task! await cancel_later_task + async def test_normal_iterable_requests(self): + # Prepares the request + payload = messages_pb2.Payload(body=b'\0' * _REQUEST_PAYLOAD_SIZE) + request = messages_pb2.StreamingInputCallRequest(payload=payload) + requests = [request] * _NUM_STREAM_RESPONSES + + # Sends out requests + call = self._stub.StreamingInputCall(requests) + + # RPC should succeed + response = await call + self.assertIsInstance(response, messages_pb2.StreamingInputCallResponse) + self.assertEqual(_NUM_STREAM_RESPONSES * _REQUEST_PAYLOAD_SIZE, + response.aggregated_payload_size) + + self.assertEqual(await call.code(), grpc.StatusCode.OK) + + async def test_call_rpc_error(self): + async with aio.insecure_channel(UNREACHABLE_TARGET) as channel: + stub = test_pb2_grpc.TestServiceStub(channel) + + # The error should be raised automatically without any traffic. 
+ call = stub.StreamingInputCall() + with self.assertRaises(aio.AioRpcError) as exception_context: + await call + + self.assertEqual(grpc.StatusCode.UNAVAILABLE, + exception_context.exception.code()) + + self.assertTrue(call.done()) + self.assertEqual(grpc.StatusCode.UNAVAILABLE, await call.code()) + + async def test_timeout(self): + call = self._stub.StreamingInputCall(timeout=_SHORT_TIMEOUT_S) + + # The error should be raised automatically without any traffic. + with self.assertRaises(aio.AioRpcError) as exception_context: + await call + + rpc_error = exception_context.exception + self.assertEqual(grpc.StatusCode.DEADLINE_EXCEEDED, rpc_error.code()) + self.assertTrue(call.done()) + self.assertEqual(grpc.StatusCode.DEADLINE_EXCEEDED, await call.code()) + # Prepares the request that stream in a ping-pong manner. _STREAM_OUTPUT_REQUEST_ONE_RESPONSE = messages_pb2.StreamingOutputCallRequest() @@ -716,14 +760,14 @@ class TestStreamStreamCall(_MulticallableTestMixin, AioTestBase): with self.assertRaises(asyncio.CancelledError): for _ in range(_NUM_STREAM_RESPONSES): yield request - await asyncio.sleep(test_constants.SHORT_TIMEOUT) + await asyncio.sleep(_SHORT_TIMEOUT_S) request_iterator_received_the_exception.set() call = self._stub.FullDuplexCall(request_iterator()) # Cancel the RPC after at least one response async def cancel_later(): - await asyncio.sleep(test_constants.SHORT_TIMEOUT * 2) + await asyncio.sleep(_SHORT_TIMEOUT_S * 2) call.cancel() cancel_later_task = self.loop.create_task(cancel_later()) @@ -738,7 +782,16 @@ class TestStreamStreamCall(_MulticallableTestMixin, AioTestBase): # No failures in the cancel later task! await cancel_later_task + async def test_normal_iterable_requests(self): + requests = [_STREAM_OUTPUT_REQUEST_ONE_RESPONSE] * _NUM_STREAM_RESPONSES + + call = self._stub.FullDuplexCall(iter(requests)) + async for response in call: + self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body)) + + self.assertEqual(await call.code(), grpc.StatusCode.OK) + if __name__ == '__main__': - logging.basicConfig() + logging.basicConfig(level=logging.DEBUG) unittest.main(verbosity=2) diff --git a/src/python/grpcio_tests/tests_aio/unit/channel_test.py b/src/python/grpcio_tests/tests_aio/unit/channel_test.py index 10949ac180c..58cd555491d 100644 --- a/src/python/grpcio_tests/tests_aio/unit/channel_test.py +++ b/src/python/grpcio_tests/tests_aio/unit/channel_test.py @@ -226,5 +226,5 @@ class TestChannel(AioTestBase): if __name__ == '__main__': - logging.basicConfig(level=logging.INFO) + logging.basicConfig(level=logging.DEBUG) unittest.main(verbosity=2) diff --git a/src/python/grpcio_tests/tests_aio/unit/client_interceptor_test.py b/src/python/grpcio_tests/tests_aio/unit/client_interceptor_test.py index 9fa08a78806..8f5a356ca4a 100644 --- a/src/python/grpcio_tests/tests_aio/unit/client_interceptor_test.py +++ b/src/python/grpcio_tests/tests_aio/unit/client_interceptor_test.py @@ -686,5 +686,5 @@ class TestInterceptedUnaryUnaryCall(AioTestBase): if __name__ == '__main__': - logging.basicConfig() + logging.basicConfig(level=logging.DEBUG) unittest.main(verbosity=2) diff --git a/src/python/grpcio_tests/tests_aio/unit/close_channel_test.py b/src/python/grpcio_tests/tests_aio/unit/close_channel_test.py index f05c74392d9..1e10074c47c 100644 --- a/src/python/grpcio_tests/tests_aio/unit/close_channel_test.py +++ b/src/python/grpcio_tests/tests_aio/unit/close_channel_test.py @@ -134,5 +134,5 @@ class TestCloseChannel(AioTestBase): if __name__ == '__main__': - 
logging.basicConfig(level=logging.INFO) + logging.basicConfig(level=logging.DEBUG) unittest.main(verbosity=2) diff --git a/src/python/grpcio_tests/tests_aio/unit/init_test.py b/src/python/grpcio_tests/tests_aio/unit/init_test.py index 9104a0368c5..2582857751d 100644 --- a/src/python/grpcio_tests/tests_aio/unit/init_test.py +++ b/src/python/grpcio_tests/tests_aio/unit/init_test.py @@ -35,7 +35,7 @@ class TestChannel(AioTestBase): channel = aio.insecure_channel(server_target) self.assertIsInstance(channel, aio.Channel) - async def tests_secure_channel(self): + async def test_secure_channel(self): server_target, _ = await start_test_server(secure=True) # pylint: disable=unused-variable credentials = grpc.ssl_channel_credentials( root_certificates=_TEST_ROOT_CERTIFICATES, @@ -48,5 +48,5 @@ class TestChannel(AioTestBase): if __name__ == '__main__': - logging.basicConfig() + logging.basicConfig(level=logging.DEBUG) unittest.main(verbosity=2) diff --git a/src/python/grpcio_tests/tests_aio/unit/metadata_test.py b/src/python/grpcio_tests/tests_aio/unit/metadata_test.py index 961ec29fceb..6551e4ca084 100644 --- a/src/python/grpcio_tests/tests_aio/unit/metadata_test.py +++ b/src/python/grpcio_tests/tests_aio/unit/metadata_test.py @@ -210,14 +210,21 @@ class TestMetadata(AioTestBase): self.assertEqual(_RESPONSE, await call) self.assertEqual(grpc.StatusCode.OK, await call.code()) + async def test_from_client_to_server_with_list(self): + multicallable = self._client.unary_unary(_TEST_CLIENT_TO_SERVER) + call = multicallable( + _REQUEST, metadata=list(_INITIAL_METADATA_FROM_CLIENT_TO_SERVER)) + self.assertEqual(_RESPONSE, await call) + self.assertEqual(grpc.StatusCode.OK, await call.code()) + @unittest.skipIf(platform.system() == 'Windows', 'https://github.com/grpc/grpc/issues/21943') async def test_invalid_metadata(self): multicallable = self._client.unary_unary(_TEST_CLIENT_TO_SERVER) for exception_type, metadata in _INVALID_METADATA_TEST_CASES: with self.subTest(metadata=metadata): - call = multicallable(_REQUEST, metadata=metadata) with self.assertRaises(exception_type): + call = multicallable(_REQUEST, metadata=metadata) await call async def test_generic_handler(self): diff --git a/src/python/grpcio_tests/tests_aio/unit/server_interceptor_test.py b/src/python/grpcio_tests/tests_aio/unit/server_interceptor_test.py index 0c443389967..dabf005591f 100644 --- a/src/python/grpcio_tests/tests_aio/unit/server_interceptor_test.py +++ b/src/python/grpcio_tests/tests_aio/unit/server_interceptor_test.py @@ -164,5 +164,5 @@ class TestServerInterceptor(AioTestBase): if __name__ == '__main__': - logging.basicConfig() + logging.basicConfig(level=logging.DEBUG) unittest.main(verbosity=2) diff --git a/src/python/grpcio_tests/tests_aio/unit/wait_for_connection_test.py b/src/python/grpcio_tests/tests_aio/unit/wait_for_connection_test.py new file mode 100644 index 00000000000..cb6f7985290 --- /dev/null +++ b/src/python/grpcio_tests/tests_aio/unit/wait_for_connection_test.py @@ -0,0 +1,159 @@ +# Copyright 2020 The gRPC Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +"""Tests behavior of the wait for connection API on client side.""" + +import asyncio +import logging +import unittest +import datetime +from typing import Callable, Tuple + +import grpc +from grpc.experimental import aio + +from tests_aio.unit._test_base import AioTestBase +from tests_aio.unit._test_server import start_test_server +from tests_aio.unit import _common +from src.proto.grpc.testing import messages_pb2, test_pb2_grpc +from tests_aio.unit._constants import UNREACHABLE_TARGET + +_REQUEST = b'\x01\x02\x03' +_TEST_METHOD = '/test/Test' + +_NUM_STREAM_RESPONSES = 5 +_REQUEST_PAYLOAD_SIZE = 7 +_RESPONSE_PAYLOAD_SIZE = 42 + + +class TestWaitForConnection(AioTestBase): + """Tests if wait_for_connection raises connectivity issue.""" + + async def setUp(self): + address, self._server = await start_test_server() + self._channel = aio.insecure_channel(address) + self._dummy_channel = aio.insecure_channel(UNREACHABLE_TARGET) + self._stub = test_pb2_grpc.TestServiceStub(self._channel) + + async def tearDown(self): + await self._dummy_channel.close() + await self._channel.close() + await self._server.stop(None) + + async def test_unary_unary_ok(self): + call = self._stub.UnaryCall(messages_pb2.SimpleRequest()) + + # No exception raised and no message swallowed. + await call.wait_for_connection() + + response = await call + self.assertIsInstance(response, messages_pb2.SimpleResponse) + + async def test_unary_stream_ok(self): + request = messages_pb2.StreamingOutputCallRequest() + for _ in range(_NUM_STREAM_RESPONSES): + request.response_parameters.append( + messages_pb2.ResponseParameters(size=_RESPONSE_PAYLOAD_SIZE)) + + call = self._stub.StreamingOutputCall(request) + + # No exception raised and no message swallowed. + await call.wait_for_connection() + + response_cnt = 0 + async for response in call: + response_cnt += 1 + self.assertIs(type(response), + messages_pb2.StreamingOutputCallResponse) + self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body)) + + self.assertEqual(_NUM_STREAM_RESPONSES, response_cnt) + self.assertEqual(await call.code(), grpc.StatusCode.OK) + + async def test_stream_unary_ok(self): + call = self._stub.StreamingInputCall() + + # No exception raised and no message swallowed. + await call.wait_for_connection() + + payload = messages_pb2.Payload(body=b'\0' * _REQUEST_PAYLOAD_SIZE) + request = messages_pb2.StreamingInputCallRequest(payload=payload) + + for _ in range(_NUM_STREAM_RESPONSES): + await call.write(request) + await call.done_writing() + + response = await call + self.assertIsInstance(response, messages_pb2.StreamingInputCallResponse) + self.assertEqual(_NUM_STREAM_RESPONSES * _REQUEST_PAYLOAD_SIZE, + response.aggregated_payload_size) + + self.assertEqual(await call.code(), grpc.StatusCode.OK) + + async def test_stream_stream_ok(self): + call = self._stub.FullDuplexCall() + + # No exception raised and no message swallowed. 
+ await call.wait_for_connection() + + request = messages_pb2.StreamingOutputCallRequest() + request.response_parameters.append( + messages_pb2.ResponseParameters(size=_RESPONSE_PAYLOAD_SIZE)) + + for _ in range(_NUM_STREAM_RESPONSES): + await call.write(request) + response = await call.read() + self.assertIsInstance(response, + messages_pb2.StreamingOutputCallResponse) + self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body)) + + await call.done_writing() + + self.assertEqual(grpc.StatusCode.OK, await call.code()) + + async def test_unary_unary_error(self): + call = self._dummy_channel.unary_unary(_TEST_METHOD)(_REQUEST) + + with self.assertRaises(aio.AioRpcError) as exception_context: + await call.wait_for_connection() + rpc_error = exception_context.exception + self.assertEqual(grpc.StatusCode.UNAVAILABLE, rpc_error.code()) + + async def test_unary_stream_error(self): + call = self._dummy_channel.unary_stream(_TEST_METHOD)(_REQUEST) + + with self.assertRaises(aio.AioRpcError) as exception_context: + await call.wait_for_connection() + rpc_error = exception_context.exception + self.assertEqual(grpc.StatusCode.UNAVAILABLE, rpc_error.code()) + + async def test_stream_unary_error(self): + call = self._dummy_channel.stream_unary(_TEST_METHOD)() + + with self.assertRaises(aio.AioRpcError) as exception_context: + await call.wait_for_connection() + rpc_error = exception_context.exception + self.assertEqual(grpc.StatusCode.UNAVAILABLE, rpc_error.code()) + + async def test_stream_stream_error(self): + call = self._dummy_channel.stream_stream(_TEST_METHOD)() + + with self.assertRaises(aio.AioRpcError) as exception_context: + await call.wait_for_connection() + rpc_error = exception_context.exception + self.assertEqual(grpc.StatusCode.UNAVAILABLE, rpc_error.code()) + + +if __name__ == '__main__': + logging.basicConfig(level=logging.DEBUG) + unittest.main(verbosity=2) diff --git a/templates/test/cpp/naming/resolver_component_tests_defs.include b/templates/test/cpp/naming/resolver_component_tests_defs.include index d38316cbe68..8be5ba21bbd 100644 --- a/templates/test/cpp/naming/resolver_component_tests_defs.include +++ b/templates/test/cpp/naming/resolver_component_tests_defs.include @@ -55,7 +55,7 @@ if cur_resolver and cur_resolver != 'ares': 'needs to use GRPC_DNS_RESOLVER=ares.')) test_runner_log('Exit 1 without running tests.') sys.exit(1) -os.environ.update({'GRPC_TRACE': 'cares_resolver'}) +os.environ.update({'GRPC_TRACE': 'cares_resolver,cares_address_sorting'}) def wait_until_dns_server_is_up(args, dns_server_subprocess, diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_dart/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_dart/Dockerfile.template index dee59335280..7836341a913 100644 --- a/templates/tools/dockerfile/interoptest/grpc_interop_dart/Dockerfile.template +++ b/templates/tools/dockerfile/interoptest/grpc_interop_dart/Dockerfile.template @@ -14,10 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. - FROM google/dart:2.3 - - # Upgrade Dart to version 2. - RUN apt-get update && apt-get upgrade -y dart + FROM google/dart:2.7 # Define the default command. 
CMD ["bash"] diff --git a/templates/tools/dockerfile/test/python_stretch_default_x64/Dockerfile.template b/templates/tools/dockerfile/test/python_stretch_default_x64/Dockerfile.template index ccb88e75302..bb4a9f29c01 100644 --- a/templates/tools/dockerfile/test/python_stretch_default_x64/Dockerfile.template +++ b/templates/tools/dockerfile/test/python_stretch_default_x64/Dockerfile.template @@ -16,7 +16,11 @@ <%include file="../../python_stretch.include"/> <%include file="../../compile_python_36.include"/> - + <%include file="../../compile_python_38.include"/> + + RUN apt-get update && apt-get install -y python3.5 python3.5-dev + RUN curl https://bootstrap.pypa.io/get-pip.py | python3.5 + RUN apt-get update && apt-get -t buster install -y python3.7 python3-all-dev RUN curl https://bootstrap.pypa.io/get-pip.py | python3.7 diff --git a/test/core/client_channel/resolvers/dns_resolver_connectivity_test.cc b/test/core/client_channel/resolvers/dns_resolver_connectivity_test.cc index 97f61f281ce..6b67aa1ccae 100644 --- a/test/core/client_channel/resolvers/dns_resolver_connectivity_test.cc +++ b/test/core/client_channel/resolvers/dns_resolver_connectivity_test.cc @@ -64,11 +64,9 @@ static grpc_ares_request* my_dns_lookup_ares_locked( const char* /*dns_server*/, const char* addr, const char* /*default_port*/, grpc_pollset_set* /*interested_parties*/, grpc_closure* on_done, std::unique_ptr* addresses, - bool /*check_grpclb*/, char** /*service_config_json*/, - int /*query_timeout_ms*/, - std::shared_ptr< - grpc_core::WorkSerializer> /* work_serializer */) { /* NOLINT - */ + std::unique_ptr* /*balancer_addresses*/, + char** /*service_config_json*/, int /*query_timeout_ms*/, + std::shared_ptr /*combiner*/) { // NOLINT gpr_mu_lock(&g_mu); GPR_ASSERT(0 == strcmp("test", addr)); grpc_error* error = GRPC_ERROR_NONE; diff --git a/test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc b/test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc index 140704ab68b..9f3f31c3392 100644 --- a/test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc +++ b/test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc @@ -42,7 +42,8 @@ static std::shared_ptr* g_work_serializer; static grpc_ares_request* (*g_default_dns_lookup_ares_locked)( const char* dns_server, const char* name, const char* default_port, grpc_pollset_set* interested_parties, grpc_closure* on_done, - std::unique_ptr* addresses, bool check_grpclb, + std::unique_ptr* addresses, + std::unique_ptr* balancer_addresses, char** service_config_json, int query_timeout_ms, std::shared_ptr work_serializer); @@ -93,12 +94,13 @@ static grpc_address_resolver_vtable test_resolver = { static grpc_ares_request* test_dns_lookup_ares_locked( const char* dns_server, const char* name, const char* default_port, grpc_pollset_set* /*interested_parties*/, grpc_closure* on_done, - std::unique_ptr* addresses, bool check_grpclb, + std::unique_ptr* addresses, + std::unique_ptr* balancer_addresses, char** service_config_json, int query_timeout_ms, std::shared_ptr work_serializer) { grpc_ares_request* result = g_default_dns_lookup_ares_locked( dns_server, name, default_port, g_iomgr_args.pollset_set, on_done, - addresses, check_grpclb, service_config_json, query_timeout_ms, + addresses, balancer_addresses, service_config_json, query_timeout_ms, std::move(work_serializer)); ++g_resolution_count; static grpc_millis last_resolution_time = 0; diff --git a/test/core/client_channel/resolvers/fake_resolver_test.cc 
b/test/core/client_channel/resolvers/fake_resolver_test.cc index e69eaa0883f..84ff252deaf 100644 --- a/test/core/client_channel/resolvers/fake_resolver_test.cc +++ b/test/core/client_channel/resolvers/fake_resolver_test.cc @@ -86,29 +86,18 @@ static grpc_core::Resolver::Result create_new_resolver_result() { static size_t test_counter = 0; const size_t num_addresses = 2; char* uri_string; - char* balancer_name; // Create address list. grpc_core::Resolver::Result result; for (size_t i = 0; i < num_addresses; ++i) { gpr_asprintf(&uri_string, "ipv4:127.0.0.1:100%" PRIuPTR, test_counter * num_addresses + i); grpc_uri* uri = grpc_uri_parse(uri_string, true); - gpr_asprintf(&balancer_name, "balancer%" PRIuPTR, - test_counter * num_addresses + i); grpc_resolved_address address; GPR_ASSERT(grpc_parse_uri(uri, &address)); grpc_core::InlinedVector args_to_add; - const bool is_balancer = num_addresses % 2; - if (is_balancer) { - args_to_add.emplace_back(grpc_channel_arg_integer_create( - const_cast(GRPC_ARG_ADDRESS_IS_BALANCER), 1)); - args_to_add.emplace_back(grpc_channel_arg_string_create( - const_cast(GRPC_ARG_ADDRESS_BALANCER_NAME), balancer_name)); - } - grpc_channel_args* args = grpc_channel_args_copy_and_add( - nullptr, args_to_add.data(), args_to_add.size()); - result.addresses.emplace_back(address.addr, address.len, args); - gpr_free(balancer_name); + result.addresses.emplace_back( + address.addr, address.len, + grpc_channel_args_copy_and_add(nullptr, nullptr, 0)); grpc_uri_destroy(uri); gpr_free(uri_string); } diff --git a/test/core/client_channel/service_config_test.cc b/test/core/client_channel/service_config_test.cc index d659fbeb51d..5cdb51341ab 100644 --- a/test/core/client_channel/service_config_test.cc +++ b/test/core/client_channel/service_config_test.cc @@ -464,7 +464,7 @@ TEST_F(ClientChannelParserTest, ValidLoadBalancingConfigXds) { "{\n" " \"loadBalancingConfig\":[\n" " { \"does_not_exist\":{} },\n" - " { \"xds_experimental\":{ \"balancerName\": \"fake:///lb\" } }\n" + " { \"eds_experimental\":{ \"clusterName\": \"foo\" } }\n" " ]\n" "}"; grpc_error* error = GRPC_ERROR_NONE; @@ -474,7 +474,7 @@ TEST_F(ClientChannelParserTest, ValidLoadBalancingConfigXds) { static_cast( svc_cfg->GetGlobalParsedConfig(0)); auto lb_config = parsed_config->parsed_lb_config(); - EXPECT_STREQ(lb_config->name(), "xds_experimental"); + EXPECT_STREQ(lb_config->name(), "eds_experimental"); } TEST_F(ClientChannelParserTest, UnknownLoadBalancingConfig) { @@ -544,14 +544,14 @@ TEST_F(ClientChannelParserTest, UnknownLoadBalancingPolicy) { } TEST_F(ClientChannelParserTest, LoadBalancingPolicyXdsNotAllowed) { - const char* test_json = "{\"loadBalancingPolicy\":\"xds_experimental\"}"; + const char* test_json = "{\"loadBalancingPolicy\":\"eds_experimental\"}"; grpc_error* error = GRPC_ERROR_NONE; auto svc_cfg = ServiceConfig::Create(test_json, &error); std::regex regex( "Service config parsing error.*referenced_errors.*" "Global Params.*referenced_errors.*" "Client channel global parser.*referenced_errors.*" - "field:loadBalancingPolicy error:xds_experimental requires " + "field:loadBalancingPolicy error:eds_experimental requires " "a config. 
Please use loadBalancingConfig instead."); VerifyRegexMatch(error, regex); } diff --git a/test/core/compression/message_compress_fuzzer.cc b/test/core/compression/message_compress_fuzzer.cc index 1ea0853d2a3..aa844b7b864 100644 --- a/test/core/compression/message_compress_fuzzer.cc +++ b/test/core/compression/message_compress_fuzzer.cc @@ -39,7 +39,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_core::testing::LeakDetector leak_detector(true); grpc_init(); - grpc_test_only_control_plane_credentials_force_init(); grpc_slice_buffer input_buffer; grpc_slice_buffer_init(&input_buffer); grpc_slice_buffer_add(&input_buffer, @@ -52,7 +51,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_slice_buffer_destroy(&input_buffer); grpc_slice_buffer_destroy(&output_buffer); - grpc_test_only_control_plane_credentials_destroy(); grpc_shutdown_blocking(); return 0; } diff --git a/test/core/compression/message_decompress_fuzzer.cc b/test/core/compression/message_decompress_fuzzer.cc index c600a740782..3f316699a79 100644 --- a/test/core/compression/message_decompress_fuzzer.cc +++ b/test/core/compression/message_decompress_fuzzer.cc @@ -39,7 +39,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_core::testing::LeakDetector leak_detector(true); grpc_init(); - grpc_test_only_control_plane_credentials_force_init(); grpc_slice_buffer input_buffer; grpc_slice_buffer_init(&input_buffer); grpc_slice_buffer_add(&input_buffer, @@ -52,7 +51,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_slice_buffer_destroy(&input_buffer); grpc_slice_buffer_destroy(&output_buffer); - grpc_test_only_control_plane_credentials_destroy(); grpc_shutdown_blocking(); return 0; } diff --git a/test/core/compression/stream_compression_fuzzer.cc b/test/core/compression/stream_compression_fuzzer.cc index c147aa5bfd6..c8316084f1b 100644 --- a/test/core/compression/stream_compression_fuzzer.cc +++ b/test/core/compression/stream_compression_fuzzer.cc @@ -31,7 +31,6 @@ bool leak_check = true; extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_core::testing::LeakDetector leak_detector(true); grpc_init(); - grpc_test_only_control_plane_credentials_force_init(); auto* context = grpc_stream_compression_context_create( GRPC_STREAM_COMPRESSION_GZIP_COMPRESS); grpc_slice_buffer input_buffer; @@ -48,7 +47,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_stream_compression_context_destroy(context); grpc_slice_buffer_destroy(&input_buffer); grpc_slice_buffer_destroy(&output_buffer); - grpc_test_only_control_plane_credentials_destroy(); grpc_shutdown_blocking(); return 0; } diff --git a/test/core/compression/stream_decompression_fuzzer.cc b/test/core/compression/stream_decompression_fuzzer.cc index e460e6db25f..78c8efd47fd 100644 --- a/test/core/compression/stream_decompression_fuzzer.cc +++ b/test/core/compression/stream_decompression_fuzzer.cc @@ -31,7 +31,6 @@ bool leak_check = true; extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_core::testing::LeakDetector leak_detector(true); grpc_init(); - grpc_test_only_control_plane_credentials_force_init(); auto* context = grpc_stream_compression_context_create( GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS); grpc_slice_buffer input_buffer; @@ -49,7 +48,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_stream_compression_context_destroy(context); 
grpc_slice_buffer_destroy(&input_buffer); grpc_slice_buffer_destroy(&output_buffer); - grpc_test_only_control_plane_credentials_destroy(); grpc_shutdown_blocking(); return 0; } diff --git a/test/core/end2end/BUILD b/test/core/end2end/BUILD index f5ef0bac994..d173b209897 100644 --- a/test/core/end2end/BUILD +++ b/test/core/end2end/BUILD @@ -190,6 +190,13 @@ grpc_end2end_nosec_tests() grpc_cc_test( name = "h2_ssl_session_reuse_test", srcs = ["h2_ssl_session_reuse_test.cc"], + data = [ + "//src/core/tsi/test_creds:ca.pem", + "//src/core/tsi/test_creds:client.key", + "//src/core/tsi/test_creds:client.pem", + "//src/core/tsi/test_creds:server1.key", + "//src/core/tsi/test_creds:server1.pem", + ], external_deps = [ "gtest", ], diff --git a/test/core/end2end/README b/test/core/end2end/README index a18172a7a1d..51cc144039d 100644 --- a/test/core/end2end/README +++ b/test/core/end2end/README @@ -3,5 +3,5 @@ forms a complete end-to-end test. To add a new test or fixture: - add the code to the relevant directory -- update gen_build_yaml.py to reflect the change +- update generate_tests.bzl to reflect the change - regenerate projects diff --git a/test/core/end2end/fixtures/h2_oauth2.cc b/test/core/end2end/fixtures/h2_oauth2.cc index 0800d5929f2..22f85a2feea 100644 --- a/test/core/end2end/fixtures/h2_oauth2.cc +++ b/test/core/end2end/fixtures/h2_oauth2.cc @@ -16,22 +16,26 @@ * */ -#include "test/core/end2end/end2end_tests.h" - -#include -#include - #include #include +#include +#include #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/gprpp/host_port.h" #include "src/core/lib/iomgr/iomgr.h" +#include "src/core/lib/iomgr/load_file.h" #include "src/core/lib/security/credentials/credentials.h" -#include "test/core/end2end/data/ssl_test_data.h" +#include "test/core/end2end/end2end_tests.h" #include "test/core/util/port.h" #include "test/core/util/test_config.h" +#define CA_CERT_PATH "src/core/tsi/test_creds/ca.pem" +#define CLIENT_CERT_PATH "src/core/tsi/test_creds/client.pem" +#define CLIENT_KEY_PATH "src/core/tsi/test_creds/client.key" +#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem" +#define SERVER_KEY_PATH "src/core/tsi/test_creds/server1.key" + static const char oauth2_md[] = "Bearer aaslkfjs424535asdf"; static const char* client_identity_property_name = "smurf_name"; static const char* client_identity = "Brainy Smurf"; @@ -139,6 +143,11 @@ void chttp2_tear_down_secure_fullstack(grpc_end2end_test_fixture* f) { static void chttp2_init_client_simple_ssl_with_oauth2_secure_fullstack( grpc_end2end_test_fixture* f, grpc_channel_args* client_args) { grpc_core::ExecCtx exec_ctx; + grpc_slice ca_slice; + GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file", + grpc_load_file(CA_CERT_PATH, 1, &ca_slice))); + const char* test_root_cert = + reinterpret_cast GRPC_SLICE_START_PTR(ca_slice); grpc_channel_credentials* ssl_creds = grpc_ssl_credentials_create(test_root_cert, nullptr, nullptr, nullptr); grpc_call_credentials* oauth2_creds = grpc_md_only_test_credentials_create( @@ -156,6 +165,7 @@ static void chttp2_init_client_simple_ssl_with_oauth2_secure_fullstack( grpc_channel_args_destroy(new_client_args); grpc_channel_credentials_release(ssl_creds); grpc_call_credentials_release(oauth2_creds); + grpc_slice_unref(ca_slice); } static int fail_server_auth_check(grpc_channel_args* server_args) { @@ -193,13 +203,23 @@ static grpc_auth_metadata_processor test_processor_create(int failing) { static void chttp2_init_server_simple_ssl_secure_fullstack( grpc_end2end_test_fixture* f, grpc_channel_args* 
server_args) { - grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {test_server1_key, - test_server1_cert}; + grpc_slice cert_slice, key_slice; + GPR_ASSERT(GRPC_LOG_IF_ERROR( + "load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice))); + GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file", + grpc_load_file(SERVER_KEY_PATH, 1, &key_slice))); + const char* server_cert = + reinterpret_cast GRPC_SLICE_START_PTR(cert_slice); + const char* server_key = + reinterpret_cast GRPC_SLICE_START_PTR(key_slice); + grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {server_key, server_cert}; grpc_server_credentials* ssl_creds = grpc_ssl_server_credentials_create( nullptr, &pem_key_cert_pair, 1, 0, nullptr); grpc_server_credentials_set_auth_metadata_processor( ssl_creds, test_processor_create(fail_server_auth_check(server_args))); chttp2_init_server_secure_fullstack(f, server_args, ssl_creds); + grpc_slice_unref(cert_slice); + grpc_slice_unref(key_slice); } /* All test configurations */ diff --git a/test/core/end2end/fixtures/h2_ssl.cc b/test/core/end2end/fixtures/h2_ssl.cc index 2ba2418151d..1e46486dd5e 100644 --- a/test/core/end2end/fixtures/h2_ssl.cc +++ b/test/core/end2end/fixtures/h2_ssl.cc @@ -16,23 +16,26 @@ * */ -#include "test/core/end2end/end2end_tests.h" - -#include -#include - #include #include +#include +#include #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/gpr/string.h" #include "src/core/lib/gpr/tmpfile.h" #include "src/core/lib/gprpp/host_port.h" +#include "src/core/lib/iomgr/load_file.h" #include "src/core/lib/security/credentials/credentials.h" #include "src/core/lib/security/security_connector/ssl_utils_config.h" -#include "test/core/end2end/data/ssl_test_data.h" +#include "test/core/end2end/end2end_tests.h" #include "test/core/util/port.h" #include "test/core/util/test_config.h" +#define CA_CERT_PATH "src/core/tsi/test_creds/ca.pem" +#define CLIENT_CERT_PATH "src/core/tsi/test_creds/client.pem" +#define CLIENT_KEY_PATH "src/core/tsi/test_creds/client.key" +#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem" +#define SERVER_KEY_PATH "src/core/tsi/test_creds/server1.key" struct fullstack_secure_fixture_data { grpc_core::UniquePtr localaddr; @@ -124,10 +127,20 @@ static int fail_server_auth_check(grpc_channel_args* server_args) { static void chttp2_init_server_simple_ssl_secure_fullstack( grpc_end2end_test_fixture* f, grpc_channel_args* server_args) { - grpc_ssl_pem_key_cert_pair pem_cert_key_pair = {test_server1_key, - test_server1_cert}; + grpc_slice cert_slice, key_slice; + GPR_ASSERT(GRPC_LOG_IF_ERROR( + "load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice))); + GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file", + grpc_load_file(SERVER_KEY_PATH, 1, &key_slice))); + const char* server_cert = + reinterpret_cast GRPC_SLICE_START_PTR(cert_slice); + const char* server_key = + reinterpret_cast GRPC_SLICE_START_PTR(key_slice); + grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {server_key, server_cert}; grpc_server_credentials* ssl_creds = grpc_ssl_server_credentials_create( - nullptr, &pem_cert_key_pair, 1, 0, nullptr); + nullptr, &pem_key_cert_pair, 1, 0, nullptr); + grpc_slice_unref(cert_slice); + grpc_slice_unref(key_slice); if (fail_server_auth_check(server_args)) { grpc_auth_metadata_processor processor = {process_auth_failure, nullptr, nullptr}; @@ -152,20 +165,9 @@ static grpc_end2end_test_config configs[] = { int main(int argc, char** argv) { size_t i; - FILE* roots_file; - size_t roots_size = strlen(test_root_cert); - char* roots_filename; - 
grpc::testing::TestEnvironment env(argc, argv); grpc_end2end_tests_pre_init(); - - /* Set the SSL roots env var. */ - roots_file = gpr_tmpfile("chttp2_simple_ssl_fullstack_test", &roots_filename); - GPR_ASSERT(roots_filename != nullptr); - GPR_ASSERT(roots_file != nullptr); - GPR_ASSERT(fwrite(test_root_cert, 1, roots_size, roots_file) == roots_size); - fclose(roots_file); - GPR_GLOBAL_CONFIG_SET(grpc_default_ssl_roots_file_path, roots_filename); + GPR_GLOBAL_CONFIG_SET(grpc_default_ssl_roots_file_path, CA_CERT_PATH); grpc_init(); @@ -174,10 +176,5 @@ int main(int argc, char** argv) { } grpc_shutdown(); - - /* Cleanup. */ - remove(roots_filename); - gpr_free(roots_filename); - return 0; } diff --git a/test/core/end2end/fuzzers/client_fuzzer.cc b/test/core/end2end/fuzzers/client_fuzzer.cc index 420479d3f28..6584103f79e 100644 --- a/test/core/end2end/fuzzers/client_fuzzer.cc +++ b/test/core/end2end/fuzzers/client_fuzzer.cc @@ -24,7 +24,6 @@ #include "src/core/ext/transport/chttp2/transport/chttp2_transport.h" #include "src/core/lib/iomgr/executor.h" -#include "src/core/lib/security/credentials/credentials.h" #include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/surface/channel.h" #include "test/core/util/mock_endpoint.h" @@ -42,7 +41,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_test_only_set_slice_hash_seed(0); if (squelch) gpr_set_log_function(dont_log); grpc_init(); - grpc_test_only_control_plane_credentials_force_init(); { grpc_core::ExecCtx exec_ctx; grpc_core::Executor::SetThreadingAll(false); @@ -158,7 +156,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_byte_buffer_destroy(response_payload_recv); } } - grpc_test_only_control_plane_credentials_destroy(); grpc_shutdown_blocking(); return 0; } diff --git a/test/core/end2end/fuzzers/server_fuzzer.cc b/test/core/end2end/fuzzers/server_fuzzer.cc index 61cbc7d45af..1fabd8ca172 100644 --- a/test/core/end2end/fuzzers/server_fuzzer.cc +++ b/test/core/end2end/fuzzers/server_fuzzer.cc @@ -20,7 +20,6 @@ #include "src/core/ext/transport/chttp2/transport/chttp2_transport.h" #include "src/core/lib/iomgr/executor.h" -#include "src/core/lib/security/credentials/credentials.h" #include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/surface/server.h" #include "test/core/util/mock_endpoint.h" @@ -39,7 +38,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_test_only_set_slice_hash_seed(0); if (squelch) gpr_set_log_function(dont_log); grpc_init(); - grpc_test_only_control_plane_credentials_force_init(); { grpc_core::ExecCtx exec_ctx; grpc_core::Executor::SetThreadingAll(false); @@ -133,7 +131,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_server_destroy(server); grpc_completion_queue_destroy(cq); } - grpc_test_only_control_plane_credentials_destroy(); grpc_shutdown(); return 0; } diff --git a/test/core/end2end/gen_build_yaml.py b/test/core/end2end/gen_build_yaml.py index d1e78cc6490..3cb31686619 100755 --- a/test/core/end2end/gen_build_yaml.py +++ b/test/core/end2end/gen_build_yaml.py @@ -11,408 +11,36 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
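A note on the fixture changes above (h2_oauth2.cc and h2_ssl.cc): instead of compiling test_root_cert and the test_server1_* constants into the binaries, the fixtures now grpc_load_file() the PEM files from src/core/tsi/test_creds at run time (hence the new data entries in test/core/end2end/BUILD) and point grpc_default_ssl_roots_file_path directly at the checked-in CA file rather than writing the roots to a temporary file. The same default-roots override is also exposed to applications through the GRPC_DEFAULT_SSL_ROOTS_FILE_PATH environment variable; the sketch below illustrates that with a placeholder path and target and is not part of this change.

import os

# Placeholder path; the variable must be set before gRPC loads its default roots.
os.environ['GRPC_DEFAULT_SSL_ROOTS_FILE_PATH'] = 'src/core/tsi/test_creds/ca.pem'

import grpc  # imported after the environment variable is in place

# No explicit root_certificates: the roots file configured above is used.
channel = grpc.secure_channel('localhost:50051', grpc.ssl_channel_credentials())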
-"""Generates the appropriate build.json data for all the end2end tests.""" - -from __future__ import print_function +"""Generates the list of end2end test cases from generate_tests.bzl""" +import os +import sys import yaml -import collections -import hashlib - -FixtureOptions = collections.namedtuple( - 'FixtureOptions', - 'fullstack includes_proxy dns_resolver name_resolution secure platforms ci_mac tracing exclude_configs exclude_iomgrs large_writes enables_compression supports_compression is_inproc is_http2 supports_proxy_auth supports_write_buffering client_channel' -) -default_unsecure_fixture_options = FixtureOptions( - True, False, True, True, False, ['windows', 'linux', 'mac', 'posix'], True, - False, [], [], True, False, True, False, True, False, True, True) -socketpair_unsecure_fixture_options = default_unsecure_fixture_options._replace( - fullstack=False, dns_resolver=False, client_channel=False) -default_secure_fixture_options = default_unsecure_fixture_options._replace( - secure=True) -uds_fixture_options = default_unsecure_fixture_options._replace( - dns_resolver=False, - platforms=['linux', 'mac', 'posix'], - exclude_iomgrs=['uv']) -local_fixture_options = default_secure_fixture_options._replace( - dns_resolver=False, - platforms=['linux', 'mac', 'posix'], - exclude_iomgrs=['uv']) -fd_unsecure_fixture_options = default_unsecure_fixture_options._replace( - dns_resolver=False, - fullstack=False, - platforms=['linux', 'mac', 'posix'], - exclude_iomgrs=['uv'], - client_channel=False) -inproc_fixture_options = default_secure_fixture_options._replace( - dns_resolver=False, - fullstack=False, - name_resolution=False, - supports_compression=False, - is_inproc=True, - is_http2=False, - supports_write_buffering=False, - client_channel=False) - -# maps fixture name to whether it requires the security library -END2END_FIXTURES = { - 'h2_compress': - default_unsecure_fixture_options._replace(enables_compression=True), - 'h2_census': - default_unsecure_fixture_options, - # This cmake target is disabled for now because it depends on OpenCensus, - # which is Bazel-only. 
- # 'h2_load_reporting': default_unsecure_fixture_options, - 'h2_fakesec': - default_secure_fixture_options._replace(ci_mac=False), - 'h2_fd': - fd_unsecure_fixture_options, - 'h2_full': - default_unsecure_fixture_options, - 'h2_full+pipe': - default_unsecure_fixture_options._replace(platforms=['linux'], - exclude_iomgrs=['uv']), - 'h2_full+trace': - default_unsecure_fixture_options._replace(tracing=True), - 'h2_full+workarounds': - default_unsecure_fixture_options, - 'h2_http_proxy': - default_unsecure_fixture_options._replace(ci_mac=False, - exclude_iomgrs=['uv'], - supports_proxy_auth=True), - 'h2_oauth2': - default_secure_fixture_options._replace(ci_mac=False, - exclude_iomgrs=['uv']), - 'h2_proxy': - default_unsecure_fixture_options._replace(includes_proxy=True, - ci_mac=False, - exclude_iomgrs=['uv']), - 'h2_sockpair_1byte': - socketpair_unsecure_fixture_options._replace(ci_mac=False, - exclude_configs=['msan'], - large_writes=False, - exclude_iomgrs=['uv']), - 'h2_sockpair': - socketpair_unsecure_fixture_options._replace(ci_mac=False, - exclude_iomgrs=['uv']), - 'h2_sockpair+trace': - socketpair_unsecure_fixture_options._replace(ci_mac=False, - tracing=True, - large_writes=False, - exclude_iomgrs=['uv']), - 'h2_ssl': - default_secure_fixture_options, - 'h2_ssl_cred_reload': - default_secure_fixture_options, - 'h2_tls': - default_secure_fixture_options, - 'h2_local_uds': - local_fixture_options, - 'h2_local_ipv4': - local_fixture_options, - 'h2_local_ipv6': - local_fixture_options, - 'h2_ssl_proxy': - default_secure_fixture_options._replace(includes_proxy=True, - ci_mac=False, - exclude_iomgrs=['uv']), - 'h2_uds': - uds_fixture_options, - 'inproc': - inproc_fixture_options -} -TestOptions = collections.namedtuple( - 'TestOptions', - 'needs_fullstack needs_dns needs_names proxyable secure traceable cpu_cost exclude_iomgrs large_writes flaky allows_compression needs_compression exclude_inproc needs_http2 needs_proxy_auth needs_write_buffering needs_client_channel' -) -default_test_options = TestOptions(False, False, False, True, False, True, 1.0, - [], False, False, True, False, False, False, - False, False, False) -connectivity_test_options = default_test_options._replace(needs_fullstack=True) +_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../../..')) +os.chdir(_ROOT) -LOWCPU = 0.1 -# maps test names to options -END2END_TESTS = { - 'authority_not_supported': - default_test_options, - 'bad_hostname': - default_test_options._replace(needs_names=True), - 'bad_ping': - connectivity_test_options._replace(proxyable=False), - 'binary_metadata': - default_test_options._replace(cpu_cost=LOWCPU), - 'resource_quota_server': - default_test_options._replace(large_writes=True, - proxyable=False, - allows_compression=False), - 'call_creds': - default_test_options._replace(secure=True), - 'cancel_after_accept': - default_test_options._replace(cpu_cost=LOWCPU), - 'cancel_after_client_done': - default_test_options._replace(cpu_cost=LOWCPU), - 'cancel_after_invoke': - default_test_options._replace(cpu_cost=LOWCPU), - 'cancel_after_round_trip': - default_test_options._replace(cpu_cost=LOWCPU), - 'cancel_before_invoke': - default_test_options._replace(cpu_cost=LOWCPU), - 'cancel_in_a_vacuum': - default_test_options._replace(cpu_cost=LOWCPU), - 'cancel_with_status': - default_test_options._replace(cpu_cost=LOWCPU), - 'compressed_payload': - default_test_options._replace(proxyable=False, needs_compression=True), - 'connectivity': - connectivity_test_options._replace(needs_names=True, - 
proxyable=False, - cpu_cost=LOWCPU, - exclude_iomgrs=['uv']), - 'channelz': - default_test_options, - 'default_host': - default_test_options._replace(needs_fullstack=True, - needs_dns=True, - needs_names=True), - 'call_host_override': - default_test_options._replace(needs_fullstack=True, - needs_dns=True, - needs_names=True), - 'disappearing_server': - connectivity_test_options._replace(flaky=True, needs_names=True), - 'empty_batch': - default_test_options._replace(cpu_cost=LOWCPU), - 'filter_causes_close': - default_test_options._replace(cpu_cost=LOWCPU), - 'filter_call_init_fails': - default_test_options, - 'filter_context': - default_test_options, - 'filter_latency': - default_test_options._replace(cpu_cost=LOWCPU), - 'filter_status_code': - default_test_options._replace(cpu_cost=LOWCPU), - 'graceful_server_shutdown': - default_test_options._replace(cpu_cost=LOWCPU, exclude_inproc=True), - 'hpack_size': - default_test_options._replace(proxyable=False, - traceable=False, - cpu_cost=LOWCPU), - 'high_initial_seqno': - default_test_options._replace(cpu_cost=LOWCPU), - 'idempotent_request': - default_test_options, - 'invoke_large_request': - default_test_options, - 'keepalive_timeout': - default_test_options._replace(proxyable=False, - cpu_cost=LOWCPU, - needs_http2=True), - 'large_metadata': - default_test_options, - 'max_concurrent_streams': - default_test_options._replace(proxyable=False, - cpu_cost=LOWCPU, - exclude_inproc=True), - 'max_connection_age': - default_test_options._replace(cpu_cost=LOWCPU, exclude_inproc=True), - 'max_connection_idle': - connectivity_test_options._replace(proxyable=False, - exclude_iomgrs=['uv'], - cpu_cost=LOWCPU), - 'max_message_length': - default_test_options._replace(cpu_cost=LOWCPU), - 'negative_deadline': - default_test_options, - 'no_error_on_hotpath': - default_test_options._replace(proxyable=False), - 'no_logging': - default_test_options._replace(traceable=False), - 'no_op': - default_test_options, - 'payload': - default_test_options, - # This cmake target is disabled for now because it depends on OpenCensus, - # which is Bazel-only. - # 'load_reporting_hook': default_test_options, - 'ping_pong_streaming': - default_test_options._replace(cpu_cost=LOWCPU), - 'ping': - connectivity_test_options._replace(proxyable=False, cpu_cost=LOWCPU), - 'proxy_auth': - default_test_options._replace(needs_proxy_auth=True), - 'registered_call': - default_test_options, - 'request_with_flags': - default_test_options._replace(proxyable=False, cpu_cost=LOWCPU), - 'request_with_payload': - default_test_options._replace(cpu_cost=LOWCPU), - # TODO(roth): Remove proxyable=False for all retry tests once we - # have a way for the proxy to propagate the fact that trailing - # metadata is available when initial metadata is returned. - # See https://github.com/grpc/grpc/issues/14467 for context. 
- 'retry': - default_test_options._replace(cpu_cost=LOWCPU, - needs_client_channel=True, - proxyable=False), - 'retry_cancellation': - default_test_options._replace(cpu_cost=LOWCPU, - needs_client_channel=True, - proxyable=False), - 'retry_disabled': - default_test_options._replace(cpu_cost=LOWCPU, - needs_client_channel=True, - proxyable=False), - 'retry_exceeds_buffer_size_in_initial_batch': - default_test_options._replace(cpu_cost=LOWCPU, - needs_client_channel=True, - proxyable=False), - 'retry_exceeds_buffer_size_in_subsequent_batch': - default_test_options._replace(cpu_cost=LOWCPU, - needs_client_channel=True, - proxyable=False), - 'retry_non_retriable_status': - default_test_options._replace(cpu_cost=LOWCPU, - needs_client_channel=True, - proxyable=False), - 'retry_non_retriable_status_before_recv_trailing_metadata_started': - default_test_options._replace(cpu_cost=LOWCPU, - needs_client_channel=True, - proxyable=False), - 'retry_recv_initial_metadata': - default_test_options._replace(cpu_cost=LOWCPU, - needs_client_channel=True, - proxyable=False), - 'retry_recv_message': - default_test_options._replace(cpu_cost=LOWCPU, - needs_client_channel=True, - proxyable=False), - 'retry_server_pushback_delay': - default_test_options._replace(cpu_cost=LOWCPU, - needs_client_channel=True, - proxyable=False), - 'retry_server_pushback_disabled': - default_test_options._replace(cpu_cost=LOWCPU, - needs_client_channel=True, - proxyable=False), - 'retry_streaming': - default_test_options._replace(cpu_cost=LOWCPU, - needs_client_channel=True, - proxyable=False), - 'retry_streaming_after_commit': - default_test_options._replace(cpu_cost=LOWCPU, - needs_client_channel=True, - proxyable=False), - 'retry_streaming_succeeds_before_replay_finished': - default_test_options._replace(cpu_cost=LOWCPU, - needs_client_channel=True, - proxyable=False), - 'retry_throttled': - default_test_options._replace(cpu_cost=LOWCPU, - needs_client_channel=True, - proxyable=False), - 'retry_too_many_attempts': - default_test_options._replace(cpu_cost=LOWCPU, - needs_client_channel=True, - proxyable=False), - 'server_finishes_request': - default_test_options._replace(cpu_cost=LOWCPU), - 'shutdown_finishes_calls': - default_test_options._replace(cpu_cost=LOWCPU), - 'shutdown_finishes_tags': - default_test_options._replace(cpu_cost=LOWCPU), - 'simple_cacheable_request': - default_test_options._replace(cpu_cost=LOWCPU), - 'stream_compression_compressed_payload': - default_test_options._replace(proxyable=False, exclude_inproc=True), - 'stream_compression_payload': - default_test_options._replace(exclude_inproc=True), - 'stream_compression_ping_pong_streaming': - default_test_options._replace(exclude_inproc=True), - 'simple_delayed_request': - connectivity_test_options, - 'simple_metadata': - default_test_options, - 'simple_request': - default_test_options, - 'streaming_error_response': - default_test_options._replace(cpu_cost=LOWCPU), - 'trailing_metadata': - default_test_options, - 'workaround_cronet_compression': - default_test_options, - 'write_buffering': - default_test_options._replace(cpu_cost=LOWCPU, - needs_write_buffering=True), - 'write_buffering_at_end': - default_test_options._replace(cpu_cost=LOWCPU, - needs_write_buffering=True), -} +def load(*args): + """Replacement of bazel's load() function""" + pass -def compatible(f, t): - if END2END_TESTS[t].needs_fullstack: - if not END2END_FIXTURES[f].fullstack: - return False - if END2END_TESTS[t].needs_dns: - if not END2END_FIXTURES[f].dns_resolver: - return False - if 
END2END_TESTS[t].needs_names: - if not END2END_FIXTURES[f].name_resolution: - return False - if not END2END_TESTS[t].proxyable: - if END2END_FIXTURES[f].includes_proxy: - return False - if not END2END_TESTS[t].traceable: - if END2END_FIXTURES[f].tracing: - return False - if END2END_TESTS[t].large_writes: - if not END2END_FIXTURES[f].large_writes: - return False - if not END2END_TESTS[t].allows_compression: - if END2END_FIXTURES[f].enables_compression: - return False - if END2END_TESTS[t].needs_compression: - if not END2END_FIXTURES[f].supports_compression: - return False - if END2END_TESTS[t].exclude_inproc: - if END2END_FIXTURES[f].is_inproc: - return False - if END2END_TESTS[t].needs_http2: - if not END2END_FIXTURES[f].is_http2: - return False - if END2END_TESTS[t].needs_proxy_auth: - if not END2END_FIXTURES[f].supports_proxy_auth: - return False - if END2END_TESTS[t].needs_write_buffering: - if not END2END_FIXTURES[f].supports_write_buffering: - return False - if END2END_TESTS[t].needs_client_channel: - if not END2END_FIXTURES[f].client_channel: - return False - return True +def struct(**kwargs): + return kwargs # all the args as a dict -def without(l, e): - l = l[:] - l.remove(e) - return l +# generate_tests.bzl is now the source of truth for end2end tests. +# The .bzl file is basically a python file and we can "execute" it +# to get access to the variables it defines. +execfile('test/core/end2end/generate_tests.bzl') -# Originally, this method was used to generate end2end test cases for build.yaml, -# but since the test cases are now extracted from bazel BUILD file, -# this is not used for generating run_tests.py test cases anymore. -# Nevertheless, subset of the output is still used by end2end_tests.cc.template -# and end2end_nosec_tests.cc.template -# TODO(jtattermusch): cleanup this file, so that it only generates the data we need. -# Right now there's some duplication between generate_tests.bzl and this file. 
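The rewritten gen_build_yaml.py works by treating generate_tests.bzl as ordinary Python: load() becomes a no-op, struct() degrades to a plain dict, and the .bzl file is executed so that END2END_TESTS is visible to main(). A minimal sketch of that pattern follows; the execfile() call is what the script itself uses (Python 2), while the exec/compile form is shown only as an assumed Python 3 equivalent and is not part of the change.

BZL_PATH = 'test/core/end2end/generate_tests.bzl'  # resolved after chdir to the repo root


def load(*args):
    """No-op stand-in for Starlark's load()."""
    pass


def struct(**kwargs):
    """Starlark struct() approximated as a dict of its keyword arguments."""
    return kwargs


# Python 2, as in the script itself:
#   execfile(BZL_PATH)
# Assumed Python 3 equivalent:
with open(BZL_PATH) as f:
    exec(compile(f.read(), BZL_PATH, 'exec'))

# The .bzl file's module-level names are now globals here, e.g.:
core_end2end_tests = {t: opts['secure'] for t, opts in END2END_TESTS.items()}

Only this test-name-to-'secure' mapping is consumed, the subset still needed by end2end_tests.cc.template and end2end_nosec_tests.cc.template; the remaining overlap with generate_tests.bzl is the duplication the TODO above refers to.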
def main(): json = { # needed by end2end_tests.cc.template and end2end_nosec_tests.cc.template 'core_end2end_tests': - dict((t, END2END_TESTS[t].secure) for t in END2END_TESTS.keys()) + dict((t, END2END_TESTS[t]['secure']) for t in END2END_TESTS.keys()) } print(yaml.dump(json)) diff --git a/test/core/end2end/generate_tests.bzl b/test/core/end2end/generate_tests.bzl index 28069573b28..3c1817ff82b 100755 --- a/test/core/end2end/generate_tests.bzl +++ b/test/core/end2end/generate_tests.bzl @@ -183,7 +183,8 @@ def _test_options( needs_http2 = False, needs_proxy_auth = False, needs_write_buffering = False, - needs_client_channel = False): + needs_client_channel = False, + short_name = None): return struct( needs_fullstack = needs_fullstack, needs_dns = needs_dns, @@ -196,6 +197,7 @@ def _test_options( needs_proxy_auth = needs_proxy_auth, needs_write_buffering = needs_write_buffering, needs_client_channel = needs_client_channel, + short_name = short_name, ) # maps test names to options @@ -280,16 +282,28 @@ END2END_TESTS = { "retry_exceeds_buffer_size_in_initial_batch": _test_options( needs_client_channel = True, proxyable = False, + # TODO(jtattermusch): too long bazel test name makes the test flaky on Windows RBE + # See b/151617965 + short_name = "retry_exceeds_buffer_size_in_init", ), "retry_exceeds_buffer_size_in_subsequent_batch": _test_options( needs_client_channel = True, proxyable = False, + # TODO(jtattermusch): too long bazel test name makes the test flaky on Windows RBE + # See b/151617965 + short_name = "retry_exceeds_buffer_size_in_subseq", ), "retry_non_retriable_status": _test_options( needs_client_channel = True, proxyable = False, ), - "retry_non_retriable_status_before_recv_trailing_metadata_started": _test_options(needs_client_channel = True, proxyable = False), + "retry_non_retriable_status_before_recv_trailing_metadata_started": _test_options( + needs_client_channel = True, + proxyable = False, + # TODO(jtattermusch): too long bazel test name makes the test flaky on Windows RBE + # See b/151617965 + short_name = "retry_non_retriable_status2", + ), "retry_recv_initial_metadata": _test_options( needs_client_channel = True, proxyable = False, @@ -314,6 +328,9 @@ END2END_TESTS = { "retry_streaming_succeeds_before_replay_finished": _test_options( needs_client_channel = True, proxyable = False, + # TODO(jtattermusch): too long bazel test name makes the test flaky on Windows RBE + # See b/151617965 + short_name = "retry_streaming2", ), "retry_throttled": _test_options( needs_client_channel = True, @@ -415,6 +432,13 @@ def grpc_end2end_tests(): name = "%s_test" % f, srcs = ["fixtures/%s.cc" % f], language = "C++", + data = [ + "//src/core/tsi/test_creds:ca.pem", + "//src/core/tsi/test_creds:client.key", + "//src/core/tsi/test_creds:client.pem", + "//src/core/tsi/test_creds:server1.key", + "//src/core/tsi/test_creds:server1.pem", + ], deps = [ ":end2end_tests", "//test/core/util:grpc_test_util", @@ -429,8 +453,9 @@ def grpc_end2end_tests(): if not _compatible(fopt, topt): continue + test_short_name = str(t) if not topt.short_name else topt.short_name native.sh_test( - name = "%s_test@%s" % (f, t), + name = "%s_test@%s" % (f, test_short_name), data = [":%s_test" % f], srcs = ["end2end_test.sh"], args = [ @@ -443,7 +468,7 @@ def grpc_end2end_tests(): for poller in POLLERS: native.sh_test( - name = "%s_test@%s@poller=%s" % (f, t, poller), + name = "%s_test@%s@poller=%s" % (f, test_short_name, poller), data = [":%s_test" % f], srcs = ["end2end_test.sh"], args = [ @@ -484,6 +509,13 @@ 
def grpc_end2end_nosec_tests(): name = "%s_nosec_test" % f, srcs = ["fixtures/%s.cc" % f], language = "C++", + data = [ + "//src/core/tsi/test_creds:ca.pem", + "//src/core/tsi/test_creds:client.key", + "//src/core/tsi/test_creds:client.pem", + "//src/core/tsi/test_creds:server1.key", + "//src/core/tsi/test_creds:server1.pem", + ], deps = [ ":end2end_nosec_tests", "//test/core/util:grpc_test_util_unsecure", @@ -499,8 +531,9 @@ def grpc_end2end_nosec_tests(): if topt.secure: continue + test_short_name = str(t) if not topt.short_name else topt.short_name native.sh_test( - name = "%s_nosec_test@%s" % (f, t), + name = "%s_nosec_test@%s" % (f, test_short_name), data = [":%s_nosec_test" % f], srcs = ["end2end_test.sh"], args = [ @@ -513,7 +546,7 @@ def grpc_end2end_nosec_tests(): for poller in POLLERS: native.sh_test( - name = "%s_nosec_test@%s@poller=%s" % (f, t, poller), + name = "%s_nosec_test@%s@poller=%s" % (f, test_short_name, poller), data = [":%s_nosec_test" % f], srcs = ["end2end_test.sh"], args = [ diff --git a/test/core/end2end/goaway_server_test.cc b/test/core/end2end/goaway_server_test.cc index 26a7df87f19..1e32cad8dc6 100644 --- a/test/core/end2end/goaway_server_test.cc +++ b/test/core/end2end/goaway_server_test.cc @@ -47,7 +47,8 @@ static int g_resolve_port = -1; static grpc_ares_request* (*iomgr_dns_lookup_ares_locked)( const char* dns_server, const char* addr, const char* default_port, grpc_pollset_set* interested_parties, grpc_closure* on_done, - std::unique_ptr* addresses, bool check_grpclb, + std::unique_ptr* addresses, + std::unique_ptr* balancer_addresses, char** service_config_json, int query_timeout_ms, std::shared_ptr combiner); @@ -104,14 +105,15 @@ static grpc_address_resolver_vtable test_resolver = { static grpc_ares_request* my_dns_lookup_ares_locked( const char* dns_server, const char* addr, const char* default_port, grpc_pollset_set* interested_parties, grpc_closure* on_done, - std::unique_ptr* addresses, bool check_grpclb, + std::unique_ptr* addresses, + std::unique_ptr* balancer_addresses, char** service_config_json, int query_timeout_ms, - std::shared_ptr combiner) { + std::shared_ptr work_serializer) { if (0 != strcmp(addr, "test")) { - return iomgr_dns_lookup_ares_locked(dns_server, addr, default_port, - interested_parties, on_done, addresses, - check_grpclb, service_config_json, - query_timeout_ms, std::move(combiner)); + return iomgr_dns_lookup_ares_locked( + dns_server, addr, default_port, interested_parties, on_done, addresses, + balancer_addresses, service_config_json, query_timeout_ms, + std::move(work_serializer)); } grpc_error* error = GRPC_ERROR_NONE; diff --git a/test/core/end2end/h2_ssl_session_reuse_test.cc b/test/core/end2end/h2_ssl_session_reuse_test.cc index ed450aebf1b..2683f3df175 100644 --- a/test/core/end2end/h2_ssl_session_reuse_test.cc +++ b/test/core/end2end/h2_ssl_session_reuse_test.cc @@ -16,26 +16,29 @@ * */ -#include "test/core/end2end/end2end_tests.h" - -#include -#include - #include #include +#include +#include +#include #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/gpr/string.h" #include "src/core/lib/gpr/tmpfile.h" #include "src/core/lib/gprpp/host_port.h" +#include "src/core/lib/iomgr/load_file.h" #include "src/core/lib/security/credentials/credentials.h" #include "src/core/lib/security/security_connector/ssl_utils_config.h" #include "test/core/end2end/cq_verifier.h" -#include "test/core/end2end/data/ssl_test_data.h" +#include "test/core/end2end/end2end_tests.h" #include "test/core/util/port.h" #include 
"test/core/util/test_config.h" -#include +#define CA_CERT_PATH "src/core/tsi/test_creds/ca.pem" +#define CLIENT_CERT_PATH "src/core/tsi/test_creds/client.pem" +#define CLIENT_KEY_PATH "src/core/tsi/test_creds/client.key" +#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem" +#define SERVER_KEY_PATH "src/core/tsi/test_creds/server1.key" namespace grpc { namespace testing { @@ -46,10 +49,22 @@ void* tag(intptr_t t) { return (void*)t; } gpr_timespec five_seconds_time() { return grpc_timeout_seconds_to_deadline(5); } grpc_server* server_create(grpc_completion_queue* cq, char* server_addr) { - grpc_ssl_pem_key_cert_pair pem_cert_key_pair = {test_server1_key, - test_server1_cert}; + grpc_slice ca_slice, cert_slice, key_slice; + GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file", + grpc_load_file(CA_CERT_PATH, 1, &ca_slice))); + GPR_ASSERT(GRPC_LOG_IF_ERROR( + "load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice))); + GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file", + grpc_load_file(SERVER_KEY_PATH, 1, &key_slice))); + const char* ca_cert = + reinterpret_cast GRPC_SLICE_START_PTR(ca_slice); + const char* server_cert = + reinterpret_cast GRPC_SLICE_START_PTR(cert_slice); + const char* server_key = + reinterpret_cast GRPC_SLICE_START_PTR(key_slice); + grpc_ssl_pem_key_cert_pair pem_cert_key_pair = {server_key, server_cert}; grpc_server_credentials* server_creds = grpc_ssl_server_credentials_create_ex( - test_root_cert, &pem_cert_key_pair, 1, + ca_cert, &pem_cert_key_pair, 1, GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY, nullptr); grpc_server* server = grpc_server_create(nullptr, nullptr); @@ -59,14 +74,30 @@ grpc_server* server_create(grpc_completion_queue* cq, char* server_addr) { grpc_server_credentials_release(server_creds); grpc_server_start(server); + grpc_slice_unref(cert_slice); + grpc_slice_unref(key_slice); + grpc_slice_unref(ca_slice); return server; } grpc_channel* client_create(char* server_addr, grpc_ssl_session_cache* cache) { - grpc_ssl_pem_key_cert_pair signed_client_key_cert_pair = { - test_signed_client_key, test_signed_client_cert}; + grpc_slice ca_slice, cert_slice, key_slice; + GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file", + grpc_load_file(CA_CERT_PATH, 1, &ca_slice))); + GPR_ASSERT(GRPC_LOG_IF_ERROR( + "load_file", grpc_load_file(CLIENT_CERT_PATH, 1, &cert_slice))); + GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file", + grpc_load_file(CLIENT_KEY_PATH, 1, &key_slice))); + const char* ca_cert = + reinterpret_cast GRPC_SLICE_START_PTR(ca_slice); + const char* client_cert = + reinterpret_cast GRPC_SLICE_START_PTR(cert_slice); + const char* client_key = + reinterpret_cast GRPC_SLICE_START_PTR(key_slice); + grpc_ssl_pem_key_cert_pair signed_client_key_cert_pair = {client_key, + client_cert}; grpc_channel_credentials* client_creds = grpc_ssl_credentials_create( - test_root_cert, &signed_client_key_cert_pair, nullptr, nullptr); + ca_cert, &signed_client_key_cert_pair, nullptr, nullptr); grpc_arg args[] = { grpc_channel_arg_string_create( @@ -88,6 +119,9 @@ grpc_channel* client_create(char* server_addr, grpc_ssl_session_cache* cache) { grpc_channel_args_destroy(client_args); } + grpc_slice_unref(cert_slice); + grpc_slice_unref(key_slice); + grpc_slice_unref(ca_slice); return client; } @@ -253,27 +287,13 @@ TEST(H2SessionReuseTest, SingleReuse) { } // namespace grpc int main(int argc, char** argv) { - FILE* roots_file; - size_t roots_size = strlen(test_root_cert); - char* roots_filename; - grpc::testing::TestEnvironment env(argc, argv); - /* Set the SSL roots env var. 
*/ - roots_file = gpr_tmpfile("chttp2_ssl_session_reuse_test", &roots_filename); - GPR_ASSERT(roots_filename != nullptr); - GPR_ASSERT(roots_file != nullptr); - GPR_ASSERT(fwrite(test_root_cert, 1, roots_size, roots_file) == roots_size); - fclose(roots_file); - GPR_GLOBAL_CONFIG_SET(grpc_default_ssl_roots_file_path, roots_filename); + GPR_GLOBAL_CONFIG_SET(grpc_default_ssl_roots_file_path, CA_CERT_PATH); grpc_init(); ::testing::InitGoogleTest(&argc, argv); int ret = RUN_ALL_TESTS(); grpc_shutdown(); - /* Cleanup. */ - remove(roots_filename); - gpr_free(roots_filename); - return ret; } diff --git a/test/core/security/BUILD b/test/core/security/BUILD index d13e51def7e..d4ffb2b3a32 100644 --- a/test/core/security/BUILD +++ b/test/core/security/BUILD @@ -79,19 +79,6 @@ grpc_cc_test( ], ) -grpc_cc_test( - name = "control_plane_credentials_test", - srcs = ["control_plane_credentials_test.cc"], - language = "C++", - deps = [ - "//:gpr", - "//:grpc", - "//test/core/end2end:cq_verifier", - "//test/core/end2end:ssl_test_data", - "//test/core/util:grpc_test_util", - ], -) - grpc_cc_test( name = "json_token_test", srcs = ["json_token_test.cc"], diff --git a/test/core/security/alts_credentials_fuzzer.cc b/test/core/security/alts_credentials_fuzzer.cc index c772495f63b..42e683f64b2 100644 --- a/test/core/security/alts_credentials_fuzzer.cc +++ b/test/core/security/alts_credentials_fuzzer.cc @@ -29,7 +29,6 @@ #include "src/core/lib/security/credentials/alts/alts_credentials.h" #include "src/core/lib/security/credentials/alts/check_gcp_environment.h" #include "src/core/lib/security/credentials/alts/grpc_alts_credentials_options.h" -#include "src/core/lib/security/credentials/credentials.h" using grpc_core::testing::grpc_fuzzer_get_next_byte; using grpc_core::testing::grpc_fuzzer_get_next_string; @@ -68,7 +67,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { gpr_free(grpc_trace_fuzzer); input_stream inp = {data, data + size}; grpc_init(); - grpc_test_only_control_plane_credentials_force_init(); bool is_on_gcp = grpc_alts_is_running_on_gcp(); while (inp.cur != inp.end) { bool enable_untrusted_alts = grpc_fuzzer_get_next_byte(&inp) & 0x01; @@ -107,7 +105,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { } gpr_free(handshaker_service_url); } - grpc_test_only_control_plane_credentials_destroy(); grpc_shutdown(); return 0; } diff --git a/test/core/security/control_plane_credentials_test.cc b/test/core/security/control_plane_credentials_test.cc deleted file mode 100644 index d7c401d9ad1..00000000000 --- a/test/core/security/control_plane_credentials_test.cc +++ /dev/null @@ -1,458 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include - -#include "src/core/lib/security/credentials/credentials.h" - -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "src/core/lib/gprpp/host_port.h" -#include "src/core/lib/iomgr/error.h" -#include "src/core/lib/security/credentials/composite/composite_credentials.h" -#include "src/core/lib/slice/slice_string_helpers.h" - -#include "test/core/util/port.h" -#include "test/core/util/test_config.h" - -#include "test/core/end2end/cq_verifier.h" -#include "test/core/end2end/data/ssl_test_data.h" - -namespace { - -grpc_completion_queue* g_cq; -grpc_server* g_server; -int g_port; - -void drain_cq(grpc_completion_queue* cq) { - grpc_event ev; - do { - ev = grpc_completion_queue_next( - cq, grpc_timeout_milliseconds_to_deadline(5000), nullptr); - } while (ev.type != GRPC_QUEUE_SHUTDOWN); -} - -void* tag(int i) { return (void*)static_cast(i); } - -grpc_channel_credentials* create_test_ssl_plus_token_channel_creds( - const char* token) { - grpc_channel_credentials* channel_creds = - grpc_ssl_credentials_create(test_root_cert, nullptr, nullptr, nullptr); - grpc_call_credentials* call_creds = - grpc_access_token_credentials_create(token, nullptr); - grpc_channel_credentials* composite_creds = - grpc_composite_channel_credentials_create(channel_creds, call_creds, - nullptr); - grpc_channel_credentials_release(channel_creds); - grpc_call_credentials_release(call_creds); - return composite_creds; -} - -grpc_server_credentials* create_test_ssl_server_creds() { - grpc_ssl_pem_key_cert_pair pem_cert_key_pair = {test_server1_key, - test_server1_cert}; - return grpc_ssl_server_credentials_create_ex( - test_root_cert, &pem_cert_key_pair, 1, - GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE, nullptr); -} - -// Perform a simple RPC and capture the ASCII value of the -// authorization metadata sent to the server, if any. Return -// nullptr if no authorization metadata was sent to the server. 
-grpc_core::UniquePtr perform_call_and_get_authorization_header( - grpc_channel_credentials* channel_creds) { - // Create a new channel and call - grpc_core::UniquePtr server_addr = nullptr; - grpc_core::JoinHostPort(&server_addr, "localhost", g_port); - grpc_arg ssl_name_override = { - GRPC_ARG_STRING, - const_cast(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG), - {const_cast("foo.test.google.fr")}}; - grpc_channel_args* channel_args = - grpc_channel_args_copy_and_add(nullptr, &ssl_name_override, 1); - grpc_channel* channel = grpc_secure_channel_create( - channel_creds, server_addr.get(), channel_args, nullptr); - grpc_channel_args_destroy(channel_args); - grpc_call* c; - grpc_call* s; - cq_verifier* cqv = cq_verifier_create(g_cq); - grpc_op ops[6]; - grpc_op* op; - grpc_metadata_array initial_metadata_recv; - grpc_metadata_array trailing_metadata_recv; - grpc_metadata_array request_metadata_recv; - grpc_call_details call_details; - grpc_status_code status; - grpc_call_error error; - grpc_slice details; - gpr_timespec deadline = grpc_timeout_seconds_to_deadline(5); - grpc_slice request_payload_slice = grpc_slice_from_copied_string("request"); - grpc_byte_buffer* request_payload = - grpc_raw_byte_buffer_create(&request_payload_slice, 1); - grpc_slice response_payload_slice = grpc_slice_from_copied_string("response"); - grpc_byte_buffer* response_payload = - grpc_raw_byte_buffer_create(&response_payload_slice, 1); - grpc_byte_buffer* request_payload_recv = nullptr; - grpc_byte_buffer* response_payload_recv = nullptr; - // Start a call - c = grpc_channel_create_call(channel, nullptr, GRPC_PROPAGATE_DEFAULTS, g_cq, - grpc_slice_from_static_string("/foo"), nullptr, - deadline, nullptr); - GPR_ASSERT(c); - grpc_metadata_array_init(&initial_metadata_recv); - grpc_metadata_array_init(&trailing_metadata_recv); - grpc_metadata_array_init(&request_metadata_recv); - grpc_call_details_init(&call_details); - memset(ops, 0, sizeof(ops)); - op = ops; - op->op = GRPC_OP_SEND_INITIAL_METADATA; - op->data.send_initial_metadata.count = 0; - op->flags = 0; - op->reserved = nullptr; - op++; - op->op = GRPC_OP_SEND_MESSAGE; - op->data.send_message.send_message = request_payload; - op->flags = 0; - op->reserved = nullptr; - op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; - op->flags = 0; - op->reserved = nullptr; - op++; - op->op = GRPC_OP_RECV_INITIAL_METADATA; - op->data.recv_initial_metadata.recv_initial_metadata = &initial_metadata_recv; - op->flags = 0; - op->reserved = nullptr; - op++; - op->op = GRPC_OP_RECV_MESSAGE; - op->data.recv_message.recv_message = &response_payload_recv; - op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; - op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; - op->data.recv_status_on_client.status = &status; - op->data.recv_status_on_client.status_details = &details; - op->flags = 0; - op->reserved = nullptr; - op++; - error = grpc_call_start_batch(c, ops, static_cast(op - ops), tag(1), - nullptr); - GPR_ASSERT(GRPC_CALL_OK == error); - // Request a call on the server - error = - grpc_server_request_call(g_server, &s, &call_details, - &request_metadata_recv, g_cq, g_cq, tag(101)); - GPR_ASSERT(GRPC_CALL_OK == error); - CQ_EXPECT_COMPLETION(cqv, tag(101), 1); - cq_verify(cqv); - memset(ops, 0, sizeof(ops)); - op = ops; - op->op = GRPC_OP_SEND_INITIAL_METADATA; - op->data.send_initial_metadata.count = 0; - op->flags = 0; - op->reserved = nullptr; - op++; - op->op = GRPC_OP_SEND_MESSAGE; - op->data.send_message.send_message = response_payload; - op->flags = 0; - op->op = 
GRPC_OP_SEND_STATUS_FROM_SERVER; - op->data.send_status_from_server.trailing_metadata_count = 0; - op->data.send_status_from_server.status = GRPC_STATUS_OK; - op->flags = 0; - op->reserved = nullptr; - op++; - op->op = GRPC_OP_RECV_MESSAGE; - op->data.recv_message.recv_message = &request_payload_recv; - op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; - op->flags = 0; - op->reserved = nullptr; - op++; - error = grpc_call_start_batch(s, ops, static_cast(op - ops), tag(102), - nullptr); - GPR_ASSERT(GRPC_CALL_OK == error); - CQ_EXPECT_COMPLETION(cqv, tag(102), 1); - CQ_EXPECT_COMPLETION(cqv, tag(1), 1); - cq_verify(cqv); - GPR_ASSERT(status == GRPC_STATUS_OK); - // Extract the ascii value of the authorization header, if present - grpc_core::UniquePtr authorization_header_val; - gpr_log(GPR_DEBUG, "RPC done. Now examine received metadata on server..."); - for (size_t i = 0; i < request_metadata_recv.count; i++) { - char* cur_key = - grpc_dump_slice(request_metadata_recv.metadata[i].key, GPR_DUMP_ASCII); - char* cur_val = grpc_dump_slice(request_metadata_recv.metadata[i].value, - GPR_DUMP_ASCII); - gpr_log(GPR_DEBUG, "key[%" PRIdPTR "]=%s val[%" PRIdPTR "]=%s", i, cur_key, - i, cur_val); - if (gpr_stricmp(cur_key, "authorization") == 0) { - // This test is broken if we found multiple authorization headers. - GPR_ASSERT(authorization_header_val == nullptr); - authorization_header_val.reset(gpr_strdup(cur_val)); - gpr_log(GPR_DEBUG, "Found authorization header: %s", - authorization_header_val.get()); - } - gpr_free(cur_key); - gpr_free(cur_val); - } - // cleanup - grpc_slice_unref(details); - grpc_metadata_array_destroy(&initial_metadata_recv); - grpc_metadata_array_destroy(&trailing_metadata_recv); - grpc_metadata_array_destroy(&request_metadata_recv); - grpc_call_details_destroy(&call_details); - grpc_byte_buffer_destroy(request_payload); - grpc_byte_buffer_destroy(response_payload); - grpc_byte_buffer_destroy(request_payload_recv); - grpc_byte_buffer_destroy(response_payload_recv); - grpc_call_unref(c); - grpc_call_unref(s); - cq_verifier_destroy(cqv); - grpc_channel_destroy(channel); - return authorization_header_val; -} - -void test_attach_and_get() { - grpc_channel_credentials* main_creds = - create_test_ssl_plus_token_channel_creds("main-auth-header"); - grpc_channel_credentials* foo_creds = - create_test_ssl_plus_token_channel_creds("foo-auth-header"); - grpc_channel_credentials* bar_creds = - create_test_ssl_plus_token_channel_creds("bar-auth-header"); - auto foo_key = grpc_core::UniquePtr(gpr_strdup("foo")); - GPR_ASSERT(grpc_channel_credentials_attach_credentials( - main_creds, foo_key.get(), foo_creds) == true); - auto bar_key = grpc_core::UniquePtr(gpr_strdup("bar")); - GPR_ASSERT(grpc_channel_credentials_attach_credentials( - main_creds, bar_key.get(), bar_creds) == true); - GPR_ASSERT(grpc_channel_credentials_attach_credentials(main_creds, "foo", - foo_creds) == false); - GPR_ASSERT(grpc_channel_credentials_attach_credentials(main_creds, "bar", - bar_creds) == false); - grpc_channel_credentials_release(foo_creds); - grpc_channel_credentials_release(bar_creds); - { - // Creds that send auth header with value "foo-auth-header" are attached on - // main creds under key "foo" - auto foo_auth_header = perform_call_and_get_authorization_header( - main_creds->get_control_plane_credentials("foo").get()); - GPR_ASSERT(foo_auth_header != nullptr && - gpr_stricmp(foo_auth_header.get(), "Bearer foo-auth-header") == - 0); - } - { - // Creds that send auth header with value "bar-auth-header" are attached 
on - // main creds under key "bar" - auto bar_auth_header = perform_call_and_get_authorization_header( - main_creds->get_control_plane_credentials("bar").get()); - GPR_ASSERT(bar_auth_header != nullptr && - gpr_stricmp(bar_auth_header.get(), "Bearer bar-auth-header") == - 0); - } - { - // Sanity check that the main creds themselves send an authorization header - // with value "main". - auto main_auth_header = - perform_call_and_get_authorization_header(main_creds); - GPR_ASSERT(main_auth_header != nullptr && - gpr_stricmp(main_auth_header.get(), "Bearer main-auth-header") == - 0); - } - { - // If a key isn't mapped in the per channel or global registries, then the - // credentials should be returned but with their per-call creds stripped. - // The end effect is that we shouldn't see any authorization metadata - // sent from client to server. - auto unmapped_auth_header = perform_call_and_get_authorization_header( - main_creds->get_control_plane_credentials("unmapped").get()); - GPR_ASSERT(unmapped_auth_header == nullptr); - } - grpc_channel_credentials_release(main_creds); -} - -void test_registering_same_creds_under_different_keys() { - grpc_channel_credentials* main_creds = - create_test_ssl_plus_token_channel_creds("main-auth-header"); - grpc_channel_credentials* foo_creds = - create_test_ssl_plus_token_channel_creds("foo-auth-header"); - auto foo_key = grpc_core::UniquePtr(gpr_strdup("foo")); - GPR_ASSERT(grpc_channel_credentials_attach_credentials( - main_creds, foo_key.get(), foo_creds) == true); - auto foo2_key = grpc_core::UniquePtr(gpr_strdup("foo2")); - GPR_ASSERT(grpc_channel_credentials_attach_credentials( - main_creds, foo2_key.get(), foo_creds) == true); - GPR_ASSERT(grpc_channel_credentials_attach_credentials(main_creds, "foo", - foo_creds) == false); - GPR_ASSERT(grpc_channel_credentials_attach_credentials(main_creds, "foo2", - foo_creds) == false); - grpc_channel_credentials_release(foo_creds); - { - // Access foo creds via foo - auto foo_auth_header = perform_call_and_get_authorization_header( - main_creds->get_control_plane_credentials("foo").get()); - GPR_ASSERT(foo_auth_header != nullptr && - gpr_stricmp(foo_auth_header.get(), "Bearer foo-auth-header") == - 0); - } - { - // Access foo creds via foo2 - auto foo_auth_header = perform_call_and_get_authorization_header( - main_creds->get_control_plane_credentials("foo2").get()); - GPR_ASSERT(foo_auth_header != nullptr && - gpr_stricmp(foo_auth_header.get(), "Bearer foo-auth-header") == - 0); - } - grpc_channel_credentials_release(main_creds); -} - -// Note that this test uses control plane creds registered in the global -// map. This global registration is done before this and any other -// test is invoked. 
-void test_attach_and_get_with_global_registry() { - grpc_channel_credentials* main_creds = - create_test_ssl_plus_token_channel_creds("main-auth-header"); - grpc_channel_credentials* global_override_creds = - create_test_ssl_plus_token_channel_creds("global-override-auth-header"); - grpc_channel_credentials* random_creds = - create_test_ssl_plus_token_channel_creds("random-auth-header"); - auto global_key = grpc_core::UniquePtr(gpr_strdup("global")); - GPR_ASSERT(grpc_channel_credentials_attach_credentials( - main_creds, global_key.get(), global_override_creds) == true); - GPR_ASSERT(grpc_channel_credentials_attach_credentials( - main_creds, "global", global_override_creds) == false); - grpc_channel_credentials_release(global_override_creds); - { - // The global registry should be used if a key isn't registered on the per - // channel registry - auto global_auth_header = perform_call_and_get_authorization_header( - random_creds->get_control_plane_credentials("global").get()); - GPR_ASSERT(global_auth_header != nullptr && - gpr_stricmp(global_auth_header.get(), - "Bearer global-auth-header") == 0); - } - { - // The per-channel registry should be preferred over the global registry - auto override_auth_header = perform_call_and_get_authorization_header( - main_creds->get_control_plane_credentials("global").get()); - GPR_ASSERT(override_auth_header != nullptr && - gpr_stricmp(override_auth_header.get(), - "Bearer global-override-auth-header") == 0); - } - { - // Sanity check that random creds themselves send authorization header with - // value "random". - auto random_auth_header = - perform_call_and_get_authorization_header(random_creds); - GPR_ASSERT(random_auth_header != nullptr && - gpr_stricmp(random_auth_header.get(), - "Bearer random-auth-header") == 0); - } - { - // If a key isn't mapped in the per channel or global registries, then the - // credentials should be returned but with their per-call creds stripped. - // The end effect is that we shouldn't see any authorization metadata - // sent from client to server. - auto unmapped_auth_header = perform_call_and_get_authorization_header( - random_creds->get_control_plane_credentials("unmapped").get()); - GPR_ASSERT(unmapped_auth_header == nullptr); - } - grpc_channel_credentials_release(main_creds); - grpc_channel_credentials_release(random_creds); -} - -} // namespace - -int main(int argc, char** argv) { - { - grpc::testing::TestEnvironment env(argc, argv); - grpc_init(); - // First setup a global server for all tests to use - g_cq = grpc_completion_queue_create_for_next(nullptr); - grpc_server_credentials* server_creds = create_test_ssl_server_creds(); - g_server = grpc_server_create(nullptr, nullptr); - g_port = grpc_pick_unused_port_or_die(); - grpc_server_register_completion_queue(g_server, g_cq, nullptr); - grpc_core::UniquePtr localaddr; - grpc_core::JoinHostPort(&localaddr, "localhost", g_port); - GPR_ASSERT(grpc_server_add_secure_http2_port(g_server, localaddr.get(), - server_creds)); - grpc_server_credentials_release(server_creds); - grpc_server_start(g_server); - { - // First, Register one channel creds in the global registry; all tests - // will have access. 
- grpc_channel_credentials* global_creds = - create_test_ssl_plus_token_channel_creds("global-auth-header"); - auto global_key = grpc_core::UniquePtr(gpr_strdup("global")); - GPR_ASSERT(grpc_control_plane_credentials_register(global_key.get(), - global_creds) == true); - GPR_ASSERT(grpc_control_plane_credentials_register( - "global", global_creds) == false); - grpc_channel_credentials_release(global_creds); - } - // Run tests - { - test_attach_and_get(); - test_registering_same_creds_under_different_keys(); - test_attach_and_get_with_global_registry(); - } - // cleanup - grpc_completion_queue* shutdown_cq = - grpc_completion_queue_create_for_pluck(nullptr); - grpc_server_shutdown_and_notify(g_server, shutdown_cq, tag(1000)); - GPR_ASSERT(grpc_completion_queue_pluck(shutdown_cq, tag(1000), - grpc_timeout_seconds_to_deadline(5), - nullptr) - .type == GRPC_OP_COMPLETE); - grpc_server_destroy(g_server); - grpc_completion_queue_shutdown(shutdown_cq); - grpc_completion_queue_destroy(shutdown_cq); - grpc_completion_queue_shutdown(g_cq); - drain_cq(g_cq); - grpc_completion_queue_destroy(g_cq); - grpc_shutdown(); - } - { - grpc::testing::TestEnvironment env(argc, argv); - grpc_init(); - // The entries in the global registry must still persist through - // a full shutdown and restart of the library. - grpc_channel_credentials* global_creds = - create_test_ssl_plus_token_channel_creds("global-auth-header"); - auto global_key = grpc_core::UniquePtr(gpr_strdup("global")); - GPR_ASSERT(grpc_control_plane_credentials_register(global_key.get(), - global_creds) == false); - grpc_channel_credentials_release(global_creds); - // Sanity check that unmapped authorities can still register in - // the global registry. - grpc_channel_credentials* global_creds_2 = - create_test_ssl_plus_token_channel_creds("global-auth-header"); - GPR_ASSERT(grpc_control_plane_credentials_register("global_2", - global_creds_2) == true); - GPR_ASSERT(grpc_control_plane_credentials_register( - "global_2", global_creds_2) == false); - grpc_channel_credentials_release(global_creds_2); - grpc_shutdown(); - } - return 0; -} diff --git a/test/core/slice/percent_decode_fuzzer.cc b/test/core/slice/percent_decode_fuzzer.cc index 5f0de7293fe..7e62ed136a9 100644 --- a/test/core/slice/percent_decode_fuzzer.cc +++ b/test/core/slice/percent_decode_fuzzer.cc @@ -24,7 +24,6 @@ #include #include -#include "src/core/lib/security/credentials/credentials.h" #include "src/core/lib/slice/percent_encoding.h" bool squelch = true; @@ -32,7 +31,6 @@ bool leak_check = true; extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_init(); - grpc_test_only_control_plane_credentials_force_init(); grpc_slice input = grpc_slice_from_copied_buffer((const char*)data, size); grpc_slice output; if (grpc_strict_percent_decode_slice( @@ -45,7 +43,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { } grpc_slice_unref(grpc_permissive_percent_decode_slice(input)); grpc_slice_unref(input); - grpc_test_only_control_plane_credentials_destroy(); grpc_shutdown_blocking(); return 0; } diff --git a/test/core/slice/percent_encode_fuzzer.cc b/test/core/slice/percent_encode_fuzzer.cc index a028013f064..10239784139 100644 --- a/test/core/slice/percent_encode_fuzzer.cc +++ b/test/core/slice/percent_encode_fuzzer.cc @@ -24,7 +24,6 @@ #include #include -#include "src/core/lib/security/credentials/credentials.h" #include "src/core/lib/slice/percent_encoding.h" bool squelch = true; @@ -32,7 +31,6 @@ bool leak_check = true; static void 
test(const uint8_t* data, size_t size, const uint8_t* dict) { grpc_init(); - grpc_test_only_control_plane_credentials_force_init(); grpc_slice input = grpc_slice_from_copied_buffer(reinterpret_cast(data), size); grpc_slice output = grpc_percent_encode_slice(input, dict); @@ -48,7 +46,6 @@ static void test(const uint8_t* data, size_t size, const uint8_t* dict) { grpc_slice_unref(output); grpc_slice_unref(decoded_output); grpc_slice_unref(permissive_decoded_output); - grpc_test_only_control_plane_credentials_destroy(); grpc_shutdown_blocking(); } diff --git a/test/core/surface/BUILD b/test/core/surface/BUILD index ad36d1c40a6..37a371dc4e8 100644 --- a/test/core/surface/BUILD +++ b/test/core/surface/BUILD @@ -55,6 +55,7 @@ grpc_cc_test( grpc_cc_test( name = "completion_queue_threading_test", srcs = ["completion_queue_threading_test.cc"], + flaky = True, # TODO(b/153064668) language = "C++", deps = [ "//:gpr", diff --git a/test/core/tsi/alts/handshaker/alts_handshaker_client_test.cc b/test/core/tsi/alts/handshaker/alts_handshaker_client_test.cc index 0e1ab006728..5f9a4b2d745 100644 --- a/test/core/tsi/alts/handshaker/alts_handshaker_client_test.cc +++ b/test/core/tsi/alts/handshaker/alts_handshaker_client_test.cc @@ -31,6 +31,7 @@ #define ALTS_HANDSHAKER_CLIENT_TEST_TARGET_NAME "bigtable.google.api.com" #define ALTS_HANDSHAKER_CLIENT_TEST_TARGET_SERVICE_ACCOUNT1 "A@google.com" #define ALTS_HANDSHAKER_CLIENT_TEST_TARGET_SERVICE_ACCOUNT2 "B@google.com" +#define ALTS_HANDSHAKER_CLIENT_TEST_MAX_FRAME_SIZE 64 * 1024 const size_t kHandshakerClientOpNum = 4; const size_t kMaxRpcVersionMajor = 3; @@ -155,8 +156,8 @@ static grpc_call_error check_must_not_be_called(grpc_call* /*call*/, /** * A mock grpc_caller used to check correct execution of client_start operation. * It checks if the client_start handshaker request is populated with correct - * handshake_security_protocol, application_protocol, and record_protocol, and - * op is correctly populated. + * handshake_security_protocol, application_protocol, record_protocol and + * max_frame_size, and op is correctly populated. */ static grpc_call_error check_client_start_success(grpc_call* /*call*/, const grpc_op* op, @@ -196,7 +197,8 @@ static grpc_call_error check_client_start_success(grpc_call* /*call*/, GPR_ASSERT(upb_strview_eql( grpc_gcp_StartClientHandshakeReq_target_name(client_start), upb_strview_makez(ALTS_HANDSHAKER_CLIENT_TEST_TARGET_NAME))); - + GPR_ASSERT(grpc_gcp_StartClientHandshakeReq_max_frame_size(client_start) == + ALTS_HANDSHAKER_CLIENT_TEST_MAX_FRAME_SIZE); GPR_ASSERT(validate_op(client, op, nops, true /* is_start */)); return GRPC_CALL_OK; } @@ -204,8 +206,8 @@ static grpc_call_error check_client_start_success(grpc_call* /*call*/, /** * A mock grpc_caller used to check correct execution of server_start operation. * It checks if the server_start handshaker request is populated with correct - * handshake_security_protocol, application_protocol, and record_protocol, and - * op is correctly populated. + * handshake_security_protocol, application_protocol, record_protocol and + * max_frame_size, and op is correctly populated. 
*/ static grpc_call_error check_server_start_success(grpc_call* /*call*/, const grpc_op* op, @@ -245,6 +247,8 @@ static grpc_call_error check_server_start_success(grpc_call* /*call*/, upb_strview_makez(ALTS_RECORD_PROTOCOL))); validate_rpc_protocol_versions( grpc_gcp_StartServerHandshakeReq_rpc_versions(server_start)); + GPR_ASSERT(grpc_gcp_StartServerHandshakeReq_max_frame_size(server_start) == + ALTS_HANDSHAKER_CLIENT_TEST_MAX_FRAME_SIZE); GPR_ASSERT(validate_op(client, op, nops, true /* is_start */)); return GRPC_CALL_OK; } @@ -321,12 +325,14 @@ static alts_handshaker_client_test_config* create_config() { nullptr, config->channel, ALTS_HANDSHAKER_SERVICE_URL_FOR_TESTING, nullptr, server_options, grpc_slice_from_static_string(ALTS_HANDSHAKER_CLIENT_TEST_TARGET_NAME), - nullptr, nullptr, nullptr, nullptr, false); + nullptr, nullptr, nullptr, nullptr, false, + ALTS_HANDSHAKER_CLIENT_TEST_MAX_FRAME_SIZE); config->client = alts_grpc_handshaker_client_create( nullptr, config->channel, ALTS_HANDSHAKER_SERVICE_URL_FOR_TESTING, nullptr, client_options, grpc_slice_from_static_string(ALTS_HANDSHAKER_CLIENT_TEST_TARGET_NAME), - nullptr, nullptr, nullptr, nullptr, true); + nullptr, nullptr, nullptr, nullptr, true, + ALTS_HANDSHAKER_CLIENT_TEST_MAX_FRAME_SIZE); GPR_ASSERT(config->client != nullptr); GPR_ASSERT(config->server != nullptr); grpc_alts_credentials_options_destroy(client_options); diff --git a/test/core/tsi/alts/handshaker/alts_tsi_handshaker_test.cc b/test/core/tsi/alts/handshaker/alts_tsi_handshaker_test.cc index 5dd76d82fdc..2127e980488 100644 --- a/test/core/tsi/alts/handshaker/alts_tsi_handshaker_test.cc +++ b/test/core/tsi/alts/handshaker/alts_tsi_handshaker_test.cc @@ -27,6 +27,7 @@ #include "src/core/tsi/alts/handshaker/alts_shared_resource.h" #include "src/core/tsi/alts/handshaker/alts_tsi_handshaker.h" #include "src/core/tsi/alts/handshaker/alts_tsi_handshaker_private.h" +#include "src/core/tsi/transport_security_grpc.h" #include "src/proto/grpc/gcp/altscontext.upb.h" #include "test/core/tsi/alts/handshaker/alts_handshaker_service_api_test_lib.h" #include "test/core/util/test_config.h" @@ -49,6 +50,7 @@ #define ALTS_TSI_HANDSHAKER_TEST_APPLICATION_PROTOCOL \ "test application protocol" #define ALTS_TSI_HANDSHAKER_TEST_RECORD_PROTOCOL "test record protocol" +#define ALTS_TSI_HANDSHAKER_TEST_MAX_FRAME_SIZE 256 * 1024 using grpc_core::internal::alts_handshaker_client_check_fields_for_testing; using grpc_core::internal::alts_handshaker_client_get_handshaker_for_testing; @@ -164,6 +166,8 @@ static grpc_byte_buffer* generate_handshaker_response( upb_strview_makez(ALTS_TSI_HANDSHAKER_TEST_APPLICATION_PROTOCOL)); grpc_gcp_HandshakerResult_set_record_protocol( result, upb_strview_makez(ALTS_TSI_HANDSHAKER_TEST_RECORD_PROTOCOL)); + grpc_gcp_HandshakerResult_set_max_frame_size( + result, ALTS_TSI_HANDSHAKER_TEST_MAX_FRAME_SIZE); break; case SERVER_NEXT: grpc_gcp_HandshakerResp_set_bytes_consumed( @@ -283,6 +287,17 @@ static void on_client_next_success_cb(tsi_result status, void* user_data, GPR_ASSERT(memcmp(bytes_to_send, ALTS_TSI_HANDSHAKER_TEST_OUT_FRAME, bytes_to_send_size) == 0); GPR_ASSERT(result != nullptr); + // Validate max frame size value after Frame Size Negotiation. Here peer max + // frame size is greater than default value, and user specified max frame size + // is absent. 
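[Editor's aside: illustrative sketch, not part of this diff.] The client-side check just below and the server-side check later in this file exercise two corners of ALTS frame-size negotiation: the peer advertises a value larger than the default with no user override, and the peer advertises nothing while a user value is supplied. Both expectations are consistent with a rule of roughly the following shape; the helper name and parameterization here are assumptions for illustration only, and the authoritative logic lives in the ALTS TSI implementation rather than in this test.

#include <algorithm>
#include <cstddef>

// Hypothetical helper, inferred from the two test expectations in this file:
// clamp the locally preferred limit into [min_frame, max_frame], take the
// smaller of that and what the peer advertised, and fall back to the minimum
// frame size when the peer advertised nothing.
size_t NegotiatedAltsFrameSize(bool peer_advertised, size_t peer_max,
                               bool user_specified, size_t user_max,
                               size_t min_frame, size_t max_frame) {
  if (!peer_advertised) return min_frame;
  const size_t local_limit =
      user_specified ? std::min(std::max(user_max, min_frame), max_frame)
                     : max_frame;
  return std::min(std::max(peer_max, min_frame), local_limit);
}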
+ tsi_zero_copy_grpc_protector* zero_copy_protector = nullptr; + GPR_ASSERT(tsi_handshaker_result_create_zero_copy_grpc_protector( + result, nullptr, &zero_copy_protector) == TSI_OK); + size_t actual_max_frame_size; + tsi_zero_copy_grpc_protector_max_frame_size(zero_copy_protector, + &actual_max_frame_size); + GPR_ASSERT(actual_max_frame_size == kTsiAltsMaxFrameSize); + tsi_zero_copy_grpc_protector_destroy(zero_copy_protector); /* Validate peer identity. */ tsi_peer peer; GPR_ASSERT(tsi_handshaker_result_extract_peer(result, &peer) == TSI_OK); @@ -343,6 +358,20 @@ static void on_server_next_success_cb(tsi_result status, void* user_data, GPR_ASSERT(bytes_to_send_size == 0); GPR_ASSERT(bytes_to_send == nullptr); GPR_ASSERT(result != nullptr); + // Validate max frame size value after Frame Size Negotiation. The negotiated + // frame size value equals minimum send frame size, due to the absence of peer + // max frame size. + tsi_zero_copy_grpc_protector* zero_copy_protector = nullptr; + size_t user_specified_max_frame_size = + ALTS_TSI_HANDSHAKER_TEST_MAX_FRAME_SIZE; + GPR_ASSERT(tsi_handshaker_result_create_zero_copy_grpc_protector( + result, &user_specified_max_frame_size, + &zero_copy_protector) == TSI_OK); + size_t actual_max_frame_size; + tsi_zero_copy_grpc_protector_max_frame_size(zero_copy_protector, + &actual_max_frame_size); + GPR_ASSERT(actual_max_frame_size == kTsiAltsMinFrameSize); + tsi_zero_copy_grpc_protector_destroy(zero_copy_protector); /* Validate peer identity. */ tsi_peer peer; GPR_ASSERT(tsi_handshaker_result_extract_peer(result, &peer) == TSI_OK); @@ -478,7 +507,7 @@ static tsi_handshaker* create_test_handshaker(bool is_client) { grpc_alts_credentials_client_options_create(); alts_tsi_handshaker_create(options, "target_name", ALTS_HANDSHAKER_SERVICE_URL_FOR_TESTING, is_client, - nullptr, &handshaker); + nullptr, &handshaker, 0); alts_tsi_handshaker* alts_handshaker = reinterpret_cast(handshaker); alts_tsi_handshaker_set_client_vtable_for_testing(alts_handshaker, &vtable); diff --git a/test/core/tsi/ssl_transport_security_test.cc b/test/core/tsi/ssl_transport_security_test.cc index 4cd5440006d..70b8355bc79 100644 --- a/test/core/tsi/ssl_transport_security_test.cc +++ b/test/core/tsi/ssl_transport_security_test.cc @@ -659,10 +659,14 @@ void ssl_tsi_test_do_round_trip_for_all_configs() { void ssl_tsi_test_do_round_trip_odd_buffer_size() { gpr_log(GPR_INFO, "ssl_tsi_test_do_round_trip_odd_buffer_size"); -#ifndef MEMORY_SANITIZER +#if !defined(MEMORY_SANITIZER) && !defined(GPR_ARCH_32) && !defined(__APPLE__) const size_t odd_sizes[] = {1025, 2051, 4103, 8207, 16409}; #else - // avoid test being extremely slow under MSAN + // 1. avoid test being extremely slow under MSAN + // 2. on 32-bit, the test is much slower (probably due to lack of boringssl + // asm optimizations) so we only run a subset of tests to avoid timeout + // 3. on Mac OS, we have slower testing machines so we only run a subset + // of tests to avoid timeout const size_t odd_sizes[] = {1025}; #endif const size_t size = sizeof(odd_sizes) / sizeof(size_t); diff --git a/test/cpp/client/BUILD b/test/cpp/client/BUILD index b895d99724f..18e9ce4973f 100644 --- a/test/cpp/client/BUILD +++ b/test/cpp/client/BUILD @@ -35,11 +35,18 @@ grpc_cc_test( grpc_cc_test( name = "client_channel_stress_test", srcs = ["client_channel_stress_test.cc"], + flaky = True, # TODO(b/153136407) # TODO(jtattermusch): test fails frequently on Win RBE, but passes locally # reenable the tests once it works reliably on Win RBE. 
+ # TODO(roth): Test disabled on msan and tsan due to variable + # duration problem triggered by https://github.com/grpc/grpc/pull/22481. + # Re-enable once the problem is diagnosed and fixed. Tracked + # internally in b/153136407. tags = [ "no_test_android", # fails on android due to "Too many open files". "no_windows", + "nomsan", + "notsan", ], deps = [ "//:gpr", diff --git a/test/cpp/client/client_channel_stress_test.cc b/test/cpp/client/client_channel_stress_test.cc index 5fcfc11cc4c..8e92d5373ad 100644 --- a/test/cpp/client/client_channel_stress_test.cc +++ b/test/cpp/client/client_channel_stress_test.cc @@ -35,6 +35,7 @@ #include #include +#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h" #include "src/core/ext/filters/client_channel/parse_address.h" #include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h" #include "src/core/ext/filters/client_channel/server_address.h" @@ -151,7 +152,7 @@ class ClientChannelStressTest { for (const auto& balancer_server : balancer_servers_) { // Select each address with probability of 0.8. if (std::rand() % 10 < 8) { - addresses.emplace_back(AddressData{balancer_server.port_, true, ""}); + addresses.emplace_back(AddressData{balancer_server.port_, ""}); } } std::shuffle(addresses.begin(), addresses.end(), @@ -213,13 +214,12 @@ class ClientChannelStressTest { struct AddressData { int port; - bool is_balancer; grpc::string balancer_name; }; - void SetNextResolution(const std::vector& address_data) { - grpc_core::ExecCtx exec_ctx; - grpc_core::Resolver::Result result; + static grpc_core::ServerAddressList CreateAddressListFromAddressDataList( + const std::vector& address_data) { + grpc_core::ServerAddressList addresses; for (const auto& addr : address_data) { char* lb_uri_str; gpr_asprintf(&lb_uri_str, "ipv4:127.0.0.1:%d", addr.port); @@ -227,20 +227,34 @@ class ClientChannelStressTest { GPR_ASSERT(lb_uri != nullptr); grpc_resolved_address address; GPR_ASSERT(grpc_parse_uri(lb_uri, &address)); - std::vector args_to_add; - if (addr.is_balancer) { - args_to_add.emplace_back(grpc_channel_arg_integer_create( - const_cast(GRPC_ARG_ADDRESS_IS_BALANCER), 1)); - args_to_add.emplace_back(grpc_channel_arg_string_create( - const_cast(GRPC_ARG_ADDRESS_BALANCER_NAME), - const_cast(addr.balancer_name.c_str()))); - } - grpc_channel_args* args = grpc_channel_args_copy_and_add( - nullptr, args_to_add.data(), args_to_add.size()); - result.addresses.emplace_back(address.addr, address.len, args); + grpc_arg arg = + grpc_core::CreateGrpclbBalancerNameArg(addr.balancer_name.c_str()); + grpc_channel_args* args = + grpc_channel_args_copy_and_add(nullptr, &arg, 1); + addresses.emplace_back(address.addr, address.len, args); grpc_uri_destroy(lb_uri); gpr_free(lb_uri_str); } + return addresses; + } + + static grpc_core::Resolver::Result MakeResolverResult( + const std::vector& balancer_address_data) { + grpc_core::Resolver::Result result; + grpc_error* error = GRPC_ERROR_NONE; + result.service_config = grpc_core::ServiceConfig::Create( + "{\"loadBalancingConfig\":[{\"grpclb\":{}}]}", &error); + GPR_ASSERT(error == GRPC_ERROR_NONE); + grpc_core::ServerAddressList balancer_addresses = + CreateAddressListFromAddressDataList(balancer_address_data); + grpc_arg arg = CreateGrpclbBalancerAddressesArg(&balancer_addresses); + result.args = grpc_channel_args_copy_and_add(nullptr, &arg, 1); + return result; + } + + void SetNextResolution(const std::vector& address_data) { + grpc_core::ExecCtx exec_ctx; + grpc_core::Resolver::Result 
result = MakeResolverResult(address_data); response_generator_->SetResponse(std::move(result)); } diff --git a/test/cpp/client/destroy_grpclb_channel_with_active_connect_stress_test.cc b/test/cpp/client/destroy_grpclb_channel_with_active_connect_stress_test.cc index 7ba76f0da75..d4a942b9870 100644 --- a/test/cpp/client/destroy_grpclb_channel_with_active_connect_stress_test.cc +++ b/test/cpp/client/destroy_grpclb_channel_with_active_connect_stress_test.cc @@ -37,6 +37,7 @@ #include #include +#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h" #include "src/core/ext/filters/client_channel/parse_address.h" #include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h" #include "src/core/ext/filters/client_channel/server_address.h" @@ -59,24 +60,21 @@ void TryConnectAndDestroy() { // The precise behavior is dependant on the test runtime environment though, // since connect() attempts on this address may unfortunately result in // "network unreachable" errors in some test runtime environments. - char* uri_str; - gpr_asprintf(&uri_str, "ipv6:[0100::1234]:443"); + const char* uri_str = "ipv6:[0100::1234]:443"; grpc_uri* lb_uri = grpc_uri_parse(uri_str, true); - gpr_free(uri_str); - GPR_ASSERT(lb_uri != nullptr); + ASSERT_NE(lb_uri, nullptr); grpc_resolved_address address; - GPR_ASSERT(grpc_parse_uri(lb_uri, &address)); - std::vector address_args_to_add = { - grpc_channel_arg_integer_create( - const_cast(GRPC_ARG_ADDRESS_IS_BALANCER), 1), - }; + ASSERT_TRUE(grpc_parse_uri(lb_uri, &address)); + grpc_uri_destroy(lb_uri); grpc_core::ServerAddressList addresses; - grpc_channel_args* address_args = grpc_channel_args_copy_and_add( - nullptr, address_args_to_add.data(), address_args_to_add.size()); - addresses.emplace_back(address.addr, address.len, address_args); + addresses.emplace_back(address.addr, address.len, nullptr); grpc_core::Resolver::Result lb_address_result; - lb_address_result.addresses = addresses; - grpc_uri_destroy(lb_uri); + grpc_error* error = GRPC_ERROR_NONE; + lb_address_result.service_config = grpc_core::ServiceConfig::Create( + "{\"loadBalancingConfig\":[{\"grpclb\":{}}]}", &error); + ASSERT_EQ(error, GRPC_ERROR_NONE) << grpc_error_string(error); + grpc_arg arg = grpc_core::CreateGrpclbBalancerAddressesArg(&addresses); + lb_address_result.args = grpc_channel_args_copy_and_add(nullptr, &arg, 1); response_generator->SetResponse(lb_address_result); grpc::ChannelArguments args; args.SetPointer(GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR, @@ -95,9 +93,9 @@ void TryConnectAndDestroy() { // unreachable balancer to begin. The connection should never become ready // because the LB we're trying to connect to is unreachable. 
channel->GetState(true /* try_to_connect */); - GPR_ASSERT( - !channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(100))); - GPR_ASSERT("grpclb" == channel->GetLoadBalancingPolicyName()); + ASSERT_FALSE( + channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(100))); + ASSERT_EQ("grpclb", channel->GetLoadBalancingPolicyName()); channel.reset(); }; diff --git a/test/cpp/codegen/compiler_test_golden b/test/cpp/codegen/compiler_test_golden index e6816f2f33c..7ad954d6d9a 100644 --- a/test/cpp/codegen/compiler_test_golden +++ b/test/cpp/codegen/compiler_test_golden @@ -839,7 +839,14 @@ class ServiceA final { public: WithStreamedUnaryMethod_MethodA1() { ::grpc::Service::MarkMethodStreamed(0, - new ::grpc::internal::StreamedUnaryHandler< ::grpc::testing::Request, ::grpc::testing::Response>(std::bind(&WithStreamedUnaryMethod_MethodA1::StreamedMethodA1, this, std::placeholders::_1, std::placeholders::_2))); + new ::grpc::internal::StreamedUnaryHandler< + ::grpc::testing::Request, ::grpc::testing::Response>( + [this](::grpc_impl::ServerContext* context, + ::grpc_impl::ServerUnaryStreamer< + ::grpc::testing::Request, ::grpc::testing::Response>* streamer) { + return this->StreamedMethodA1(context, + streamer); + })); } ~WithStreamedUnaryMethod_MethodA1() override { BaseClassMustBeDerivedFromService(this); @@ -860,7 +867,14 @@ class ServiceA final { public: WithSplitStreamingMethod_MethodA3() { ::grpc::Service::MarkMethodStreamed(2, - new ::grpc::internal::SplitServerStreamingHandler< ::grpc::testing::Request, ::grpc::testing::Response>(std::bind(&WithSplitStreamingMethod_MethodA3::StreamedMethodA3, this, std::placeholders::_1, std::placeholders::_2))); + new ::grpc::internal::SplitServerStreamingHandler< + ::grpc::testing::Request, ::grpc::testing::Response>( + [this](::grpc_impl::ServerContext* context, + ::grpc_impl::ServerSplitStreamer< + ::grpc::testing::Request, ::grpc::testing::Response>* streamer) { + return this->StreamedMethodA3(context, + streamer); + })); } ~WithSplitStreamingMethod_MethodA3() override { BaseClassMustBeDerivedFromService(this); @@ -1129,7 +1143,14 @@ class ServiceB final { public: WithStreamedUnaryMethod_MethodB1() { ::grpc::Service::MarkMethodStreamed(0, - new ::grpc::internal::StreamedUnaryHandler< ::grpc::testing::Request, ::grpc::testing::Response>(std::bind(&WithStreamedUnaryMethod_MethodB1::StreamedMethodB1, this, std::placeholders::_1, std::placeholders::_2))); + new ::grpc::internal::StreamedUnaryHandler< + ::grpc::testing::Request, ::grpc::testing::Response>( + [this](::grpc_impl::ServerContext* context, + ::grpc_impl::ServerUnaryStreamer< + ::grpc::testing::Request, ::grpc::testing::Response>* streamer) { + return this->StreamedMethodB1(context, + streamer); + })); } ~WithStreamedUnaryMethod_MethodB1() override { BaseClassMustBeDerivedFromService(this); diff --git a/test/cpp/end2end/grpclb_end2end_test.cc b/test/cpp/end2end/grpclb_end2end_test.cc index 79969f9c5e3..3d5b6f8cc2c 100644 --- a/test/cpp/end2end/grpclb_end2end_test.cc +++ b/test/cpp/end2end/grpclb_end2end_test.cc @@ -16,6 +16,7 @@ * */ +#include #include #include #include @@ -35,6 +36,7 @@ #include #include "src/core/ext/filters/client_channel/backup_poller.h" +#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h" #include "src/core/ext/filters/client_channel/parse_address.h" #include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h" #include "src/core/ext/filters/client_channel/server_address.h" @@ -83,6 +85,13 @@ namespace 
grpc { namespace testing { namespace { +constexpr char kDefaultServiceConfig[] = + "{\n" + " \"loadBalancingConfig\":[\n" + " { \"grpclb\":{} }\n" + " ]\n" + "}"; + template class CountedService : public ServiceType { public: @@ -253,30 +262,31 @@ class BalancerServiceImpl : public BalancerService { if (client_load_reporting_interval_seconds_ > 0) { request.Clear(); - if (stream->Read(&request)) { + while (stream->Read(&request)) { gpr_log(GPR_INFO, "LB[%p]: received client load report message '%s'", this, request.DebugString().c_str()); GPR_ASSERT(request.has_client_stats()); - // We need to acquire the lock here in order to prevent the notify_one - // below from firing before its corresponding wait is executed. - grpc::internal::MutexLock lock(&mu_); - client_stats_.num_calls_started += + ClientStats load_report; + load_report.num_calls_started = request.client_stats().num_calls_started(); - client_stats_.num_calls_finished += + load_report.num_calls_finished = request.client_stats().num_calls_finished(); - client_stats_.num_calls_finished_with_client_failed_to_send += + load_report.num_calls_finished_with_client_failed_to_send = request.client_stats() .num_calls_finished_with_client_failed_to_send(); - client_stats_.num_calls_finished_known_received += + load_report.num_calls_finished_known_received = request.client_stats().num_calls_finished_known_received(); for (const auto& drop_token_count : request.client_stats().calls_finished_with_drop()) { - client_stats_ - .drop_token_counts[drop_token_count.load_balance_token()] += + load_report + .drop_token_counts[drop_token_count.load_balance_token()] = drop_token_count.num_calls(); } - load_report_ready_ = true; - load_report_cond_.Signal(); + // We need to acquire the lock here in order to prevent the notify_one + // below from firing before its corresponding wait is executed. 
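[Editor's aside: illustrative sketch, not part of this diff.] The reworked load reporting in this hunk replaces the single client_stats_ accumulator with a mutex-guarded queue plus a caller-owned CondVar: the reader parks a CondVar pointer while the queue is empty, and the writer appends and signals while still holding the lock so the signal cannot fire before the matching wait (per the comment above). The same handoff pattern, condensed with standard-library primitives standing in for grpc::internal::Mutex and CondVar:

#include <condition_variable>
#include <deque>
#include <mutex>
#include <utility>

template <typename T>
class HandoffQueue {
 public:
  // Producer side: append and notify under the same lock, mirroring the
  // Signal-while-locked convention used by BalancerServiceImpl below.
  void Push(T value) {
    std::lock_guard<std::mutex> lock(mu_);
    queue_.push_back(std::move(value));
    cv_.notify_one();
  }

  // Consumer side: block until at least one element has been handed off,
  // then pop it, as WaitForLoadReport() does with load_report_queue_.
  T Pop() {
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [this] { return !queue_.empty(); });
    T value = std::move(queue_.front());
    queue_.pop_front();
    return value;
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  std::deque<T> queue_;
};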
+ grpc::internal::MutexLock lock(&mu_); + load_report_queue_.emplace_back(std::move(load_report)); + if (load_report_cond_ != nullptr) load_report_cond_->Signal(); } } } @@ -293,9 +303,8 @@ class BalancerServiceImpl : public BalancerService { void Start() { grpc::internal::MutexLock lock(&mu_); serverlist_done_ = false; - load_report_ready_ = false; responses_and_delays_.clear(); - client_stats_.Reset(); + load_report_queue_.clear(); } void Shutdown() { @@ -327,11 +336,18 @@ class BalancerServiceImpl : public BalancerService { return response; } - const ClientStats& WaitForLoadReport() { + ClientStats WaitForLoadReport() { grpc::internal::MutexLock lock(&mu_); - load_report_cond_.WaitUntil(&mu_, [this] { return load_report_ready_; }); - load_report_ready_ = false; - return client_stats_; + grpc::internal::CondVar cv; + if (load_report_queue_.empty()) { + load_report_cond_ = &cv; + load_report_cond_->WaitUntil( + &mu_, [this] { return !load_report_queue_.empty(); }); + load_report_cond_ = nullptr; + } + ClientStats load_report = std::move(load_report_queue_.front()); + load_report_queue_.pop_front(); + return load_report; } void NotifyDoneWithServerlists() { @@ -357,12 +373,12 @@ class BalancerServiceImpl : public BalancerService { const int client_load_reporting_interval_seconds_; std::vector responses_and_delays_; + grpc::internal::Mutex mu_; - grpc::internal::CondVar load_report_cond_; - bool load_report_ready_ = false; grpc::internal::CondVar serverlist_cond_; bool serverlist_done_ = false; - ClientStats client_stats_; + grpc::internal::CondVar* load_report_cond_ = nullptr; + std::deque load_report_queue_; }; class GrpclbEnd2endTest : public ::testing::Test { @@ -514,11 +530,10 @@ class GrpclbEnd2endTest : public ::testing::Test { struct AddressData { int port; - bool is_balancer; grpc::string balancer_name; }; - grpc_core::ServerAddressList CreateLbAddressesFromAddressDataList( + static grpc_core::ServerAddressList CreateLbAddressesFromAddressDataList( const std::vector& address_data) { grpc_core::ServerAddressList addresses; for (const auto& addr : address_data) { @@ -528,16 +543,10 @@ class GrpclbEnd2endTest : public ::testing::Test { GPR_ASSERT(lb_uri != nullptr); grpc_resolved_address address; GPR_ASSERT(grpc_parse_uri(lb_uri, &address)); - std::vector args_to_add; - if (addr.is_balancer) { - args_to_add.emplace_back(grpc_channel_arg_integer_create( - const_cast(GRPC_ARG_ADDRESS_IS_BALANCER), 1)); - args_to_add.emplace_back(grpc_channel_arg_string_create( - const_cast(GRPC_ARG_ADDRESS_BALANCER_NAME), - const_cast(addr.balancer_name.c_str()))); - } - grpc_channel_args* args = grpc_channel_args_copy_and_add( - nullptr, args_to_add.data(), args_to_add.size()); + grpc_arg arg = + grpc_core::CreateGrpclbBalancerNameArg(addr.balancer_name.c_str()); + grpc_channel_args* args = + grpc_channel_args_copy_and_add(nullptr, &arg, 1); addresses.emplace_back(address.addr, address.len, args); grpc_uri_destroy(lb_uri); gpr_free(lb_uri_str); @@ -545,34 +554,50 @@ class GrpclbEnd2endTest : public ::testing::Test { return addresses; } + static grpc_core::Resolver::Result MakeResolverResult( + const std::vector& balancer_address_data, + const std::vector& backend_address_data = {}, + const char* service_config_json = kDefaultServiceConfig) { + grpc_core::Resolver::Result result; + result.addresses = + CreateLbAddressesFromAddressDataList(backend_address_data); + grpc_error* error = GRPC_ERROR_NONE; + result.service_config = + grpc_core::ServiceConfig::Create(service_config_json, &error); + 
GPR_ASSERT(error == GRPC_ERROR_NONE); + grpc_core::ServerAddressList balancer_addresses = + CreateLbAddressesFromAddressDataList(balancer_address_data); + grpc_arg arg = CreateGrpclbBalancerAddressesArg(&balancer_addresses); + result.args = grpc_channel_args_copy_and_add(nullptr, &arg, 1); + return result; + } + void SetNextResolutionAllBalancers( - const char* service_config_json = nullptr) { + const char* service_config_json = kDefaultServiceConfig) { std::vector addresses; for (size_t i = 0; i < balancers_.size(); ++i) { - addresses.emplace_back(AddressData{balancers_[i]->port_, true, ""}); + addresses.emplace_back(AddressData{balancers_[i]->port_, ""}); } - SetNextResolution(addresses, service_config_json); + SetNextResolution(addresses, {}, service_config_json); } - void SetNextResolution(const std::vector& address_data, - const char* service_config_json = nullptr) { + void SetNextResolution( + const std::vector& balancer_address_data, + const std::vector& backend_address_data = {}, + const char* service_config_json = kDefaultServiceConfig) { grpc_core::ExecCtx exec_ctx; - grpc_core::Resolver::Result result; - result.addresses = CreateLbAddressesFromAddressDataList(address_data); - if (service_config_json != nullptr) { - grpc_error* error = GRPC_ERROR_NONE; - result.service_config = - grpc_core::ServiceConfig::Create(service_config_json, &error); - GRPC_ERROR_UNREF(error); - } + grpc_core::Resolver::Result result = MakeResolverResult( + balancer_address_data, backend_address_data, service_config_json); response_generator_->SetResponse(std::move(result)); } void SetNextReresolutionResponse( - const std::vector& address_data) { + const std::vector& balancer_address_data, + const std::vector& backend_address_data = {}, + const char* service_config_json = kDefaultServiceConfig) { grpc_core::ExecCtx exec_ctx; - grpc_core::Resolver::Result result; - result.addresses = CreateLbAddressesFromAddressDataList(address_data); + grpc_core::Resolver::Result result = MakeResolverResult( + balancer_address_data, backend_address_data, service_config_json); response_generator_->SetReresolutionResponse(std::move(result)); } @@ -747,44 +772,11 @@ TEST_F(SingleBalancerTest, SelectGrpclbWithMigrationServiceConfig) { EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName()); } -TEST_F(SingleBalancerTest, - DoNotSpecialCaseUseGrpclbWithLoadBalancingConfigTest) { - const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor(); - ResetStub(kFallbackTimeoutMs); - SetNextResolution({AddressData{backends_[0]->port_, false, ""}, - AddressData{balancers_[0]->port_, true, ""}}, - "{\n" - " \"loadBalancingConfig\":[\n" - " {\"pick_first\":{} }\n" - " ]\n" - "}"); - CheckRpcSendOk(); - // Check LB policy name for the channel. - EXPECT_EQ("pick_first", channel_->GetLoadBalancingPolicyName()); -} - -TEST_F( - SingleBalancerTest, - DoNotSpecialCaseUseGrpclbWithLoadBalancingConfigTestAndNoBackendAddress) { - const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor(); - ResetStub(kFallbackTimeoutMs); - SetNextResolution({AddressData{balancers_[0]->port_, true, ""}}, - "{\n" - " \"loadBalancingConfig\":[\n" - " {\"pick_first\":{} }\n" - " ]\n" - "}"); - // This should fail since we do not have a non-balancer backend - CheckRpcSendFailure(); - // Check LB policy name for the channel. 
- EXPECT_EQ("pick_first", channel_->GetLoadBalancingPolicyName()); -} - TEST_F(SingleBalancerTest, SelectGrpclbWithMigrationServiceConfigAndNoAddresses) { const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor(); ResetStub(kFallbackTimeoutMs); - SetNextResolution({}, + SetNextResolution({}, {}, "{\n" " \"loadBalancingConfig\":[\n" " { \"does_not_exist\":{} },\n" @@ -804,23 +796,6 @@ TEST_F(SingleBalancerTest, EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName()); } -TEST_F(SingleBalancerTest, - SelectGrpclbWithMigrationServiceConfigAndNoBalancerAddresses) { - const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor(); - ResetStub(kFallbackTimeoutMs); - // Resolution includes fallback address but no balancers. - SetNextResolution({AddressData{backends_[0]->port_, false, ""}}, - "{\n" - " \"loadBalancingConfig\":[\n" - " { \"does_not_exist\":{} },\n" - " { \"grpclb\":{} }\n" - " ]\n" - "}"); - CheckRpcSendOk(1, 1000 /* timeout_ms */, true /* wait_for_ready */); - // Check LB policy name for the channel. - EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName()); -} - TEST_F(SingleBalancerTest, UsePickFirstChildPolicy) { SetNextResolutionAllBalancers( "{\n" @@ -875,7 +850,7 @@ TEST_F(SingleBalancerTest, SwapChildPolicy) { EXPECT_EQ(backends_[i]->service_.request_count(), 0UL); } // Send new resolution that removes child policy from service config. - SetNextResolutionAllBalancers("{}"); + SetNextResolutionAllBalancers(); WaitForAllBackends(); CheckRpcSendOk(kNumRpcs, 1000 /* timeout_ms */, true /* wait_for_ready */); // Check that every backend saw the same number of requests. This verifies @@ -903,9 +878,11 @@ TEST_F(SingleBalancerTest, UpdatesGoToMostRecentChildPolicy) { SetNextResolution( { // Unreachable balancer. - {unreachable_balancer_port, true, ""}, + {unreachable_balancer_port, ""}, + }, + { // Fallback address: first backend. - {backends_[0]->port_, false, ""}, + {backends_[0]->port_, ""}, }, "{\n" " \"loadBalancingConfig\":[\n" @@ -923,9 +900,11 @@ TEST_F(SingleBalancerTest, UpdatesGoToMostRecentChildPolicy) { SetNextResolution( { // Unreachable balancer. - {unreachable_balancer_port, true, ""}, + {unreachable_balancer_port, ""}, + }, + { // Fallback address: unreachable backend. - {unreachable_backend_port, false, ""}, + {unreachable_backend_port, ""}, }, "{\n" " \"loadBalancingConfig\":[\n" @@ -946,10 +925,12 @@ TEST_F(SingleBalancerTest, UpdatesGoToMostRecentChildPolicy) { SetNextResolution( { // Unreachable balancer. - {unreachable_balancer_port, true, ""}, + {unreachable_balancer_port, ""}, + }, + { // Fallback address: second and third backends. 
- {backends_[1]->port_, false, ""}, - {backends_[2]->port_, false, ""}, + {backends_[1]->port_, ""}, + {backends_[2]->port_, ""}, }, "{\n" " \"loadBalancingConfig\":[\n" @@ -988,7 +969,7 @@ TEST_F(SingleBalancerTest, SameBackendListedMultipleTimes) { TEST_F(SingleBalancerTest, SecureNaming) { ResetStub(0, kApplicationTargetName_ + ";lb"); - SetNextResolution({AddressData{balancers_[0]->port_, true, "lb"}}); + SetNextResolution({AddressData{balancers_[0]->port_, "lb"}}); const size_t kNumRpcsPerAddress = 100; ScheduleResponseForBalancer( 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}), @@ -1020,7 +1001,7 @@ TEST_F(SingleBalancerTest, SecureNamingDeathTest) { ASSERT_DEATH_IF_SUPPORTED( { ResetStub(0, kApplicationTargetName_ + ";lb"); - SetNextResolution({AddressData{balancers_[0]->port_, true, "woops"}}); + SetNextResolution({AddressData{balancers_[0]->port_, "woops"}}); channel_->WaitForConnected(grpc_timeout_seconds_to_deadline(1)); }, ""); @@ -1080,12 +1061,13 @@ TEST_F(SingleBalancerTest, Fallback) { const size_t kNumBackendsInResolution = backends_.size() / 2; ResetStub(kFallbackTimeoutMs); - std::vector addresses; - addresses.emplace_back(AddressData{balancers_[0]->port_, true, ""}); + std::vector balancer_addresses; + balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); + std::vector backend_addresses; for (size_t i = 0; i < kNumBackendsInResolution; ++i) { - addresses.emplace_back(AddressData{backends_[i]->port_, false, ""}); + backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""}); } - SetNextResolution(addresses); + SetNextResolution(balancer_addresses, backend_addresses); // Send non-empty serverlist only after kServerlistDelayMs. ScheduleResponseForBalancer( @@ -1148,12 +1130,13 @@ TEST_F(SingleBalancerTest, FallbackUpdate) { const size_t kNumBackendsInResolutionUpdate = backends_.size() / 3; ResetStub(kFallbackTimeoutMs); - std::vector addresses; - addresses.emplace_back(AddressData{balancers_[0]->port_, true, ""}); + std::vector balancer_addresses; + balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); + std::vector backend_addresses; for (size_t i = 0; i < kNumBackendsInResolution; ++i) { - addresses.emplace_back(AddressData{backends_[i]->port_, false, ""}); + backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""}); } - SetNextResolution(addresses); + SetNextResolution(balancer_addresses, backend_addresses); // Send non-empty serverlist only after kServerlistDelayMs. ScheduleResponseForBalancer( @@ -1183,13 +1166,14 @@ TEST_F(SingleBalancerTest, FallbackUpdate) { EXPECT_EQ(0U, backends_[i]->service_.request_count()); } - addresses.clear(); - addresses.emplace_back(AddressData{balancers_[0]->port_, true, ""}); + balancer_addresses.clear(); + balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); + backend_addresses.clear(); for (size_t i = kNumBackendsInResolution; i < kNumBackendsInResolution + kNumBackendsInResolutionUpdate; ++i) { - addresses.emplace_back(AddressData{backends_[i]->port_, false, ""}); + backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""}); } - SetNextResolution(addresses); + SetNextResolution(balancer_addresses, backend_addresses); // Wait until the resolution update has been processed and all the new // fallback backends are reachable. @@ -1253,14 +1237,15 @@ TEST_F(SingleBalancerTest, // First two backends are fallback, last two are pointed to by balancer. 
const size_t kNumFallbackBackends = 2; const size_t kNumBalancerBackends = backends_.size() - kNumFallbackBackends; - std::vector addresses; + std::vector backend_addresses; for (size_t i = 0; i < kNumFallbackBackends; ++i) { - addresses.emplace_back(AddressData{backends_[i]->port_, false, ""}); + backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""}); } + std::vector balancer_addresses; for (size_t i = 0; i < balancers_.size(); ++i) { - addresses.emplace_back(AddressData{balancers_[i]->port_, true, ""}); + balancer_addresses.emplace_back(AddressData{balancers_[i]->port_, ""}); } - SetNextResolution(addresses); + SetNextResolution(balancer_addresses, backend_addresses); ScheduleResponseForBalancer(0, BalancerServiceImpl::BuildResponseForBackends( GetBackendPorts(kNumFallbackBackends), {}), @@ -1307,14 +1292,15 @@ TEST_F(SingleBalancerTest, // First two backends are fallback, last two are pointed to by balancer. const size_t kNumFallbackBackends = 2; const size_t kNumBalancerBackends = backends_.size() - kNumFallbackBackends; - std::vector addresses; + std::vector backend_addresses; for (size_t i = 0; i < kNumFallbackBackends; ++i) { - addresses.emplace_back(AddressData{backends_[i]->port_, false, ""}); + backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""}); } + std::vector balancer_addresses; for (size_t i = 0; i < balancers_.size(); ++i) { - addresses.emplace_back(AddressData{balancers_[i]->port_, true, ""}); + balancer_addresses.emplace_back(AddressData{balancers_[i]->port_, ""}); } - SetNextResolution(addresses); + SetNextResolution(balancer_addresses, backend_addresses); ScheduleResponseForBalancer(0, BalancerServiceImpl::BuildResponseForBackends( GetBackendPorts(kNumFallbackBackends), {}), @@ -1358,10 +1344,12 @@ TEST_F(SingleBalancerTest, FallbackEarlyWhenBalancerChannelFails) { const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor(); ResetStub(kFallbackTimeoutMs); // Return an unreachable balancer and one fallback backend. - std::vector addresses; - addresses.emplace_back(AddressData{grpc_pick_unused_port_or_die(), true, ""}); - addresses.emplace_back(AddressData{backends_[0]->port_, false, ""}); - SetNextResolution(addresses); + std::vector balancer_addresses; + balancer_addresses.emplace_back( + AddressData{grpc_pick_unused_port_or_die(), ""}); + std::vector backend_addresses; + backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""}); + SetNextResolution(balancer_addresses, backend_addresses); // Send RPC with deadline less than the fallback timeout and make sure it // succeeds. CheckRpcSendOk(/* times */ 1, /* timeout_ms */ 1000, @@ -1372,10 +1360,11 @@ TEST_F(SingleBalancerTest, FallbackEarlyWhenBalancerCallFails) { const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor(); ResetStub(kFallbackTimeoutMs); // Return one balancer and one fallback backend. - std::vector addresses; - addresses.emplace_back(AddressData{balancers_[0]->port_, true, ""}); - addresses.emplace_back(AddressData{backends_[0]->port_, false, ""}); - SetNextResolution(addresses); + std::vector balancer_addresses; + balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); + std::vector backend_addresses; + backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""}); + SetNextResolution(balancer_addresses, backend_addresses); // Balancer drops call without sending a serverlist. 
balancers_[0]->service_.NotifyDoneWithServerlists(); // Send RPC with deadline less than the fallback timeout and make sure it @@ -1388,10 +1377,11 @@ TEST_F(SingleBalancerTest, FallbackControlledByBalancer_BeforeFirstServerlist) { const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor(); ResetStub(kFallbackTimeoutMs); // Return one balancer and one fallback backend. - std::vector addresses; - addresses.emplace_back(AddressData{balancers_[0]->port_, true, ""}); - addresses.emplace_back(AddressData{backends_[0]->port_, false, ""}); - SetNextResolution(addresses); + std::vector balancer_addresses; + balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); + std::vector backend_addresses; + backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""}); + SetNextResolution(balancer_addresses, backend_addresses); // Balancer explicitly tells client to fallback. LoadBalanceResponse resp; resp.mutable_fallback_response(); @@ -1404,10 +1394,11 @@ TEST_F(SingleBalancerTest, FallbackControlledByBalancer_BeforeFirstServerlist) { TEST_F(SingleBalancerTest, FallbackControlledByBalancer_AfterFirstServerlist) { // Return one balancer and one fallback backend (backend 0). - std::vector addresses; - addresses.emplace_back(AddressData{balancers_[0]->port_, true, ""}); - addresses.emplace_back(AddressData{backends_[0]->port_, false, ""}); - SetNextResolution(addresses); + std::vector balancer_addresses; + balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); + std::vector backend_addresses; + backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""}); + SetNextResolution(balancer_addresses, backend_addresses); // Balancer initially sends serverlist, then tells client to fall back, // then sends the serverlist again. // The serverlist points to backend 1. 
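[Editor's aside: condensed recap, not part of this diff.] After this change the grpclb tests all feed the fake resolver the same way: fallback backends go into Resolver::Result::addresses, balancer addresses travel in a dedicated channel arg, and grpclb is selected explicitly through the service config instead of the old GRPC_ARG_ADDRESS_IS_BALANCER flag. A condensed sketch of that recipe, assuming the helpers and fixture members defined earlier in this file (error handling trimmed):

grpc_core::ExecCtx exec_ctx;
grpc_core::Resolver::Result result;
// Fallback backends are ordinary resolved addresses.
result.addresses = CreateLbAddressesFromAddressDataList(backend_address_data);
// Select the grpclb policy explicitly via the service config.
grpc_error* error = GRPC_ERROR_NONE;
result.service_config = grpc_core::ServiceConfig::Create(
    "{\"loadBalancingConfig\":[{\"grpclb\":{}}]}", &error);
GPR_ASSERT(error == GRPC_ERROR_NONE);
// Balancer addresses ride in their own channel arg rather than being tagged
// with GRPC_ARG_ADDRESS_IS_BALANCER.
grpc_core::ServerAddressList balancer_addresses =
    CreateLbAddressesFromAddressDataList(balancer_address_data);
grpc_arg arg = grpc_core::CreateGrpclbBalancerAddressesArg(&balancer_addresses);
result.args = grpc_channel_args_copy_and_add(nullptr, &arg, 1);
response_generator_->SetResponse(std::move(result));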
@@ -1483,7 +1474,7 @@ TEST_F(UpdatesTest, UpdateBalancersButKeepUsingOriginalBalancer) { EXPECT_EQ(0U, balancers_[2]->service_.response_count()); std::vector addresses; - addresses.emplace_back(AddressData{balancers_[1]->port_, true, ""}); + addresses.emplace_back(AddressData{balancers_[1]->port_, ""}); gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 =========="); SetNextResolution(addresses); gpr_log(GPR_INFO, "========= UPDATE 1 DONE =========="); @@ -1542,9 +1533,9 @@ TEST_F(UpdatesTest, UpdateBalancersRepeated) { EXPECT_EQ(0U, balancers_[2]->service_.response_count()); std::vector addresses; - addresses.emplace_back(AddressData{balancers_[0]->port_, true, ""}); - addresses.emplace_back(AddressData{balancers_[1]->port_, true, ""}); - addresses.emplace_back(AddressData{balancers_[2]->port_, true, ""}); + addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); + addresses.emplace_back(AddressData{balancers_[1]->port_, ""}); + addresses.emplace_back(AddressData{balancers_[2]->port_, ""}); gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 =========="); SetNextResolution(addresses); gpr_log(GPR_INFO, "========= UPDATE 1 DONE =========="); @@ -1562,8 +1553,8 @@ TEST_F(UpdatesTest, UpdateBalancersRepeated) { balancers_[0]->service_.NotifyDoneWithServerlists(); addresses.clear(); - addresses.emplace_back(AddressData{balancers_[0]->port_, true, ""}); - addresses.emplace_back(AddressData{balancers_[1]->port_, true, ""}); + addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); + addresses.emplace_back(AddressData{balancers_[1]->port_, ""}); gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 2 =========="); SetNextResolution(addresses); gpr_log(GPR_INFO, "========= UPDATE 2 DONE =========="); @@ -1583,7 +1574,7 @@ TEST_F(UpdatesTest, UpdateBalancersRepeated) { TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) { std::vector addresses; - addresses.emplace_back(AddressData{balancers_[0]->port_, true, ""}); + addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); SetNextResolution(addresses); const std::vector first_backend{GetBackendPorts()[0]}; const std::vector second_backend{GetBackendPorts()[1]}; @@ -1623,7 +1614,7 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) { EXPECT_EQ(0U, balancers_[2]->service_.response_count()); addresses.clear(); - addresses.emplace_back(AddressData{balancers_[1]->port_, true, ""}); + addresses.emplace_back(AddressData{balancers_[1]->port_, ""}); gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 =========="); SetNextResolution(addresses); gpr_log(GPR_INFO, "========= UPDATE 1 DONE =========="); @@ -1660,18 +1651,20 @@ TEST_F(UpdatesTest, ReresolveDeadBackend) { ResetStub(500); // The first resolution contains the addresses of a balancer that never // responds, and a fallback backend. - std::vector addresses; - addresses.emplace_back(AddressData{balancers_[0]->port_, true, ""}); - addresses.emplace_back(AddressData{backends_[0]->port_, false, ""}); - SetNextResolution(addresses); + std::vector balancer_addresses; + balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); + std::vector backend_addresses; + backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""}); + SetNextResolution(balancer_addresses, backend_addresses); // Ask channel to connect to trigger resolver creation. channel_->GetState(true); // The re-resolution result will contain the addresses of the same balancer // and a new fallback backend. 
- addresses.clear(); - addresses.emplace_back(AddressData{balancers_[0]->port_, true, ""}); - addresses.emplace_back(AddressData{backends_[1]->port_, false, ""}); - SetNextReresolutionResponse(addresses); + balancer_addresses.clear(); + balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); + backend_addresses.clear(); + backend_addresses.emplace_back(AddressData{backends_[1]->port_, ""}); + SetNextReresolutionResponse(balancer_addresses, backend_addresses); // Start servers and send 10 RPCs per server. gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH =========="); @@ -1717,22 +1710,22 @@ class UpdatesWithClientLoadReportingTest : public GrpclbEnd2endTest { }; TEST_F(UpdatesWithClientLoadReportingTest, ReresolveDeadBalancer) { - // Ask channel to connect to trigger resolver creation. - channel_->GetState(true); - std::vector addresses; - addresses.emplace_back(AddressData{balancers_[0]->port_, true, ""}); - SetNextResolution(addresses); - addresses.clear(); - addresses.emplace_back(AddressData{balancers_[1]->port_, true, ""}); - SetNextReresolutionResponse(addresses); const std::vector first_backend{GetBackendPorts()[0]}; const std::vector second_backend{GetBackendPorts()[1]}; - ScheduleResponseForBalancer( 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0); ScheduleResponseForBalancer( 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0); + // Ask channel to connect to trigger resolver creation. + channel_->GetState(true); + std::vector addresses; + addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); + SetNextResolution(addresses); + addresses.clear(); + addresses.emplace_back(AddressData{balancers_[1]->port_, ""}); + SetNextReresolutionResponse(addresses); + // Start servers and send 10 RPCs per server. gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH =========="); CheckRpcSendOk(10); @@ -1900,7 +1893,11 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, Vanilla) { // and sent a single response. EXPECT_EQ(1U, balancers_[0]->service_.response_count()); - const ClientStats client_stats = WaitForLoadReports(); + ClientStats client_stats; + do { + client_stats += WaitForLoadReports(); + } while (client_stats.num_calls_finished != + kNumRpcsPerAddress * num_backends_ + num_ok); EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok, client_stats.num_calls_started); EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok, diff --git a/test/cpp/end2end/nonblocking_test.cc b/test/cpp/end2end/nonblocking_test.cc index eb651df21df..4be070ec717 100644 --- a/test/cpp/end2end/nonblocking_test.cc +++ b/test/cpp/end2end/nonblocking_test.cc @@ -39,14 +39,18 @@ #ifdef GRPC_POSIX_SOCKET // Thread-local variable to so that only polls from this test assert -// non-blocking (not polls from resolver, timer thread, etc) -GPR_TLS_DECL(g_is_nonblocking_test); +// non-blocking (not polls from resolver, timer thread, etc), and only when the +// thread is waiting on polls caused by CompletionQueue::AsyncNext (not for +// picking a port or other reasons). +GPR_TLS_DECL(g_is_nonblocking_poll); namespace { int maybe_assert_non_blocking_poll(struct pollfd* pfds, nfds_t nfds, int timeout) { - if (gpr_tls_get(&g_is_nonblocking_test)) { + // Only assert that this poll should have zero timeout if we're in the + // middle of a zero-timeout CQ Next. 
+ if (gpr_tls_get(&g_is_nonblocking_poll)) { GPR_ASSERT(timeout == 0); } return poll(pfds, nfds, timeout); @@ -74,11 +78,17 @@ class NonblockingTest : public ::testing::Test { } bool LoopForTag(void** tag, bool* ok) { + // Temporarily set the thread-local nonblocking poll flag so that the polls + // caused by this loop are indeed sent by the library with zero timeout. + intptr_t orig_val = gpr_tls_get(&g_is_nonblocking_poll); + gpr_tls_set(&g_is_nonblocking_poll, static_cast(true)); for (;;) { auto r = cq_->AsyncNext(tag, ok, gpr_time_0(GPR_CLOCK_REALTIME)); if (r == CompletionQueue::SHUTDOWN) { + gpr_tls_set(&g_is_nonblocking_poll, orig_val); return false; } else if (r == CompletionQueue::GOT_EVENT) { + gpr_tls_set(&g_is_nonblocking_poll, orig_val); return true; } } @@ -185,10 +195,20 @@ int main(int argc, char** argv) { #ifdef GRPC_POSIX_SOCKET // Override the poll function before anything else can happen grpc_poll_function = maybe_assert_non_blocking_poll; -#endif // GRPC_POSIX_SOCKET grpc::testing::TestEnvironment env(argc, argv); ::testing::InitGoogleTest(&argc, argv); + gpr_tls_init(&g_is_nonblocking_poll); + + // Start the nonblocking poll thread-local variable as false because the + // thread that issues RPCs starts by picking a port (which has non-zero + // timeout). + gpr_tls_set(&g_is_nonblocking_poll, static_cast(false)); + int ret = RUN_ALL_TESTS(); + gpr_tls_destroy(&g_is_nonblocking_poll); return ret; +#else // GRPC_POSIX_SOCKET + return 0; +#endif // GRPC_POSIX_SOCKET } diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc index 89611d4f3e2..3a814aa75a7 100644 --- a/test/cpp/end2end/xds_end2end_test.cc +++ b/test/cpp/end2end/xds_end2end_test.cc @@ -400,40 +400,6 @@ class AdsServiceImpl : public AggregatedDiscoveryService::Service, using Stream = ServerReaderWriter; - // A queue of resource type/name pairs that have changed since the client - // subscribed to them. - using UpdateQueue = std::deque< - std::pair>; - - // A struct representing a client's subscription to a particular resource. - struct SubscriptionState { - // Version that the client currently knows about. - int current_version = 0; - // The queue upon which to place updates when the resource is updated. - UpdateQueue* update_queue; - }; - - // A struct representing the a client's subscription to all the resources. - using SubscriptionNameMap = - std::map; - using SubscriptionMap = - std::map; - - // A struct representing the current state for a resource: - // - the version of the resource that is set by the SetResource() methods. - // - a list of subscriptions interested in this resource. - struct ResourceState { - int version = 0; - absl::optional resource; - std::set subscriptions; - }; - - // A struct representing the current state for all resources: - // LDS, CDS, EDS, and RDS for the class as a whole. - using ResourceNameMap = - std::map; - using ResourceMap = std::map; - AdsServiceImpl(bool enable_load_reporting) { // Construct RDS response data. default_route_config_.set_name(kDefaultResourceName); @@ -459,118 +425,6 @@ class AdsServiceImpl : public AggregatedDiscoveryService::Service, SetCdsResource(default_cluster_, kDefaultResourceName); } - // Starting a thread to do blocking read on the stream until cancel. 
- void BlockingRead(Stream* stream, std::deque* requests, - bool* stream_closed) { - DiscoveryRequest request; - bool seen_first_request = false; - while (stream->Read(&request)) { - if (!seen_first_request) { - EXPECT_TRUE(request.has_node()); - ASSERT_FALSE(request.node().client_features().empty()); - EXPECT_EQ(request.node().client_features(0), - "envoy.lb.does_not_support_overprovisioning"); - seen_first_request = true; - } - { - grpc_core::MutexLock lock(&ads_mu_); - requests->emplace_back(std::move(request)); - } - } - gpr_log(GPR_INFO, "ADS[%p]: Null read, stream closed", this); - grpc_core::MutexLock lock(&ads_mu_); - *stream_closed = true; - } - - // Checks whether the client needs to receive a newer version of - // the resource. If so, updates subscription_state->current_version and - // returns true. - bool ClientNeedsResourceUpdate(const ResourceState& resource_state, - SubscriptionState* subscription_state) { - if (subscription_state->current_version < resource_state.version) { - subscription_state->current_version = resource_state.version; - return true; - } - return false; - } - - // Subscribes to a resource if not already subscribed: - // 1. Sets the update_queue field in subscription_state. - // 2. Adds subscription_state to resource_state->subscriptions. - void MaybeSubscribe(const std::string& resource_type, - const std::string& resource_name, - SubscriptionState* subscription_state, - ResourceState* resource_state, - UpdateQueue* update_queue) { - // The update_queue will be null if we were not previously subscribed. - if (subscription_state->update_queue != nullptr) return; - subscription_state->update_queue = update_queue; - resource_state->subscriptions.emplace(subscription_state); - gpr_log(GPR_INFO, "ADS[%p]: subscribe to resource type %s name %s state %p", - this, resource_type.c_str(), resource_name.c_str(), - &subscription_state); - } - - // Removes subscriptions for resources no longer present in the - // current request. - void ProcessUnsubscriptions( - const std::string& resource_type, - const std::set& resources_in_current_request, - SubscriptionNameMap* subscription_name_map, - ResourceNameMap* resource_name_map) { - for (auto it = subscription_name_map->begin(); - it != subscription_name_map->end();) { - const std::string& resource_name = it->first; - SubscriptionState& subscription_state = it->second; - if (resources_in_current_request.find(resource_name) != - resources_in_current_request.end()) { - ++it; - continue; - } - gpr_log(GPR_INFO, "ADS[%p]: Unsubscribe to type=%s name=%s state=%p", - this, resource_type.c_str(), resource_name.c_str(), - &subscription_state); - auto resource_it = resource_name_map->find(resource_name); - GPR_ASSERT(resource_it != resource_name_map->end()); - auto& resource_state = resource_it->second; - resource_state.subscriptions.erase(&subscription_state); - if (resource_state.subscriptions.empty() && - !resource_state.resource.has_value()) { - resource_name_map->erase(resource_it); - } - it = subscription_name_map->erase(it); - } - } - - // Completing the building a DiscoveryResponse by adding common information - // for all resources and by adding all subscribed resources for LDS and CDS. 
- void CompleteBuildingDiscoveryResponse( - const std::string& resource_type, const int version, - const SubscriptionNameMap& subscription_name_map, - const std::set& resources_added_to_response, - DiscoveryResponse* response) { - resource_type_response_state_[resource_type] = SENT; - response->set_type_url(resource_type); - response->set_version_info(absl::StrCat(version)); - response->set_nonce(absl::StrCat(version)); - if (resource_type == kLdsTypeUrl || resource_type == kCdsTypeUrl) { - // For LDS and CDS we must send back all subscribed resources - // (even the unchanged ones) - for (const auto& p : subscription_name_map) { - const std::string& resource_name = p.first; - if (resources_added_to_response.find(resource_name) == - resources_added_to_response.end()) { - const ResourceState& resource_state = - resource_map_[resource_type][resource_name]; - if (resource_state.resource.has_value()) { - response->add_resources()->CopyFrom( - resource_state.resource.value()); - } - } - } - } - } - Status StreamAggregatedResources(ServerContext* context, Stream* stream) override { gpr_log(GPR_INFO, "ADS[%p]: StreamAggregatedResources starts", this); @@ -929,6 +783,152 @@ class AdsServiceImpl : public AggregatedDiscoveryService::Service, } private: + // A queue of resource type/name pairs that have changed since the client + // subscribed to them. + using UpdateQueue = std::deque< + std::pair>; + + // A struct representing a client's subscription to a particular resource. + struct SubscriptionState { + // Version that the client currently knows about. + int current_version = 0; + // The queue upon which to place updates when the resource is updated. + UpdateQueue* update_queue; + }; + + // A struct representing the a client's subscription to all the resources. + using SubscriptionNameMap = + std::map; + using SubscriptionMap = + std::map; + + // A struct representing the current state for a resource: + // - the version of the resource that is set by the SetResource() methods. + // - a list of subscriptions interested in this resource. + struct ResourceState { + int version = 0; + absl::optional resource; + std::set subscriptions; + }; + + // A struct representing the current state for all resources: + // LDS, CDS, EDS, and RDS for the class as a whole. + using ResourceNameMap = + std::map; + using ResourceMap = std::map; + + // Starting a thread to do blocking read on the stream until cancel. + void BlockingRead(Stream* stream, std::deque* requests, + bool* stream_closed) { + DiscoveryRequest request; + bool seen_first_request = false; + while (stream->Read(&request)) { + if (!seen_first_request) { + EXPECT_TRUE(request.has_node()); + ASSERT_FALSE(request.node().client_features().empty()); + EXPECT_EQ(request.node().client_features(0), + "envoy.lb.does_not_support_overprovisioning"); + seen_first_request = true; + } + { + grpc_core::MutexLock lock(&ads_mu_); + requests->emplace_back(std::move(request)); + } + } + gpr_log(GPR_INFO, "ADS[%p]: Null read, stream closed", this); + grpc_core::MutexLock lock(&ads_mu_); + *stream_closed = true; + } + + // Checks whether the client needs to receive a newer version of + // the resource. If so, updates subscription_state->current_version and + // returns true. 
+ bool ClientNeedsResourceUpdate(const ResourceState& resource_state, + SubscriptionState* subscription_state) { + if (subscription_state->current_version < resource_state.version) { + subscription_state->current_version = resource_state.version; + return true; + } + return false; + } + + // Subscribes to a resource if not already subscribed: + // 1. Sets the update_queue field in subscription_state. + // 2. Adds subscription_state to resource_state->subscriptions. + void MaybeSubscribe(const std::string& resource_type, + const std::string& resource_name, + SubscriptionState* subscription_state, + ResourceState* resource_state, + UpdateQueue* update_queue) { + // The update_queue will be null if we were not previously subscribed. + if (subscription_state->update_queue != nullptr) return; + subscription_state->update_queue = update_queue; + resource_state->subscriptions.emplace(subscription_state); + gpr_log(GPR_INFO, "ADS[%p]: subscribe to resource type %s name %s state %p", + this, resource_type.c_str(), resource_name.c_str(), + &subscription_state); + } + + // Removes subscriptions for resources no longer present in the + // current request. + void ProcessUnsubscriptions( + const std::string& resource_type, + const std::set& resources_in_current_request, + SubscriptionNameMap* subscription_name_map, + ResourceNameMap* resource_name_map) { + for (auto it = subscription_name_map->begin(); + it != subscription_name_map->end();) { + const std::string& resource_name = it->first; + SubscriptionState& subscription_state = it->second; + if (resources_in_current_request.find(resource_name) != + resources_in_current_request.end()) { + ++it; + continue; + } + gpr_log(GPR_INFO, "ADS[%p]: Unsubscribe to type=%s name=%s state=%p", + this, resource_type.c_str(), resource_name.c_str(), + &subscription_state); + auto resource_it = resource_name_map->find(resource_name); + GPR_ASSERT(resource_it != resource_name_map->end()); + auto& resource_state = resource_it->second; + resource_state.subscriptions.erase(&subscription_state); + if (resource_state.subscriptions.empty() && + !resource_state.resource.has_value()) { + resource_name_map->erase(resource_it); + } + it = subscription_name_map->erase(it); + } + } + + // Completing the building a DiscoveryResponse by adding common information + // for all resources and by adding all subscribed resources for LDS and CDS. + void CompleteBuildingDiscoveryResponse( + const std::string& resource_type, const int version, + const SubscriptionNameMap& subscription_name_map, + const std::set& resources_added_to_response, + DiscoveryResponse* response) { + resource_type_response_state_[resource_type] = SENT; + response->set_type_url(resource_type); + response->set_version_info(absl::StrCat(version)); + response->set_nonce(absl::StrCat(version)); + if (resource_type == kLdsTypeUrl || resource_type == kCdsTypeUrl) { + // For LDS and CDS we must send back all subscribed resources + // (even the unchanged ones) + for (const auto& p : subscription_name_map) { + const std::string& resource_name = p.first; + if (resources_added_to_response.find(resource_name) == + resources_added_to_response.end()) { + const ResourceState& resource_state = + resource_map_[resource_type][resource_name]; + if (resource_state.resource.has_value()) { + response->add_resources()->CopyFrom( + resource_state.resource.value()); + } + } + } + } + } + grpc_core::CondVar ads_cond_; // Protect the members below. 
grpc_core::Mutex ads_mu_; @@ -1150,7 +1150,7 @@ class XdsEnd2endTest : public ::testing::TestWithParam { args.SetInt(GRPC_ARG_XDS_FALLBACK_TIMEOUT_MS, fallback_timeout); } if (failover_timeout > 0) { - args.SetInt(GRPC_ARG_XDS_FAILOVER_TIMEOUT_MS, failover_timeout); + args.SetInt(GRPC_ARG_PRIORITY_FAILOVER_TIMEOUT_MS, failover_timeout); } if (xds_resource_does_not_exist_timeout > 0) { args.SetInt(GRPC_ARG_XDS_RESOURCE_DOES_NOT_EXIST_TIMEOUT_MS, @@ -1282,7 +1282,8 @@ class XdsEnd2endTest : public ::testing::TestWithParam { : kDefaultServiceConfigWithoutLoadReporting_; result.service_config = grpc_core::ServiceConfig::Create(service_config_json, &error); - GRPC_ERROR_UNREF(error); + ASSERT_NE(result.service_config.get(), nullptr); + ASSERT_EQ(error, GRPC_ERROR_NONE) << grpc_error_string(error); grpc_arg arg = grpc_core::FakeResolverResponseGenerator::MakeChannelArg( lb_channel_response_generator == nullptr ? lb_channel_response_generator_.get() @@ -1314,7 +1315,8 @@ class XdsEnd2endTest : public ::testing::TestWithParam { grpc_error* error = GRPC_ERROR_NONE; result.service_config = grpc_core::ServiceConfig::Create(service_config_json, &error); - GRPC_ERROR_UNREF(error); + ASSERT_NE(result.service_config.get(), nullptr); + ASSERT_EQ(error, GRPC_ERROR_NONE) << grpc_error_string(error); } if (lb_channel_response_generator == nullptr) { lb_channel_response_generator = lb_channel_response_generator_.get(); @@ -1340,11 +1342,15 @@ class XdsEnd2endTest : public ::testing::TestWithParam { } Status SendRpc(EchoResponse* response = nullptr, int timeout_ms = 1000, - bool wait_for_ready = false) { + bool wait_for_ready = false, bool server_fail = false) { const bool local_response = (response == nullptr); if (local_response) response = new EchoResponse; EchoRequest request; request.set_message(kRequestMessage_); + if (server_fail) { + request.mutable_param()->mutable_expected_error()->set_code( + GRPC_STATUS_FAILED_PRECONDITION); + } ClientContext context; context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms)); if (wait_for_ready) context.set_wait_for_ready(true); @@ -1364,9 +1370,11 @@ class XdsEnd2endTest : public ::testing::TestWithParam { } } - void CheckRpcSendFailure() { - const Status status = SendRpc(); - EXPECT_FALSE(status.ok()); + void CheckRpcSendFailure(const size_t times = 1, bool server_fail = false) { + for (size_t i = 0; i < times; ++i) { + const Status status = SendRpc(nullptr, 1000, false, server_fail); + EXPECT_FALSE(status.ok()); + } } public: @@ -2344,19 +2352,6 @@ TEST_P(EdsTest, Timeout) { CheckRpcSendFailure(); } -// Tests that EDS client should send a NACK if the EDS update contains -// no localities but does not say to drop all calls. -TEST_P(EdsTest, NacksNoLocalitiesWithoutDropAll) { - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - AdsServiceImpl::EdsResourceArgs args; - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args), kDefaultResourceName); - CheckRpcSendFailure(); - EXPECT_EQ(balancers_[0]->ads_service()->eds_response_state(), - AdsServiceImpl::NACKED); -} - // Tests that EDS client should send a NACK if the EDS update contains // sparse priorities. TEST_P(EdsTest, NacksSparsePriorityList) { @@ -2416,6 +2411,45 @@ TEST_P(LocalityMapTest, WeightedRoundRobin) { ::testing::Le(kLocalityWeightRate1 * (1 + kErrorTolerance)))); } +// Tests that we correctly handle a locality containing no endpoints. 
+TEST_P(LocalityMapTest, LocalityContainingNoEndpoints) { + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + const size_t kNumRpcs = 5000; + // EDS response contains 2 localities, one with no endpoints. + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts()}, + {"locality1", {}}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args), kDefaultResourceName); + // Wait for all backends to be ready. + WaitForAllBackends(); + // Send kNumRpcs RPCs. + CheckRpcSendOk(kNumRpcs); + // All traffic should go to the reachable locality. + EXPECT_EQ(backends_[0]->backend_service()->request_count(), + kNumRpcs / backends_.size()); + EXPECT_EQ(backends_[1]->backend_service()->request_count(), + kNumRpcs / backends_.size()); + EXPECT_EQ(backends_[2]->backend_service()->request_count(), + kNumRpcs / backends_.size()); + EXPECT_EQ(backends_[3]->backend_service()->request_count(), + kNumRpcs / backends_.size()); +} + +// EDS update with no localities. +TEST_P(LocalityMapTest, NoLocalities) { + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + // EDS response contains no localities. + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource({}), kDefaultResourceName); + Status status = SendRpc(); + EXPECT_FALSE(status.ok()); + EXPECT_EQ(status.error_code(), StatusCode::UNAVAILABLE); +} + // Tests that the locality map can work properly even when it contains a large // number of localities. TEST_P(LocalityMapTest, StressTest) { @@ -2593,6 +2627,24 @@ TEST_F(FailoverTest, ChooseHighestPriority) { } } +// Does not choose priority with no endpoints. +TEST_P(FailoverTest, DoesNotUsePriorityWithNoEndpoints) { + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 1}, + {"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 2}, + {"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 3}, + {"locality3", {}, kDefaultLocalityWeight, 0}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args), kDefaultResourceName); + WaitForBackend(0, false); + for (size_t i = 1; i < 3; ++i) { + EXPECT_EQ(0U, backends_[i]->backend_service()->request_count()); + } +} + // If the higher priority localities are not reachable, failover to the highest // priority among the rest. TEST_P(FailoverTest, Failover) { @@ -2976,9 +3028,7 @@ TEST_P(DropTest, DropAll) { const uint32_t kDropPerMillionForLb = 100000; const uint32_t kDropPerMillionForThrottle = 1000000; // The ADS response contains two drop categories.
- AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts()}, - }); + AdsServiceImpl::EdsResourceArgs args; args.drop_categories = {{kLbDropType, kDropPerMillionForLb}, {kThrottleDropType, kDropPerMillionForThrottle}}; balancers_[0]->ads_service()->SetEdsResource( @@ -2987,8 +3037,8 @@ TEST_P(DropTest, DropAll) { for (size_t i = 0; i < kNumRpcs; ++i) { EchoResponse response; const Status status = SendRpc(&response); - EXPECT_TRUE(!status.ok() && status.error_message() == - "Call dropped by load balancing policy"); + EXPECT_EQ(status.error_code(), StatusCode::UNAVAILABLE); + EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy"); } } @@ -3440,7 +3490,8 @@ class ClientLoadReportingTest : public XdsEnd2endTest { TEST_P(ClientLoadReportingTest, Vanilla) { SetNextResolution({}); SetNextResolutionForLbChannel({balancers_[0]->port()}); - const size_t kNumRpcsPerAddress = 100; + const size_t kNumRpcsPerAddress = 10; + const size_t kNumFailuresPerAddress = 3; // TODO(juanlishen): Partition the backends after multiple localities is // tested. AdsServiceImpl::EdsResourceArgs args({ @@ -3455,9 +3506,11 @@ TEST_P(ClientLoadReportingTest, Vanilla) { std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends(); // Send kNumRpcsPerAddress RPCs per server. CheckRpcSendOk(kNumRpcsPerAddress * num_backends_); - // Each backend should have gotten 100 requests. + CheckRpcSendFailure(kNumFailuresPerAddress * num_backends_, + /*server_fail=*/true); + // Check that each backend got the right number of requests. for (size_t i = 0; i < backends_.size(); ++i) { - EXPECT_EQ(kNumRpcsPerAddress, + EXPECT_EQ(kNumRpcsPerAddress + kNumFailuresPerAddress, backends_[i]->backend_service()->request_count()); } // The LRS service got a single request, and sent a single response. 
@@ -3471,9 +3524,11 @@ TEST_P(ClientLoadReportingTest, Vanilla) { EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok, client_stats.total_successful_requests()); EXPECT_EQ(0U, client_stats.total_requests_in_progress()); - EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok, + EXPECT_EQ((kNumRpcsPerAddress + kNumFailuresPerAddress) * num_backends_ + + num_ok + num_failure, client_stats.total_issued_requests()); - EXPECT_EQ(0U, client_stats.total_error_requests()); + EXPECT_EQ(kNumFailuresPerAddress * num_backends_ + num_failure, + client_stats.total_error_requests()); EXPECT_EQ(0U, client_stats.total_dropped_requests()); } diff --git a/test/cpp/microbenchmarks/BUILD b/test/cpp/microbenchmarks/BUILD index 7e8d371fc3c..e8065fa8964 100644 --- a/test/cpp/microbenchmarks/BUILD +++ b/test/cpp/microbenchmarks/BUILD @@ -206,7 +206,6 @@ grpc_cc_test( srcs = [ "bm_fullstack_streaming_pump.cc", ], - flaky = True, # TODO(b/150422385) tags = [ "no_mac", # to emulate "excluded_poll_engines: poll" "no_windows", diff --git a/test/cpp/microbenchmarks/bm_fullstack_streaming_pump.cc b/test/cpp/microbenchmarks/bm_fullstack_streaming_pump.cc index d4533e6c78e..d0df2417139 100644 --- a/test/cpp/microbenchmarks/bm_fullstack_streaming_pump.cc +++ b/test/cpp/microbenchmarks/bm_fullstack_streaming_pump.cc @@ -34,8 +34,6 @@ BENCHMARK_TEMPLATE(BM_PumpStreamClientToServer, UDS) ->Range(0, 128 * 1024 * 1024); BENCHMARK_TEMPLATE(BM_PumpStreamClientToServer, InProcess) ->Range(0, 128 * 1024 * 1024); -BENCHMARK_TEMPLATE(BM_PumpStreamClientToServer, SockPair) - ->Range(0, 128 * 1024 * 1024); BENCHMARK_TEMPLATE(BM_PumpStreamClientToServer, InProcessCHTTP2) ->Range(0, 128 * 1024 * 1024); BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, TCP) @@ -44,19 +42,15 @@ BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, UDS) ->Range(0, 128 * 1024 * 1024); BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, InProcess) ->Range(0, 128 * 1024 * 1024); -BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, SockPair) - ->Range(0, 128 * 1024 * 1024); BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, InProcessCHTTP2) ->Range(0, 128 * 1024 * 1024); BENCHMARK_TEMPLATE(BM_PumpStreamClientToServer, MinTCP)->Arg(0); BENCHMARK_TEMPLATE(BM_PumpStreamClientToServer, MinUDS)->Arg(0); BENCHMARK_TEMPLATE(BM_PumpStreamClientToServer, MinInProcess)->Arg(0); -BENCHMARK_TEMPLATE(BM_PumpStreamClientToServer, MinSockPair)->Arg(0); BENCHMARK_TEMPLATE(BM_PumpStreamClientToServer, MinInProcessCHTTP2)->Arg(0); BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, MinTCP)->Arg(0); BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, MinUDS)->Arg(0); BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, MinInProcess)->Arg(0); -BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, MinSockPair)->Arg(0); BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, MinInProcessCHTTP2)->Arg(0); } // namespace testing diff --git a/test/cpp/naming/address_sorting_test.cc b/test/cpp/naming/address_sorting_test.cc index c04ac4d83ba..dd49a2c92e5 100644 --- a/test/cpp/naming/address_sorting_test.cc +++ b/test/cpp/naming/address_sorting_test.cc @@ -212,7 +212,7 @@ TEST_F(AddressSortingTest, TestDepriotizesUnreachableAddresses) { {"1.2.3.4:443", AF_INET}, {"5.6.7.8:443", AF_INET}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs(lb_addrs, { "1.2.3.4:443", "5.6.7.8:443", @@ -231,7 +231,7 @@ TEST_F(AddressSortingTest, TestDepriotizesUnsupportedDomainIpv6) { {"[2607:f8b0:400a:801::1002]:443", AF_INET6}, {"1.2.3.4:443", AF_INET}, }); - 
grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs(lb_addrs, { "1.2.3.4:443", "[2607:f8b0:400a:801::1002]:443", @@ -251,7 +251,7 @@ TEST_F(AddressSortingTest, TestDepriotizesUnsupportedDomainIpv4) { {"[2607:f8b0:400a:801::1002]:443", AF_INET6}, {"1.2.3.4:443", AF_INET}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs(lb_addrs, { "[2607:f8b0:400a:801::1002]:443", "1.2.3.4:443", @@ -275,7 +275,7 @@ TEST_F(AddressSortingTest, TestDepriotizesNonMatchingScope) { {"[2000:f8b0:400a:801::1002]:443", AF_INET6}, {"[fec0::5000]:443", AF_INET6}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs(lb_addrs, { "[fec0::5000]:443", "[2000:f8b0:400a:801::1002]:443", @@ -298,7 +298,7 @@ TEST_F(AddressSortingTest, TestUsesLabelFromDefaultTable) { {"[2002::5001]:443", AF_INET6}, {"[2001::5001]:443", AF_INET6}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs(lb_addrs, { "[2001::5001]:443", "[2002::5001]:443", @@ -321,7 +321,7 @@ TEST_F(AddressSortingTest, TestUsesLabelFromDefaultTableInputFlipped) { {"[2001::5001]:443", AF_INET6}, {"[2002::5001]:443", AF_INET6}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs(lb_addrs, { "[2001::5001]:443", "[2002::5001]:443", @@ -344,7 +344,7 @@ TEST_F(AddressSortingTest, {"[3ffe::5001]:443", AF_INET6}, {"1.2.3.4:443", AF_INET}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs( lb_addrs, { // The AF_INET address should be IPv4-mapped by the sort, @@ -377,7 +377,7 @@ TEST_F(AddressSortingTest, {v4_compat_dest, AF_INET6}, {"[::1]:443", AF_INET6}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs(lb_addrs, { "[::1]:443", v4_compat_dest, @@ -400,7 +400,7 @@ TEST_F(AddressSortingTest, {"[1234::2]:443", AF_INET6}, {"[::1]:443", AF_INET6}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs( lb_addrs, { @@ -424,7 +424,7 @@ TEST_F(AddressSortingTest, {"[2001::1234]:443", AF_INET6}, {"[2000::5001]:443", AF_INET6}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs( lb_addrs, { // The 2000::/16 address should match the ::/0 prefix rule @@ -448,7 +448,7 @@ TEST_F( {"[2001::1231]:443", AF_INET6}, {"[2000::5001]:443", AF_INET6}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs(lb_addrs, { "[2000::5001]:443", "[2001::1231]:443", @@ -469,7 +469,7 @@ TEST_F(AddressSortingTest, {"[fec0::1234]:443", AF_INET6}, {"[fc00::5001]:443", AF_INET6}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs(lb_addrs, { "[fc00::5001]:443", "[fec0::1234]:443", @@ -494,7 +494,7 @@ TEST_F( {"[::ffff:1.1.1.2]:443", AF_INET6}, {"[1234::2]:443", AF_INET6}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + 
grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs(lb_addrs, { // ::ffff:0:2 should match the v4-mapped // precedence entry and be deprioritized. @@ -521,7 +521,7 @@ TEST_F(AddressSortingTest, TestPrefersSmallerScope) { {"[3ffe::5001]:443", AF_INET6}, {"[fec0::1234]:443", AF_INET6}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs(lb_addrs, { "[fec0::1234]:443", "[3ffe::5001]:443", @@ -546,7 +546,7 @@ TEST_F(AddressSortingTest, TestPrefersLongestMatchingSrcDstPrefix) { {"[3ffe:5001::]:443", AF_INET6}, {"[3ffe:1234::]:443", AF_INET6}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs(lb_addrs, { "[3ffe:1234::]:443", "[3ffe:5001::]:443", @@ -567,7 +567,7 @@ TEST_F(AddressSortingTest, {"[3ffe::5001]:443", AF_INET6}, {"[3ffe::1234]:443", AF_INET6}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs(lb_addrs, { "[3ffe::1234]:443", "[3ffe::5001]:443", @@ -587,7 +587,7 @@ TEST_F(AddressSortingTest, TestPrefersLongestPrefixStressInnerBytePrefix) { {"[3ffe:8000::]:443", AF_INET6}, {"[3ffe:2000::]:443", AF_INET6}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs(lb_addrs, { "[3ffe:2000::]:443", "[3ffe:8000::]:443", @@ -607,7 +607,7 @@ TEST_F(AddressSortingTest, TestPrefersLongestPrefixDiffersOnHighestBitOfByte) { {"[3ffe:6::]:443", AF_INET6}, {"[3ffe:c::]:443", AF_INET6}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs(lb_addrs, { "[3ffe:c::]:443", "[3ffe:6::]:443", @@ -629,7 +629,7 @@ TEST_F(AddressSortingTest, TestPrefersLongestPrefixDiffersByLastBit) { {"[3ffe:1111:1111:1110::]:443", AF_INET6}, {"[3ffe:1111:1111:1111::]:443", AF_INET6}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs(lb_addrs, { "[3ffe:1111:1111:1111::]:443", "[3ffe:1111:1111:1110::]:443", @@ -651,7 +651,7 @@ TEST_F(AddressSortingTest, TestStableSort) { {"[3ffe::1234]:443", AF_INET6}, {"[3ffe::1235]:443", AF_INET6}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs(lb_addrs, { "[3ffe::1234]:443", "[3ffe::1235]:443", @@ -677,7 +677,7 @@ TEST_F(AddressSortingTest, TestStableSortFiveElements) { {"[3ffe::1234]:443", AF_INET6}, {"[3ffe::1235]:443", AF_INET6}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs(lb_addrs, { "[3ffe::1231]:443", "[3ffe::1232]:443", @@ -698,7 +698,7 @@ TEST_F(AddressSortingTest, TestStableSortNoSrcAddrsExist) { {"[3ffe::1234]:443", AF_INET6}, {"[3ffe::1235]:443", AF_INET6}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs(lb_addrs, { "[3ffe::1231]:443", "[3ffe::1232]:443", @@ -716,7 +716,7 @@ TEST_F(AddressSortingTest, TestStableSortNoSrcAddrsExistWithIpv4) { {"[::ffff:5.6.7.8]:443", AF_INET6}, {"1.2.3.4:443", AF_INET}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs(lb_addrs, { 
"[::ffff:5.6.7.8]:443", "1.2.3.4:443", @@ -744,7 +744,7 @@ TEST_F(AddressSortingTest, TestStableSortV4CompatAndSiteLocalAddresses) { {"[fec0::2000]:443", AF_INET6}, {v4_compat_dest, AF_INET6}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs(lb_addrs, { // The sort should be stable since @@ -765,7 +765,7 @@ TEST_F(AddressSortingTest, TestPrefersIpv6Loopback) { {"[::1]:443", AF_INET6}, {"127.0.0.1:443", AF_INET}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs(lb_addrs, { "[::1]:443", "127.0.0.1:443", @@ -779,7 +779,7 @@ TEST_F(AddressSortingTest, TestPrefersIpv6LoopbackInputsFlipped) { {"127.0.0.1:443", AF_INET}, {"[::1]:443", AF_INET6}, }); - grpc_cares_wrapper_address_sorting_sort(&lb_addrs); + grpc_cares_wrapper_address_sorting_sort(nullptr, &lb_addrs); VerifyLbAddrOutputs(lb_addrs, { "[::1]:443", "127.0.0.1:443", diff --git a/test/cpp/naming/resolver_component_test.cc b/test/cpp/naming/resolver_component_test.cc index fdbc6a1240d..47b17973d53 100644 --- a/test/cpp/naming/resolver_component_test.cc +++ b/test/cpp/naming/resolver_component_test.cc @@ -39,6 +39,7 @@ #include "test/cpp/util/test_config.h" #include "src/core/ext/filters/client_channel/client_channel.h" +#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h" #include "src/core/ext/filters/client_channel/parse_address.h" #include "src/core/ext/filters/client_channel/resolver.h" #include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h" @@ -462,19 +463,20 @@ class CheckingResultHandler : public ResultHandler { void CheckResult(const grpc_core::Resolver::Result& result) override { ArgsStruct* args = args_struct(); - gpr_log(GPR_INFO, "num addrs found: %" PRIdPTR ". expected %" PRIdPTR, - result.addresses.size(), args->expected_addrs.size()); - GPR_ASSERT(result.addresses.size() == args->expected_addrs.size()); std::vector found_lb_addrs; - for (size_t i = 0; i < result.addresses.size(); i++) { - const grpc_core::ServerAddress& addr = result.addresses[i]; - char* str; - grpc_sockaddr_to_string(&str, &addr.address(), 1 /* normalize */); - gpr_log(GPR_INFO, "%s", str); - found_lb_addrs.emplace_back( - GrpcLBAddress(std::string(str), addr.IsBalancer())); - gpr_free(str); + AddActualAddresses(result.addresses, /*is_balancer=*/false, + &found_lb_addrs); + const grpc_core::ServerAddressList* balancer_addresses = + grpc_core::FindGrpclbBalancerAddressesInChannelArgs(*result.args); + if (balancer_addresses != nullptr) { + AddActualAddresses(*balancer_addresses, /*is_balancer=*/true, + &found_lb_addrs); } + gpr_log(GPR_INFO, + "found %" PRIdPTR " backend addresses and %" PRIdPTR + " balancer addresses", + result.addresses.size(), + balancer_addresses == nullptr ? 
0L : balancer_addresses->size()); if (args->expected_addrs.size() != found_lb_addrs.size()) { gpr_log(GPR_DEBUG, "found lb addrs size is: %" PRIdPTR @@ -494,6 +496,20 @@ class CheckingResultHandler : public ResultHandler { CheckLBPolicyResultLocked(result.args, args); } } + + private: + static void AddActualAddresses(const grpc_core::ServerAddressList& addresses, + bool is_balancer, + std::vector* out) { + for (size_t i = 0; i < addresses.size(); i++) { + const grpc_core::ServerAddress& addr = addresses[i]; + char* str; + grpc_sockaddr_to_string(&str, &addr.address(), 1 /* normalize */); + gpr_log(GPR_INFO, "%s", str); + out->emplace_back(GrpcLBAddress(std::string(str), is_balancer)); + gpr_free(str); + } + } }; int g_fake_non_responsive_dns_server_port = -1; diff --git a/test/cpp/naming/resolver_component_tests_runner.py b/test/cpp/naming/resolver_component_tests_runner.py index 274c9a1b345..e5ff9cd294a 100755 --- a/test/cpp/naming/resolver_component_tests_runner.py +++ b/test/cpp/naming/resolver_component_tests_runner.py @@ -55,7 +55,7 @@ if cur_resolver and cur_resolver != 'ares': 'needs to use GRPC_DNS_RESOLVER=ares.')) test_runner_log('Exit 1 without running tests.') sys.exit(1) -os.environ.update({'GRPC_TRACE': 'cares_resolver'}) +os.environ.update({'GRPC_TRACE': 'cares_resolver,cares_address_sorting'}) def wait_until_dns_server_is_up(args, dns_server_subprocess, diff --git a/test/cpp/qps/driver.cc b/test/cpp/qps/driver.cc index 94c2feffc19..03a4477b4d8 100644 --- a/test/cpp/qps/driver.cc +++ b/test/cpp/qps/driver.cc @@ -313,11 +313,10 @@ std::unique_ptr RunScenario( gpr_log(GPR_INFO, "Starting server on %s (worker #%" PRIuPTR ")", workers[i].c_str(), i); if (!run_inproc) { - servers[i].stub = WorkerService::NewStub(grpc::CreateChannel( - workers[i], GetCredentialsProvider()->GetChannelCredentials( - GetCredType(workers[i], per_worker_credential_types, - credential_type), - &channel_args))); + servers[i].stub = WorkerService::NewStub(grpc::CreateTestChannel( + workers[i], + GetCredType(workers[i], per_worker_credential_types, credential_type), + nullptr /* call creds */, {} /* interceptor creators */)); } else { servers[i].stub = WorkerService::NewStub( local_workers[i]->InProcessChannel(channel_args)); @@ -373,11 +372,10 @@ std::unique_ptr RunScenario( gpr_log(GPR_INFO, "Starting client on %s (worker #%" PRIuPTR ")", worker.c_str(), i + num_servers); if (!run_inproc) { - clients[i].stub = WorkerService::NewStub(grpc::CreateChannel( + clients[i].stub = WorkerService::NewStub(grpc::CreateTestChannel( worker, - GetCredentialsProvider()->GetChannelCredentials( - GetCredType(worker, per_worker_credential_types, credential_type), - &channel_args))); + GetCredType(worker, per_worker_credential_types, credential_type), + nullptr /* call creds */, {} /* interceptor creators */)); } else { clients[i].stub = WorkerService::NewStub( local_workers[i + num_servers]->InProcessChannel(channel_args)); @@ -588,13 +586,11 @@ bool RunQuit( return false; } - ChannelArguments channel_args; for (size_t i = 0; i < workers.size(); i++) { - auto stub = WorkerService::NewStub(grpc::CreateChannel( - workers[i], GetCredentialsProvider()->GetChannelCredentials( - GetCredType(workers[i], per_worker_credential_types, - credential_type), - &channel_args))); + auto stub = WorkerService::NewStub(grpc::CreateTestChannel( + workers[i], + GetCredType(workers[i], per_worker_credential_types, credential_type), + nullptr /* call creds */, {} /* interceptor creators */)); Void dummy; grpc::ClientContext ctx; 
ctx.set_wait_for_ready(true); diff --git a/test/cpp/util/create_test_channel.cc b/test/cpp/util/create_test_channel.cc index a46a39c87c1..c4ba24749a6 100644 --- a/test/cpp/util/create_test_channel.cc +++ b/test/cpp/util/create_test_channel.cc @@ -18,12 +18,21 @@ #include "test/cpp/util/create_test_channel.h" +#include + #include #include #include #include "test/cpp/util/test_credentials_provider.h" +DEFINE_string( + grpc_test_use_grpclb_with_child_policy, "", + "If non-empty, set a static service config on channels created by " + "grpc::CreateTestChannel, that configures the grpclb LB policy " + "with a child policy being the value of this flag (e.g. round_robin " + "or pick_first)."); + namespace grpc { namespace { @@ -49,6 +58,16 @@ void AddProdSslType() { new SslCredentialProvider)); } +void MaybeSetCustomChannelArgs(grpc::ChannelArguments* args) { + if (FLAGS_grpc_test_use_grpclb_with_child_policy.size() > 0) { + args->SetString("grpc.service_config", + "{\"loadBalancingConfig\":[{\"grpclb\":{\"childPolicy\":[{" + "\"" + + FLAGS_grpc_test_use_grpclb_with_child_policy + + "\":{}}]}}]}"); + } +} + } // namespace // When cred_type is 'ssl', if server is empty, override_hostname is used to @@ -111,6 +130,7 @@ std::shared_ptr CreateTestChannel( const grpc::string& server, const grpc::string& credential_type, const std::shared_ptr& creds) { ChannelArguments channel_args; + MaybeSetCustomChannelArgs(&channel_args); std::shared_ptr channel_creds = testing::GetCredentialsProvider()->GetChannelCredentials(credential_type, &channel_args); @@ -129,14 +149,15 @@ std::shared_ptr CreateTestChannel( std::unique_ptr> interceptor_creators) { ChannelArguments channel_args(args); + MaybeSetCustomChannelArgs(&channel_args); std::shared_ptr channel_creds; if (cred_type.empty()) { if (interceptor_creators.empty()) { return ::grpc::CreateCustomChannel(server, InsecureChannelCredentials(), - args); + channel_args); } else { return experimental::CreateCustomChannelWithInterceptors( - server, InsecureChannelCredentials(), args, + server, InsecureChannelCredentials(), channel_args, std::move(interceptor_creators)); } } else if (cred_type == testing::kTlsCredentialsType) { // cred_type == "ssl" @@ -173,10 +194,10 @@ std::shared_ptr CreateTestChannel( GPR_ASSERT(channel_creds != nullptr); if (interceptor_creators.empty()) { - return ::grpc::CreateCustomChannel(server, channel_creds, args); + return ::grpc::CreateCustomChannel(server, channel_creds, channel_args); } else { return experimental::CreateCustomChannelWithInterceptors( - server, channel_creds, args, std::move(interceptor_creators)); + server, channel_creds, channel_args, std::move(interceptor_creators)); } } } @@ -217,6 +238,7 @@ std::shared_ptr CreateTestChannel( std::unique_ptr> interceptor_creators) { ChannelArguments channel_args; + MaybeSetCustomChannelArgs(&channel_args); std::shared_ptr channel_creds = testing::GetCredentialsProvider()->GetChannelCredentials(credential_type, &channel_args); diff --git a/tools/bazel.rc b/tools/bazel.rc index 6dbcc273223..b27d9cb0675 100644 --- a/tools/bazel.rc +++ b/tools/bazel.rc @@ -1,7 +1,4 @@ # bazelrc file -# bazel >= 0.18 looks for %workspace%/.bazelrc (which redirects here) -# Older bazel versions look for %workspace%/tools/bazel.rc (this file) -# See https://github.com/bazelbuild/bazel/issues/6319 build --client_env=CC=clang build --copt=-DGRPC_BAZEL_BUILD diff --git a/tools/buildgen/generate_build_additions.sh b/tools/buildgen/generate_build_additions.sh index 01d839284fc..e873789e9a9 100755 --- 
a/tools/buildgen/generate_build_additions.sh +++ b/tools/buildgen/generate_build_additions.sh @@ -23,7 +23,7 @@ gen_build_yaml_dirs=" \ src/upb \ src/zlib \ src/c-ares \ - test/core/end2end \ + test/core/end2end \ test/cpp/naming \ tools/run_tests/lb_interop_tests" diff --git a/tools/distrib/check_include_guards.py b/tools/distrib/check_include_guards.py index bc774227fff..2983441176d 100755 --- a/tools/distrib/check_include_guards.py +++ b/tools/distrib/check_include_guards.py @@ -44,7 +44,7 @@ class GuardValidator(object): self.ifndef_re = re.compile(r'#ifndef ([A-Z][A-Z_1-9]*)') self.define_re = re.compile(r'#define ([A-Z][A-Z_1-9]*)') self.endif_c_re = re.compile( - r'#endif /\* ([A-Z][A-Z_1-9]*) (?:\\ *\n *)?\*/') + r'#endif /\* (?: *\\\n *)?([A-Z][A-Z_1-9]*) (?:\\\n *)?\*/$') self.endif_cpp_re = re.compile(r'#endif // ([A-Z][A-Z_1-9]*)') self.failed = False @@ -122,7 +122,7 @@ class GuardValidator(object): # Is there a properly commented #endif? endif_re = self.endif_cpp_re if cpp_header else self.endif_c_re flines = fcontents.rstrip().splitlines() - match = endif_re.search('\n'.join(flines[-2:])) + match = endif_re.search('\n'.join(flines[-3:])) if not match: # No endif. Check if we have the last line as just '#endif' and if so # replace it with a properly commented one. diff --git a/tools/dockerfile/interoptest/grpc_interop_dart/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_dart/Dockerfile index 43d8a60daea..dd9b8e55b0c 100644 --- a/tools/dockerfile/interoptest/grpc_interop_dart/Dockerfile +++ b/tools/dockerfile/interoptest/grpc_interop_dart/Dockerfile @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM google/dart:2.3 - -# Upgrade Dart to version 2. -RUN apt-get update && apt-get upgrade -y dart +FROM google/dart:2.7 # Define the default command. 
CMD ["bash"] diff --git a/tools/dockerfile/test/python_stretch_default_x64/Dockerfile b/tools/dockerfile/test/python_stretch_default_x64/Dockerfile index 9a1d6c09deb..713fd6fe230 100644 --- a/tools/dockerfile/test/python_stretch_default_x64/Dockerfile +++ b/tools/dockerfile/test/python_stretch_default_x64/Dockerfile @@ -84,6 +84,29 @@ RUN cd /tmp && \ RUN python3.6 -m ensurepip && \ python3.6 -m pip install coverage +#================= +# Compile CPython 3.8.0b4 from source + +RUN apt-get update && apt-get install -y zlib1g-dev libssl-dev +RUN apt-get update && apt-get install -y jq build-essential libffi-dev + +RUN cd /tmp && \ + wget -q https://www.python.org/ftp/python/3.8.0/Python-3.8.0b4.tgz && \ + tar xzvf Python-3.8.0b4.tgz && \ + cd Python-3.8.0b4 && \ + ./configure && \ + make install + +RUN cd /tmp && \ + echo "b8f4f897df967014ddb42033b90c3058 Python-3.8.0b4.tgz" > checksum.md5 && \ + md5sum -c checksum.md5 + +RUN python3.8 -m ensurepip && \ + python3.8 -m pip install coverage + + +RUN apt-get update && apt-get install -y python3.5 python3.5-dev +RUN curl https://bootstrap.pypa.io/get-pip.py | python3.5 RUN apt-get update && apt-get -t buster install -y python3.7 python3-all-dev RUN curl https://bootstrap.pypa.io/get-pip.py | python3.7 diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal index 5c5d1c9fac5..60e99d7b5a0 100644 --- a/tools/doxygen/Doxyfile.c++.internal +++ b/tools/doxygen/Doxyfile.c++.internal @@ -1091,12 +1091,16 @@ src/core/ext/filters/client_channel/http_proxy.cc \ src/core/ext/filters/client_channel/http_proxy.h \ src/core/ext/filters/client_channel/lb_policy.cc \ src/core/ext/filters/client_channel/lb_policy.h \ +src/core/ext/filters/client_channel/lb_policy/address_filtering.cc \ +src/core/ext/filters/client_channel/lb_policy/address_filtering.h \ src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc \ src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h \ src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc \ src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h \ src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc \ src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h \ +src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc \ +src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h \ src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h \ src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc \ src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc \ @@ -1104,10 +1108,13 @@ src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h \ src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc \ src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h \ src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \ +src/core/ext/filters/client_channel/lb_policy/priority/priority.cc \ src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \ src/core/ext/filters/client_channel/lb_policy/subchannel_list.h \ +src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc \ src/core/ext/filters/client_channel/lb_policy/xds/cds.cc \ -src/core/ext/filters/client_channel/lb_policy/xds/xds.cc \ +src/core/ext/filters/client_channel/lb_policy/xds/eds.cc \ +src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc \ 
src/core/ext/filters/client_channel/lb_policy/xds/xds.h \ src/core/ext/filters/client_channel/lb_policy_factory.h \ src/core/ext/filters/client_channel/lb_policy_registry.cc \ diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal index 20087420e4e..fef1fd11d4a 100644 --- a/tools/doxygen/Doxyfile.core.internal +++ b/tools/doxygen/Doxyfile.core.internal @@ -888,12 +888,16 @@ src/core/ext/filters/client_channel/http_proxy.cc \ src/core/ext/filters/client_channel/http_proxy.h \ src/core/ext/filters/client_channel/lb_policy.cc \ src/core/ext/filters/client_channel/lb_policy.h \ +src/core/ext/filters/client_channel/lb_policy/address_filtering.cc \ +src/core/ext/filters/client_channel/lb_policy/address_filtering.h \ src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc \ src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h \ src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc \ src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h \ src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc \ src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h \ +src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc \ +src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h \ src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h \ src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc \ src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc \ @@ -901,10 +905,13 @@ src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h \ src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc \ src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h \ src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc \ +src/core/ext/filters/client_channel/lb_policy/priority/priority.cc \ src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \ src/core/ext/filters/client_channel/lb_policy/subchannel_list.h \ +src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc \ src/core/ext/filters/client_channel/lb_policy/xds/cds.cc \ -src/core/ext/filters/client_channel/lb_policy/xds/xds.cc \ +src/core/ext/filters/client_channel/lb_policy/xds/eds.cc \ +src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc \ src/core/ext/filters/client_channel/lb_policy/xds/xds.h \ src/core/ext/filters/client_channel/lb_policy_factory.h \ src/core/ext/filters/client_channel/lb_policy_registry.cc \ diff --git a/tools/internal_ci/linux/grpc_xds_bazel_python_test_in_docker.sh b/tools/internal_ci/linux/grpc_xds_bazel_python_test_in_docker.sh index 0dbea2f59ee..a9a74eef377 100755 --- a/tools/internal_ci/linux/grpc_xds_bazel_python_test_in_docker.sh +++ b/tools/internal_ci/linux/grpc_xds_bazel_python_test_in_docker.sh @@ -25,6 +25,7 @@ cd /var/local/git/grpc VIRTUAL_ENV=$(mktemp -d) virtualenv "$VIRTUAL_ENV" PYTHON="$VIRTUAL_ENV"/bin/python +"$PYTHON" -m pip install --upgrade pip "$PYTHON" -m pip install --upgrade grpcio-tools google-api-python-client google-auth-httplib2 oauth2client # Prepare generated Python code. 
@@ -47,10 +48,12 @@ touch "$TOOLS_DIR"/src/proto/grpc/testing/__init__.py bazel build //src/python/grpcio_tests/tests_py3_only/interop:xds_interop_client -GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,cds_lb,xds_lb "$PYTHON" \ +GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,cds_lb,eds_lb,priority_lb,weighted_target_lb,lrs_lb "$PYTHON" \ tools/run_tests/run_xds_tests.py \ --test_case=all \ --project_id=grpc-testing \ + --source_image=projects/grpc-testing/global/images/xds-test-server \ + --path_to_server_binary=/java_server/grpc-java/interop-testing/build/install/grpc-interop-testing/bin/xds-test-server \ --gcp_suffix=$(date '+%s') \ --verbose \ --client_cmd='bazel run //src/python/grpcio_tests/tests_py3_only/interop:xds_interop_client -- --server=xds-experimental:///{server_uri} --stats_port={stats_port} --qps={qps} --verbose' diff --git a/tools/internal_ci/linux/grpc_xds_bazel_test_in_docker.sh b/tools/internal_ci/linux/grpc_xds_bazel_test_in_docker.sh index 17380860f21..943ba2b1981 100755 --- a/tools/internal_ci/linux/grpc_xds_bazel_test_in_docker.sh +++ b/tools/internal_ci/linux/grpc_xds_bazel_test_in_docker.sh @@ -25,6 +25,7 @@ cd /var/local/git/grpc VIRTUAL_ENV=$(mktemp -d) virtualenv "$VIRTUAL_ENV" PYTHON="$VIRTUAL_ENV"/bin/python +"$PYTHON" -m pip install --upgrade pip "$PYTHON" -m pip install --upgrade grpcio grpcio-tools google-api-python-client google-auth-httplib2 oauth2client # Prepare generated Python code. @@ -47,10 +48,12 @@ touch "$TOOLS_DIR"/src/proto/grpc/testing/__init__.py bazel build test/cpp/interop:xds_interop_client -GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,cds_lb,xds_lb "$PYTHON" \ +GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,cds_lb,eds_lb,priority_lb,weighted_target_lb,lrs_lb "$PYTHON" \ tools/run_tests/run_xds_tests.py \ --test_case=all \ --project_id=grpc-testing \ + --source_image=projects/grpc-testing/global/images/xds-test-server \ + --path_to_server_binary=/java_server/grpc-java/interop-testing/build/install/grpc-interop-testing/bin/xds-test-server \ --gcp_suffix=$(date '+%s') \ --verbose \ --client_cmd='bazel-bin/test/cpp/interop/xds_interop_client --server=xds-experimental:///{server_uri} --stats_port={stats_port} --qps={qps}' diff --git a/tools/remote_build/README.md b/tools/remote_build/README.md index 2d1a629b0eb..849ceb4fec3 100644 --- a/tools/remote_build/README.md +++ b/tools/remote_build/README.md @@ -17,30 +17,55 @@ and tests run by Kokoro CI. ## Running remote build manually from dev workstation -Run from repository root (opt, dbg): +IMPORTANT: The OS from which you run the bazel command always needs to match your desired build & execution platform. If you want to run tests on linux, you need to run bazel from a linux machine; to execute tests on windows, you need to be on windows; etc. If you don't follow this guideline, the build might still appear to work, but you'll get nonsensical results (e.g. tests configured as if for mac, but actually running on linux). + +### Linux + +For `opt` or `dbg` run this command: ``` # manual run of bazel tests remotely on Foundry bazel --bazelrc=tools/remote_build/manual.bazelrc test --config=opt //test/... ``` -Sanitizer runs (asan, msan, tsan, ubsan): +This also works for sanitizer runs (`asan`, `msan`, `tsan`, `ubsan`): ``` # manual run of bazel tests remotely on Foundry with given sanitizer bazel --bazelrc=tools/remote_build/manual.bazelrc test --config=asan //test/...
``` -Run on Windows MSVC: +### Windows + ``` # manual run of bazel tests remotely on RBE Windows (must be run from Windows machine) bazel --bazelrc=tools/remote_build/windows.bazelrc test --config=windows_opt //test/... ``` -Run on MacOS (experimental for now): +NOTE: Unlike on Linux and Mac, the bazel version won't get autoselected for you, +so check that you're using the [right bazel version](https://github.com/grpc/grpc/blob/master/tools/bazel). + +### MacOS + +There is no such thing as a Mac RBE cluster, so a real remote build on Macs is currently impossible. +The following setup will build and run tests on your local mac machine, but will give +you the RBE-like look & feel (e.g. a results link will be generated and some extra configuration will +be used). + ``` # manual run of bazel tests on Mac (must be run from Mac machine) # NOTE: it's not really a "remote execution", but uploads results to ResultStore bazel --bazelrc=tools/remote_build/mac.bazelrc test --config=opt //test/... ``` +NOTE: Because this is essentially a local run, you'll need to start the port server first (`tools/run_tests/start_port_server.py`). + +## Running local builds with bazel + +On all platforms, you can generally still use bazel builds & tests locally without any extra settings, but you might need to +start the port server first (`tools/run_tests/start_port_server.py`) to be able to run the tests locally. + +E.g.: `bazel test --config=opt //test/...` + +## Bazel command line options + Available command line options can be found in [Bazel command line reference](https://docs.bazel.build/versions/master/command-line-reference.html) diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json index 16ec891f747..b09efd61926 100644 --- a/tools/run_tests/generated/tests.json +++ b/tools/run_tests/generated/tests.json @@ -785,30 +785,6 @@ ], "uses_polling": true }, - { - "args": [], - "benchmark": false, - "ci_platforms": [ - "linux", - "mac", - "posix", - "windows" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [], - "flaky": false, - "gtest": false, - "language": "c", - "name": "completion_queue_threading_test", - "platforms": [ - "linux", - "mac", - "posix", - "windows" - ], - "uses_polling": true - }, { "args": [], "benchmark": false, @@ -881,30 +857,6 @@ ], "uses_polling": true }, - { - "args": [], - "benchmark": false, - "ci_platforms": [ - "linux", - "mac", - "posix", - "windows" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [], - "flaky": false, - "gtest": false, - "language": "c", - "name": "control_plane_credentials_test", - "platforms": [ - "linux", - "mac", - "posix", - "windows" - ], - "uses_polling": true - }, { "args": [], "benchmark": false, @@ -3805,6 +3757,26 @@ ], "uses_polling": true }, + { + "args": [], + "benchmark": true, + "ci_platforms": [ + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "gtest": false, + "language": "c++", + "name": "bm_fullstack_streaming_pump", + "platforms": [ + "linux", + "posix" + ], + "uses_polling": true + }, { "args": [], "benchmark": true, @@ -4125,28 +4097,6 @@ ], "uses_polling": true }, - { - "args": [], - "benchmark": false, - "ci_platforms": [ - "linux", - "mac", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [], - "flaky": false, - "gtest": true, - "language": "c++", - "name": "client_channel_stress_test", - "platforms": [ - "linux", - "mac", - "posix" - ], - "uses_polling": true - }, { "args": [],
"benchmark": false, diff --git a/tools/run_tests/helper_scripts/build_python.sh b/tools/run_tests/helper_scripts/build_python.sh index 670b5fc69d8..e79a8896092 100755 --- a/tools/run_tests/helper_scripts/build_python.sh +++ b/tools/run_tests/helper_scripts/build_python.sh @@ -165,24 +165,33 @@ pip_install_dir() { cd "$PWD" } +# On library/version/platforms combo that do not have a binary +# published, we may end up building a dependency from source. In that +# case, several of our build environment variables may disrupt the +# third-party build process. This function pipes through only the +# minimal environment necessary. +pip_install() { + /usr/bin/env -i PATH="$PATH" "$VENV_PYTHON" -m pip install "$@" +} + case "$VENV" in *py36_gevent*) # TODO(https://github.com/grpc/grpc/issues/15411) unpin this - $VENV_PYTHON -m pip install gevent==1.3.b1 + pip_install gevent==1.3.b1 ;; *gevent*) - $VENV_PYTHON -m pip install -U gevent + pip_install -U gevent ;; esac -$VENV_PYTHON -m pip install --upgrade pip==19.3.1 -$VENV_PYTHON -m pip install --upgrade setuptools -$VENV_PYTHON -m pip install --upgrade cython -$VENV_PYTHON -m pip install --upgrade six enum34 protobuf +pip_install --upgrade pip==19.3.1 +pip_install --upgrade setuptools +pip_install --upgrade cython +pip_install --upgrade six enum34 protobuf if [ "$("$VENV_PYTHON" -c "import sys; print(sys.version_info[0])")" == "2" ] then - $VENV_PYTHON -m pip install futures + pip_install futures fi pip_install_dir "$ROOT" @@ -214,9 +223,9 @@ pip_install_dir "$ROOT/src/python/grpcio_status" pip_install_dir "$ROOT/src/python/grpcio_testing" # Build/install tests -$VENV_PYTHON -m pip install coverage==4.4 oauth2client==4.1.0 \ - google-auth==1.0.0 requests==2.14.2 \ - googleapis-common-protos==1.5.5 +pip_install coverage==4.4 oauth2client==4.1.0 \ + google-auth==1.0.0 requests==2.14.2 \ + googleapis-common-protos==1.5.5 $VENV_PYTHON "$ROOT/src/python/grpcio_tests/setup.py" preprocess $VENV_PYTHON "$ROOT/src/python/grpcio_tests/setup.py" build_package_protos pip_install_dir "$ROOT/src/python/grpcio_tests" diff --git a/tools/run_tests/helper_scripts/prep_xds.sh b/tools/run_tests/helper_scripts/prep_xds.sh index 9c3a67386ea..ab15e2eb1c8 100755 --- a/tools/run_tests/helper_scripts/prep_xds.sh +++ b/tools/run_tests/helper_scripts/prep_xds.sh @@ -19,6 +19,7 @@ set -ex cd "$(dirname "$0")/../../.." sudo apt-get install -y python3-pip +sudo python3 -m pip install --upgrade pip sudo python3 -m pip install grpcio grpcio-tools google-api-python-client google-auth-httplib2 oauth2client # Prepare generated Python code. diff --git a/tools/run_tests/helper_scripts/run_grpc-node.sh b/tools/run_tests/helper_scripts/run_grpc-node.sh index d14753a4d54..f54d69e9a32 100755 --- a/tools/run_tests/helper_scripts/run_grpc-node.sh +++ b/tools/run_tests/helper_scripts/run_grpc-node.sh @@ -27,4 +27,7 @@ rm -rf ./../grpc-node git clone --recursive https://github.com/grpc/grpc-node ./../grpc-node cd ./../grpc-node -./test-grpc-submodule.sh "$CURRENT_COMMIT" +echo "TODO(jtattermusch): Skipping grpc-node's ./test-grpc-submodule.sh $CURRENT_COMMIT" +echo "because it currently doesn't provide any useful signal." 
+echo "See b/152833238" +#./test-grpc-submodule.sh "$CURRENT_COMMIT" diff --git a/tools/run_tests/python_utils/upload_rbe_results.py b/tools/run_tests/python_utils/upload_rbe_results.py index f5bf3ccc098..8aedd72afbf 100755 --- a/tools/run_tests/python_utils/upload_rbe_results.py +++ b/tools/run_tests/python_utils/upload_rbe_results.py @@ -37,6 +37,7 @@ _RESULTS_SCHEMA = [ ('build_id', 'INTEGER', 'Build ID of Kokoro job'), ('build_url', 'STRING', 'URL of Kokoro build'), ('test_target', 'STRING', 'Bazel target path'), + ('test_class_name', 'STRING', 'Name of test class'), ('test_case', 'STRING', 'Name of test case'), ('result', 'STRING', 'Test or build result'), ('timestamp', 'TIMESTAMP', 'Timestamp of test run'), @@ -241,6 +242,8 @@ if __name__ == "__main__": % invocation_id, 'test_target': action['id']['targetId'], + 'test_class_name': + test_case['testCase'].get('className', ''), 'test_case': test_case['testCase']['caseName'], 'result': @@ -266,6 +269,8 @@ if __name__ == "__main__": % invocation_id, 'test_target': action['id']['targetId'], + 'test_class_name': + 'N/A', 'test_case': 'N/A', 'result': diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py index a3d255110f0..b1c56762bd1 100755 --- a/tools/run_tests/run_tests.py +++ b/tools/run_tests/run_tests.py @@ -866,9 +866,19 @@ class PythonLanguage(object): else: if args.iomgr_platform == 'asyncio': return (python36_config,) + elif os.uname()[0] == 'Darwin': + # NOTE(rbellevi): Testing takes significantly longer on + # MacOS, so we restrict the number of interpreter versions + # tested. + return ( + python27_config, + python36_config, + python37_config, + ) else: return ( python27_config, + python35_config, python36_config, python37_config, ) diff --git a/tools/run_tests/run_xds_tests.py b/tools/run_tests/run_xds_tests.py index 1deac8db5a5..762743e0993 100755 --- a/tools/run_tests/run_xds_tests.py +++ b/tools/run_tests/run_xds_tests.py @@ -39,7 +39,9 @@ logger = logging.getLogger() console_handler = logging.StreamHandler() formatter = logging.Formatter(fmt='%(asctime)s: %(levelname)-8s %(message)s') console_handler.setFormatter(formatter) +logger.handlers = [] logger.addHandler(console_handler) +logger.setLevel(logging.WARNING) _TEST_CASES = [ 'backends_restart', @@ -84,12 +86,17 @@ argp.add_argument( type=parse_test_cases, help='Comma-separated list of test cases to run, or \'all\' to run every ' 'test. Available tests: %s' % ' '.join(_TEST_CASES)) +argp.add_argument( + '--bootstrap_file', + default='', + help='File to reference via GRPC_XDS_BOOTSTRAP. Disables built-in ' + 'bootstrap generation') argp.add_argument( '--client_cmd', default=None, - help='Command to launch xDS test client. This script will fill in ' - '{server_uri}, {stats_port} and {qps} parameters using str.format(), and ' - 'generate the GRPC_XDS_BOOTSTRAP file.') + help='Command to launch xDS test client. {server_uri}, {stats_port} and ' + '{qps} references will be replaced using str.format(). 
GRPC_XDS_BOOTSTRAP ' + 'will be set for the command') argp.add_argument('--zone', default='us-central1-a') argp.add_argument('--secondary_zone', default='us-west1-b', @@ -115,20 +122,22 @@ argp.add_argument( help= 'If provided, uses this file instead of retrieving via the GCP discovery ' 'API') +argp.add_argument( + '--alpha_compute_discovery_document', + default=None, + type=str, + help='If provided, uses this file instead of retrieving via the alpha GCP ' + 'discovery API') argp.add_argument('--network', default='global/networks/default', help='GCP network to use') argp.add_argument('--service_port_range', - default='80', + default='8080:8110', type=parse_port_range, help='Listening port for created gRPC backends. Specified as ' 'either a single int or as a range in the format min:max, in ' 'which case an available port p will be chosen s.t. min <= p ' '<= max') -argp.add_argument('--forwarding_rule_ip_prefix', - default='172.16.0.', - help='If set, an available IP with this prefix followed by ' - '0-255 will be used for the generated forwarding rule.') argp.add_argument( '--stats_port', default=8079, @@ -140,9 +149,21 @@ argp.add_argument('--xds_server', argp.add_argument('--source_image', default='projects/debian-cloud/global/images/family/debian-9', help='Source image for VMs created during the test') +argp.add_argument('--path_to_server_binary', + default=None, + type=str, + help='If set, the server binary must already be pre-built on ' + 'the specified source image') argp.add_argument('--machine_type', default='e2-standard-2', help='Machine type for VMs created during the test') +argp.add_argument( + '--instance_group_size', + default=2, + type=int, + help='Number of VMs to create per instance group. Certain test cases (e.g., ' + 'round_robin) may not give meaningful results if this is set to a value ' + 'less than 2.') argp.add_argument( '--tolerate_gcp_errors', default=False, @@ -155,6 +176,16 @@ argp.add_argument('--verbose', help='verbose log output', default=False, action='store_true') +# TODO(ericgribkoff) Remove this param once the sponge-formatted log files are +# visible in all test environments. 
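The `--service_port_range` default above moves from the single port 80 to the range `8080:8110`, and the script later shuffles `list(args.service_port_range)` while hunting for a usable forwarding-rule port. The real `parse_port_range` is not shown in this diff; a plausible helper consistent with that usage might look like the following hypothetical reconstruction:

```
# Hypothetical parse_port_range-style helper: accepts a single port
# ("8080") or an inclusive "min:max" range ("8080:8110") and returns the
# list of candidate ports. Not the script's actual implementation.
def parse_port_range(port_arg):
    if ':' in port_arg:
        low, high = (int(part) for part in port_arg.split(':'))
    else:
        low = high = int(port_arg)
    if low > high:
        raise ValueError('invalid port range: %s' % port_arg)
    return list(range(low, high + 1))


print(parse_port_range('8080:8110')[:3])  # [8080, 8081, 8082]
```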
+argp.add_argument('--log_client_output', + help='Log captured client output', + default=False, + action='store_true') +argp.add_argument('--only_stable_gcp_apis', + help='Do not use alpha compute APIs', + default=False, + action='store_true') args = argp.parse_args() if args.verbose: @@ -163,7 +194,7 @@ if args.verbose: _DEFAULT_SERVICE_PORT = 80 _WAIT_FOR_BACKEND_SEC = args.wait_for_backend_sec _WAIT_FOR_OPERATION_SEC = 300 -_INSTANCE_GROUP_SIZE = 2 +_INSTANCE_GROUP_SIZE = args.instance_group_size _NUM_TEST_RPCS = 10 * args.qps _WAIT_FOR_STATS_SEC = 180 _WAIT_FOR_URL_MAP_PATCH_SEC = 300 @@ -188,6 +219,12 @@ _BOOTSTRAP_TEMPLATE = """ ] }}] }}""" % (args.network.split('/')[-1], args.zone, args.xds_server) +_TESTS_USING_SECONDARY_IG = [ + 'secondary_locality_gets_no_requests_on_partial_primary_failure', + 'secondary_locality_gets_requests_on_primary_failure' +] +_USE_SECONDARY_IG = any( + [t in args.test_case for t in _TESTS_USING_SECONDARY_IG]) _PATH_MATCHER_NAME = 'path-matcher' _BASE_TEMPLATE_NAME = 'test-template' _BASE_INSTANCE_GROUP_NAME = 'test-ig' @@ -307,9 +344,9 @@ def test_change_backend_service(gcp, original_backend_service, instance_group, _WAIT_FOR_STATS_SEC) try: patch_url_map_backend_service(gcp, alternate_backend_service) - stats = get_client_stats(_NUM_TEST_RPCS, _WAIT_FOR_STATS_SEC) - if stats.num_failures > 0: - raise Exception('Unexpected failure: %s', stats) + # TODO(ericgribkoff) Verify no RPCs fail during backend switch. + # Currently TD may briefly send an update with no localities if adding + # the MIG to the backend service above races with the URL map patch. wait_until_all_rpcs_go_to_given_backends(alternate_backend_instances, _WAIT_FOR_URL_MAP_PATCH_SEC) finally: @@ -458,7 +495,27 @@ def test_secondary_locality_gets_requests_on_primary_failure( patch_backend_instances(gcp, backend_service, [primary_instance_group]) -def create_instance_template(gcp, name, network, source_image, machine_type): +def get_startup_script(path_to_server_binary, service_port): + if path_to_server_binary: + return "nohup %s --port=%d 1>/dev/null &" % (path_to_server_binary, + service_port) + else: + return """#!/bin/bash +sudo apt update +sudo apt install -y git default-jdk +mkdir java_server +pushd java_server +git clone https://github.com/grpc/grpc-java.git +pushd grpc-java +pushd interop-testing +../gradlew installDist -x test -PskipCodegen=true -PskipAndroid=true + +nohup build/install/grpc-interop-testing/bin/xds-test-server \ + --port=%d 1>/dev/null &""" % service_port + + +def create_instance_template(gcp, name, network, source_image, machine_type, + startup_script): config = { 'name': name, 'properties': { @@ -484,21 +541,8 @@ def create_instance_template(gcp, name, network, source_image, machine_type): }], 'metadata': { 'items': [{ - 'key': - 'startup-script', - 'value': - """#!/bin/bash -sudo apt update -sudo apt install -y git default-jdk -mkdir java_server -pushd java_server -git clone https://github.com/grpc/grpc-java.git -pushd grpc-java -pushd interop-testing -../gradlew installDist -x test -PskipCodegen=true -PskipAndroid=true - -nohup build/install/grpc-interop-testing/bin/xds-test-server --port=%d 1>/dev/null &""" - % gcp.service_port + 'key': 'startup-script', + 'value': startup_script }] } } @@ -537,16 +581,27 @@ def add_instance_group(gcp, zone, name, size): def create_health_check(gcp, name): - config = { - 'name': name, - 'type': 'TCP', - 'tcpHealthCheck': { - 'portName': 'grpc' + if gcp.alpha_compute: + config = { + 'name': name, + 'type': 'GRPC', + 
'grpcHealthCheck': { + 'portSpecification': 'USE_SERVING_PORT' + } } - } + compute_to_use = gcp.alpha_compute + else: + config = { + 'name': name, + 'type': 'TCP', + 'tcpHealthCheck': { + 'portName': 'grpc' + } + } + compute_to_use = gcp.compute logger.debug('Sending GCP request with body=%s', config) - result = gcp.compute.healthChecks().insert(project=gcp.project, - body=config).execute() + result = compute_to_use.healthChecks().insert(project=gcp.project, + body=config).execute() wait_for_global_operation(gcp, result['name']) gcp.health_check = GcpResource(config['name'], result['targetLink']) @@ -570,16 +625,22 @@ def create_health_check_firewall_rule(gcp, name): def add_backend_service(gcp, name): + if gcp.alpha_compute: + protocol = 'GRPC' + compute_to_use = gcp.alpha_compute + else: + protocol = 'HTTP2' + compute_to_use = gcp.compute config = { 'name': name, 'loadBalancingScheme': 'INTERNAL_SELF_MANAGED', 'healthChecks': [gcp.health_check.url], 'portName': 'grpc', - 'protocol': 'HTTP2' + 'protocol': protocol } logger.debug('Sending GCP request with body=%s', config) - result = gcp.compute.backendServices().insert(project=gcp.project, - body=config).execute() + result = compute_to_use.backendServices().insert(project=gcp.project, + body=config).execute() wait_for_global_operation(gcp, result['name']) backend_service = GcpResource(config['name'], result['targetLink']) gcp.backend_services.append(backend_service) @@ -606,33 +667,69 @@ def create_url_map(gcp, name, backend_service, host_name): gcp.url_map = GcpResource(config['name'], result['targetLink']) -def create_target_http_proxy(gcp, name): +def patch_url_map_host_rule_with_port(gcp, name, backend_service, host_name): config = { - 'name': name, - 'url_map': gcp.url_map.url, + 'hostRules': [{ + 'hosts': ['%s:%d' % (host_name, gcp.service_port)], + 'pathMatcher': _PATH_MATCHER_NAME + }] } logger.debug('Sending GCP request with body=%s', config) - result = gcp.compute.targetHttpProxies().insert(project=gcp.project, - body=config).execute() + result = gcp.compute.urlMaps().patch(project=gcp.project, + urlMap=name, + body=config).execute() wait_for_global_operation(gcp, result['name']) - gcp.target_http_proxy = GcpResource(config['name'], result['targetLink']) -def create_global_forwarding_rule(gcp, name, ip, port): - config = { - 'name': name, - 'loadBalancingScheme': 'INTERNAL_SELF_MANAGED', - 'portRange': str(port), - 'IPAddress': ip, - 'network': args.network, - 'target': gcp.target_http_proxy.url, - } - logger.debug('Sending GCP request with body=%s', config) - result = gcp.compute.globalForwardingRules().insert(project=gcp.project, +def create_target_proxy(gcp, name): + if gcp.alpha_compute: + config = { + 'name': name, + 'url_map': gcp.url_map.url, + 'validate_for_proxyless': True, + } + logger.debug('Sending GCP request with body=%s', config) + result = gcp.alpha_compute.targetGrpcProxies().insert( + project=gcp.project, body=config).execute() + else: + config = { + 'name': name, + 'url_map': gcp.url_map.url, + } + logger.debug('Sending GCP request with body=%s', config) + result = gcp.compute.targetHttpProxies().insert(project=gcp.project, body=config).execute() wait_for_global_operation(gcp, result['name']) - gcp.global_forwarding_rule = GcpResource(config['name'], - result['targetLink']) + gcp.target_proxy = GcpResource(config['name'], result['targetLink']) + + +def create_global_forwarding_rule(gcp, name, potential_ports): + if gcp.alpha_compute: + compute_to_use = gcp.alpha_compute + else: + compute_to_use = gcp.compute + 
for port in potential_ports: + try: + config = { + 'name': name, + 'loadBalancingScheme': 'INTERNAL_SELF_MANAGED', + 'portRange': str(port), + 'IPAddress': '0.0.0.0', + 'network': args.network, + 'target': gcp.target_proxy.url, + } + logger.debug('Sending GCP request with body=%s', config) + result = compute_to_use.globalForwardingRules().insert( + project=gcp.project, body=config).execute() + wait_for_global_operation(gcp, result['name']) + gcp.global_forwarding_rule = GcpResource(config['name'], + result['targetLink']) + gcp.service_port = port + return + except googleapiclient.errors.HttpError as http_error: + logger.warning( + 'Got error %s when attempting to create forwarding rule to ' + '0.0.0.0:%d. Retrying with another port.' % (http_error, port)) def delete_global_forwarding_rule(gcp): @@ -645,11 +742,16 @@ def delete_global_forwarding_rule(gcp): logger.info('Delete failed: %s', http_error) -def delete_target_http_proxy(gcp): +def delete_target_proxy(gcp): try: - result = gcp.compute.targetHttpProxies().delete( - project=gcp.project, - targetHttpProxy=gcp.target_http_proxy.name).execute() + if gcp.alpha_compute: + result = gcp.alpha_compute.targetGrpcProxies().delete( + project=gcp.project, + targetGrpcProxy=gcp.target_proxy.name).execute() + else: + result = gcp.compute.targetHttpProxies().delete( + project=gcp.project, + targetHttpProxy=gcp.target_proxy.name).execute() wait_for_global_operation(gcp, result['name']) except googleapiclient.errors.HttpError as http_error: logger.info('Delete failed: %s', http_error) @@ -723,6 +825,10 @@ def patch_backend_instances(gcp, backend_service, instance_groups, balancing_mode='UTILIZATION'): + if gcp.alpha_compute: + compute_to_use = gcp.alpha_compute + else: + compute_to_use = gcp.compute config = { 'backends': [{ 'group': instance_group.url, @@ -731,10 +837,12 @@ def patch_backend_instances(gcp, } for instance_group in instance_groups], } logger.debug('Sending GCP request with body=%s', config) - result = gcp.compute.backendServices().patch( + result = compute_to_use.backendServices().patch( project=gcp.project, backendService=backend_service.name, body=config).execute() - wait_for_global_operation(gcp, result['name']) + wait_for_global_operation(gcp, + result['name'], + timeout_sec=_WAIT_FOR_BACKEND_SEC) def resize_instance_group(gcp, @@ -854,26 +962,11 @@ def get_instance_names(gcp, instance_group): return instance_names -def start_xds_client(cmd): - bootstrap_path = None - with tempfile.NamedTemporaryFile(delete=False) as bootstrap_file: - bootstrap_file.write( - _BOOTSTRAP_TEMPLATE.format( - node_id=socket.gethostname()).encode('utf-8')) - bootstrap_path = bootstrap_file.name - - client_process = subprocess.Popen(shlex.split(cmd), - env=dict( - os.environ, - GRPC_XDS_BOOTSTRAP=bootstrap_path)) - return client_process - - def clean_up(gcp): if gcp.global_forwarding_rule: delete_global_forwarding_rule(gcp) - if gcp.target_http_proxy: - delete_target_http_proxy(gcp) + if gcp.target_proxy: + delete_target_proxy(gcp) if gcp.url_map: delete_url_map(gcp) delete_backend_services(gcp) @@ -903,41 +996,50 @@ class GcpResource(object): class GcpState(object): - def __init__(self, compute, project): + def __init__(self, compute, alpha_compute, project): self.compute = compute + self.alpha_compute = alpha_compute self.project = project self.health_check = None self.health_check_firewall_rule = None self.backend_services = [] self.url_map = None - self.target_http_proxy = None + self.target_proxy = None self.global_forwarding_rule = None 
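The reworked `create_global_forwarding_rule` above drops the old IP-prefix scan: it always binds `0.0.0.0`, walks a shuffled list of candidate ports, and retries on `HttpError` until an insert succeeds, recording the winning port in `gcp.service_port`. A stripped-down sketch of that retry-over-candidates shape, with stand-ins for the GCP client and its error type:

```
# Sketch of the try-candidate-ports-until-one-sticks shape used for the
# forwarding rule. insert_rule stands in for the GCP insert call, and
# PortInUseError for googleapiclient.errors.HttpError.
import random


class PortInUseError(Exception):
    pass


def create_rule_on_free_port(insert_rule, potential_ports):
    candidates = list(potential_ports)
    random.shuffle(candidates)
    for port in candidates:
        try:
            insert_rule(port)
            return port  # first successful insert wins
        except PortInUseError as error:
            print('port %d rejected (%s), retrying with another' % (port, error))
    return None  # caller treats this as "no valid port found"


if __name__ == '__main__':
    taken = {8080, 8081}

    def fake_insert(port):
        if port in taken:
            raise PortInUseError('already in use')

    print(create_rule_on_free_port(fake_insert, range(8080, 8090)))
```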
self.service_port = None self.instance_template = None self.instance_groups = [] +alpha_compute = None if args.compute_discovery_document: with open(args.compute_discovery_document, 'r') as discovery_doc: compute = googleapiclient.discovery.build_from_document( discovery_doc.read()) + if not args.only_stable_gcp_apis and args.alpha_compute_discovery_document: + with open(args.alpha_compute_discovery_document, 'r') as discovery_doc: + alpha_compute = googleapiclient.discovery.build_from_document( + discovery_doc.read()) else: compute = googleapiclient.discovery.build('compute', 'v1') + if not args.only_stable_gcp_apis: + alpha_compute = googleapiclient.discovery.build('compute', 'alpha') try: - gcp = GcpState(compute, args.project_id) + gcp = GcpState(compute, alpha_compute, args.project_id) health_check_name = _BASE_HEALTH_CHECK_NAME + args.gcp_suffix firewall_name = _BASE_FIREWALL_RULE_NAME + args.gcp_suffix backend_service_name = _BASE_BACKEND_SERVICE_NAME + args.gcp_suffix alternate_backend_service_name = _BASE_BACKEND_SERVICE_NAME + '-alternate' + args.gcp_suffix url_map_name = _BASE_URL_MAP_NAME + args.gcp_suffix service_host_name = _BASE_SERVICE_HOST + args.gcp_suffix - target_http_proxy_name = _BASE_TARGET_PROXY_NAME + args.gcp_suffix + target_proxy_name = _BASE_TARGET_PROXY_NAME + args.gcp_suffix forwarding_rule_name = _BASE_FORWARDING_RULE_NAME + args.gcp_suffix - template_name = _BASE_TARGET_PROXY_NAME + args.gcp_suffix + template_name = _BASE_TEMPLATE_NAME + args.gcp_suffix instance_group_name = _BASE_INSTANCE_GROUP_NAME + args.gcp_suffix same_zone_instance_group_name = _BASE_INSTANCE_GROUP_NAME + '-same-zone' + args.gcp_suffix - secondary_zone_instance_group_name = _BASE_INSTANCE_GROUP_NAME + '-secondary-zone' + args.gcp_suffix + if _USE_SECONDARY_IG: + secondary_zone_instance_group_name = _BASE_INSTANCE_GROUP_NAME + '-secondary-zone' + args.gcp_suffix try: create_health_check(gcp, health_check_name) create_health_check_firewall_rule(gcp, firewall_name) @@ -945,41 +1047,32 @@ try: alternate_backend_service = add_backend_service( gcp, alternate_backend_service_name) create_url_map(gcp, url_map_name, backend_service, service_host_name) - create_target_http_proxy(gcp, target_http_proxy_name) + create_target_proxy(gcp, target_proxy_name) potential_service_ports = list(args.service_port_range) random.shuffle(potential_service_ports) - if args.forwarding_rule_ip_prefix == '': - potential_ips = ['0.0.0.0'] - else: - potential_ips = [ - args.forwarding_rule_ip_prefix + str(x) for x in range(256) - ] - random.shuffle(potential_ips) - for port in potential_service_ports: - for ip in potential_ips: - try: - create_global_forwarding_rule(gcp, forwarding_rule_name, ip, - port) - gcp.service_port = port - break - except googleapiclient.errors.HttpError as http_error: - logger.warning( - 'Got error %s when attempting to create forwarding rule to ' - '%s:%d. Retrying with another ip:port.' 
% - (http_error, ip, port)) + create_global_forwarding_rule(gcp, forwarding_rule_name, + potential_service_ports) if not gcp.service_port: raise Exception( 'Failed to find a valid ip:port for the forwarding rule') + if gcp.service_port != _DEFAULT_SERVICE_PORT: + patch_url_map_host_rule_with_port(gcp, url_map_name, + backend_service, + service_host_name) + startup_script = get_startup_script(args.path_to_server_binary, + gcp.service_port) create_instance_template(gcp, template_name, args.network, - args.source_image, args.machine_type) + args.source_image, args.machine_type, + startup_script) instance_group = add_instance_group(gcp, args.zone, instance_group_name, _INSTANCE_GROUP_SIZE) patch_backend_instances(gcp, backend_service, [instance_group]) same_zone_instance_group = add_instance_group( gcp, args.zone, same_zone_instance_group_name, _INSTANCE_GROUP_SIZE) - secondary_zone_instance_group = add_instance_group( - gcp, args.secondary_zone, secondary_zone_instance_group_name, - _INSTANCE_GROUP_SIZE) + if _USE_SECONDARY_IG: + secondary_zone_instance_group = add_instance_group( + gcp, args.secondary_zone, secondary_zone_instance_group_name, + _INSTANCE_GROUP_SIZE) except googleapiclient.errors.HttpError as http_error: if args.tolerate_gcp_errors: logger.warning( @@ -1020,14 +1113,16 @@ try: same_zone_instance_group_name, result['selfLink'], args.zone) gcp.instance_groups.append(same_zone_instance_group) - result = compute.instanceGroups().get( - project=args.project_id, - zone=args.secondary_zone, - instanceGroup=secondary_zone_instance_group_name).execute() - secondary_zone_instance_group = InstanceGroup( - secondary_zone_instance_group_name, result['selfLink'], - args.secondary_zone) - gcp.instance_groups.append(secondary_zone_instance_group) + if _USE_SECONDARY_IG: + result = compute.instanceGroups().get( + project=args.project_id, + zone=args.secondary_zone, + instanceGroup=secondary_zone_instance_group_name + ).execute() + secondary_zone_instance_group = InstanceGroup( + secondary_zone_instance_group_name, result['selfLink'], + args.secondary_zone) + gcp.instance_groups.append(secondary_zone_instance_group) if not gcp.health_check: result = compute.healthChecks().get( project=args.project_id, @@ -1051,11 +1146,14 @@ try: server_uri = service_host_name else: server_uri = service_host_name + ':' + str(gcp.service_port) - with tempfile.NamedTemporaryFile(delete=False) as bootstrap_file: - bootstrap_file.write( - _BOOTSTRAP_TEMPLATE.format( - node_id=socket.gethostname()).encode('utf-8')) - bootstrap_path = bootstrap_file.name + if args.bootstrap_file: + bootstrap_path = os.path.abspath(args.bootstrap_file) + else: + with tempfile.NamedTemporaryFile(delete=False) as bootstrap_file: + bootstrap_file.write( + _BOOTSTRAP_TEMPLATE.format( + node_id=socket.gethostname()).encode('utf-8')) + bootstrap_path = bootstrap_file.name client_env = dict(os.environ, GRPC_XDS_BOOTSTRAP=bootstrap_path) client_cmd = shlex.split( args.client_cmd.format(server_uri=server_uri, @@ -1063,12 +1161,15 @@ try: qps=args.qps)) test_results = {} + failed_tests = [] for test_case in args.test_case: result = jobset.JobResult() log_dir = os.path.join(_TEST_LOG_BASE_DIR, test_case) if not os.path.exists(log_dir): os.makedirs(log_dir) - test_log_file = open(os.path.join(log_dir, _SPONGE_LOG_NAME), 'w+') + test_log_filename = os.path.join(log_dir, _SPONGE_LOG_NAME) + test_log_file = open(test_log_filename, 'w+') + client_process = None try: client_process = subprocess.Popen(client_cmd, env=client_env, @@ -1106,15 +1207,22 
@@ try: result.state = 'PASSED' result.returncode = 0 except Exception as e: + logger.error('Test case %s failed: %s', test_case, e) + failed_tests.append(test_case) result.state = 'FAILED' result.message = str(e) finally: if client_process: client_process.terminate() + test_log_file.close() # Workaround for Python 3, as report_utils will invoke decode() on # result.message, which has a default value of ''. result.message = result.message.encode('UTF-8') test_results[test_case] = [result] + if args.log_client_output: + logger.info('Client output:') + with open(test_log_filename, 'r') as client_output: + logger.info(client_output.read()) if not os.path.exists(_TEST_LOG_BASE_DIR): os.makedirs(_TEST_LOG_BASE_DIR) report_utils.render_junit_xml_report(test_results, @@ -1122,6 +1230,9 @@ try: _SPONGE_XML_NAME), suite_name='xds_tests', multi_target=True) + if failed_tests: + logger.error('Test case(s) %s failed', failed_tests) + sys.exit(1) finally: if not args.keep_gcp_resources: logger.info('Cleaning up GCP resources. This may take some time.')
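The client-launch path above either reuses a caller-supplied `--bootstrap_file` or writes a temporary bootstrap file, and exports its path as `GRPC_XDS_BOOTSTRAP` when spawning the test client. A condensed sketch of that flow, assuming a placeholder bootstrap template and client command rather than the script's real values:

```
# Sketch: reuse a caller-supplied bootstrap file or write a temporary one,
# then start the client with GRPC_XDS_BOOTSTRAP set. The template and the
# command passed to launch_client are placeholders.
import os
import shlex
import socket
import subprocess
import tempfile

BOOTSTRAP_TEMPLATE = '{{"node": {{"id": "{node_id}"}}}}'


def resolve_bootstrap_path(bootstrap_file=''):
    if bootstrap_file:
        return os.path.abspath(bootstrap_file)
    with tempfile.NamedTemporaryFile(delete=False) as temp_file:
        temp_file.write(
            BOOTSTRAP_TEMPLATE.format(node_id=socket.gethostname()).encode('utf-8'))
        return temp_file.name


def launch_client(client_cmd, bootstrap_file=''):
    client_env = dict(os.environ,
                      GRPC_XDS_BOOTSTRAP=resolve_bootstrap_path(bootstrap_file))
    return subprocess.Popen(shlex.split(client_cmd), env=client_env)


if __name__ == '__main__':
    print(resolve_bootstrap_path())  # path that would be exported to the client
```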
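The test loop above also starts tracking `failed_tests` and exits with a non-zero status after the JUnit report is rendered, so a run with failing xDS cases is distinguishable from one that merely completed. A tiny sketch of that accounting, with a made-up runner in place of the real client launch and stats checks:

```
# Sketch of the failure accounting added above: keep running after a failed
# case, remember which cases failed, and exit non-zero at the end.
import sys


def run_suite(test_cases, run_case):
    failed_tests = []
    for test_case in test_cases:
        try:
            run_case(test_case)
        except Exception as error:  # the script logs the error and moves on
            print('Test case %s failed: %s' % (test_case, error))
            failed_tests.append(test_case)
    if failed_tests:
        print('Test case(s) %s failed' % failed_tests)
        sys.exit(1)


if __name__ == '__main__':
    run_suite(['ping_pong', 'round_robin'], lambda test_case: None)  # all pass
```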